diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index b35b0a4cf7fcb6..09c3b2a4c26a85 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -143,6 +143,7 @@ # Actions /.github/workflows/* @nodejs/actions +/.github/workflows/create-release-proposal.yml @nodejs/releasers /tools/actions/* @nodejs/actions # Test runner diff --git a/.github/workflows/coverage-linux-without-intl.yml b/.github/workflows/coverage-linux-without-intl.yml index ddd85fb8a4ff0e..1977eda3f97e03 100644 --- a/.github/workflows/coverage-linux-without-intl.yml +++ b/.github/workflows/coverage-linux-without-intl.yml @@ -79,7 +79,6 @@ jobs: - name: Clean tmp run: rm -rf coverage/tmp && rm -rf out - name: Upload - uses: codecov/codecov-action@b9fd7d16f6d7d1b5d2bec1a2887e65ceed900238 # v4.6.0 + uses: codecov/codecov-action@015f24e6818733317a2da2edd6290ab26238649a # v5.0.7 with: directory: ./coverage - token: ${{ secrets.CODECOV_TOKEN }} diff --git a/.github/workflows/coverage-linux.yml b/.github/workflows/coverage-linux.yml index 153504ba4280d6..164c0b540a9f45 100644 --- a/.github/workflows/coverage-linux.yml +++ b/.github/workflows/coverage-linux.yml @@ -79,7 +79,6 @@ jobs: - name: Clean tmp run: rm -rf coverage/tmp && rm -rf out - name: Upload - uses: codecov/codecov-action@b9fd7d16f6d7d1b5d2bec1a2887e65ceed900238 # v4.6.0 + uses: codecov/codecov-action@015f24e6818733317a2da2edd6290ab26238649a # v5.0.7 with: directory: ./coverage - token: ${{ secrets.CODECOV_TOKEN }} diff --git a/.github/workflows/coverage-windows.yml b/.github/workflows/coverage-windows.yml index 84feb7b09018de..fada006e321520 100644 --- a/.github/workflows/coverage-windows.yml +++ b/.github/workflows/coverage-windows.yml @@ -71,7 +71,6 @@ jobs: - name: Clean tmp run: npx rimraf ./coverage/tmp - name: Upload - uses: codecov/codecov-action@b9fd7d16f6d7d1b5d2bec1a2887e65ceed900238 # v4.6.0 + uses: codecov/codecov-action@015f24e6818733317a2da2edd6290ab26238649a # v5.0.7 with: directory: ./coverage - token: ${{ secrets.CODECOV_TOKEN }} diff --git a/.github/workflows/create-release-proposal.yml b/.github/workflows/create-release-proposal.yml new file mode 100644 index 00000000000000..d3ffa3ad49b5e2 --- /dev/null +++ b/.github/workflows/create-release-proposal.yml @@ -0,0 +1,85 @@ +# This action requires the following secrets to be set on the repository: +# GH_USER_NAME: GitHub user whose Jenkins and GitHub token are defined below +# GH_USER_TOKEN: GitHub user token, to be used by ncu and to push changes + +name: Create Release Proposal + +on: + workflow_dispatch: + inputs: + release-line: + required: true + type: number + description: 'The release line (without dots or prefix). 
e.g: 22' + release-date: + required: true + type: string + description: The release date in YYYY-MM-DD format + +concurrency: ${{ github.workflow }} + +env: + NODE_VERSION: lts/* + +permissions: + contents: write + pull-requests: write + +jobs: + releasePrepare: + env: + STAGING_BRANCH: v${{ inputs.release-line }}.x-staging + RELEASE_BRANCH: v${{ inputs.release-line }}.x + RELEASE_DATE: ${{ inputs.release-date }} + RELEASE_LINE: ${{ inputs.release-line }} + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + ref: ${{ env.STAGING_BRANCH }} + persist-credentials: false + + # Install dependencies + - name: Install Node.js + uses: actions/setup-node@39370e3970a6d050c480ffad4ff0ed4d3fdee5af # v4.1.0 + with: + node-version: ${{ env.NODE_VERSION }} + + - name: Install @node-core/utils + run: npm install -g @node-core/utils + + - name: Configure @node-core/utils + run: | + ncu-config set branch "${RELEASE_BRANCH}" + ncu-config set upstream origin + ncu-config set username "$USERNAME" + ncu-config set token "$GH_TOKEN" + ncu-config set repo "$(echo "$GITHUB_REPOSITORY" | cut -d/ -f2)" + ncu-config set owner "${GITHUB_REPOSITORY_OWNER}" + env: + USERNAME: ${{ secrets.JENKINS_USER }} + GH_TOKEN: ${{ github.token }} + + - name: Set up ghauth config (Ubuntu) + run: | + mkdir -p "${XDG_CONFIG_HOME:-~/.config}/changelog-maker" + echo '{}' | jq '{user: env.USERNAME, token: env.TOKEN}' > "${XDG_CONFIG_HOME:-~/.config}/changelog-maker/config.json" + env: + USERNAME: ${{ secrets.JENKINS_USER }} + TOKEN: ${{ github.token }} + + - name: Setup git author + run: | + git config --local user.email "github-bot@iojs.org" + git config --local user.name "Node.js GitHub Bot" + + - name: Start git node release prepare + # The curl command is to make sure we run the version of the script corresponding to the current workflow. + run: | + git update-index --assume-unchanged tools/actions/create-release.sh + curl -fsSLo tools/actions/create-release.sh https://github.com/${GITHUB_REPOSITORY}/raw/${GITHUB_SHA}/tools/actions/create-release.sh + ./tools/actions/create-release.sh "${RELEASE_DATE}" "${RELEASE_LINE}" + env: + GH_TOKEN: ${{ github.token }} + # We want the bot to push the release commit so CI runs on it.
+ BOT_TOKEN: ${{ secrets.GH_USER_TOKEN }} diff --git a/.github/workflows/lint-release-proposal.yml b/.github/workflows/lint-release-proposal.yml new file mode 100644 index 00000000000000..bc2ac2d0127865 --- /dev/null +++ b/.github/workflows/lint-release-proposal.yml @@ -0,0 +1,59 @@ +name: Linters (release proposals) + +on: + push: + branches: + - v[0-9]+.[0-9]+.[0-9]+-proposal + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +env: + PYTHON_VERSION: '3.12' + NODE_VERSION: lts/* + +permissions: + contents: read + +jobs: + lint-release-commit: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + persist-credentials: false + - name: Lint release commit title format + run: | + EXPECTED_TITLE='^[[:digit:]]{4}-[[:digit:]]{2}-[[:digit:]]{2}, Version [[:digit:]]+\.[[:digit:]]+\.[[:digit:]]+ (\(Current|'.+' \(LTS)\)$' + echo "Expected commit title format: $EXPECTED_TITLE" + COMMIT_SUBJECT="$(git --no-pager log -1 --format=%s)" + echo "Actual: $COMMIT_SUBJECT" + echo "$COMMIT_SUBJECT" | grep -q -E "$EXPECTED_TITLE" + echo "COMMIT_SUBJECT=$COMMIT_SUBJECT" >> "$GITHUB_ENV" + - name: Lint release commit message trailers + run: | + EXPECTED_TRAILER="^PR-URL: $GITHUB_SERVER_URL/$GITHUB_REPOSITORY/pull/[[:digit:]]+\$" + echo "Expected trailer format: $EXPECTED_TRAILER" + ACTUAL="$(git --no-pager log -1 --format=%b | git interpret-trailers --parse --no-divider)" + echo "Actual: $ACTUAL" + echo "$ACTUAL" | grep -E -q "$EXPECTED_TRAILER" + + PR_URL="${ACTUAL:8}" + PR_HEAD="$(gh pr view "$PR_URL" --json headRefOid -q .headRefOid)" + echo "Head of $PR_URL: $PR_HEAD" + echo "Current commit: $GITHUB_SHA" + [[ "$PR_HEAD" == "$GITHUB_SHA" ]] + env: + GH_TOKEN: ${{ github.token }} + - name: Validate CHANGELOG + id: releaser-info + run: | + EXPECTED_CHANGELOG_TITLE_INTRO="## $COMMIT_SUBJECT, @" + echo "Expected CHANGELOG section title: $EXPECTED_CHANGELOG_TITLE_INTRO" + CHANGELOG_TITLE="$(grep "$EXPECTED_CHANGELOG_TITLE_INTRO" "doc/changelogs/CHANGELOG_V${COMMIT_SUBJECT:20:2}.md")" + echo "Actual: $CHANGELOG_TITLE" + [[ "${CHANGELOG_TITLE%@*}@" == "$EXPECTED_CHANGELOG_TITLE_INTRO" ]] + - name: Verify NODE_VERSION_IS_RELEASE bit is correctly set + run: | + grep -q '^#define NODE_VERSION_IS_RELEASE 1$' src/node_version.h diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml index 6fbf46e7f3e22f..75c67cc238dbc3 100644 --- a/.github/workflows/scorecard.yml +++ b/.github/workflows/scorecard.yml @@ -33,7 +33,7 @@ jobs: steps: - name: Harden Runner - uses: step-security/harden-runner@91182cccc01eb5e619899d80e4e971d6181294a7 # v2.10.1 + uses: step-security/harden-runner@0080882f6c36860b6ba35c610c98ce87d4e2f26f # v2.10.2 with: egress-policy: audit # TODO: change to 'egress-policy: block' after couple of runs @@ -73,6 +73,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard.
- name: Upload to code-scanning - uses: github/codeql-action/upload-sarif@662472033e021d55d94146f66f6058822b0b39fd # v3.27.0 + uses: github/codeql-action/upload-sarif@f09c1c0a94de965c15400f5634aa42fac8fb8f88 # v3.27.5 with: sarif_file: results.sarif diff --git a/.github/workflows/tools.yml b/.github/workflows/tools.yml index 7cd0c496c6ca68..04c46541546ece 100644 --- a/.github/workflows/tools.yml +++ b/.github/workflows/tools.yml @@ -51,7 +51,7 @@ permissions: jobs: tools-deps-update: - if: github.repository == 'nodejs/node' + if: github.repository == 'nodejs/node' || github.event_name == 'workflow_dispatch' runs-on: ubuntu-latest strategy: fail-fast: false # Prevent other jobs from aborting if one fails diff --git a/.github/workflows/update-wpt.yml b/.github/workflows/update-wpt.yml new file mode 100644 index 00000000000000..72ec030e9d645a --- /dev/null +++ b/.github/workflows/update-wpt.yml @@ -0,0 +1,83 @@ +name: WPT update + +on: + schedule: + # Run once a week at 12:00 AM UTC on Sunday. + - cron: 0 0 * * 0 + workflow_dispatch: + inputs: + subsystems: + description: Subsystem to run the update for + required: false + default: '["url", "WebCryptoAPI"]' + +permissions: + contents: read + +env: + NODE_VERSION: lts/* + +jobs: + wpt-subsystem-update: + if: github.repository == 'nodejs/node' || github.event_name == 'workflow_dispatch' + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + subsystem: ${{ fromJSON(github.event.inputs.subsystems || '["url", "WebCryptoAPI"]') }} + + steps: + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + persist-credentials: false + + - name: Install Node.js + uses: actions/setup-node@39370e3970a6d050c480ffad4ff0ed4d3fdee5af # v4.1.0 + with: + node-version: ${{ env.NODE_VERSION }} + + - name: Install @node-core/utils + run: npm install -g @node-core/utils + + - name: Setup @node-core/utils + run: | + ncu-config set username "$USERNAME" + ncu-config set token "$GH_TOKEN" + ncu-config set owner "${GITHUB_REPOSITORY_OWNER}" + ncu-config set repo "$(echo "$GITHUB_REPOSITORY" | cut -d/ -f2)" + env: + USERNAME: ${{ secrets.JENKINS_USER }} + GH_TOKEN: ${{ secrets.GH_USER_TOKEN }} + + - name: Update WPT for subsystem ${{ matrix.subsystem }} + run: | + git node wpt "$SUBSYSTEM" + env: + SUBSYSTEM: ${{ matrix.subsystem }} + + - name: Retrieve new version commit + run: | + new_version="$( + node -p 'require("./test/fixtures/wpt/versions.json")[process.argv[1]].commit' "$SUBSYSTEM" + )" + { + echo "long_version=$new_version" + echo "short_version=${new_version:0:10}" + } >> "$GITHUB_ENV" + env: + SUBSYSTEM: ${{ matrix.subsystem }} + + - name: Open or update PR for the subsystem update + uses: gr2m/create-or-update-pull-request-action@77596e3166f328b24613f7082ab30bf2d93079d5 + with: + branch: actions/update-wpt-${{ matrix.subsystem }} + author: Node.js GitHub Bot + title: 'test: update WPT for ${{ matrix.subsystem }} to ${{ env.short_version }}' + commit-message: 'test: update WPT for ${{ matrix.subsystem }} to ${{ env.short_version }}' + labels: test + update-pull-request-title-and-body: true + body: > + This is an automated update of the WPT for ${{ matrix.subsystem }} to + https://github.com/web-platform-tests/wpt/commit/${{ env.long_version }}. + env: + GITHUB_TOKEN: ${{ secrets.GH_USER_TOKEN }} diff --git a/CHANGELOG.md b/CHANGELOG.md index b9aa947a756025..ced3dfd153426c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -38,7 +38,9 @@ release. 18 (LTS) -23.3.0
+ +23.4.0
+23.3.0
23.2.0
23.1.0
23.0.0
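A quick aside on the proposal-title check in lint-release-proposal.yml above: the EXPECTED_TITLE assignment splices a bare ERE .+ between two single-quoted halves, which is what lets the LTS codename be any string. A minimal local sanity check of that regex (the sample date and subjects are illustrative, not taken from this diff):

    EXPECTED_TITLE='^[[:digit:]]{4}-[[:digit:]]{2}-[[:digit:]]{2}, Version [[:digit:]]+\.[[:digit:]]+\.[[:digit:]]+ (\(Current|'.+' \(LTS)\)$'
    # A Current release subject must end in "(Current)"...
    echo '2024-12-10, Version 23.4.0 (Current)' | grep -qE "$EXPECTED_TITLE" && echo 'Current subject: ok'
    # ...while an LTS subject carries a quoted codename before "(LTS)".
    echo "2024-12-10, Version 22.12.0 'Jod' (LTS)" | grep -qE "$EXPECTED_TITLE" && echo 'LTS subject: ok'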
diff --git a/README.md b/README.md index 69777ccb2dad40..35a30716eb668c 100644 --- a/README.md +++ b/README.md @@ -379,6 +379,8 @@ For information about the governance of the Node.js project, see **Nitzan Uziely** <> * [LiviaMedeiros](https://github.com/LiviaMedeiros) - **LiviaMedeiros** <> +* [ljharb](https://github.com/ljharb) - + **Jordan Harband** <> * [lpinca](https://github.com/lpinca) - **Luigi Pinca** <> (he/him) * [lukekarrys](https://github.com/lukekarrys) - @@ -757,8 +759,6 @@ maintaining the Node.js project. **Mert Can Altin** <> * [preveen-stack](https://github.com/preveen-stack) - **Preveen Padmanabhan** <> (he/him) -* [RedYetiDev](https://github.com/RedYetiDev) - - **Aviv Keller** <> (they/them) * [VoltrexKeyva](https://github.com/VoltrexKeyva) - **Mohammed Keyvanzadeh** <> (he/him) diff --git a/benchmark/util/text-decoder.js b/benchmark/util/text-decoder.js index dd4f02016df077..1aa60f2dd0bcd6 100644 --- a/benchmark/util/text-decoder.js +++ b/benchmark/util/text-decoder.js @@ -3,7 +3,7 @@ const common = require('../common.js'); const bench = common.createBenchmark(main, { - encoding: ['utf-8', 'latin1', 'iso-8859-3'], + encoding: ['utf-8', 'windows-1252', 'iso-8859-3'], ignoreBOM: [0, 1], fatal: [0, 1], len: [256, 1024 * 16, 1024 * 128], diff --git a/configure.py b/configure.py index a4e210261e2e76..e2b12d8823bb64 100755 --- a/configure.py +++ b/configure.py @@ -131,6 +131,12 @@ default=None, help='use the prefix to look for pre-installed headers') +parser.add_argument('--use_clang', + action='store_true', + dest='use_clang', + default=None, + help='use clang instead of gcc') + parser.add_argument('--dest-os', action='store', dest='dest_os', @@ -1407,6 +1413,10 @@ def configure_node(o): o['variables']['target_arch'] = target_arch o['variables']['node_byteorder'] = sys.byteorder + # Allow overriding the compiler - needed by embedders. 
+ if options.use_clang: + o['variables']['clang'] = 1 + cross_compiling = (options.cross_compiling if options.cross_compiling is not None else target_arch != host_arch) diff --git a/deps/corepack/CHANGELOG.md b/deps/corepack/CHANGELOG.md index 7de934c0d2c0db..941d0b6b7e5e25 100644 --- a/deps/corepack/CHANGELOG.md +++ b/deps/corepack/CHANGELOG.md @@ -1,5 +1,17 @@ # Changelog +## [0.30.0](https://github.com/nodejs/corepack/compare/v0.29.4...v0.30.0) (2024-11-23) + + +### Features + +* update package manager versions ([#578](https://github.com/nodejs/corepack/issues/578)) ([a286c8f](https://github.com/nodejs/corepack/commit/a286c8f5537ea9ecf9b6ff53c7bc3e8da4e3c8bb)) + + +### Performance Improvements + +* prefer `module.enableCompileCache` over `v8-compile-cache` ([#574](https://github.com/nodejs/corepack/issues/574)) ([cba6905](https://github.com/nodejs/corepack/commit/cba690575bd606faeee54bd512ccb8797d49055f)) + ## [0.29.4](https://github.com/nodejs/corepack/compare/v0.29.3...v0.29.4) (2024-09-07) diff --git a/deps/corepack/dist/corepack.js b/deps/corepack/dist/corepack.js index b1b22662466f86..6179b11c083cb5 100755 --- a/deps/corepack/dist/corepack.js +++ b/deps/corepack/dist/corepack.js @@ -1,3 +1,4 @@ #!/usr/bin/env node process.env.COREPACK_ENABLE_DOWNLOAD_PROMPT??='0'; +require('module').enableCompileCache?.(); require('./lib/corepack.cjs').runMain(process.argv.slice(2)); \ No newline at end of file diff --git a/deps/corepack/dist/lib/corepack.cjs b/deps/corepack/dist/lib/corepack.cjs index 2978fc336232e0..e1919339dc38bd 100644 --- a/deps/corepack/dist/lib/corepack.cjs +++ b/deps/corepack/dist/lib/corepack.cjs @@ -21260,7 +21260,7 @@ function String2(descriptor, ...args) { } // package.json -var version = "0.29.4"; +var version = "0.30.0"; // sources/Engine.ts var import_fs9 = __toESM(require("fs")); @@ -21274,7 +21274,7 @@ var import_valid3 = __toESM(require_valid2()); var config_default = { definitions: { npm: { - default: "10.8.3+sha1.e6085b2864fcfd9b1aad7b602601b5a2fc116699", + default: "10.9.1+sha1.ab141c1229765c11c8c59060fc9cf450a2207bd6", fetchLatestFrom: { type: "npm", package: "npm" @@ -21311,7 +21311,7 @@ var config_default = { } }, pnpm: { - default: "9.9.0+sha1.3edbe440f4e570aa8f049adbd06b9483d55cc2d2", + default: "9.14.2+sha1.5202b50ab92394b3c922d2e293f196e2df6d441b", fetchLatestFrom: { type: "npm", package: "pnpm" @@ -21375,7 +21375,7 @@ var config_default = { package: "yarn" }, transparent: { - default: "4.4.1+sha224.fd21d9eb5fba020083811af1d4953acc21eeb9f6ff97efd1b3f9d4de", + default: "4.5.2+sha224.c2e2e9ed3cdadd6ec250589b3393f71ae56d5ec297af11cec1eba3b4", commands: [ [ "yarn", @@ -21965,8 +21965,11 @@ async function runVersion(locator, installSpec, binName, args) { } if (!binPath) throw new Error(`Assertion failed: Unable to locate path for bin '${binName}'`); - if (locator.name !== `npm` || (0, import_lt.default)(locator.reference, `9.7.0`)) - await Promise.resolve().then(() => __toESM(require_v8_compile_cache())); + if (!import_module.default.enableCompileCache) { + if (locator.name !== `npm` || (0, import_lt.default)(locator.reference, `9.7.0`)) { + await Promise.resolve().then(() => __toESM(require_v8_compile_cache())); + } + } process.env.COREPACK_ROOT = import_path7.default.dirname(require.resolve("corepack/package.json")); process.argv = [ process.execPath, @@ -21976,6 +21979,9 @@ async function runVersion(locator, installSpec, binName, args) { process.execArgv = []; process.mainModule = void 0; process.nextTick(import_module.default.runMain, binPath); + if 
(import_module.default.flushCompileCache) { + setImmediate(import_module.default.flushCompileCache); + } } function shouldSkipIntegrityCheck() { return process.env.COREPACK_INTEGRITY_KEYS === `` || process.env.COREPACK_INTEGRITY_KEYS === `0`; @@ -22553,7 +22559,7 @@ var EnableCommand = class extends Command { [`enable`] ]; static usage = Command.Usage({ - description: `Add the Corepack shims to the install directories`, + description: `Add the Corepack shims to the install directory`, details: ` When run, this command will check whether the shims for the specified package managers can be found with the correct values inside the install directory. If not, or if they don't exist, they will be created. diff --git a/deps/corepack/dist/npm.js b/deps/corepack/dist/npm.js index 7d10ba5bdf36b2..75f68b058f2dd6 100755 --- a/deps/corepack/dist/npm.js +++ b/deps/corepack/dist/npm.js @@ -1,3 +1,4 @@ #!/usr/bin/env node process.env.COREPACK_ENABLE_DOWNLOAD_PROMPT??='1' +require('module').enableCompileCache?.(); require('./lib/corepack.cjs').runMain(['npm', ...process.argv.slice(2)]); \ No newline at end of file diff --git a/deps/corepack/dist/npx.js b/deps/corepack/dist/npx.js index a8bd3e69014313..b1138bb48e1a82 100755 --- a/deps/corepack/dist/npx.js +++ b/deps/corepack/dist/npx.js @@ -1,3 +1,4 @@ #!/usr/bin/env node process.env.COREPACK_ENABLE_DOWNLOAD_PROMPT??='1' +require('module').enableCompileCache?.(); require('./lib/corepack.cjs').runMain(['npx', ...process.argv.slice(2)]); \ No newline at end of file diff --git a/deps/corepack/dist/pnpm.js b/deps/corepack/dist/pnpm.js index a0a87263435562..56ba509405033d 100755 --- a/deps/corepack/dist/pnpm.js +++ b/deps/corepack/dist/pnpm.js @@ -1,3 +1,4 @@ #!/usr/bin/env node process.env.COREPACK_ENABLE_DOWNLOAD_PROMPT??='1' +require('module').enableCompileCache?.(); require('./lib/corepack.cjs').runMain(['pnpm', ...process.argv.slice(2)]); \ No newline at end of file diff --git a/deps/corepack/dist/pnpx.js b/deps/corepack/dist/pnpx.js index 57ad4842631cd7..ee36be2e99c686 100755 --- a/deps/corepack/dist/pnpx.js +++ b/deps/corepack/dist/pnpx.js @@ -1,3 +1,4 @@ #!/usr/bin/env node process.env.COREPACK_ENABLE_DOWNLOAD_PROMPT??='1' +require('module').enableCompileCache?.(); require('./lib/corepack.cjs').runMain(['pnpx', ...process.argv.slice(2)]); \ No newline at end of file diff --git a/deps/corepack/dist/yarn.js b/deps/corepack/dist/yarn.js index eaed8596eabaa3..ce628c82b6a782 100755 --- a/deps/corepack/dist/yarn.js +++ b/deps/corepack/dist/yarn.js @@ -1,3 +1,4 @@ #!/usr/bin/env node process.env.COREPACK_ENABLE_DOWNLOAD_PROMPT??='1' +require('module').enableCompileCache?.(); require('./lib/corepack.cjs').runMain(['yarn', ...process.argv.slice(2)]); \ No newline at end of file diff --git a/deps/corepack/dist/yarnpkg.js b/deps/corepack/dist/yarnpkg.js index aada6032fa67ff..9541ed726aaa3b 100755 --- a/deps/corepack/dist/yarnpkg.js +++ b/deps/corepack/dist/yarnpkg.js @@ -1,3 +1,4 @@ #!/usr/bin/env node process.env.COREPACK_ENABLE_DOWNLOAD_PROMPT??='1' +require('module').enableCompileCache?.(); require('./lib/corepack.cjs').runMain(['yarnpkg', ...process.argv.slice(2)]); \ No newline at end of file diff --git a/deps/corepack/package.json b/deps/corepack/package.json index 571c359407e07a..c9c6662e99e6c9 100644 --- a/deps/corepack/package.json +++ b/deps/corepack/package.json @@ -1,6 +1,6 @@ { "name": "corepack", - "version": "0.29.4", + "version": "0.30.0", "homepage": "https://github.com/nodejs/corepack#readme", "bugs": { "url": 
"https://github.com/nodejs/corepack/issues" diff --git a/deps/ncrypto/unofficial.gni b/deps/ncrypto/unofficial.gni index ea024af73e215b..7cb27d22b9b8e0 100644 --- a/deps/ncrypto/unofficial.gni +++ b/deps/ncrypto/unofficial.gni @@ -27,6 +27,6 @@ template("ncrypto_gn_build") { forward_variables_from(invoker, "*") public_configs = [ ":ncrypto_config" ] sources = gypi_values.ncrypto_sources - deps = [ "../openssl" ] + deps = [ "$node_openssl_path" ] } } diff --git a/deps/ngtcp2/ngtcp2.gyp b/deps/ngtcp2/ngtcp2.gyp index 0f2929b75478f1..68013580874b09 100644 --- a/deps/ngtcp2/ngtcp2.gyp +++ b/deps/ngtcp2/ngtcp2.gyp @@ -13,7 +13,6 @@ 'ngtcp2/lib/ngtcp2_cid.c', 'ngtcp2/lib/ngtcp2_conn.c', 'ngtcp2/lib/ngtcp2_conv.c', - 'ngtcp2/lib/ngtcp2_conversion.c', 'ngtcp2/lib/ngtcp2_crypto.c', 'ngtcp2/lib/ngtcp2_err.c', 'ngtcp2/lib/ngtcp2_frame_chain.c', @@ -37,8 +36,10 @@ 'ngtcp2/lib/ngtcp2_rob.c', 'ngtcp2/lib/ngtcp2_rst.c', 'ngtcp2/lib/ngtcp2_rtb.c', + 'ngtcp2/lib/ngtcp2_settings.c', 'ngtcp2/lib/ngtcp2_str.c', 'ngtcp2/lib/ngtcp2_strm.c', + 'ngtcp2/lib/ngtcp2_transport_params.c', 'ngtcp2/lib/ngtcp2_unreachable.c', 'ngtcp2/lib/ngtcp2_vec.c', 'ngtcp2/lib/ngtcp2_version.c', diff --git a/deps/ngtcp2/ngtcp2/crypto/boringssl/boringssl.c b/deps/ngtcp2/ngtcp2/crypto/boringssl/boringssl.c index 50b89110e36ff7..283063f738e2af 100644 --- a/deps/ngtcp2/ngtcp2/crypto/boringssl/boringssl.c +++ b/deps/ngtcp2/ngtcp2/crypto/boringssl/boringssl.c @@ -24,7 +24,7 @@ */ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include #include @@ -52,15 +52,15 @@ typedef struct ngtcp2_crypto_boringssl_cipher { } ngtcp2_crypto_boringssl_cipher; static ngtcp2_crypto_boringssl_cipher crypto_cipher_aes_128 = { - NGTCP2_CRYPTO_BORINGSSL_CIPHER_TYPE_AES_128, + NGTCP2_CRYPTO_BORINGSSL_CIPHER_TYPE_AES_128, }; static ngtcp2_crypto_boringssl_cipher crypto_cipher_aes_256 = { - NGTCP2_CRYPTO_BORINGSSL_CIPHER_TYPE_AES_256, + NGTCP2_CRYPTO_BORINGSSL_CIPHER_TYPE_AES_256, }; static ngtcp2_crypto_boringssl_cipher crypto_cipher_chacha20 = { - NGTCP2_CRYPTO_BORINGSSL_CIPHER_TYPE_CHACHA20, + NGTCP2_CRYPTO_BORINGSSL_CIPHER_TYPE_CHACHA20, }; ngtcp2_crypto_aead *ngtcp2_crypto_aead_aes_128_gcm(ngtcp2_crypto_aead *aead) { @@ -175,7 +175,7 @@ static ngtcp2_crypto_ctx *crypto_ctx_cipher_id(ngtcp2_crypto_ctx *ctx, ctx->hp.native_handle = (void *)crypto_cipher_id_get_hp(cipher_id); ctx->max_encryption = crypto_cipher_id_get_aead_max_encryption(cipher_id); ctx->max_decryption_failure = - crypto_cipher_id_get_aead_max_decryption_failure(cipher_id); + crypto_cipher_id_get_aead_max_decryption_failure(cipher_id); return ctx; } @@ -413,12 +413,12 @@ int ngtcp2_crypto_hp_mask(uint8_t *dest, const ngtcp2_crypto_cipher *hp, AES_ecb_encrypt(sample, dest, &ctx->aes_key, 1); return 0; case NGTCP2_CRYPTO_BORINGSSL_CIPHER_TYPE_CHACHA20: -#if defined(WORDS_BIGENDIAN) +#ifdef WORDS_BIGENDIAN counter = (uint32_t)sample[0] + (uint32_t)(sample[1] << 8) + (uint32_t)(sample[2] << 16) + (uint32_t)(sample[3] << 24); -#else /* !WORDS_BIGENDIAN */ +#else /* !defined(WORDS_BIGENDIAN) */ memcpy(&counter, sample, sizeof(counter)); -#endif /* !WORDS_BIGENDIAN */ +#endif /* !defined(WORDS_BIGENDIAN) */ CRYPTO_chacha_20(dest, PLAINTEXT, sizeof(PLAINTEXT) - 1, ctx->key, sample + sizeof(counter), counter); return 0; @@ -429,17 +429,16 @@ int ngtcp2_crypto_hp_mask(uint8_t *dest, const ngtcp2_crypto_cipher *hp, } int ngtcp2_crypto_read_write_crypto_data( - ngtcp2_conn *conn, ngtcp2_encryption_level encryption_level, - const uint8_t *data, size_t datalen) 
{ + ngtcp2_conn *conn, ngtcp2_encryption_level encryption_level, + const uint8_t *data, size_t datalen) { SSL *ssl = ngtcp2_conn_get_tls_native_handle(conn); int rv; int err; if (SSL_provide_quic_data( - ssl, - ngtcp2_crypto_boringssl_from_ngtcp2_encryption_level( - encryption_level), - data, datalen) != 1) { + ssl, + ngtcp2_crypto_boringssl_from_ngtcp2_encryption_level(encryption_level), + data, datalen) != 1) { return -1; } @@ -522,7 +521,7 @@ int ngtcp2_crypto_set_local_transport_params(void *tls, const uint8_t *buf, } ngtcp2_encryption_level ngtcp2_crypto_boringssl_from_ssl_encryption_level( - enum ssl_encryption_level_t ssl_level) { + enum ssl_encryption_level_t ssl_level) { switch (ssl_level) { case ssl_encryption_initial: return NGTCP2_ENCRYPTION_LEVEL_INITIAL; @@ -540,7 +539,7 @@ ngtcp2_encryption_level ngtcp2_crypto_boringssl_from_ssl_encryption_level( enum ssl_encryption_level_t ngtcp2_crypto_boringssl_from_ngtcp2_encryption_level( - ngtcp2_encryption_level encryption_level) { + ngtcp2_encryption_level encryption_level) { switch (encryption_level) { case NGTCP2_ENCRYPTION_LEVEL_INITIAL: return ssl_encryption_initial; @@ -582,7 +581,7 @@ static int set_read_secret(SSL *ssl, enum ssl_encryption_level_t bssl_level, ngtcp2_crypto_conn_ref *conn_ref = SSL_get_app_data(ssl); ngtcp2_conn *conn = conn_ref->get_conn(conn_ref); ngtcp2_encryption_level level = - ngtcp2_crypto_boringssl_from_ssl_encryption_level(bssl_level); + ngtcp2_crypto_boringssl_from_ssl_encryption_level(bssl_level); (void)cipher; if (ngtcp2_crypto_derive_and_install_rx_key(conn, NULL, NULL, NULL, level, @@ -599,7 +598,7 @@ static int set_write_secret(SSL *ssl, enum ssl_encryption_level_t bssl_level, ngtcp2_crypto_conn_ref *conn_ref = SSL_get_app_data(ssl); ngtcp2_conn *conn = conn_ref->get_conn(conn_ref); ngtcp2_encryption_level level = - ngtcp2_crypto_boringssl_from_ssl_encryption_level(bssl_level); + ngtcp2_crypto_boringssl_from_ssl_encryption_level(bssl_level); (void)cipher; if (ngtcp2_crypto_derive_and_install_tx_key(conn, NULL, NULL, NULL, level, @@ -615,7 +614,7 @@ static int add_handshake_data(SSL *ssl, enum ssl_encryption_level_t bssl_level, ngtcp2_crypto_conn_ref *conn_ref = SSL_get_app_data(ssl); ngtcp2_conn *conn = conn_ref->get_conn(conn_ref); ngtcp2_encryption_level level = - ngtcp2_crypto_boringssl_from_ssl_encryption_level(bssl_level); + ngtcp2_crypto_boringssl_from_ssl_encryption_level(bssl_level); int rv; rv = ngtcp2_conn_submit_crypto_data(conn, level, data, datalen); @@ -644,8 +643,8 @@ static int send_alert(SSL *ssl, enum ssl_encryption_level_t bssl_level, } static SSL_QUIC_METHOD quic_method = { - set_read_secret, set_write_secret, add_handshake_data, - flush_flight, send_alert, + set_read_secret, set_write_secret, add_handshake_data, + flush_flight, send_alert, }; static void crypto_boringssl_configure_context(SSL_CTX *ssl_ctx) { diff --git a/deps/ngtcp2/ngtcp2/crypto/includes/ngtcp2/ngtcp2_crypto.h b/deps/ngtcp2/ngtcp2/crypto/includes/ngtcp2/ngtcp2_crypto.h index 06427d7a7cac70..68093d18b71b14 100644 --- a/deps/ngtcp2/ngtcp2/crypto/includes/ngtcp2/ngtcp2_crypto.h +++ b/deps/ngtcp2/ngtcp2/crypto/includes/ngtcp2/ngtcp2_crypto.h @@ -29,14 +29,45 @@ #ifdef __cplusplus extern "C" { -#endif +#endif /* defined(__cplusplus) */ #ifdef WIN32 # ifndef WIN32_LEAN_AND_MEAN # define WIN32_LEAN_AND_MEAN -# endif +# endif /* !defined(WIN32_LEAN_AND_MEAN) */ # include -#endif /* WIN32 */ +#endif /* defined(WIN32) */ + +/** + * @macrosection + * + * ngtcp2 crypto library error codes + */ + +/** + * @macro + * + * 
:macro:`NGTCP2_CRYPTO_ERR_INTERNAL` indicates an internal error. + */ +#define NGTCP2_CRYPTO_ERR_INTERNAL -201 + +/** + * @macro + * + * :macro:`NGTCP2_CRYPTO_ERR_UNREADABLE_TOKEN` indicates that a token + * is unreadable because it is not correctly formatted; or verifying + * the integrity protection failed. + */ +#define NGTCP2_CRYPTO_ERR_UNREADABLE_TOKEN -202 + +/** + * @macro + * + * :macro:`NGTCP2_CRYPTO_ERR_VERIFY_TOKEN` indicates that a token does + * not probe the client address; or the token validity has expired; or + * it contains invalid Connection ID. + */ +#define NGTCP2_CRYPTO_ERR_VERIFY_TOKEN -203 /** * @function @@ -135,12 +166,9 @@ ngtcp2_crypto_hkdf_extract(uint8_t *dest, const ngtcp2_crypto_md *md, * * This function returns 0 if it succeeds, or -1. */ -NGTCP2_EXTERN int ngtcp2_crypto_hkdf_expand(uint8_t *dest, size_t destlen, - const ngtcp2_crypto_md *md, - const uint8_t *secret, - size_t secretlen, - const uint8_t *info, - size_t infolen); +NGTCP2_EXTERN int ngtcp2_crypto_hkdf_expand( + uint8_t *dest, size_t destlen, const ngtcp2_crypto_md *md, + const uint8_t *secret, size_t secretlen, const uint8_t *info, size_t infolen); /** * @function @@ -318,8 +346,8 @@ ngtcp2_crypto_hp_mask_cb(uint8_t *dest, const ngtcp2_crypto_cipher *hp, * This function returns 0 if it succeeds, or -1. */ NGTCP2_EXTERN int ngtcp2_crypto_derive_and_install_rx_key( - ngtcp2_conn *conn, uint8_t *key, uint8_t *iv, uint8_t *hp, - ngtcp2_encryption_level level, const uint8_t *secret, size_t secretlen); + ngtcp2_conn *conn, uint8_t *key, uint8_t *iv, uint8_t *hp, + ngtcp2_encryption_level level, const uint8_t *secret, size_t secretlen); /** * @function @@ -365,8 +393,8 @@ NGTCP2_EXTERN int ngtcp2_crypto_derive_and_install_rx_key( * This function returns 0 if it succeeds, or -1. */ NGTCP2_EXTERN int ngtcp2_crypto_derive_and_install_tx_key( - ngtcp2_conn *conn, uint8_t *key, uint8_t *iv, uint8_t *hp, - ngtcp2_encryption_level level, const uint8_t *secret, size_t secretlen); + ngtcp2_conn *conn, uint8_t *key, uint8_t *iv, uint8_t *hp, + ngtcp2_encryption_level level, const uint8_t *secret, size_t secretlen); /** * @function @@ -405,11 +433,11 @@ NGTCP2_EXTERN int ngtcp2_crypto_derive_and_install_tx_key( * This function returns 0 if it succeeds, or -1. */ NGTCP2_EXTERN int ngtcp2_crypto_update_key( - ngtcp2_conn *conn, uint8_t *rx_secret, uint8_t *tx_secret, - ngtcp2_crypto_aead_ctx *rx_aead_ctx, uint8_t *rx_key, uint8_t *rx_iv, - ngtcp2_crypto_aead_ctx *tx_aead_ctx, uint8_t *tx_key, uint8_t *tx_iv, - const uint8_t *current_rx_secret, const uint8_t *current_tx_secret, - size_t secretlen); + ngtcp2_conn *conn, uint8_t *rx_secret, uint8_t *tx_secret, + ngtcp2_crypto_aead_ctx *rx_aead_ctx, uint8_t *rx_key, uint8_t *rx_iv, + ngtcp2_crypto_aead_ctx *tx_aead_ctx, uint8_t *tx_key, uint8_t *tx_iv, + const uint8_t *current_rx_secret, const uint8_t *current_tx_secret, + size_t secretlen); /** * @function @@ -422,11 +450,11 @@ NGTCP2_EXTERN int ngtcp2_crypto_update_key( * :macro:`NGTCP2_ERR_CALLBACK_FAILURE`. 
*/ NGTCP2_EXTERN int ngtcp2_crypto_update_key_cb( - ngtcp2_conn *conn, uint8_t *rx_secret, uint8_t *tx_secret, - ngtcp2_crypto_aead_ctx *rx_aead_ctx, uint8_t *rx_iv, - ngtcp2_crypto_aead_ctx *tx_aead_ctx, uint8_t *tx_iv, - const uint8_t *current_rx_secret, const uint8_t *current_tx_secret, - size_t secretlen, void *user_data); + ngtcp2_conn *conn, uint8_t *rx_secret, uint8_t *tx_secret, + ngtcp2_crypto_aead_ctx *rx_aead_ctx, uint8_t *rx_iv, + ngtcp2_crypto_aead_ctx *tx_aead_ctx, uint8_t *tx_iv, + const uint8_t *current_rx_secret, const uint8_t *current_tx_secret, + size_t secretlen, void *user_data); /** * @function @@ -514,8 +542,8 @@ ngtcp2_crypto_read_write_crypto_data(ngtcp2_conn *conn, * codes. */ NGTCP2_EXTERN int ngtcp2_crypto_recv_crypto_data_cb( - ngtcp2_conn *conn, ngtcp2_encryption_level encryption_level, - uint64_t offset, const uint8_t *data, size_t datalen, void *user_data); + ngtcp2_conn *conn, ngtcp2_encryption_level encryption_level, uint64_t offset, + const uint8_t *data, size_t datalen, void *user_data); /** * @function @@ -529,8 +557,8 @@ NGTCP2_EXTERN int ngtcp2_crypto_recv_crypto_data_cb( * This function returns 0 if it succeeds, or -1. */ NGTCP2_EXTERN int ngtcp2_crypto_generate_stateless_reset_token( - uint8_t *token, const uint8_t *secret, size_t secretlen, - const ngtcp2_cid *cid); + uint8_t *token, const uint8_t *secret, size_t secretlen, + const ngtcp2_cid *cid); /** * @macro @@ -540,7 +568,7 @@ NGTCP2_EXTERN int ngtcp2_crypto_generate_stateless_reset_token( * `ngtcp2_crypto_generate_retry_token` or * `ngtcp2_crypto_generate_regular_token`. */ -#define NGTCP2_CRYPTO_TOKEN_RAND_DATALEN 32 +#define NGTCP2_CRYPTO_TOKEN_RAND_DATALEN 16 /** * @macro @@ -550,6 +578,14 @@ NGTCP2_EXTERN int ngtcp2_crypto_generate_stateless_reset_token( */ #define NGTCP2_CRYPTO_TOKEN_MAGIC_RETRY 0xb6 +/** + * @macro + * + * :macro:`NGTCP2_CRYPTO_TOKEN_MAGIC_RETRY2` is the magic byte for + * Retry token generated by `ngtcp2_crypto_generate_retry_token2`. + */ +#define NGTCP2_CRYPTO_TOKEN_MAGIC_RETRY2 0xb7 + /** * @macro * @@ -569,6 +605,17 @@ NGTCP2_EXTERN int ngtcp2_crypto_generate_stateless_reset_token( sizeof(ngtcp2_tstamp) + /* aead tag = */ 16 + \ NGTCP2_CRYPTO_TOKEN_RAND_DATALEN) +/** + * @macro + * + * :macro:`NGTCP2_CRYPTO_MAX_RETRY_TOKENLEN2` is the maximum length of + * a token generated by `ngtcp2_crypto_generate_retry_token2`. + */ +#define NGTCP2_CRYPTO_MAX_RETRY_TOKENLEN2 \ + (/* magic = */ 1 + sizeof(ngtcp2_sockaddr_union) + /* cid len = */ 1 + \ + NGTCP2_MAX_CIDLEN + sizeof(ngtcp2_tstamp) + /* aead tag = */ 16 + \ + NGTCP2_CRYPTO_TOKEN_RAND_DATALEN) + /** * @macro * @@ -595,13 +642,15 @@ NGTCP2_EXTERN int ngtcp2_crypto_generate_stateless_reset_token( * is a Destination Connection ID in Initial packet sent by client. * |ts| is the timestamp when the token is generated. * + * See also `ngtcp2_crypto_generate_retry_token2`. + * * This function returns the length of generated token if it succeeds, * or -1. 
*/ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_crypto_generate_retry_token( - uint8_t *token, const uint8_t *secret, size_t secretlen, uint32_t version, - const ngtcp2_sockaddr *remote_addr, ngtcp2_socklen remote_addrlen, - const ngtcp2_cid *retry_scid, const ngtcp2_cid *odcid, ngtcp2_tstamp ts); + uint8_t *token, const uint8_t *secret, size_t secretlen, uint32_t version, + const ngtcp2_sockaddr *remote_addr, ngtcp2_socklen remote_addrlen, + const ngtcp2_cid *retry_scid, const ngtcp2_cid *odcid, ngtcp2_tstamp ts); /** * @function @@ -622,10 +671,76 @@ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_crypto_generate_retry_token( * This function returns 0 if it succeeds, or -1. */ NGTCP2_EXTERN int ngtcp2_crypto_verify_retry_token( - ngtcp2_cid *odcid, const uint8_t *token, size_t tokenlen, - const uint8_t *secret, size_t secretlen, uint32_t version, - const ngtcp2_sockaddr *remote_addr, ngtcp2_socklen remote_addrlen, - const ngtcp2_cid *dcid, ngtcp2_duration timeout, ngtcp2_tstamp ts); + ngtcp2_cid *odcid, const uint8_t *token, size_t tokenlen, + const uint8_t *secret, size_t secretlen, uint32_t version, + const ngtcp2_sockaddr *remote_addr, ngtcp2_socklen remote_addrlen, + const ngtcp2_cid *dcid, ngtcp2_duration timeout, ngtcp2_tstamp ts); + +/** + * @function + * + * `ngtcp2_crypto_generate_retry_token2` generates a token in the + * buffer pointed by |token| that is sent with Retry packet. The + * buffer pointed by |token| must have at least + * :macro:`NGTCP2_CRYPTO_MAX_RETRY_TOKENLEN2` bytes long. The + * successfully generated token starts with + * :macro:`NGTCP2_CRYPTO_TOKEN_MAGIC_RETRY2`. |secret| of length + * |secretlen| is a keying material to generate keys to encrypt the + * token. |version| is QUIC version. |remote_addr| of length + * |remote_addrlen| is an address of client. |retry_scid| is a Source + * Connection ID chosen by server, and set in Retry packet. |odcid| + * is a Destination Connection ID in Initial packet sent by client. + * |ts| is the timestamp when the token is generated. + * + * Use this function instead of `ngtcp2_crypto_generate_retry_token` + * if more detailed error handling is required when verifying the + * token. `ngtcp2_crypto_verify_retry_token2` must be used to verify + * the token. + * + * This function returns the length of generated token if it succeeds, + * or -1. + */ +NGTCP2_EXTERN ngtcp2_ssize ngtcp2_crypto_generate_retry_token2( + uint8_t *token, const uint8_t *secret, size_t secretlen, uint32_t version, + const ngtcp2_sockaddr *remote_addr, ngtcp2_socklen remote_addrlen, + const ngtcp2_cid *retry_scid, const ngtcp2_cid *odcid, ngtcp2_tstamp ts); + +/** + * @function + * + * `ngtcp2_crypto_verify_retry_token2` verifies Retry token stored in + * the buffer pointed by |token| of length |tokenlen|. |secret| of + * length |secretlen| is a keying material to generate keys to decrypt + * the token. |version| is QUIC version of the Initial packet that + * contains this token. |remote_addr| of length |remote_addrlen| is + * an address of client. |dcid| is a Destination Connection ID in + * Initial packet sent by client. |timeout| is the period during + * which the token is valid. |ts| is the current timestamp. When + * validation succeeds, the extracted Destination Connection ID (which + * is the Destination Connection ID in Initial packet sent by client + * that triggered Retry packet) is stored in the buffer pointed by + * |odcid|. + * + * The token must be generated by + * `ngtcp2_crypto_generate_retry_token2`. 
+ * + * This function returns 0 if it succeeds, or one of the following + * negative error codes: + * + * :macro:`NGTCP2_CRYPTO_ERR_UNREADABLE_TOKEN` + * A token is badly formatted; or verifying the integrity + * protection failed. + * :macro:`NGTCP2_CRYPTO_ERR_VERIFY_TOKEN` + * A token does not probe the client address; or the token + * validity has expired; or it contains invalid Connection ID. + * :macro:`NGTCP2_CRYPTO_ERR_INTERNAL` + * Internal error occurred. + */ +NGTCP2_EXTERN int ngtcp2_crypto_verify_retry_token2( + ngtcp2_cid *odcid, const uint8_t *token, size_t tokenlen, + const uint8_t *secret, size_t secretlen, uint32_t version, + const ngtcp2_sockaddr *remote_addr, ngtcp2_socklen remote_addrlen, + const ngtcp2_cid *dcid, ngtcp2_duration timeout, ngtcp2_tstamp ts); /** * @function @@ -644,9 +759,9 @@ NGTCP2_EXTERN int ngtcp2_crypto_verify_retry_token( * or -1. */ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_crypto_generate_regular_token( - uint8_t *token, const uint8_t *secret, size_t secretlen, - const ngtcp2_sockaddr *remote_addr, ngtcp2_socklen remote_addrlen, - ngtcp2_tstamp ts); + uint8_t *token, const uint8_t *secret, size_t secretlen, + const ngtcp2_sockaddr *remote_addr, ngtcp2_socklen remote_addrlen, + ngtcp2_tstamp ts); /** * @function @@ -661,9 +776,9 @@ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_crypto_generate_regular_token( * This function returns 0 if it succeeds, or -1. */ NGTCP2_EXTERN int ngtcp2_crypto_verify_regular_token( - const uint8_t *token, size_t tokenlen, const uint8_t *secret, - size_t secretlen, const ngtcp2_sockaddr *remote_addr, - ngtcp2_socklen remote_addrlen, ngtcp2_duration timeout, ngtcp2_tstamp ts); + const uint8_t *token, size_t tokenlen, const uint8_t *secret, + size_t secretlen, const ngtcp2_sockaddr *remote_addr, + ngtcp2_socklen remote_addrlen, ngtcp2_duration timeout, ngtcp2_tstamp ts); /** * @function @@ -685,9 +800,9 @@ NGTCP2_EXTERN int ngtcp2_crypto_verify_regular_token( * This function returns 0 if it succeeds, or -1. */ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_crypto_write_connection_close( - uint8_t *dest, size_t destlen, uint32_t version, const ngtcp2_cid *dcid, - const ngtcp2_cid *scid, uint64_t error_code, const uint8_t *reason, - size_t reasonlen); + uint8_t *dest, size_t destlen, uint32_t version, const ngtcp2_cid *dcid, + const ngtcp2_cid *scid, uint64_t error_code, const uint8_t *reason, + size_t reasonlen); /** * @function @@ -705,9 +820,9 @@ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_crypto_write_connection_close( * This function returns 0 if it succeeds, or -1. */ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_crypto_write_retry( - uint8_t *dest, size_t destlen, uint32_t version, const ngtcp2_cid *dcid, - const ngtcp2_cid *scid, const ngtcp2_cid *odcid, const uint8_t *token, - size_t tokenlen); + uint8_t *dest, size_t destlen, uint32_t version, const ngtcp2_cid *dcid, + const ngtcp2_cid *scid, const ngtcp2_cid *odcid, const uint8_t *token, + size_t tokenlen); /** * @function @@ -759,7 +874,7 @@ ngtcp2_crypto_aead_ctx_free(ngtcp2_crypto_aead_ctx *aead_ctx); * :member:`ngtcp2_callbacks.delete_crypto_aead_ctx` field. */ NGTCP2_EXTERN void ngtcp2_crypto_delete_crypto_aead_ctx_cb( - ngtcp2_conn *conn, ngtcp2_crypto_aead_ctx *aead_ctx, void *user_data); + ngtcp2_conn *conn, ngtcp2_crypto_aead_ctx *aead_ctx, void *user_data); /** * @function @@ -771,7 +886,7 @@ NGTCP2_EXTERN void ngtcp2_crypto_delete_crypto_aead_ctx_cb( * :member:`ngtcp2_callbacks.delete_crypto_cipher_ctx` field. 
*/ NGTCP2_EXTERN void ngtcp2_crypto_delete_crypto_cipher_ctx_cb( - ngtcp2_conn *conn, ngtcp2_crypto_cipher_ctx *cipher_ctx, void *user_data); + ngtcp2_conn *conn, ngtcp2_crypto_cipher_ctx *cipher_ctx, void *user_data); /** * @function @@ -813,7 +928,7 @@ typedef struct ngtcp2_crypto_conn_ref ngtcp2_crypto_conn_ref; * must return non-NULL :type:`ngtcp2_conn` object. */ typedef ngtcp2_conn *(*ngtcp2_crypto_get_conn)( - ngtcp2_crypto_conn_ref *conn_ref); + ngtcp2_crypto_conn_ref *conn_ref); /** * @struct @@ -836,6 +951,6 @@ typedef struct ngtcp2_crypto_conn_ref { #ifdef __cplusplus } -#endif +#endif /* defined(__cplusplus) */ -#endif /* NGTCP2_CRYPTO_H */ +#endif /* !defined(NGTCP2_CRYPTO_H) */ diff --git a/deps/ngtcp2/ngtcp2/crypto/includes/ngtcp2/ngtcp2_crypto_boringssl.h b/deps/ngtcp2/ngtcp2/crypto/includes/ngtcp2/ngtcp2_crypto_boringssl.h index 43a3c36f03a382..89b26f0bc0e346 100644 --- a/deps/ngtcp2/ngtcp2/crypto/includes/ngtcp2/ngtcp2_crypto_boringssl.h +++ b/deps/ngtcp2/ngtcp2/crypto/includes/ngtcp2/ngtcp2_crypto_boringssl.h @@ -31,7 +31,7 @@ #ifdef __cplusplus extern "C" { -#endif +#endif /* defined(__cplusplus) */ /** * @function @@ -42,7 +42,7 @@ extern "C" { */ NGTCP2_EXTERN ngtcp2_encryption_level ngtcp2_crypto_boringssl_from_ssl_encryption_level( - enum ssl_encryption_level_t ssl_level); + enum ssl_encryption_level_t ssl_level); /** * @function @@ -53,7 +53,7 @@ ngtcp2_crypto_boringssl_from_ssl_encryption_level( */ NGTCP2_EXTERN enum ssl_encryption_level_t ngtcp2_crypto_boringssl_from_ngtcp2_encryption_level( - ngtcp2_encryption_level encryption_level); + ngtcp2_encryption_level encryption_level); /** * @function @@ -99,6 +99,6 @@ ngtcp2_crypto_boringssl_configure_client_context(SSL_CTX *ssl_ctx); #ifdef __cplusplus } -#endif +#endif /* defined(__cplusplus) */ -#endif /* NGTCP2_CRYPTO_BORINGSSL_H */ +#endif /* !defined(NGTCP2_CRYPTO_BORINGSSL_H) */ diff --git a/deps/ngtcp2/ngtcp2/crypto/includes/ngtcp2/ngtcp2_crypto_picotls.h b/deps/ngtcp2/ngtcp2/crypto/includes/ngtcp2/ngtcp2_crypto_picotls.h index 61020bb3a8f376..d3f2f978e79923 100644 --- a/deps/ngtcp2/ngtcp2/crypto/includes/ngtcp2/ngtcp2_crypto_picotls.h +++ b/deps/ngtcp2/ngtcp2/crypto/includes/ngtcp2/ngtcp2_crypto_picotls.h @@ -31,7 +31,7 @@ #ifdef __cplusplus extern "C" { -#endif +#endif /* defined(__cplusplus) */ /** * @struct @@ -79,7 +79,7 @@ ngtcp2_crypto_picotls_from_epoch(size_t epoch); * Picotls backend. */ NGTCP2_EXTERN size_t ngtcp2_crypto_picotls_from_ngtcp2_encryption_level( - ngtcp2_encryption_level encryption_level); + ngtcp2_encryption_level encryption_level); /** * @function @@ -160,7 +160,7 @@ ngtcp2_crypto_picotls_configure_client_context(ptls_context_t *ctx); * It returns 0 if it succeeds, or -1. */ NGTCP2_EXTERN int ngtcp2_crypto_picotls_configure_server_session( - ngtcp2_crypto_picotls_ctx *cptls); + ngtcp2_crypto_picotls_ctx *cptls); /** * @function @@ -224,8 +224,8 @@ ngtcp2_crypto_picotls_deconfigure_session(ngtcp2_crypto_picotls_ctx *cptls); * :macro:`NGTCP2_TLSEXT_QUIC_TRANSPORT_PARAMETERS_V1`. */ NGTCP2_EXTERN int ngtcp2_crypto_picotls_collect_extension( - ptls_t *ptls, struct st_ptls_handshake_properties_t *properties, - uint16_t type); + ptls_t *ptls, struct st_ptls_handshake_properties_t *properties, + uint16_t type); /** * @function @@ -236,11 +236,11 @@ NGTCP2_EXTERN int ngtcp2_crypto_picotls_collect_extension( * extensions are ignored. 
*/ NGTCP2_EXTERN int ngtcp2_crypto_picotls_collected_extensions( - ptls_t *ptls, struct st_ptls_handshake_properties_t *properties, - ptls_raw_extension_t *extensions); + ptls_t *ptls, struct st_ptls_handshake_properties_t *properties, + ptls_raw_extension_t *extensions); #ifdef __cplusplus } -#endif +#endif /* defined(__cplusplus) */ -#endif /* NGTCP2_CRYPTO_PICOTLS_H */ +#endif /* !defined(NGTCP2_CRYPTO_PICOTLS_H) */ diff --git a/deps/ngtcp2/ngtcp2/crypto/includes/ngtcp2/ngtcp2_crypto_quictls.h b/deps/ngtcp2/ngtcp2/crypto/includes/ngtcp2/ngtcp2_crypto_quictls.h index b25c13b81c8b18..22e3eda0c4160a 100644 --- a/deps/ngtcp2/ngtcp2/crypto/includes/ngtcp2/ngtcp2_crypto_quictls.h +++ b/deps/ngtcp2/ngtcp2/crypto/includes/ngtcp2/ngtcp2_crypto_quictls.h @@ -31,7 +31,7 @@ #ifdef __cplusplus extern "C" { -#endif +#endif /* defined(__cplusplus) */ /** * @macrosection @@ -70,7 +70,7 @@ extern "C" { */ NGTCP2_EXTERN ngtcp2_encryption_level ngtcp2_crypto_quictls_from_ossl_encryption_level( - OSSL_ENCRYPTION_LEVEL ossl_level); + OSSL_ENCRYPTION_LEVEL ossl_level); /** * @function @@ -81,7 +81,7 @@ ngtcp2_crypto_quictls_from_ossl_encryption_level( */ NGTCP2_EXTERN OSSL_ENCRYPTION_LEVEL ngtcp2_crypto_quictls_from_ngtcp2_encryption_level( - ngtcp2_encryption_level encryption_level); + ngtcp2_encryption_level encryption_level); /** * @function @@ -142,6 +142,6 @@ NGTCP2_EXTERN int ngtcp2_crypto_quictls_init(void); #ifdef __cplusplus } -#endif +#endif /* defined(__cplusplus) */ -#endif /* NGTCP2_CRYPTO_QUICTLS_H */ +#endif /* !defined(NGTCP2_CRYPTO_QUICTLS_H) */ diff --git a/deps/ngtcp2/ngtcp2/crypto/includes/ngtcp2/ngtcp2_crypto_wolfssl.h b/deps/ngtcp2/ngtcp2/crypto/includes/ngtcp2/ngtcp2_crypto_wolfssl.h index e1d621adce94d8..e95056de5926a9 100644 --- a/deps/ngtcp2/ngtcp2/crypto/includes/ngtcp2/ngtcp2_crypto_wolfssl.h +++ b/deps/ngtcp2/ngtcp2/crypto/includes/ngtcp2/ngtcp2_crypto_wolfssl.h @@ -33,7 +33,7 @@ #ifdef __cplusplus extern "C" { -#endif +#endif /* defined(__cplusplus) */ /** * @function @@ -44,7 +44,7 @@ extern "C" { */ NGTCP2_EXTERN ngtcp2_encryption_level ngtcp2_crypto_wolfssl_from_wolfssl_encryption_level( - WOLFSSL_ENCRYPTION_LEVEL wolfssl_level); + WOLFSSL_ENCRYPTION_LEVEL wolfssl_level); /** * @function @@ -55,7 +55,7 @@ ngtcp2_crypto_wolfssl_from_wolfssl_encryption_level( */ NGTCP2_EXTERN WOLFSSL_ENCRYPTION_LEVEL ngtcp2_crypto_wolfssl_from_ngtcp2_encryption_level( - ngtcp2_encryption_level encryption_level); + ngtcp2_encryption_level encryption_level); /** * @function @@ -101,6 +101,6 @@ ngtcp2_crypto_wolfssl_configure_client_context(WOLFSSL_CTX *ssl_ctx); #ifdef __cplusplus } -#endif +#endif /* defined(__cplusplus) */ -#endif /* NGTCP2_CRYPTO_WOLFSSL_H */ +#endif /* !defined(NGTCP2_CRYPTO_WOLFSSL_H) */ diff --git a/deps/ngtcp2/ngtcp2/crypto/picotls/picotls.c b/deps/ngtcp2/ngtcp2/crypto/picotls/picotls.c index 35bfb7b2f8fa19..c3f14f5d444bac 100644 --- a/deps/ngtcp2/ngtcp2/crypto/picotls/picotls.c +++ b/deps/ngtcp2/ngtcp2/crypto/picotls/picotls.c @@ -24,7 +24,7 @@ */ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include #include @@ -79,7 +79,7 @@ crypto_cipher_suite_get_aead_max_encryption(ptls_cipher_suite_t *cs) { if (cs->aead == &ptls_openssl_chacha20poly1305) { return NGTCP2_CRYPTO_MAX_ENCRYPTION_CHACHA20_POLY1305; } -#endif /* PTLS_OPENSSL_HAVE_CHACHA20_POLY1305 */ +#endif /* defined(PTLS_OPENSSL_HAVE_CHACHA20_POLY1305) */ return 0; } @@ -95,7 +95,7 @@ crypto_cipher_suite_get_aead_max_decryption_failure(ptls_cipher_suite_t *cs) { if 
(cs->aead == &ptls_openssl_chacha20poly1305) { return NGTCP2_CRYPTO_MAX_DECRYPTION_FAILURE_CHACHA20_POLY1305; } -#endif /* PTLS_OPENSSL_HAVE_CHACHA20_POLY1305 */ +#endif /* defined(PTLS_OPENSSL_HAVE_CHACHA20_POLY1305) */ return 0; } @@ -114,7 +114,7 @@ crypto_cipher_suite_get_hp(ptls_cipher_suite_t *cs) { if (cs->aead == &ptls_openssl_chacha20poly1305) { return &ptls_openssl_chacha20; } -#endif /* PTLS_OPENSSL_HAVE_CHACHA20_POLY1305 */ +#endif /* defined(PTLS_OPENSSL_HAVE_CHACHA20_POLY1305) */ return NULL; } @@ -124,7 +124,7 @@ static int supported_cipher_suite(ptls_cipher_suite_t *cs) { cs->aead == &ptls_openssl_aes256gcm #ifdef PTLS_OPENSSL_HAVE_CHACHA20_POLY1305 || cs->aead == &ptls_openssl_chacha20poly1305 -#endif /* PTLS_OPENSSL_HAVE_CHACHA20_POLY1305 */ +#endif /* defined(PTLS_OPENSSL_HAVE_CHACHA20_POLY1305) */ ; } diff --git a/deps/ngtcp2/ngtcp2/crypto/quictls/quictls.c b/deps/ngtcp2/ngtcp2/crypto/quictls/quictls.c index 330ca687b44666..592e5a86535356 100644 --- a/deps/ngtcp2/ngtcp2/crypto/quictls/quictls.c +++ b/deps/ngtcp2/ngtcp2/crypto/quictls/quictls.c @@ -24,7 +24,7 @@ */ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -339,7 +339,7 @@ static ngtcp2_crypto_ctx *crypto_ctx_cipher_id(ngtcp2_crypto_ctx *ctx, ctx->hp.native_handle = (void *)crypto_cipher_id_get_hp(cipher_id); ctx->max_encryption = crypto_cipher_id_get_aead_max_encryption(cipher_id); ctx->max_decryption_failure = - crypto_cipher_id_get_aead_max_decryption_failure(cipher_id); + crypto_cipher_id_get_aead_max_decryption_failure(cipher_id); return ctx; } @@ -527,14 +527,14 @@ int ngtcp2_crypto_hkdf_extract(uint8_t *dest, const ngtcp2_crypto_md *md, EVP_KDF_CTX *kctx = EVP_KDF_CTX_new(kdf); int mode = EVP_KDF_HKDF_MODE_EXTRACT_ONLY; OSSL_PARAM params[] = { - OSSL_PARAM_construct_int(OSSL_KDF_PARAM_MODE, &mode), - OSSL_PARAM_construct_utf8_string(OSSL_KDF_PARAM_DIGEST, - (char *)EVP_MD_get0_name(prf), 0), - OSSL_PARAM_construct_octet_string(OSSL_KDF_PARAM_KEY, (void *)secret, - secretlen), - OSSL_PARAM_construct_octet_string(OSSL_KDF_PARAM_SALT, (void *)salt, - saltlen), - OSSL_PARAM_construct_end(), + OSSL_PARAM_construct_int(OSSL_KDF_PARAM_MODE, &mode), + OSSL_PARAM_construct_utf8_string(OSSL_KDF_PARAM_DIGEST, + (char *)EVP_MD_get0_name(prf), 0), + OSSL_PARAM_construct_octet_string(OSSL_KDF_PARAM_KEY, (void *)secret, + secretlen), + OSSL_PARAM_construct_octet_string(OSSL_KDF_PARAM_SALT, (void *)salt, + saltlen), + OSSL_PARAM_construct_end(), }; int rv = 0; @@ -584,14 +584,14 @@ int ngtcp2_crypto_hkdf_expand(uint8_t *dest, size_t destlen, EVP_KDF_CTX *kctx = EVP_KDF_CTX_new(kdf); int mode = EVP_KDF_HKDF_MODE_EXPAND_ONLY; OSSL_PARAM params[] = { - OSSL_PARAM_construct_int(OSSL_KDF_PARAM_MODE, &mode), - OSSL_PARAM_construct_utf8_string(OSSL_KDF_PARAM_DIGEST, - (char *)EVP_MD_get0_name(prf), 0), - OSSL_PARAM_construct_octet_string(OSSL_KDF_PARAM_KEY, (void *)secret, - secretlen), - OSSL_PARAM_construct_octet_string(OSSL_KDF_PARAM_INFO, (void *)info, - infolen), - OSSL_PARAM_construct_end(), + OSSL_PARAM_construct_int(OSSL_KDF_PARAM_MODE, &mode), + OSSL_PARAM_construct_utf8_string(OSSL_KDF_PARAM_DIGEST, + (char *)EVP_MD_get0_name(prf), 0), + OSSL_PARAM_construct_octet_string(OSSL_KDF_PARAM_KEY, (void *)secret, + secretlen), + OSSL_PARAM_construct_octet_string(OSSL_KDF_PARAM_INFO, (void *)info, + infolen), + OSSL_PARAM_construct_end(), }; int rv = 0; @@ -639,15 +639,15 @@ int ngtcp2_crypto_hkdf(uint8_t *dest, size_t destlen, EVP_KDF *kdf = crypto_kdf_hkdf(); 
EVP_KDF_CTX *kctx = EVP_KDF_CTX_new(kdf); OSSL_PARAM params[] = { - OSSL_PARAM_construct_utf8_string(OSSL_KDF_PARAM_DIGEST, - (char *)EVP_MD_get0_name(prf), 0), - OSSL_PARAM_construct_octet_string(OSSL_KDF_PARAM_KEY, (void *)secret, - secretlen), - OSSL_PARAM_construct_octet_string(OSSL_KDF_PARAM_SALT, (void *)salt, - saltlen), - OSSL_PARAM_construct_octet_string(OSSL_KDF_PARAM_INFO, (void *)info, - infolen), - OSSL_PARAM_construct_end(), + OSSL_PARAM_construct_utf8_string(OSSL_KDF_PARAM_DIGEST, + (char *)EVP_MD_get0_name(prf), 0), + OSSL_PARAM_construct_octet_string(OSSL_KDF_PARAM_KEY, (void *)secret, + secretlen), + OSSL_PARAM_construct_octet_string(OSSL_KDF_PARAM_SALT, (void *)salt, + saltlen), + OSSL_PARAM_construct_octet_string(OSSL_KDF_PARAM_INFO, (void *)info, + infolen), + OSSL_PARAM_construct_end(), }; int rv = 0; @@ -672,7 +672,7 @@ int ngtcp2_crypto_hkdf(uint8_t *dest, size_t destlen, if (EVP_PKEY_derive_init(pctx) != 1 || EVP_PKEY_CTX_hkdf_mode(pctx, EVP_PKEY_HKDEF_MODE_EXTRACT_AND_EXPAND) != - 1 || + 1 || EVP_PKEY_CTX_set_hkdf_md(pctx, prf) != 1 || EVP_PKEY_CTX_set1_hkdf_salt(pctx, salt, (int)saltlen) != 1 || EVP_PKEY_CTX_set1_hkdf_key(pctx, secret, (int)secretlen) != 1 || @@ -699,9 +699,9 @@ int ngtcp2_crypto_encrypt(uint8_t *dest, const ngtcp2_crypto_aead *aead, int len; #if OPENSSL_VERSION_NUMBER >= 0x30000000L OSSL_PARAM params[] = { - OSSL_PARAM_construct_octet_string(OSSL_CIPHER_PARAM_AEAD_TAG, - dest + plaintextlen, taglen), - OSSL_PARAM_construct_end(), + OSSL_PARAM_construct_octet_string(OSSL_CIPHER_PARAM_AEAD_TAG, + dest + plaintextlen, taglen), + OSSL_PARAM_construct_end(), }; #endif /* OPENSSL_VERSION_NUMBER >= 0x30000000L */ @@ -794,16 +794,16 @@ int ngtcp2_crypto_hp_mask(uint8_t *dest, const ngtcp2_crypto_cipher *hp, } int ngtcp2_crypto_read_write_crypto_data( - ngtcp2_conn *conn, ngtcp2_encryption_level encryption_level, - const uint8_t *data, size_t datalen) { + ngtcp2_conn *conn, ngtcp2_encryption_level encryption_level, + const uint8_t *data, size_t datalen) { SSL *ssl = ngtcp2_conn_get_tls_native_handle(conn); int rv; int err; if (SSL_provide_quic_data( - ssl, - ngtcp2_crypto_quictls_from_ngtcp2_encryption_level(encryption_level), - data, datalen) != 1) { + ssl, + ngtcp2_crypto_quictls_from_ngtcp2_encryption_level(encryption_level), + data, datalen) != 1) { return -1; } @@ -874,7 +874,7 @@ int ngtcp2_crypto_set_local_transport_params(void *tls, const uint8_t *buf, } ngtcp2_encryption_level ngtcp2_crypto_quictls_from_ossl_encryption_level( - OSSL_ENCRYPTION_LEVEL ossl_level) { + OSSL_ENCRYPTION_LEVEL ossl_level) { switch (ossl_level) { case ssl_encryption_initial: return NGTCP2_ENCRYPTION_LEVEL_INITIAL; @@ -892,7 +892,7 @@ ngtcp2_encryption_level ngtcp2_crypto_quictls_from_ossl_encryption_level( OSSL_ENCRYPTION_LEVEL ngtcp2_crypto_quictls_from_ngtcp2_encryption_level( - ngtcp2_encryption_level encryption_level) { + ngtcp2_encryption_level encryption_level) { switch (encryption_level) { case NGTCP2_ENCRYPTION_LEVEL_INITIAL: return ssl_encryption_initial; @@ -934,7 +934,7 @@ static int set_encryption_secrets(SSL *ssl, OSSL_ENCRYPTION_LEVEL ossl_level, ngtcp2_crypto_conn_ref *conn_ref = SSL_get_app_data(ssl); ngtcp2_conn *conn = conn_ref->get_conn(conn_ref); ngtcp2_encryption_level level = - ngtcp2_crypto_quictls_from_ossl_encryption_level(ossl_level); + ngtcp2_crypto_quictls_from_ossl_encryption_level(ossl_level); if (rx_secret && ngtcp2_crypto_derive_and_install_rx_key(conn, NULL, NULL, NULL, level, @@ -956,7 +956,7 @@ static int add_handshake_data(SSL *ssl, 
OSSL_ENCRYPTION_LEVEL ossl_level, ngtcp2_crypto_conn_ref *conn_ref = SSL_get_app_data(ssl); ngtcp2_conn *conn = conn_ref->get_conn(conn_ref); ngtcp2_encryption_level level = - ngtcp2_crypto_quictls_from_ossl_encryption_level(ossl_level); + ngtcp2_crypto_quictls_from_ossl_encryption_level(ossl_level); int rv; rv = ngtcp2_conn_submit_crypto_data(conn, level, data, datalen); @@ -973,8 +973,7 @@ static int flush_flight(SSL *ssl) { return 1; } -static int send_alert(SSL *ssl, enum ssl_encryption_level_t level, - uint8_t alert) { +static int send_alert(SSL *ssl, OSSL_ENCRYPTION_LEVEL level, uint8_t alert) { ngtcp2_crypto_conn_ref *conn_ref = SSL_get_app_data(ssl); ngtcp2_conn *conn = conn_ref->get_conn(conn_ref); (void)level; @@ -985,14 +984,14 @@ static int send_alert(SSL *ssl, enum ssl_encryption_level_t level, } static SSL_QUIC_METHOD quic_method = { - set_encryption_secrets, - add_handshake_data, - flush_flight, - send_alert, + set_encryption_secrets, + add_handshake_data, + flush_flight, + send_alert, #ifdef LIBRESSL_VERSION_NUMBER - NULL, - NULL, -#endif /* LIBRESSL_VERSION_NUMBER */ + NULL, + NULL, +#endif /* defined(LIBRESSL_VERSION_NUMBER) */ }; static void crypto_quictls_configure_context(SSL_CTX *ssl_ctx) { diff --git a/deps/ngtcp2/ngtcp2/crypto/shared.c b/deps/ngtcp2/ngtcp2/crypto/shared.c index 162094a375cb8b..98cd4de7e8097d 100644 --- a/deps/ngtcp2/ngtcp2/crypto/shared.c +++ b/deps/ngtcp2/ngtcp2/crypto/shared.c @@ -27,9 +27,9 @@ #ifdef WIN32 # include # include -#else +#elif defined(HAVE_NETINET_IN_H) # include -#endif +#endif /* defined(HAVE_NETINET_IN_H) */ #include #include @@ -110,13 +110,11 @@ int ngtcp2_crypto_derive_initial_secrets(uint8_t *rx_secret, uint8_t *tx_secret, } if (ngtcp2_crypto_hkdf_expand_label( - client_secret, NGTCP2_CRYPTO_INITIAL_SECRETLEN, &ctx.md, - initial_secret, NGTCP2_CRYPTO_INITIAL_SECRETLEN, CLABEL, - sizeof(CLABEL) - 1) != 0 || + client_secret, NGTCP2_CRYPTO_INITIAL_SECRETLEN, &ctx.md, initial_secret, + NGTCP2_CRYPTO_INITIAL_SECRETLEN, CLABEL, sizeof(CLABEL) - 1) != 0 || ngtcp2_crypto_hkdf_expand_label( - server_secret, NGTCP2_CRYPTO_INITIAL_SECRETLEN, &ctx.md, - initial_secret, NGTCP2_CRYPTO_INITIAL_SECRETLEN, SLABEL, - sizeof(SLABEL) - 1) != 0) { + server_secret, NGTCP2_CRYPTO_INITIAL_SECRETLEN, &ctx.md, initial_secret, + NGTCP2_CRYPTO_INITIAL_SECRETLEN, SLABEL, sizeof(SLABEL) - 1) != 0) { return -1; } @@ -125,13 +123,13 @@ int ngtcp2_crypto_derive_initial_secrets(uint8_t *rx_secret, uint8_t *tx_secret, size_t ngtcp2_crypto_packet_protection_ivlen(const ngtcp2_crypto_aead *aead) { size_t noncelen = ngtcp2_crypto_aead_noncelen(aead); - return ngtcp2_max(8, noncelen); + return ngtcp2_max_size(8, noncelen); } int ngtcp2_crypto_derive_packet_protection_key( - uint8_t *key, uint8_t *iv, uint8_t *hp_key, uint32_t version, - const ngtcp2_crypto_aead *aead, const ngtcp2_crypto_md *md, - const uint8_t *secret, size_t secretlen) { + uint8_t *key, uint8_t *iv, uint8_t *hp_key, uint32_t version, + const ngtcp2_crypto_aead *aead, const ngtcp2_crypto_md *md, + const uint8_t *secret, size_t secretlen) { static const uint8_t KEY_LABEL_V1[] = "quic key"; static const uint8_t IV_LABEL_V1[] = "quic iv"; static const uint8_t HP_KEY_LABEL_V1[] = "quic hp"; @@ -305,8 +303,8 @@ int ngtcp2_crypto_derive_and_install_rx_key(ngtcp2_conn *conn, uint8_t *key, } break; case NGTCP2_ENCRYPTION_LEVEL_HANDSHAKE: - rv = ngtcp2_conn_install_rx_handshake_key(conn, &aead_ctx, iv, ivlen, - &hp_ctx); + rv = + ngtcp2_conn_install_rx_handshake_key(conn, &aead_ctx, iv, ivlen, &hp_ctx); if 
(rv != 0) { goto fail; } @@ -455,8 +453,8 @@ int ngtcp2_crypto_derive_and_install_tx_key(ngtcp2_conn *conn, uint8_t *key, } break; case NGTCP2_ENCRYPTION_LEVEL_HANDSHAKE: - rv = ngtcp2_conn_install_tx_handshake_key(conn, &aead_ctx, iv, ivlen, - &hp_ctx); + rv = + ngtcp2_conn_install_tx_handshake_key(conn, &aead_ctx, iv, ivlen, &hp_ctx); if (rv != 0) { goto fail; } @@ -489,10 +487,10 @@ int ngtcp2_crypto_derive_and_install_tx_key(ngtcp2_conn *conn, uint8_t *key, } int ngtcp2_crypto_derive_and_install_initial_key( - ngtcp2_conn *conn, uint8_t *rx_secret, uint8_t *tx_secret, - uint8_t *initial_secret, uint8_t *rx_key, uint8_t *rx_iv, - uint8_t *rx_hp_key, uint8_t *tx_key, uint8_t *tx_iv, uint8_t *tx_hp_key, - uint32_t version, const ngtcp2_cid *client_dcid) { + ngtcp2_conn *conn, uint8_t *rx_secret, uint8_t *tx_secret, + uint8_t *initial_secret, uint8_t *rx_key, uint8_t *rx_iv, uint8_t *rx_hp_key, + uint8_t *tx_key, uint8_t *tx_iv, uint8_t *tx_hp_key, uint32_t version, + const ngtcp2_cid *client_dcid) { uint8_t rx_secretbuf[NGTCP2_CRYPTO_INITIAL_SECRETLEN]; uint8_t tx_secretbuf[NGTCP2_CRYPTO_INITIAL_SECRETLEN]; uint8_t initial_secretbuf[NGTCP2_CRYPTO_INITIAL_SECRETLEN]; @@ -548,21 +546,20 @@ int ngtcp2_crypto_derive_and_install_initial_key( ngtcp2_conn_set_initial_crypto_ctx(conn, &ctx); if (ngtcp2_crypto_derive_initial_secrets( - rx_secret, tx_secret, initial_secret, version, client_dcid, - server ? NGTCP2_CRYPTO_SIDE_SERVER : NGTCP2_CRYPTO_SIDE_CLIENT) != - 0) { + rx_secret, tx_secret, initial_secret, version, client_dcid, + server ? NGTCP2_CRYPTO_SIDE_SERVER : NGTCP2_CRYPTO_SIDE_CLIENT) != 0) { return -1; } if (ngtcp2_crypto_derive_packet_protection_key( - rx_key, rx_iv, rx_hp_key, version, &ctx.aead, &ctx.md, rx_secret, - NGTCP2_CRYPTO_INITIAL_SECRETLEN) != 0) { + rx_key, rx_iv, rx_hp_key, version, &ctx.aead, &ctx.md, rx_secret, + NGTCP2_CRYPTO_INITIAL_SECRETLEN) != 0) { return -1; } if (ngtcp2_crypto_derive_packet_protection_key( - tx_key, tx_iv, tx_hp_key, version, &ctx.aead, &ctx.md, tx_secret, - NGTCP2_CRYPTO_INITIAL_SECRETLEN) != 0) { + tx_key, tx_iv, tx_hp_key, version, &ctx.aead, &ctx.md, tx_secret, + NGTCP2_CRYPTO_INITIAL_SECRETLEN) != 0) { return -1; } @@ -631,10 +628,10 @@ int ngtcp2_crypto_derive_and_install_initial_key( } int ngtcp2_crypto_derive_and_install_vneg_initial_key( - ngtcp2_conn *conn, uint8_t *rx_secret, uint8_t *tx_secret, - uint8_t *initial_secret, uint8_t *rx_key, uint8_t *rx_iv, - uint8_t *rx_hp_key, uint8_t *tx_key, uint8_t *tx_iv, uint8_t *tx_hp_key, - uint32_t version, const ngtcp2_cid *client_dcid) { + ngtcp2_conn *conn, uint8_t *rx_secret, uint8_t *tx_secret, + uint8_t *initial_secret, uint8_t *rx_key, uint8_t *rx_iv, uint8_t *rx_hp_key, + uint8_t *tx_key, uint8_t *tx_iv, uint8_t *tx_hp_key, uint32_t version, + const ngtcp2_cid *client_dcid) { uint8_t rx_secretbuf[NGTCP2_CRYPTO_INITIAL_SECRETLEN]; uint8_t tx_secretbuf[NGTCP2_CRYPTO_INITIAL_SECRETLEN]; uint8_t initial_secretbuf[NGTCP2_CRYPTO_INITIAL_SECRETLEN]; @@ -682,21 +679,20 @@ int ngtcp2_crypto_derive_and_install_vneg_initial_key( } if (ngtcp2_crypto_derive_initial_secrets( - rx_secret, tx_secret, initial_secret, version, client_dcid, - server ? NGTCP2_CRYPTO_SIDE_SERVER : NGTCP2_CRYPTO_SIDE_CLIENT) != - 0) { + rx_secret, tx_secret, initial_secret, version, client_dcid, + server ? 
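/* Informative aside: the ngtcp2_crypto_hkdf_expand_label() calls
   earlier in this file implement the RFC 9001 Initial key schedule,
   which both endpoints (and any on-path observer) can compute from the
   client's Destination Connection ID alone:

     initial_secret = HKDF-Extract(initial_salt, client_dcid)
     client_secret  = HKDF-Expand-Label(initial_secret, "client in", "", Hash.length)
     server_secret  = HKDF-Expand-Label(initial_secret, "server in", "", Hash.length)

   Initial packets therefore provide no confidentiality, only integrity
   against accidental corruption.
 */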
NGTCP2_CRYPTO_SIDE_SERVER : NGTCP2_CRYPTO_SIDE_CLIENT) != 0) { return -1; } if (ngtcp2_crypto_derive_packet_protection_key( - rx_key, rx_iv, rx_hp_key, version, &ctx->aead, &ctx->md, rx_secret, - NGTCP2_CRYPTO_INITIAL_SECRETLEN) != 0) { + rx_key, rx_iv, rx_hp_key, version, &ctx->aead, &ctx->md, rx_secret, + NGTCP2_CRYPTO_INITIAL_SECRETLEN) != 0) { return -1; } if (ngtcp2_crypto_derive_packet_protection_key( - tx_key, tx_iv, tx_hp_key, version, &ctx->aead, &ctx->md, tx_secret, - NGTCP2_CRYPTO_INITIAL_SECRETLEN) != 0) { + tx_key, tx_iv, tx_hp_key, version, &ctx->aead, &ctx->md, tx_secret, + NGTCP2_CRYPTO_INITIAL_SECRETLEN) != 0) { return -1; } @@ -721,8 +717,8 @@ int ngtcp2_crypto_derive_and_install_vneg_initial_key( } rv = ngtcp2_conn_install_vneg_initial_key( - conn, version, &rx_aead_ctx, rx_iv, &rx_hp_ctx, &tx_aead_ctx, tx_iv, - &tx_hp_ctx, NGTCP2_CRYPTO_INITIAL_IVLEN); + conn, version, &rx_aead_ctx, rx_iv, &rx_hp_ctx, &tx_aead_ctx, tx_iv, + &tx_hp_ctx, NGTCP2_CRYPTO_INITIAL_IVLEN); if (rv != 0) { goto fail; } @@ -739,11 +735,11 @@ int ngtcp2_crypto_derive_and_install_vneg_initial_key( } int ngtcp2_crypto_update_key( - ngtcp2_conn *conn, uint8_t *rx_secret, uint8_t *tx_secret, - ngtcp2_crypto_aead_ctx *rx_aead_ctx, uint8_t *rx_key, uint8_t *rx_iv, - ngtcp2_crypto_aead_ctx *tx_aead_ctx, uint8_t *tx_key, uint8_t *tx_iv, - const uint8_t *current_rx_secret, const uint8_t *current_tx_secret, - size_t secretlen) { + ngtcp2_conn *conn, uint8_t *rx_secret, uint8_t *tx_secret, + ngtcp2_crypto_aead_ctx *rx_aead_ctx, uint8_t *rx_key, uint8_t *rx_iv, + ngtcp2_crypto_aead_ctx *tx_aead_ctx, uint8_t *tx_key, uint8_t *tx_iv, + const uint8_t *current_rx_secret, const uint8_t *current_tx_secret, + size_t secretlen) { const ngtcp2_crypto_ctx *ctx = ngtcp2_conn_get_crypto_ctx(conn); const ngtcp2_crypto_aead *aead = &ctx->aead; const ngtcp2_crypto_md *md = &ctx->md; @@ -756,7 +752,7 @@ int ngtcp2_crypto_update_key( } if (ngtcp2_crypto_derive_packet_protection_key( - rx_key, rx_iv, NULL, version, aead, md, rx_secret, secretlen) != 0) { + rx_key, rx_iv, NULL, version, aead, md, rx_secret, secretlen) != 0) { return -1; } @@ -766,7 +762,7 @@ int ngtcp2_crypto_update_key( } if (ngtcp2_crypto_derive_packet_protection_key( - tx_key, tx_iv, NULL, version, aead, md, tx_secret, secretlen) != 0) { + tx_key, tx_iv, NULL, version, aead, md, tx_secret, secretlen) != 0) { return -1; } @@ -819,20 +815,19 @@ int ngtcp2_crypto_hp_mask_cb(uint8_t *dest, const ngtcp2_crypto_cipher *hp, } int ngtcp2_crypto_update_key_cb( - ngtcp2_conn *conn, uint8_t *rx_secret, uint8_t *tx_secret, - ngtcp2_crypto_aead_ctx *rx_aead_ctx, uint8_t *rx_iv, - ngtcp2_crypto_aead_ctx *tx_aead_ctx, uint8_t *tx_iv, - const uint8_t *current_rx_secret, const uint8_t *current_tx_secret, - size_t secretlen, void *user_data) { + ngtcp2_conn *conn, uint8_t *rx_secret, uint8_t *tx_secret, + ngtcp2_crypto_aead_ctx *rx_aead_ctx, uint8_t *rx_iv, + ngtcp2_crypto_aead_ctx *tx_aead_ctx, uint8_t *tx_iv, + const uint8_t *current_rx_secret, const uint8_t *current_tx_secret, + size_t secretlen, void *user_data) { uint8_t rx_key[64]; uint8_t tx_key[64]; (void)conn; (void)user_data; - if (ngtcp2_crypto_update_key(conn, rx_secret, tx_secret, rx_aead_ctx, rx_key, - rx_iv, tx_aead_ctx, tx_key, tx_iv, - current_rx_secret, current_tx_secret, - secretlen) != 0) { + if (ngtcp2_crypto_update_key( + conn, rx_secret, tx_secret, rx_aead_ctx, rx_key, rx_iv, tx_aead_ctx, + tx_key, tx_iv, current_rx_secret, current_tx_secret, secretlen) != 0) { return NGTCP2_ERR_CALLBACK_FAILURE; } 
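/* Informative aside: this callback is meant to be installed verbatim,
   giving an application RFC 9001 key updates without re-implementing
   the KDF itself. Sketch of the wiring, assuming the standard
   ngtcp2_callbacks layout:

     ngtcp2_callbacks callbacks = {0};

     callbacks.update_key = ngtcp2_crypto_update_key_cb;
 */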
return 0; @@ -920,13 +915,14 @@ static size_t crypto_generate_retry_token_aad(uint8_t *dest, uint32_t version, static const uint8_t retry_token_info_prefix[] = "retry_token"; ngtcp2_ssize ngtcp2_crypto_generate_retry_token( - uint8_t *token, const uint8_t *secret, size_t secretlen, uint32_t version, - const ngtcp2_sockaddr *remote_addr, ngtcp2_socklen remote_addrlen, - const ngtcp2_cid *retry_scid, const ngtcp2_cid *odcid, ngtcp2_tstamp ts) { - uint8_t plaintext[NGTCP2_CRYPTO_MAX_RETRY_TOKENLEN]; + uint8_t *token, const uint8_t *secret, size_t secretlen, uint32_t version, + const ngtcp2_sockaddr *remote_addr, ngtcp2_socklen remote_addrlen, + const ngtcp2_cid *retry_scid, const ngtcp2_cid *odcid, ngtcp2_tstamp ts) { + uint8_t + plaintext[/* cid len = */ 1 + NGTCP2_MAX_CIDLEN + sizeof(ngtcp2_tstamp)]; uint8_t rand_data[NGTCP2_CRYPTO_TOKEN_RAND_DATALEN]; - uint8_t key[32]; - uint8_t iv[32]; + uint8_t key[16]; + uint8_t iv[12]; size_t keylen; size_t ivlen; ngtcp2_crypto_aead aead; @@ -934,7 +930,7 @@ ngtcp2_ssize ngtcp2_crypto_generate_retry_token( ngtcp2_crypto_aead_ctx aead_ctx; size_t plaintextlen; uint8_t - aad[sizeof(version) + sizeof(ngtcp2_sockaddr_union) + NGTCP2_MAX_CIDLEN]; + aad[sizeof(version) + sizeof(ngtcp2_sockaddr_union) + NGTCP2_MAX_CIDLEN]; size_t aadlen; uint8_t *p = plaintext; ngtcp2_tstamp ts_be = ngtcp2_htonl64(ts); @@ -962,8 +958,8 @@ ngtcp2_ssize ngtcp2_crypto_generate_retry_token( keylen = ngtcp2_crypto_aead_keylen(&aead); ivlen = ngtcp2_crypto_aead_noncelen(&aead); - assert(sizeof(key) >= keylen); - assert(sizeof(iv) >= ivlen); + assert(sizeof(key) == keylen); + assert(sizeof(iv) == ivlen); if (crypto_derive_token_key(key, keylen, iv, ivlen, &md, secret, secretlen, rand_data, sizeof(rand_data), @@ -999,21 +995,21 @@ ngtcp2_ssize ngtcp2_crypto_generate_retry_token( } int ngtcp2_crypto_verify_retry_token( - ngtcp2_cid *odcid, const uint8_t *token, size_t tokenlen, - const uint8_t *secret, size_t secretlen, uint32_t version, - const ngtcp2_sockaddr *remote_addr, ngtcp2_socklen remote_addrlen, - const ngtcp2_cid *dcid, ngtcp2_duration timeout, ngtcp2_tstamp ts) { + ngtcp2_cid *odcid, const uint8_t *token, size_t tokenlen, + const uint8_t *secret, size_t secretlen, uint32_t version, + const ngtcp2_sockaddr *remote_addr, ngtcp2_socklen remote_addrlen, + const ngtcp2_cid *dcid, ngtcp2_duration timeout, ngtcp2_tstamp ts) { uint8_t - plaintext[/* cid len = */ 1 + NGTCP2_MAX_CIDLEN + sizeof(ngtcp2_tstamp)]; - uint8_t key[32]; - uint8_t iv[32]; + plaintext[/* cid len = */ 1 + NGTCP2_MAX_CIDLEN + sizeof(ngtcp2_tstamp)]; + uint8_t key[16]; + uint8_t iv[12]; size_t keylen; size_t ivlen; ngtcp2_crypto_aead_ctx aead_ctx; ngtcp2_crypto_aead aead; ngtcp2_crypto_md md; uint8_t - aad[sizeof(version) + sizeof(ngtcp2_sockaddr_union) + NGTCP2_MAX_CIDLEN]; + aad[sizeof(version) + sizeof(ngtcp2_sockaddr_union) + NGTCP2_MAX_CIDLEN]; size_t aadlen; const uint8_t *rand_data; const uint8_t *ciphertext; @@ -1039,6 +1035,9 @@ int ngtcp2_crypto_verify_retry_token( keylen = ngtcp2_crypto_aead_keylen(&aead); ivlen = ngtcp2_crypto_aead_noncelen(&aead); + assert(sizeof(key) == keylen); + assert(sizeof(iv) == ivlen); + if (crypto_derive_token_key(key, keylen, iv, ivlen, &md, secret, secretlen, rand_data, NGTCP2_CRYPTO_TOKEN_RAND_DATALEN, retry_token_info_prefix, @@ -1081,6 +1080,206 @@ int ngtcp2_crypto_verify_retry_token( return 0; } +static size_t crypto_generate_retry_token_aad2(uint8_t *dest, uint32_t version, + const ngtcp2_cid *retry_scid) { + uint8_t *p = dest; + + version = ngtcp2_htonl(version); 
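/* Informative aside: unlike the v1 retry token earlier in this file,
   token2 moves the client address out of the additional authenticated
   data and into the encrypted plaintext (sockaddr_union || cid length
   || odcid || timestamp), so the AAD assembled here shrinks to
   version || retry_scid. The resulting wire layout, as implied by the
   buffer arithmetic in the functions below:

     token2 = MAGIC_RETRY2 (1 byte)
              || AEAD(plaintext) including the tag
              || rand_data (NGTCP2_CRYPTO_TOKEN_RAND_DATALEN bytes)
 */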
+ memcpy(p, &version, sizeof(version)); + p += sizeof(version); + memcpy(p, retry_scid->data, retry_scid->datalen); + p += retry_scid->datalen; + + return (size_t)(p - dest); +} + +static const uint8_t retry_token_info_prefix2[] = "retry_token2"; + +ngtcp2_ssize ngtcp2_crypto_generate_retry_token2( + uint8_t *token, const uint8_t *secret, size_t secretlen, uint32_t version, + const ngtcp2_sockaddr *remote_addr, ngtcp2_socklen remote_addrlen, + const ngtcp2_cid *retry_scid, const ngtcp2_cid *odcid, ngtcp2_tstamp ts) { + uint8_t plaintext[sizeof(ngtcp2_sockaddr_union) + /* cid len = */ 1 + + NGTCP2_MAX_CIDLEN + sizeof(ngtcp2_tstamp)]; + uint8_t rand_data[NGTCP2_CRYPTO_TOKEN_RAND_DATALEN]; + uint8_t key[16]; + uint8_t iv[12]; + size_t keylen; + size_t ivlen; + ngtcp2_crypto_aead aead; + ngtcp2_crypto_md md; + ngtcp2_crypto_aead_ctx aead_ctx; + uint8_t aad[sizeof(version) + NGTCP2_MAX_CIDLEN]; + size_t aadlen; + uint8_t *p = plaintext; + ngtcp2_tstamp ts_be = ngtcp2_htonl64(ts); + int rv; + + assert((size_t)remote_addrlen <= sizeof(ngtcp2_sockaddr_union)); + + memset(plaintext, 0, sizeof(plaintext)); + + memcpy(p, remote_addr, (size_t)remote_addrlen); + p += sizeof(ngtcp2_sockaddr_union); + *p++ = (uint8_t)odcid->datalen; + memcpy(p, odcid->data, odcid->datalen); + p += NGTCP2_MAX_CIDLEN; + memcpy(p, &ts_be, sizeof(ts_be)); + + assert((size_t)(p + sizeof(ts_be) - plaintext) == sizeof(plaintext)); + + if (ngtcp2_crypto_random(rand_data, sizeof(rand_data)) != 0) { + return -1; + } + + ngtcp2_crypto_aead_aes_128_gcm(&aead); + ngtcp2_crypto_md_sha256(&md); + + keylen = ngtcp2_crypto_aead_keylen(&aead); + ivlen = ngtcp2_crypto_aead_noncelen(&aead); + + assert(sizeof(key) == keylen); + assert(sizeof(iv) == ivlen); + + if (crypto_derive_token_key(key, keylen, iv, ivlen, &md, secret, secretlen, + rand_data, sizeof(rand_data), + retry_token_info_prefix2, + sizeof(retry_token_info_prefix2) - 1) != 0) { + return -1; + } + + aadlen = crypto_generate_retry_token_aad2(aad, version, retry_scid); + + p = token; + *p++ = NGTCP2_CRYPTO_TOKEN_MAGIC_RETRY2; + + if (ngtcp2_crypto_aead_ctx_encrypt_init(&aead_ctx, &aead, key, ivlen) != 0) { + return -1; + } + + rv = ngtcp2_crypto_encrypt(p, &aead, &aead_ctx, plaintext, sizeof(plaintext), + iv, ivlen, aad, aadlen); + + ngtcp2_crypto_aead_ctx_free(&aead_ctx); + + if (rv != 0) { + return -1; + } + + p += sizeof(plaintext) + aead.max_overhead; + memcpy(p, rand_data, sizeof(rand_data)); + p += sizeof(rand_data); + + return p - token; +} + +int ngtcp2_crypto_verify_retry_token2( + ngtcp2_cid *odcid, const uint8_t *token, size_t tokenlen, + const uint8_t *secret, size_t secretlen, uint32_t version, + const ngtcp2_sockaddr *remote_addr, ngtcp2_socklen remote_addrlen, + const ngtcp2_cid *dcid, ngtcp2_duration timeout, ngtcp2_tstamp ts) { + uint8_t plaintext[sizeof(ngtcp2_sockaddr_union) + /* cid len = */ 1 + + NGTCP2_MAX_CIDLEN + sizeof(ngtcp2_tstamp)]; + uint8_t key[16]; + uint8_t iv[12]; + size_t keylen; + size_t ivlen; + ngtcp2_crypto_aead_ctx aead_ctx; + ngtcp2_crypto_aead aead; + ngtcp2_crypto_md md; + uint8_t aad[sizeof(version) + NGTCP2_MAX_CIDLEN]; + size_t aadlen; + const uint8_t *rand_data; + const uint8_t *ciphertext; + size_t ciphertextlen; + size_t cil; + int rv; + ngtcp2_tstamp gen_ts; + ngtcp2_sockaddr_union addr; + size_t addrlen; + uint8_t *p; + + assert((size_t)remote_addrlen <= sizeof(ngtcp2_sockaddr_union)); + + if (tokenlen != NGTCP2_CRYPTO_MAX_RETRY_TOKENLEN2 || + token[0] != NGTCP2_CRYPTO_TOKEN_MAGIC_RETRY2) { + return 
NGTCP2_CRYPTO_ERR_UNREADABLE_TOKEN; + } + + rand_data = token + tokenlen - NGTCP2_CRYPTO_TOKEN_RAND_DATALEN; + ciphertext = token + 1; + ciphertextlen = (size_t)(rand_data - ciphertext); + + ngtcp2_crypto_aead_aes_128_gcm(&aead); + ngtcp2_crypto_md_sha256(&md); + + keylen = ngtcp2_crypto_aead_keylen(&aead); + ivlen = ngtcp2_crypto_aead_noncelen(&aead); + + assert(sizeof(key) == keylen); + assert(sizeof(iv) == ivlen); + + if (crypto_derive_token_key(key, keylen, iv, ivlen, &md, secret, secretlen, + rand_data, NGTCP2_CRYPTO_TOKEN_RAND_DATALEN, + retry_token_info_prefix2, + sizeof(retry_token_info_prefix2) - 1) != 0) { + return NGTCP2_CRYPTO_ERR_INTERNAL; + } + + aadlen = crypto_generate_retry_token_aad2(aad, version, dcid); + + if (ngtcp2_crypto_aead_ctx_decrypt_init(&aead_ctx, &aead, key, ivlen) != 0) { + return NGTCP2_CRYPTO_ERR_INTERNAL; + } + + rv = ngtcp2_crypto_decrypt(plaintext, &aead, &aead_ctx, ciphertext, + ciphertextlen, iv, ivlen, aad, aadlen); + + ngtcp2_crypto_aead_ctx_free(&aead_ctx); + + if (rv != 0) { + return NGTCP2_CRYPTO_ERR_UNREADABLE_TOKEN; + } + + p = plaintext; + + memcpy(&addr, p, sizeof(addr)); + + switch (addr.sa.sa_family) { + case NGTCP2_AF_INET: + addrlen = sizeof(ngtcp2_sockaddr_in); + break; + case NGTCP2_AF_INET6: + addrlen = sizeof(ngtcp2_sockaddr_in6); + break; + default: + return NGTCP2_CRYPTO_ERR_VERIFY_TOKEN; + } + + if (addrlen != (size_t)remote_addrlen || + memcmp(&addr, remote_addr, addrlen) != 0) { + return NGTCP2_CRYPTO_ERR_VERIFY_TOKEN; + } + + p += sizeof(addr); + cil = *p++; + + if (cil != 0 && (cil < NGTCP2_MIN_CIDLEN || cil > NGTCP2_MAX_CIDLEN)) { + return NGTCP2_CRYPTO_ERR_VERIFY_TOKEN; + } + + memcpy(&gen_ts, p + NGTCP2_MAX_CIDLEN, sizeof(gen_ts)); + + gen_ts = ngtcp2_ntohl64(gen_ts); + if (gen_ts + timeout <= ts) { + return NGTCP2_CRYPTO_ERR_VERIFY_TOKEN; + } + + ngtcp2_cid_init(odcid, p, cil); + + return 0; +} + static size_t crypto_generate_regular_token_aad(uint8_t *dest, const ngtcp2_sockaddr *sa) { const uint8_t *addr; @@ -1093,7 +1292,7 @@ static size_t crypto_generate_regular_token_aad(uint8_t *dest, break; case NGTCP2_AF_INET6: addr = - (const uint8_t *)&((const ngtcp2_sockaddr_in6 *)(void *)sa)->sin6_addr; + (const uint8_t *)&((const ngtcp2_sockaddr_in6 *)(void *)sa)->sin6_addr; addrlen = sizeof(((const ngtcp2_sockaddr_in6 *)(void *)sa)->sin6_addr); break; default: @@ -1109,13 +1308,13 @@ static size_t crypto_generate_regular_token_aad(uint8_t *dest, static const uint8_t regular_token_info_prefix[] = "regular_token"; ngtcp2_ssize ngtcp2_crypto_generate_regular_token( - uint8_t *token, const uint8_t *secret, size_t secretlen, - const ngtcp2_sockaddr *remote_addr, ngtcp2_socklen remote_addrlen, - ngtcp2_tstamp ts) { + uint8_t *token, const uint8_t *secret, size_t secretlen, + const ngtcp2_sockaddr *remote_addr, ngtcp2_socklen remote_addrlen, + ngtcp2_tstamp ts) { uint8_t plaintext[sizeof(ngtcp2_tstamp)]; uint8_t rand_data[NGTCP2_CRYPTO_TOKEN_RAND_DATALEN]; - uint8_t key[32]; - uint8_t iv[32]; + uint8_t key[16]; + uint8_t iv[12]; size_t keylen; size_t ivlen; ngtcp2_crypto_aead aead; @@ -1144,8 +1343,8 @@ ngtcp2_ssize ngtcp2_crypto_generate_regular_token( keylen = ngtcp2_crypto_aead_keylen(&aead); ivlen = ngtcp2_crypto_aead_noncelen(&aead); - assert(sizeof(key) >= keylen); - assert(sizeof(iv) >= ivlen); + assert(sizeof(key) == keylen); + assert(sizeof(iv) == ivlen); if (crypto_derive_token_key(key, keylen, iv, ivlen, &md, secret, secretlen, rand_data, sizeof(rand_data), @@ -1186,8 +1385,8 @@ int ngtcp2_crypto_verify_regular_token(const 
uint8_t *token, size_t tokenlen, ngtcp2_duration timeout, ngtcp2_tstamp ts) { uint8_t plaintext[sizeof(ngtcp2_tstamp)]; - uint8_t key[32]; - uint8_t iv[32]; + uint8_t key[16]; + uint8_t iv[12]; size_t keylen; size_t ivlen; ngtcp2_crypto_aead_ctx aead_ctx; @@ -1217,6 +1416,9 @@ int ngtcp2_crypto_verify_regular_token(const uint8_t *token, size_t tokenlen, keylen = ngtcp2_crypto_aead_keylen(&aead); ivlen = ngtcp2_crypto_aead_noncelen(&aead); + assert(sizeof(key) == keylen); + assert(sizeof(iv) == ivlen); + if (crypto_derive_token_key(key, keylen, iv, ivlen, &md, secret, secretlen, rand_data, NGTCP2_CRYPTO_TOKEN_RAND_DATALEN, regular_token_info_prefix, @@ -1250,9 +1452,9 @@ int ngtcp2_crypto_verify_regular_token(const uint8_t *token, size_t tokenlen, } ngtcp2_ssize ngtcp2_crypto_write_connection_close( - uint8_t *dest, size_t destlen, uint32_t version, const ngtcp2_cid *dcid, - const ngtcp2_cid *scid, uint64_t error_code, const uint8_t *reason, - size_t reasonlen) { + uint8_t *dest, size_t destlen, uint32_t version, const ngtcp2_cid *dcid, + const ngtcp2_cid *scid, uint64_t error_code, const uint8_t *reason, + size_t reasonlen) { uint8_t rx_secret[NGTCP2_CRYPTO_INITIAL_SECRETLEN]; uint8_t tx_secret[NGTCP2_CRYPTO_INITIAL_SECRETLEN]; uint8_t initial_secret[NGTCP2_CRYPTO_INITIAL_SECRETLEN]; @@ -1273,8 +1475,8 @@ ngtcp2_ssize ngtcp2_crypto_write_connection_close( } if (ngtcp2_crypto_derive_packet_protection_key( - tx_key, tx_iv, tx_hp_key, version, &ctx.aead, &ctx.md, tx_secret, - NGTCP2_CRYPTO_INITIAL_SECRETLEN) != 0) { + tx_key, tx_iv, tx_hp_key, version, &ctx.aead, &ctx.md, tx_secret, + NGTCP2_CRYPTO_INITIAL_SECRETLEN) != 0) { return -1; } @@ -1290,9 +1492,9 @@ ngtcp2_ssize ngtcp2_crypto_write_connection_close( } spktlen = ngtcp2_pkt_write_connection_close( - dest, destlen, version, dcid, scid, error_code, reason, reasonlen, - ngtcp2_crypto_encrypt_cb, &ctx.aead, &aead_ctx, tx_iv, - ngtcp2_crypto_hp_mask_cb, &ctx.hp, &hp_ctx); + dest, destlen, version, dcid, scid, error_code, reason, reasonlen, + ngtcp2_crypto_encrypt_cb, &ctx.aead, &aead_ctx, tx_iv, + ngtcp2_crypto_hp_mask_cb, &ctx.hp, &hp_ctx); if (spktlen < 0) { spktlen = -1; } @@ -1352,8 +1554,8 @@ int ngtcp2_crypto_client_initial_cb(ngtcp2_conn *conn, void *user_data) { (void)user_data; if (ngtcp2_crypto_derive_and_install_initial_key( - conn, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, - ngtcp2_conn_get_client_chosen_version(conn), dcid) != 0) { + conn, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + ngtcp2_conn_get_client_chosen_version(conn), dcid) != 0) { return NGTCP2_ERR_CALLBACK_FAILURE; } @@ -1362,7 +1564,7 @@ int ngtcp2_crypto_client_initial_cb(ngtcp2_conn *conn, void *user_data) { } if (ngtcp2_crypto_read_write_crypto_data( - conn, NGTCP2_ENCRYPTION_LEVEL_INITIAL, NULL, 0) != 0) { + conn, NGTCP2_ENCRYPTION_LEVEL_INITIAL, NULL, 0) != 0) { return NGTCP2_ERR_CALLBACK_FAILURE; } @@ -1374,8 +1576,8 @@ int ngtcp2_crypto_recv_retry_cb(ngtcp2_conn *conn, const ngtcp2_pkt_hd *hd, (void)user_data; if (ngtcp2_crypto_derive_and_install_initial_key( - conn, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, - ngtcp2_conn_get_client_chosen_version(conn), &hd->scid) != 0) { + conn, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + ngtcp2_conn_get_client_chosen_version(conn), &hd->scid) != 0) { return NGTCP2_ERR_CALLBACK_FAILURE; } @@ -1388,8 +1590,8 @@ int ngtcp2_crypto_recv_client_initial_cb(ngtcp2_conn *conn, (void)user_data; if (ngtcp2_crypto_derive_and_install_initial_key( - conn, NULL, NULL, NULL, NULL, NULL, 
NULL, NULL, NULL, NULL, - ngtcp2_conn_get_client_chosen_version(conn), dcid) != 0) { + conn, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + ngtcp2_conn_get_client_chosen_version(conn), dcid) != 0) { return NGTCP2_ERR_CALLBACK_FAILURE; } @@ -1402,8 +1604,8 @@ int ngtcp2_crypto_version_negotiation_cb(ngtcp2_conn *conn, uint32_t version, (void)user_data; if (ngtcp2_crypto_derive_and_install_vneg_initial_key( - conn, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, version, - client_dcid) != 0) { + conn, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, version, + client_dcid) != 0) { return NGTCP2_ERR_CALLBACK_FAILURE; } @@ -1420,7 +1622,7 @@ void ngtcp2_crypto_delete_crypto_aead_ctx_cb(ngtcp2_conn *conn, } void ngtcp2_crypto_delete_crypto_cipher_ctx_cb( - ngtcp2_conn *conn, ngtcp2_crypto_cipher_ctx *cipher_ctx, void *user_data) { + ngtcp2_conn *conn, ngtcp2_crypto_cipher_ctx *cipher_ctx, void *user_data) { (void)conn; (void)user_data; diff --git a/deps/ngtcp2/ngtcp2/crypto/shared.h b/deps/ngtcp2/ngtcp2/crypto/shared.h index d69fd21212d7d2..34158d3d02dbc0 100644 --- a/deps/ngtcp2/ngtcp2/crypto/shared.h +++ b/deps/ngtcp2/ngtcp2/crypto/shared.h @@ -22,12 +22,12 @@ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ -#ifndef NGTCP2_SHARED_H -#define NGTCP2_SHARED_H +#ifndef SHARED_H +#define SHARED_H #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -286,10 +286,10 @@ int ngtcp2_crypto_set_remote_transport_params(ngtcp2_conn *conn, void *tls); * This function returns 0 if it succeeds, or -1. */ int ngtcp2_crypto_derive_and_install_initial_key( - ngtcp2_conn *conn, uint8_t *rx_secret, uint8_t *tx_secret, - uint8_t *initial_secret, uint8_t *rx_key, uint8_t *rx_iv, uint8_t *rx_hp, - uint8_t *tx_key, uint8_t *tx_iv, uint8_t *tx_hp, uint32_t version, - const ngtcp2_cid *client_dcid); + ngtcp2_conn *conn, uint8_t *rx_secret, uint8_t *tx_secret, + uint8_t *initial_secret, uint8_t *rx_key, uint8_t *rx_iv, uint8_t *rx_hp, + uint8_t *tx_key, uint8_t *tx_iv, uint8_t *tx_hp, uint32_t version, + const ngtcp2_cid *client_dcid); /** * @function @@ -337,10 +337,10 @@ int ngtcp2_crypto_derive_and_install_initial_key( * This function returns 0 if it succeeds, or -1. 
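 *
 * Illustrative note, not upstream documentation: applications rarely
 * call this directly. The helper's version-negotiation callback,
 * shown earlier in this patch, does it for them, so typical wiring is
 * just::
 *
 *   callbacks.version_negotiation = ngtcp2_crypto_version_negotiation_cb;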
*/ int ngtcp2_crypto_derive_and_install_vneg_initial_key( - ngtcp2_conn *conn, uint8_t *rx_secret, uint8_t *tx_secret, - uint8_t *initial_secret, uint8_t *rx_key, uint8_t *rx_iv, uint8_t *rx_hp, - uint8_t *tx_key, uint8_t *tx_iv, uint8_t *tx_hp, uint32_t version, - const ngtcp2_cid *client_dcid); + ngtcp2_conn *conn, uint8_t *rx_secret, uint8_t *tx_secret, + uint8_t *initial_secret, uint8_t *rx_key, uint8_t *rx_iv, uint8_t *rx_hp, + uint8_t *tx_key, uint8_t *tx_iv, uint8_t *tx_hp, uint32_t version, + const ngtcp2_cid *client_dcid); /** * @function @@ -394,4 +394,4 @@ int ngtcp2_crypto_hkdf_expand_label(uint8_t *dest, size_t destlen, const uint8_t *secret, size_t secretlen, const uint8_t *label, size_t labellen); -#endif /* NGTCP2_SHARED_H */ +#endif /* !defined(SHARED_H) */ diff --git a/deps/ngtcp2/ngtcp2/crypto/wolfssl/wolfssl.c b/deps/ngtcp2/ngtcp2/crypto/wolfssl/wolfssl.c index 2b7b5321863915..bc9d9d84a862c5 100644 --- a/deps/ngtcp2/ngtcp2/crypto/wolfssl/wolfssl.c +++ b/deps/ngtcp2/ngtcp2/crypto/wolfssl/wolfssl.c @@ -24,7 +24,7 @@ */ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -39,9 +39,9 @@ #define PRINTF_DEBUG 0 #if PRINTF_DEBUG # define DEBUG_MSG(...) fprintf(stderr, __VA_ARGS__) -#else +#else /* !PRINTF_DEBUG */ # define DEBUG_MSG(...) (void)0 -#endif +#endif /* !PRINTF_DEBUG */ ngtcp2_crypto_aead *ngtcp2_crypto_aead_aes_128_gcm(ngtcp2_crypto_aead *aead) { return ngtcp2_crypto_aead_init(aead, (void *)wolfSSL_EVP_aes_128_gcm()); @@ -65,7 +65,7 @@ ngtcp2_crypto_aead *ngtcp2_crypto_aead_init(ngtcp2_crypto_aead *aead, void *aead_native_handle) { aead->native_handle = aead_native_handle; aead->max_overhead = wolfSSL_quic_get_aead_tag_len( - (const WOLFSSL_EVP_CIPHER *)(aead_native_handle)); + (const WOLFSSL_EVP_CIPHER *)(aead_native_handle)); return aead; } @@ -124,7 +124,7 @@ ngtcp2_crypto_ctx *ngtcp2_crypto_ctx_tls(ngtcp2_crypto_ctx *ctx, ctx->hp.native_handle = (void *)wolfSSL_quic_get_hp(ssl); ctx->max_encryption = crypto_aead_get_aead_max_encryption(aead); ctx->max_decryption_failure = - crypto_aead_get_aead_max_decryption_failure(aead); + crypto_aead_get_aead_max_decryption_failure(aead); return ctx; } @@ -203,7 +203,7 @@ int ngtcp2_crypto_cipher_ctx_encrypt_init(ngtcp2_crypto_cipher_ctx *cipher_ctx, WOLFSSL_EVP_CIPHER_CTX *actx; actx = - wolfSSL_quic_crypt_new(cipher->native_handle, key, NULL, /* encrypt */ 1); + wolfSSL_quic_crypt_new(cipher->native_handle, key, NULL, /* encrypt */ 1); if (actx == NULL) { return -1; } @@ -279,7 +279,6 @@ int ngtcp2_crypto_decrypt(uint8_t *dest, const ngtcp2_crypto_aead *aead, if (wolfSSL_quic_aead_decrypt(dest, aead_ctx->native_handle, ciphertext, ciphertextlen, nonce, aad, aadlen) != WOLFSSL_SUCCESS) { - DEBUG_MSG("WOLFSSL: decrypt FAILED\n"); return -1; } @@ -296,11 +295,11 @@ int ngtcp2_crypto_hp_mask(uint8_t *dest, const ngtcp2_crypto_cipher *hp, (void)hp; if (wolfSSL_EVP_EncryptInit_ex(actx, NULL, NULL, NULL, sample) != - WOLFSSL_SUCCESS || + WOLFSSL_SUCCESS || wolfSSL_EVP_CipherUpdate(actx, dest, &len, PLAINTEXT, sizeof(PLAINTEXT) - 1) != WOLFSSL_SUCCESS || wolfSSL_EVP_EncryptFinal_ex(actx, dest + sizeof(PLAINTEXT) - 1, &len) != - WOLFSSL_SUCCESS) { + WOLFSSL_SUCCESS) { DEBUG_MSG("WOLFSSL: hp_mask FAILED\n"); return -1; } @@ -309,11 +308,11 @@ int ngtcp2_crypto_hp_mask(uint8_t *dest, const ngtcp2_crypto_cipher *hp, } int ngtcp2_crypto_read_write_crypto_data( - ngtcp2_conn *conn, ngtcp2_encryption_level encryption_level, - const uint8_t *data, size_t datalen) { + ngtcp2_conn 
*conn, ngtcp2_encryption_level encryption_level, + const uint8_t *data, size_t datalen) { WOLFSSL *ssl = ngtcp2_conn_get_tls_native_handle(conn); WOLFSSL_ENCRYPTION_LEVEL level = - ngtcp2_crypto_wolfssl_from_ngtcp2_encryption_level(encryption_level); + ngtcp2_crypto_wolfssl_from_ngtcp2_encryption_level(encryption_level); int rv; int err; @@ -397,7 +396,7 @@ int ngtcp2_crypto_set_local_transport_params(void *tls, const uint8_t *buf, } ngtcp2_encryption_level ngtcp2_crypto_wolfssl_from_wolfssl_encryption_level( - WOLFSSL_ENCRYPTION_LEVEL wolfssl_level) { + WOLFSSL_ENCRYPTION_LEVEL wolfssl_level) { switch (wolfssl_level) { case wolfssl_encryption_initial: return NGTCP2_ENCRYPTION_LEVEL_INITIAL; @@ -415,7 +414,7 @@ ngtcp2_encryption_level ngtcp2_crypto_wolfssl_from_wolfssl_encryption_level( WOLFSSL_ENCRYPTION_LEVEL ngtcp2_crypto_wolfssl_from_ngtcp2_encryption_level( - ngtcp2_encryption_level encryption_level) { + ngtcp2_encryption_level encryption_level) { switch (encryption_level) { case NGTCP2_ENCRYPTION_LEVEL_INITIAL: return wolfssl_encryption_initial; @@ -458,7 +457,7 @@ static int set_encryption_secrets(WOLFSSL *ssl, ngtcp2_crypto_conn_ref *conn_ref = SSL_get_app_data(ssl); ngtcp2_conn *conn = conn_ref->get_conn(conn_ref); ngtcp2_encryption_level level = - ngtcp2_crypto_wolfssl_from_wolfssl_encryption_level(wolfssl_level); + ngtcp2_crypto_wolfssl_from_wolfssl_encryption_level(wolfssl_level); DEBUG_MSG("WOLFSSL: set encryption secrets, level=%d, rxlen=%lu, txlen=%lu\n", wolfssl_level, rx_secret ? secretlen : 0, @@ -484,7 +483,7 @@ static int add_handshake_data(WOLFSSL *ssl, ngtcp2_crypto_conn_ref *conn_ref = SSL_get_app_data(ssl); ngtcp2_conn *conn = conn_ref->get_conn(conn_ref); ngtcp2_encryption_level level = - ngtcp2_crypto_wolfssl_from_wolfssl_encryption_level(wolfssl_level); + ngtcp2_crypto_wolfssl_from_wolfssl_encryption_level(wolfssl_level); int rv; DEBUG_MSG("WOLFSSL: add handshake data, level=%d len=%lu\n", wolfssl_level, @@ -516,10 +515,10 @@ static int send_alert(WOLFSSL *ssl, enum wolfssl_encryption_level_t level, } static WOLFSSL_QUIC_METHOD quic_method = { - set_encryption_secrets, - add_handshake_data, - flush_flight, - send_alert, + set_encryption_secrets, + add_handshake_data, + flush_flight, + send_alert, }; static void crypto_wolfssl_configure_context(WOLFSSL_CTX *ssl_ctx) { @@ -532,7 +531,7 @@ int ngtcp2_crypto_wolfssl_configure_server_context(WOLFSSL_CTX *ssl_ctx) { crypto_wolfssl_configure_context(ssl_ctx); #if PRINTF_DEBUG wolfSSL_Debugging_ON(); -#endif +#endif /* PRINTF_DEBUG */ return 0; } @@ -541,6 +540,6 @@ int ngtcp2_crypto_wolfssl_configure_client_context(WOLFSSL_CTX *ssl_ctx) { wolfSSL_CTX_UseSessionTicket(ssl_ctx); #if PRINTF_DEBUG wolfSSL_Debugging_ON(); -#endif +#endif /* PRINTF_DEBUG */ return 0; } diff --git a/deps/ngtcp2/ngtcp2/lib/includes/ngtcp2/ngtcp2.h b/deps/ngtcp2/ngtcp2/lib/includes/ngtcp2/ngtcp2.h index 72c8142a5a5aa7..acc79be6e0b375 100644 --- a/deps/ngtcp2/ngtcp2/lib/includes/ngtcp2/ngtcp2.h +++ b/deps/ngtcp2/ngtcp2/lib/includes/ngtcp2/ngtcp2.h @@ -30,12 +30,12 @@ libcurl) */ #if (defined(_WIN32) || defined(__WIN32__)) && !defined(WIN32) # define WIN32 -#endif +#endif /* (defined(_WIN32) || defined(__WIN32__)) && !defined(WIN32) */ #ifdef _MSC_VER # pragma warning(push) # pragma warning(disable : 4324) -#endif +#endif /* defined(_MSC_VER) */ #include #if defined(_MSC_VER) && (_MSC_VER < 1800) @@ -43,9 +43,9 @@ compliant. 
See compiler macros and version number in https://sourceforge.net/p/predef/wiki/Compilers/ */ # include -#else /* !defined(_MSC_VER) || (_MSC_VER >= 1800) */ +#else /* !(defined(_MSC_VER) && (_MSC_VER < 1800)) */ # include -#endif /* !defined(_MSC_VER) || (_MSC_VER >= 1800) */ +#endif /* !(defined(_MSC_VER) && (_MSC_VER < 1800)) */ #include #include #include @@ -54,13 +54,13 @@ # ifdef WIN32 # ifndef WIN32_LEAN_AND_MEAN # define WIN32_LEAN_AND_MEAN -# endif /* WIN32_LEAN_AND_MEAN */ +# endif /* !defined(WIN32_LEAN_AND_MEAN) */ # include -# else /* !WIN32 */ +# else /* !defined(WIN32) */ # include # include -# endif /* !WIN32 */ -#endif /* NGTCP2_USE_GENERIC_SOCKADDR */ +# endif /* !defined(WIN32) */ +#endif /* !defined(NGTCP2_USE_GENERIC_SOCKADDR) */ #include @@ -69,26 +69,26 @@ #elif defined(WIN32) # ifdef BUILDING_NGTCP2 # define NGTCP2_EXTERN __declspec(dllexport) -# else /* !BUILDING_NGTCP2 */ +# else /* !defined(BUILDING_NGTCP2) */ # define NGTCP2_EXTERN __declspec(dllimport) -# endif /* !BUILDING_NGTCP2 */ -#else /* !defined(WIN32) */ +# endif /* !defined(BUILDING_NGTCP2) */ +#else /* !(defined(NGTCP2_STATICLIB) || defined(WIN32)) */ # ifdef BUILDING_NGTCP2 # define NGTCP2_EXTERN __attribute__((visibility("default"))) -# else /* !BUILDING_NGTCP2 */ +# else /* !defined(BUILDING_NGTCP2) */ # define NGTCP2_EXTERN -# endif /* !BUILDING_NGTCP2 */ -#endif /* !defined(WIN32) */ +# endif /* !defined(BUILDING_NGTCP2) */ +#endif /* !(defined(NGTCP2_STATICLIB) || defined(WIN32)) */ #ifdef _MSC_VER # define NGTCP2_ALIGN(N) __declspec(align(N)) -#else /* !_MSC_VER */ +#else /* !defined(_MSC_VER) */ # define NGTCP2_ALIGN(N) __attribute__((aligned(N))) -#endif /* !_MSC_VER */ +#endif /* !defined(_MSC_VER) */ #ifdef __cplusplus extern "C" { -#endif +#endif /* defined(__cplusplus) */ /** * @typedef @@ -1229,11 +1229,11 @@ typedef struct ngtcp2_pkt_stateless_reset { #ifdef NGTCP2_USE_GENERIC_SOCKADDR # ifndef NGTCP2_AF_INET # error NGTCP2_AF_INET must be defined -# endif /* !NGTCP2_AF_INET */ +# endif /* !defined(NGTCP2_AF_INET) */ # ifndef NGTCP2_AF_INET6 # error NGTCP2_AF_INET6 must be defined -# endif /* !NGTCP2_AF_INET6 */ +# endif /* !defined(NGTCP2_AF_INET6) */ typedef unsigned short int ngtcp2_sa_family; typedef uint16_t ngtcp2_in_port; @@ -1267,7 +1267,7 @@ typedef struct ngtcp2_sockaddr_in6 { } ngtcp2_sockaddr_in6; typedef uint32_t ngtcp2_socklen; -#else /* !NGTCP2_USE_GENERIC_SOCKADDR */ +#else /* !defined(NGTCP2_USE_GENERIC_SOCKADDR) */ # define NGTCP2_AF_INET AF_INET # define NGTCP2_AF_INET6 AF_INET6 @@ -1303,7 +1303,7 @@ typedef struct sockaddr_in6 ngtcp2_sockaddr_in6; * uint32_t. */ typedef socklen_t ngtcp2_socklen; -#endif /* !NGTCP2_USE_GENERIC_SOCKADDR */ +#endif /* !defined(NGTCP2_USE_GENERIC_SOCKADDR) */ /** * @struct @@ -1689,7 +1689,8 @@ typedef enum ngtcp2_token_type { } ngtcp2_token_type; #define NGTCP2_SETTINGS_V1 1 -#define NGTCP2_SETTINGS_VERSION NGTCP2_SETTINGS_V1 +#define NGTCP2_SETTINGS_V2 2 +#define NGTCP2_SETTINGS_VERSION NGTCP2_SETTINGS_V2 /** * @struct @@ -1723,8 +1724,7 @@ typedef struct ngtcp2_settings { ngtcp2_printf log_printf; /** * :member:`max_tx_udp_payload_size` is the maximum size of UDP - * datagram payload that the local endpoint transmits. It is used - * by congestion controller to compute congestion window. + * datagram payload that the local endpoint transmits. 
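 *
 * Illustrative sketch, not upstream documentation: a deployment that
 * has verified its path MTU might raise this limit together with the
 * NGTCP2_SETTINGS_V2 probe list introduced further down (the values
 * here are placeholders)::
 *
 *   static const uint16_t probes[] = {1400, 1452};
 *   ngtcp2_settings settings;
 *
 *   ngtcp2_settings_default(&settings);
 *   settings.max_tx_udp_payload_size = 1452;
 *   settings.pmtud_probes = probes;
 *   settings.pmtud_probeslen = 2;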
*/ size_t max_tx_udp_payload_size; /** @@ -1777,6 +1777,13 @@ typedef struct ngtcp2_settings { * or :member:`ngtcp2_transport_params.initial_max_stream_data_uni`, * depending on the type of stream. The window size is scaled up to * the value specified in this field. + * + * Please note that the auto-tuning is done per stream. Even if the + * previous stream gets larger window as a result of auto-tuning, + * the new stream still starts with the initial value set in + * transport parameters. This might become a bottleneck if + * congestion window of a remote server is wide open. If this + * causes an issue, do not enable auto-tuning. */ uint64_t max_stream_window; /** @@ -1873,10 +1880,29 @@ typedef struct ngtcp2_settings { */ uint8_t no_pmtud; /** - * :member:`pkt_num` is the initial packet number for each packet - * number space. It must be in range [0, INT32_MAX], inclusive. + * :member:`initial_pkt_num` is the initial packet number for each + * packet number space. It must be in range [0, INT32_MAX], + * inclusive. */ uint32_t initial_pkt_num; + /* The following fields have been added since NGTCP2_SETTINGS_V2. */ + /** + * :member:`pmtud_probes` is the array of UDP datagram payload size + * to probe during Path MTU Discovery. The discovery is done in the + * order appeared in this array. The size must be strictly larger + * than 1200, otherwise the behavior is undefined. The maximum + * value in this array should be set to + * :member:`max_tx_udp_payload_size`. If this field is not set, the + * predefined PMTUD probes are made. This field has been available + * since v1.4.0. + */ + const uint16_t *pmtud_probes; + /** + * :member:`pmtud_probeslen` is the number of elements that are + * contained in the array pointed by :member:`pmtud_probes`. This + * field has been available since v1.4.0. + */ + size_t pmtud_probeslen; } ngtcp2_settings; /** @@ -2080,8 +2106,8 @@ typedef struct ngtcp2_crypto_ctx { * Buffer is too small. */ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_transport_params_encode_versioned( - uint8_t *dest, size_t destlen, int transport_params_version, - const ngtcp2_transport_params *params); + uint8_t *dest, size_t destlen, int transport_params_version, + const ngtcp2_transport_params *params); /** * @function @@ -2324,8 +2350,8 @@ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_pkt_decode_hd_short(ngtcp2_pkt_hd *dest, * :macro:`NGTCP2_MIN_STATELESS_RESET_RANDLEN`. */ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_pkt_write_stateless_reset( - uint8_t *dest, size_t destlen, const uint8_t *stateless_reset_token, - const uint8_t *rand, size_t randlen); + uint8_t *dest, size_t destlen, const uint8_t *stateless_reset_token, + const uint8_t *rand, size_t randlen); /** * @function @@ -2347,9 +2373,9 @@ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_pkt_write_stateless_reset( * Buffer is too small. */ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_pkt_write_version_negotiation( - uint8_t *dest, size_t destlen, uint8_t unused_random, const uint8_t *dcid, - size_t dcidlen, const uint8_t *scid, size_t scidlen, const uint32_t *sv, - size_t nsv); + uint8_t *dest, size_t destlen, uint8_t unused_random, const uint8_t *dcid, + size_t dcidlen, const uint8_t *scid, size_t scidlen, const uint32_t *sv, + size_t nsv); /** * @struct @@ -2770,8 +2796,8 @@ typedef int (*ngtcp2_stream_reset)(ngtcp2_conn *conn, int64_t stream_id, * call return immediately. 
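 *
 * A minimal implementation, sketched with a hypothetical app_stream
 * type carried in |stream_user_data|, simply releases the
 * acknowledged bytes from its retransmission queue::
 *
 *   static int acked_stream_data_offset(ngtcp2_conn *conn,
 *                                       int64_t stream_id, uint64_t offset,
 *                                       uint64_t datalen, void *user_data,
 *                                       void *stream_user_data) {
 *     app_stream *strm = stream_user_data;
 *
 *     app_stream_consume(strm, (size_t)datalen);
 *     return 0;
 *   }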
*/ typedef int (*ngtcp2_acked_stream_data_offset)( - ngtcp2_conn *conn, int64_t stream_id, uint64_t offset, uint64_t datalen, - void *user_data, void *stream_user_data); + ngtcp2_conn *conn, int64_t stream_id, uint64_t offset, uint64_t datalen, + void *user_data, void *stream_user_data); /** * @functypedef @@ -2893,11 +2919,11 @@ typedef int (*ngtcp2_remove_connection_id)(ngtcp2_conn *conn, * immediately. */ typedef int (*ngtcp2_update_key)( - ngtcp2_conn *conn, uint8_t *rx_secret, uint8_t *tx_secret, - ngtcp2_crypto_aead_ctx *rx_aead_ctx, uint8_t *rx_iv, - ngtcp2_crypto_aead_ctx *tx_aead_ctx, uint8_t *tx_iv, - const uint8_t *current_rx_secret, const uint8_t *current_tx_secret, - size_t secretlen, void *user_data); + ngtcp2_conn *conn, uint8_t *rx_secret, uint8_t *tx_secret, + ngtcp2_crypto_aead_ctx *rx_aead_ctx, uint8_t *rx_iv, + ngtcp2_crypto_aead_ctx *tx_aead_ctx, uint8_t *tx_iv, + const uint8_t *current_rx_secret, const uint8_t *current_tx_secret, + size_t secretlen, void *user_data); /** * @macrosection @@ -3024,8 +3050,8 @@ typedef enum ngtcp2_connection_id_status_type { * immediately. */ typedef int (*ngtcp2_connection_id_status)( - ngtcp2_conn *conn, ngtcp2_connection_id_status_type type, uint64_t seq, - const ngtcp2_cid *cid, const uint8_t *token, void *user_data); + ngtcp2_conn *conn, ngtcp2_connection_id_status_type type, uint64_t seq, + const ngtcp2_cid *cid, const uint8_t *token, void *user_data); /** * @functypedef @@ -3064,7 +3090,7 @@ typedef void (*ngtcp2_delete_crypto_aead_ctx)(ngtcp2_conn *conn, * `. */ typedef void (*ngtcp2_delete_crypto_cipher_ctx)( - ngtcp2_conn *conn, ngtcp2_crypto_cipher_ctx *cipher_ctx, void *user_data); + ngtcp2_conn *conn, ngtcp2_crypto_cipher_ctx *cipher_ctx, void *user_data); /** * @macrosection @@ -3510,12 +3536,12 @@ typedef struct ngtcp2_callbacks { * Callback function failed. */ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_pkt_write_connection_close( - uint8_t *dest, size_t destlen, uint32_t version, const ngtcp2_cid *dcid, - const ngtcp2_cid *scid, uint64_t error_code, const uint8_t *reason, - size_t reasonlen, ngtcp2_encrypt encrypt, const ngtcp2_crypto_aead *aead, - const ngtcp2_crypto_aead_ctx *aead_ctx, const uint8_t *iv, - ngtcp2_hp_mask hp_mask, const ngtcp2_crypto_cipher *hp, - const ngtcp2_crypto_cipher_ctx *hp_ctx); + uint8_t *dest, size_t destlen, uint32_t version, const ngtcp2_cid *dcid, + const ngtcp2_cid *scid, uint64_t error_code, const uint8_t *reason, + size_t reasonlen, ngtcp2_encrypt encrypt, const ngtcp2_crypto_aead *aead, + const ngtcp2_crypto_aead_ctx *aead_ctx, const uint8_t *iv, + ngtcp2_hp_mask hp_mask, const ngtcp2_crypto_cipher *hp, + const ngtcp2_crypto_cipher_ctx *hp_ctx); /** * @function @@ -3542,10 +3568,10 @@ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_pkt_write_connection_close( * :macro:`NGTCP2_MIN_INITIAL_DCIDLEN`. */ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_pkt_write_retry( - uint8_t *dest, size_t destlen, uint32_t version, const ngtcp2_cid *dcid, - const ngtcp2_cid *scid, const ngtcp2_cid *odcid, const uint8_t *token, - size_t tokenlen, ngtcp2_encrypt encrypt, const ngtcp2_crypto_aead *aead, - const ngtcp2_crypto_aead_ctx *aead_ctx); + uint8_t *dest, size_t destlen, uint32_t version, const ngtcp2_cid *dcid, + const ngtcp2_cid *scid, const ngtcp2_cid *odcid, const uint8_t *token, + size_t tokenlen, ngtcp2_encrypt encrypt, const ngtcp2_crypto_aead *aead, + const ngtcp2_crypto_aead_ctx *aead_ctx); /** * @function @@ -3594,12 +3620,12 @@ NGTCP2_EXTERN int ngtcp2_accept(ngtcp2_pkt_hd *dest, const uint8_t *pkt, * Out of memory. 
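 *
 * Callers normally reach this through the `ngtcp2_conn_client_new`
 * convenience macro defined near the end of this header. Sketch, with
 * the setup of the arguments elided and handle_error as a
 * placeholder::
 *
 *   ngtcp2_conn *conn;
 *
 *   if (ngtcp2_conn_client_new(&conn, &dcid, &scid, &path,
 *                              NGTCP2_PROTO_VER_V1, &callbacks, &settings,
 *                              &params, NULL, user_data) != 0) {
 *     handle_error();
 *   }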
*/ NGTCP2_EXTERN int ngtcp2_conn_client_new_versioned( - ngtcp2_conn **pconn, const ngtcp2_cid *dcid, const ngtcp2_cid *scid, - const ngtcp2_path *path, uint32_t client_chosen_version, - int callbacks_version, const ngtcp2_callbacks *callbacks, - int settings_version, const ngtcp2_settings *settings, - int transport_params_version, const ngtcp2_transport_params *params, - const ngtcp2_mem *mem, void *user_data); + ngtcp2_conn **pconn, const ngtcp2_cid *dcid, const ngtcp2_cid *scid, + const ngtcp2_path *path, uint32_t client_chosen_version, + int callbacks_version, const ngtcp2_callbacks *callbacks, + int settings_version, const ngtcp2_settings *settings, + int transport_params_version, const ngtcp2_transport_params *params, + const ngtcp2_mem *mem, void *user_data); /** * @function @@ -3629,12 +3655,12 @@ NGTCP2_EXTERN int ngtcp2_conn_client_new_versioned( * Out of memory. */ NGTCP2_EXTERN int ngtcp2_conn_server_new_versioned( - ngtcp2_conn **pconn, const ngtcp2_cid *dcid, const ngtcp2_cid *scid, - const ngtcp2_path *path, uint32_t client_chosen_version, - int callbacks_version, const ngtcp2_callbacks *callbacks, - int settings_version, const ngtcp2_settings *settings, - int transport_params_version, const ngtcp2_transport_params *params, - const ngtcp2_mem *mem, void *user_data); + ngtcp2_conn **pconn, const ngtcp2_cid *dcid, const ngtcp2_cid *scid, + const ngtcp2_path *path, uint32_t client_chosen_version, + int callbacks_version, const ngtcp2_callbacks *callbacks, + int settings_version, const ngtcp2_settings *settings, + int transport_params_version, const ngtcp2_transport_params *params, + const ngtcp2_mem *mem, void *user_data); /** * @function @@ -3698,8 +3724,8 @@ ngtcp2_conn_read_pkt_versioned(ngtcp2_conn *conn, const ngtcp2_path *path, * and :macro:`NGTCP2_WRITE_STREAM_FLAG_NONE` as flags. */ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_conn_write_pkt_versioned( - ngtcp2_conn *conn, ngtcp2_path *path, int pkt_info_version, - ngtcp2_pkt_info *pi, uint8_t *dest, size_t destlen, ngtcp2_tstamp ts); + ngtcp2_conn *conn, ngtcp2_path *path, int pkt_info_version, + ngtcp2_pkt_info *pi, uint8_t *dest, size_t destlen, ngtcp2_tstamp ts); /** * @function @@ -3755,10 +3781,10 @@ NGTCP2_EXTERN int ngtcp2_conn_get_handshake_completed(ngtcp2_conn *conn); * Out of memory. */ NGTCP2_EXTERN int ngtcp2_conn_install_initial_key( - ngtcp2_conn *conn, const ngtcp2_crypto_aead_ctx *rx_aead_ctx, - const uint8_t *rx_iv, const ngtcp2_crypto_cipher_ctx *rx_hp_ctx, - const ngtcp2_crypto_aead_ctx *tx_aead_ctx, const uint8_t *tx_iv, - const ngtcp2_crypto_cipher_ctx *tx_hp_ctx, size_t ivlen); + ngtcp2_conn *conn, const ngtcp2_crypto_aead_ctx *rx_aead_ctx, + const uint8_t *rx_iv, const ngtcp2_crypto_cipher_ctx *rx_hp_ctx, + const ngtcp2_crypto_aead_ctx *tx_aead_ctx, const uint8_t *tx_iv, + const ngtcp2_crypto_cipher_ctx *tx_hp_ctx, size_t ivlen); /** * @function @@ -3790,11 +3816,11 @@ NGTCP2_EXTERN int ngtcp2_conn_install_initial_key( * Out of memory. 
*/ NGTCP2_EXTERN int ngtcp2_conn_install_vneg_initial_key( - ngtcp2_conn *conn, uint32_t version, - const ngtcp2_crypto_aead_ctx *rx_aead_ctx, const uint8_t *rx_iv, - const ngtcp2_crypto_cipher_ctx *rx_hp_ctx, - const ngtcp2_crypto_aead_ctx *tx_aead_ctx, const uint8_t *tx_iv, - const ngtcp2_crypto_cipher_ctx *tx_hp_ctx, size_t ivlen); + ngtcp2_conn *conn, uint32_t version, + const ngtcp2_crypto_aead_ctx *rx_aead_ctx, const uint8_t *rx_iv, + const ngtcp2_crypto_cipher_ctx *rx_hp_ctx, + const ngtcp2_crypto_aead_ctx *tx_aead_ctx, const uint8_t *tx_iv, + const ngtcp2_crypto_cipher_ctx *tx_hp_ctx, size_t ivlen); /** * @function @@ -3821,8 +3847,8 @@ NGTCP2_EXTERN int ngtcp2_conn_install_vneg_initial_key( * Out of memory. */ NGTCP2_EXTERN int ngtcp2_conn_install_rx_handshake_key( - ngtcp2_conn *conn, const ngtcp2_crypto_aead_ctx *aead_ctx, - const uint8_t *iv, size_t ivlen, const ngtcp2_crypto_cipher_ctx *hp_ctx); + ngtcp2_conn *conn, const ngtcp2_crypto_aead_ctx *aead_ctx, const uint8_t *iv, + size_t ivlen, const ngtcp2_crypto_cipher_ctx *hp_ctx); /** * @function @@ -3849,8 +3875,8 @@ NGTCP2_EXTERN int ngtcp2_conn_install_rx_handshake_key( * Out of memory. */ NGTCP2_EXTERN int ngtcp2_conn_install_tx_handshake_key( - ngtcp2_conn *conn, const ngtcp2_crypto_aead_ctx *aead_ctx, - const uint8_t *iv, size_t ivlen, const ngtcp2_crypto_cipher_ctx *hp_ctx); + ngtcp2_conn *conn, const ngtcp2_crypto_aead_ctx *aead_ctx, const uint8_t *iv, + size_t ivlen, const ngtcp2_crypto_cipher_ctx *hp_ctx); /** * @function @@ -3876,8 +3902,8 @@ NGTCP2_EXTERN int ngtcp2_conn_install_tx_handshake_key( * Out of memory. */ NGTCP2_EXTERN int ngtcp2_conn_install_0rtt_key( - ngtcp2_conn *conn, const ngtcp2_crypto_aead_ctx *aead_ctx, - const uint8_t *iv, size_t ivlen, const ngtcp2_crypto_cipher_ctx *hp_ctx); + ngtcp2_conn *conn, const ngtcp2_crypto_aead_ctx *aead_ctx, const uint8_t *iv, + size_t ivlen, const ngtcp2_crypto_cipher_ctx *hp_ctx); /** * @function @@ -3906,9 +3932,9 @@ NGTCP2_EXTERN int ngtcp2_conn_install_0rtt_key( * Out of memory. */ NGTCP2_EXTERN int ngtcp2_conn_install_rx_key( - ngtcp2_conn *conn, const uint8_t *secret, size_t secretlen, - const ngtcp2_crypto_aead_ctx *aead_ctx, const uint8_t *iv, size_t ivlen, - const ngtcp2_crypto_cipher_ctx *hp_ctx); + ngtcp2_conn *conn, const uint8_t *secret, size_t secretlen, + const ngtcp2_crypto_aead_ctx *aead_ctx, const uint8_t *iv, size_t ivlen, + const ngtcp2_crypto_cipher_ctx *hp_ctx); /** * @function @@ -3937,9 +3963,9 @@ NGTCP2_EXTERN int ngtcp2_conn_install_rx_key( * Out of memory. */ NGTCP2_EXTERN int ngtcp2_conn_install_tx_key( - ngtcp2_conn *conn, const uint8_t *secret, size_t secretlen, - const ngtcp2_crypto_aead_ctx *aead_ctx, const uint8_t *iv, size_t ivlen, - const ngtcp2_crypto_cipher_ctx *hp_ctx); + ngtcp2_conn *conn, const uint8_t *secret, size_t secretlen, + const ngtcp2_crypto_aead_ctx *aead_ctx, const uint8_t *iv, size_t ivlen, + const ngtcp2_crypto_cipher_ctx *hp_ctx); /** * @function @@ -4063,7 +4089,7 @@ NGTCP2_EXTERN ngtcp2_duration ngtcp2_conn_get_pto(ngtcp2_conn *conn); * User callback failed */ NGTCP2_EXTERN int ngtcp2_conn_decode_and_set_remote_transport_params( - ngtcp2_conn *conn, const uint8_t *data, size_t datalen); + ngtcp2_conn *conn, const uint8_t *data, size_t datalen); /** * @function @@ -4148,7 +4174,7 @@ ngtcp2_ssize ngtcp2_conn_encode_0rtt_transport_params(ngtcp2_conn *conn, * The input is malformed. 
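 *
 * Sketch of the intended round trip (the buffer size is illustrative):
 * a client stores the encoding next to its TLS session ticket, then
 * restores it before sending 0-RTT data on the next connection::
 *
 *   uint8_t buf[256];
 *   ngtcp2_ssize n =
 *     ngtcp2_conn_encode_0rtt_transport_params(conn, buf, sizeof(buf));
 *
 *   (persist buf[0..n) with the session ticket, then on resumption)
 *
 *   ngtcp2_conn_decode_and_set_0rtt_transport_params(conn, buf, (size_t)n);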
*/ NGTCP2_EXTERN int ngtcp2_conn_decode_and_set_0rtt_transport_params( - ngtcp2_conn *conn, const uint8_t *data, size_t datalen); + ngtcp2_conn *conn, const uint8_t *data, size_t datalen); /** * @function @@ -4168,8 +4194,8 @@ NGTCP2_EXTERN int ngtcp2_conn_decode_and_set_0rtt_transport_params( * `ngtcp2_conn_install_tx_handshake_key` has been called. */ NGTCP2_EXTERN int ngtcp2_conn_set_local_transport_params_versioned( - ngtcp2_conn *conn, int transport_params_version, - const ngtcp2_transport_params *params); + ngtcp2_conn *conn, int transport_params_version, + const ngtcp2_transport_params *params); /** * @function @@ -4193,7 +4219,7 @@ ngtcp2_conn_get_local_transport_params(ngtcp2_conn *conn); * Buffer is too small. */ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_conn_encode_local_transport_params( - ngtcp2_conn *conn, uint8_t *dest, size_t destlen); + ngtcp2_conn *conn, uint8_t *dest, size_t destlen); /** * @function @@ -4374,10 +4400,10 @@ NGTCP2_EXTERN int ngtcp2_conn_shutdown_stream_read(ngtcp2_conn *conn, * conveniently accepts a single buffer. */ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_conn_write_stream_versioned( - ngtcp2_conn *conn, ngtcp2_path *path, int pkt_info_version, - ngtcp2_pkt_info *pi, uint8_t *dest, size_t destlen, ngtcp2_ssize *pdatalen, - uint32_t flags, int64_t stream_id, const uint8_t *data, size_t datalen, - ngtcp2_tstamp ts); + ngtcp2_conn *conn, ngtcp2_path *path, int pkt_info_version, + ngtcp2_pkt_info *pi, uint8_t *dest, size_t destlen, ngtcp2_ssize *pdatalen, + uint32_t flags, int64_t stream_id, const uint8_t *data, size_t datalen, + ngtcp2_tstamp ts); /** * @function @@ -4388,7 +4414,8 @@ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_conn_write_stream_versioned( * handshake as well. * * |destlen| should be at least - * :member:`ngtcp2_settings.max_tx_udp_payload_size`. + * :member:`ngtcp2_settings.max_tx_udp_payload_size`. It must be at + * least :macro:`NGTCP2_MAX_UDP_PAYLOAD_SIZE`. * * Specifying -1 to |stream_id| means no new stream data to send. * @@ -4418,8 +4445,10 @@ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_conn_write_stream_versioned( * In that case, |*pdatalen| would be -1 if |pdatalen| is not * ``NULL``. * - * If |flags| & :macro:`NGTCP2_WRITE_STREAM_FLAG_FIN` is nonzero, and - * 0 length STREAM frame is successfully serialized, |*pdatalen| would + * Empty data is treated specially, and it is only accepted if no + * data, including the empty data, is submitted to a stream or + * :macro:`NGTCP2_WRITE_STREAM_FLAG_FIN` is set in |flags|. If 0 + * length STREAM frame is successfully serialized, |*pdatalen| would * be 0. * * The number of data encoded in STREAM frame is stored in |*pdatalen| @@ -4525,10 +4554,10 @@ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_conn_write_stream_versioned( * sending it makes QUIC connection enter the closing state. */ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_conn_writev_stream_versioned( - ngtcp2_conn *conn, ngtcp2_path *path, int pkt_info_version, - ngtcp2_pkt_info *pi, uint8_t *dest, size_t destlen, ngtcp2_ssize *pdatalen, - uint32_t flags, int64_t stream_id, const ngtcp2_vec *datav, size_t datavcnt, - ngtcp2_tstamp ts); + ngtcp2_conn *conn, ngtcp2_path *path, int pkt_info_version, + ngtcp2_pkt_info *pi, uint8_t *dest, size_t destlen, ngtcp2_ssize *pdatalen, + uint32_t flags, int64_t stream_id, const ngtcp2_vec *datav, size_t datavcnt, + ngtcp2_tstamp ts); /** * @macrosection @@ -4559,10 +4588,10 @@ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_conn_writev_stream_versioned( * conveniently accepts a single buffer. 
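 *
 * Sketch (illustrative, not upstream documentation) of sending one
 * unreliable datagram and learning whether it was actually packed::
 *
 *   int accepted;
 *   ngtcp2_ssize nwrite = ngtcp2_conn_write_datagram(
 *     conn, &path, &pi, dest, destlen, &accepted,
 *     NGTCP2_WRITE_DATAGRAM_FLAG_NONE, dgram_id, data, datalen, ts);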
*/ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_conn_write_datagram_versioned( - ngtcp2_conn *conn, ngtcp2_path *path, int pkt_info_version, - ngtcp2_pkt_info *pi, uint8_t *dest, size_t destlen, int *paccepted, - uint32_t flags, uint64_t dgram_id, const uint8_t *data, size_t datalen, - ngtcp2_tstamp ts); + ngtcp2_conn *conn, ngtcp2_path *path, int pkt_info_version, + ngtcp2_pkt_info *pi, uint8_t *dest, size_t destlen, int *paccepted, + uint32_t flags, uint64_t dgram_id, const uint8_t *data, size_t datalen, + ngtcp2_tstamp ts); /** * @function @@ -4573,7 +4602,8 @@ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_conn_write_datagram_versioned( * as well. * * |destlen| should be at least - * :member:`ngtcp2_settings.max_tx_udp_payload_size`. + * :member:`ngtcp2_settings.max_tx_udp_payload_size`. It must be at + * least :macro:`NGTCP2_MAX_UDP_PAYLOAD_SIZE`. * * For |path| and |pi| parameters, refer to * `ngtcp2_conn_writev_stream`. @@ -4655,10 +4685,10 @@ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_conn_write_datagram_versioned( * sending it makes QUIC connection enter the closing state. */ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_conn_writev_datagram_versioned( - ngtcp2_conn *conn, ngtcp2_path *path, int pkt_info_version, - ngtcp2_pkt_info *pi, uint8_t *dest, size_t destlen, int *paccepted, - uint32_t flags, uint64_t dgram_id, const ngtcp2_vec *datav, size_t datavcnt, - ngtcp2_tstamp ts); + ngtcp2_conn *conn, ngtcp2_path *path, int pkt_info_version, + ngtcp2_pkt_info *pi, uint8_t *dest, size_t destlen, int *paccepted, + uint32_t flags, uint64_t dgram_id, const ngtcp2_vec *datav, size_t datavcnt, + ngtcp2_tstamp ts); /** * @function @@ -4992,7 +5022,7 @@ ngtcp2_conn_get_path_max_tx_udp_payload_size(ngtcp2_conn *conn); * Out of memory */ NGTCP2_EXTERN int ngtcp2_conn_initiate_immediate_migration( - ngtcp2_conn *conn, const ngtcp2_path *path, ngtcp2_tstamp ts); + ngtcp2_conn *conn, const ngtcp2_path *path, ngtcp2_tstamp ts); /** * @function @@ -5193,7 +5223,19 @@ typedef enum ngtcp2_ccerr_type { * transport error, and it indicates that connection is closed * because of idle timeout. */ - NGTCP2_CCERR_TYPE_IDLE_CLOSE + NGTCP2_CCERR_TYPE_IDLE_CLOSE, + /** + * :enum:`NGTCP2_CCERR_TYPE_DROP_CONN` is a special case of QUIC + * transport error, and it indicates that connection should be + * dropped without sending a CONNECTION_CLOSE frame. + */ + NGTCP2_CCERR_TYPE_DROP_CONN, + /** + * :enum:`NGTCP2_CCERR_TYPE_RETRY` is a special case of QUIC + * transport error, and it indicates that RETRY packet should be + * sent to a client. + */ + NGTCP2_CCERR_TYPE_RETRY } ngtcp2_ccerr_type; /** @@ -5284,6 +5326,18 @@ NGTCP2_EXTERN void ngtcp2_ccerr_set_transport_error(ngtcp2_ccerr *ccerr, * :member:`ccerr->error_code ` to * :macro:`NGTCP2_NO_ERROR`. * + * If |liberr| is :macro:`NGTCP2_ERR_DROP_CONN`, :member:`ccerr->type + * ` is set to + * :enum:`ngtcp2_ccerr_type.NGTCP2_CCERR_TYPE_DROP_CONN`, and + * :member:`ccerr->error_code ` to + * :macro:`NGTCP2_NO_ERROR`. + * + * If |liberr| is :macro:`NGTCP2_ERR_RETRY`, :member:`ccerr->type + * ` is set to + * :enum:`ngtcp2_ccerr_type.NGTCP2_CCERR_TYPE_RETRY`, and + * :member:`ccerr->error_type ` to + * :macro:`NGTCP2_NO_ERROR`. 
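 *
 * For example (illustrative only), a server that decides inside a
 * callback to address-validate the client can ask for a Retry rather
 * than a CONNECTION_CLOSE::
 *
 *   ngtcp2_ccerr_set_liberr(&ccerr, NGTCP2_ERR_RETRY, NULL, 0);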
+ * * Otherwise, :member:`ccerr->type ` is set to * :enum:`ngtcp2_ccerr_type.NGTCP2_CCERR_TYPE_TRANSPORT`, and * :member:`ccerr->error_code ` is set to an @@ -5380,9 +5434,9 @@ NGTCP2_EXTERN void ngtcp2_ccerr_set_application_error(ngtcp2_ccerr *ccerr, * User callback failed */ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_conn_write_connection_close_versioned( - ngtcp2_conn *conn, ngtcp2_path *path, int pkt_info_version, - ngtcp2_pkt_info *pi, uint8_t *dest, size_t destlen, - const ngtcp2_ccerr *ccerr, ngtcp2_tstamp ts); + ngtcp2_conn *conn, ngtcp2_path *path, int pkt_info_version, + ngtcp2_pkt_info *pi, uint8_t *dest, size_t destlen, const ngtcp2_ccerr *ccerr, + ngtcp2_tstamp ts); /** * @function @@ -5734,8 +5788,8 @@ NGTCP2_EXTERN uint32_t ngtcp2_select_version(const uint32_t *preferred_versions, #define ngtcp2_conn_write_stream(CONN, PATH, PI, DEST, DESTLEN, PDATALEN, \ FLAGS, STREAM_ID, DATA, DATALEN, TS) \ ngtcp2_conn_write_stream_versioned( \ - (CONN), (PATH), NGTCP2_PKT_INFO_VERSION, (PI), (DEST), (DESTLEN), \ - (PDATALEN), (FLAGS), (STREAM_ID), (DATA), (DATALEN), (TS)) + (CONN), (PATH), NGTCP2_PKT_INFO_VERSION, (PI), (DEST), (DESTLEN), \ + (PDATALEN), (FLAGS), (STREAM_ID), (DATA), (DATALEN), (TS)) /* * `ngtcp2_conn_writev_stream` is a wrapper around @@ -5745,8 +5799,8 @@ NGTCP2_EXTERN uint32_t ngtcp2_select_version(const uint32_t *preferred_versions, #define ngtcp2_conn_writev_stream(CONN, PATH, PI, DEST, DESTLEN, PDATALEN, \ FLAGS, STREAM_ID, DATAV, DATAVCNT, TS) \ ngtcp2_conn_writev_stream_versioned( \ - (CONN), (PATH), NGTCP2_PKT_INFO_VERSION, (PI), (DEST), (DESTLEN), \ - (PDATALEN), (FLAGS), (STREAM_ID), (DATAV), (DATAVCNT), (TS)) + (CONN), (PATH), NGTCP2_PKT_INFO_VERSION, (PI), (DEST), (DESTLEN), \ + (PDATALEN), (FLAGS), (STREAM_ID), (DATAV), (DATAVCNT), (TS)) /* * `ngtcp2_conn_write_datagram` is a wrapper around @@ -5756,8 +5810,8 @@ NGTCP2_EXTERN uint32_t ngtcp2_select_version(const uint32_t *preferred_versions, #define ngtcp2_conn_write_datagram(CONN, PATH, PI, DEST, DESTLEN, PACCEPTED, \ FLAGS, DGRAM_ID, DATA, DATALEN, TS) \ ngtcp2_conn_write_datagram_versioned( \ - (CONN), (PATH), NGTCP2_PKT_INFO_VERSION, (PI), (DEST), (DESTLEN), \ - (PACCEPTED), (FLAGS), (DGRAM_ID), (DATA), (DATALEN), (TS)) + (CONN), (PATH), NGTCP2_PKT_INFO_VERSION, (PI), (DEST), (DESTLEN), \ + (PACCEPTED), (FLAGS), (DGRAM_ID), (DATA), (DATALEN), (TS)) /* * `ngtcp2_conn_writev_datagram` is a wrapper around @@ -5767,8 +5821,8 @@ NGTCP2_EXTERN uint32_t ngtcp2_select_version(const uint32_t *preferred_versions, #define ngtcp2_conn_writev_datagram(CONN, PATH, PI, DEST, DESTLEN, PACCEPTED, \ FLAGS, DGRAM_ID, DATAV, DATAVCNT, TS) \ ngtcp2_conn_writev_datagram_versioned( \ - (CONN), (PATH), NGTCP2_PKT_INFO_VERSION, (PI), (DEST), (DESTLEN), \ - (PACCEPTED), (FLAGS), (DGRAM_ID), (DATAV), (DATAVCNT), (TS)) + (CONN), (PATH), NGTCP2_PKT_INFO_VERSION, (PI), (DEST), (DESTLEN), \ + (PACCEPTED), (FLAGS), (DGRAM_ID), (DATAV), (DATAVCNT), (TS)) /* * `ngtcp2_conn_write_connection_close` is a wrapper around @@ -5778,8 +5832,8 @@ NGTCP2_EXTERN uint32_t ngtcp2_select_version(const uint32_t *preferred_versions, #define ngtcp2_conn_write_connection_close(CONN, PATH, PI, DEST, DESTLEN, \ CCERR, TS) \ ngtcp2_conn_write_connection_close_versioned( \ - (CONN), (PATH), NGTCP2_PKT_INFO_VERSION, (PI), (DEST), (DESTLEN), \ - (CCERR), (TS)) + (CONN), (PATH), NGTCP2_PKT_INFO_VERSION, (PI), (DEST), (DESTLEN), (CCERR), \ + (TS)) /* * `ngtcp2_transport_params_encode` is a wrapper around @@ -5788,7 +5842,7 @@ NGTCP2_EXTERN uint32_t 
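/* Informative aside: every *_versioned entry point takes struct-version
   integers so that structs such as ngtcp2_settings can grow (see
   NGTCP2_SETTINGS_V2 above) without breaking the ABI; the convenience
   macros in this header pin the versions the application was compiled
   against. Illustrative expansion:

     ngtcp2_conn_write_connection_close(conn, path, pi, dest, destlen,
                                        ccerr, ts)

   becomes

     ngtcp2_conn_write_connection_close_versioned(
         conn, path, NGTCP2_PKT_INFO_VERSION, pi, dest, destlen, ccerr, ts)
 */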
ngtcp2_select_version(const uint32_t *preferred_versions, */ #define ngtcp2_transport_params_encode(DEST, DESTLEN, PARAMS) \ ngtcp2_transport_params_encode_versioned( \ - (DEST), (DESTLEN), NGTCP2_TRANSPORT_PARAMS_VERSION, (PARAMS)) + (DEST), (DESTLEN), NGTCP2_TRANSPORT_PARAMS_VERSION, (PARAMS)) /* * `ngtcp2_transport_params_decode` is a wrapper around @@ -5807,9 +5861,9 @@ NGTCP2_EXTERN uint32_t ngtcp2_select_version(const uint32_t *preferred_versions, #define ngtcp2_conn_client_new(PCONN, DCID, SCID, PATH, VERSION, CALLBACKS, \ SETTINGS, PARAMS, MEM, USER_DATA) \ ngtcp2_conn_client_new_versioned( \ - (PCONN), (DCID), (SCID), (PATH), (VERSION), NGTCP2_CALLBACKS_VERSION, \ - (CALLBACKS), NGTCP2_SETTINGS_VERSION, (SETTINGS), \ - NGTCP2_TRANSPORT_PARAMS_VERSION, (PARAMS), (MEM), (USER_DATA)) + (PCONN), (DCID), (SCID), (PATH), (VERSION), NGTCP2_CALLBACKS_VERSION, \ + (CALLBACKS), NGTCP2_SETTINGS_VERSION, (SETTINGS), \ + NGTCP2_TRANSPORT_PARAMS_VERSION, (PARAMS), (MEM), (USER_DATA)) /* * `ngtcp2_conn_server_new` is a wrapper around @@ -5819,9 +5873,9 @@ NGTCP2_EXTERN uint32_t ngtcp2_select_version(const uint32_t *preferred_versions, #define ngtcp2_conn_server_new(PCONN, DCID, SCID, PATH, VERSION, CALLBACKS, \ SETTINGS, PARAMS, MEM, USER_DATA) \ ngtcp2_conn_server_new_versioned( \ - (PCONN), (DCID), (SCID), (PATH), (VERSION), NGTCP2_CALLBACKS_VERSION, \ - (CALLBACKS), NGTCP2_SETTINGS_VERSION, (SETTINGS), \ - NGTCP2_TRANSPORT_PARAMS_VERSION, (PARAMS), (MEM), (USER_DATA)) + (PCONN), (DCID), (SCID), (PATH), (VERSION), NGTCP2_CALLBACKS_VERSION, \ + (CALLBACKS), NGTCP2_SETTINGS_VERSION, (SETTINGS), \ + NGTCP2_TRANSPORT_PARAMS_VERSION, (PARAMS), (MEM), (USER_DATA)) /* * `ngtcp2_conn_set_local_transport_params` is a wrapper around @@ -5830,7 +5884,7 @@ NGTCP2_EXTERN uint32_t ngtcp2_select_version(const uint32_t *preferred_versions, */ #define ngtcp2_conn_set_local_transport_params(CONN, PARAMS) \ ngtcp2_conn_set_local_transport_params_versioned( \ - (CONN), NGTCP2_TRANSPORT_PARAMS_VERSION, (PARAMS)) + (CONN), NGTCP2_TRANSPORT_PARAMS_VERSION, (PARAMS)) /* * `ngtcp2_transport_params_default` is a wrapper around @@ -5859,10 +5913,10 @@ NGTCP2_EXTERN uint32_t ngtcp2_select_version(const uint32_t *preferred_versions, #ifdef _MSC_VER # pragma warning(pop) -#endif +#endif /* defined(_MSC_VER) */ #ifdef __cplusplus } -#endif +#endif /* defined(__cplusplus) */ -#endif /* NGTCP2_H */ +#endif /* !defined(NGTCP2_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/includes/ngtcp2/version.h b/deps/ngtcp2/ngtcp2/lib/includes/ngtcp2/version.h index 801c6cb2681386..e9e7f9bf86e742 100644 --- a/deps/ngtcp2/ngtcp2/lib/includes/ngtcp2/version.h +++ b/deps/ngtcp2/ngtcp2/lib/includes/ngtcp2/version.h @@ -22,8 +22,8 @@ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ -#ifndef VERSION_H -#define VERSION_H +#ifndef NGTCP2_VERSION_H +#define NGTCP2_VERSION_H /** * @macrosection @@ -36,7 +36,7 @@ * * Version number of the ngtcp2 library release. */ -#define NGTCP2_VERSION "1.3.0" +#define NGTCP2_VERSION "1.9.1" /** * @macro @@ -46,6 +46,6 @@ * number, 8 bits for minor and 8 bits for patch. Version 1.2.3 * becomes 0x010203. 
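 *
 * (Editor's sketch, not part of the patch: because major, minor and
 * patch are packed into a single constant, a compile-time version gate
 * for this release is a plain integer comparison.)
 *
 *     #if NGTCP2_VERSION_NUM >= 0x010901
 *       ... code that relies on ngtcp2 1.9.1 or later ...
 *     #endif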
*/ -#define NGTCP2_VERSION_NUM 0x010300 +#define NGTCP2_VERSION_NUM 0x010901 -#endif /* VERSION_H */ +#endif /* !defined(NGTCP2_VERSION_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_acktr.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_acktr.c index d4778d66accf31..0e5bfe069e9fab 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_acktr.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_acktr.c @@ -29,7 +29,7 @@ #include "ngtcp2_macro.h" #include "ngtcp2_tstamp.h" -ngtcp2_objalloc_def(acktr_entry, ngtcp2_acktr_entry, oplent); +ngtcp2_objalloc_def(acktr_entry, ngtcp2_acktr_entry, oplent) static void acktr_entry_init(ngtcp2_acktr_entry *ent, int64_t pkt_num, ngtcp2_tstamp tstamp) { @@ -56,41 +56,26 @@ void ngtcp2_acktr_entry_objalloc_del(ngtcp2_acktr_entry *ent, ngtcp2_objalloc_acktr_entry_release(objalloc, ent); } -static int greater(const ngtcp2_ksl_key *lhs, const ngtcp2_ksl_key *rhs) { - return *(int64_t *)lhs > *(int64_t *)rhs; -} - -int ngtcp2_acktr_init(ngtcp2_acktr *acktr, ngtcp2_log *log, - const ngtcp2_mem *mem) { - int rv; +void ngtcp2_acktr_init(ngtcp2_acktr *acktr, ngtcp2_log *log, + const ngtcp2_mem *mem) { + ngtcp2_objalloc_acktr_entry_init(&acktr->objalloc, NGTCP2_ACKTR_MAX_ENT + 1, + mem); - ngtcp2_objalloc_acktr_entry_init(&acktr->objalloc, 32, mem); + ngtcp2_static_ringbuf_acks_init(&acktr->acks); - rv = ngtcp2_ringbuf_init(&acktr->acks, 32, sizeof(ngtcp2_acktr_ack_entry), - mem); - if (rv != 0) { - goto fail_acks_init; - } - - ngtcp2_ksl_init(&acktr->ents, greater, sizeof(int64_t), mem); + ngtcp2_ksl_init(&acktr->ents, ngtcp2_ksl_int64_greater, + ngtcp2_ksl_int64_greater_search, sizeof(int64_t), mem); acktr->log = log; - acktr->mem = mem; acktr->flags = NGTCP2_ACKTR_FLAG_NONE; acktr->first_unacked_ts = UINT64_MAX; acktr->rx_npkt = 0; - - return 0; - -fail_acks_init: - ngtcp2_objalloc_free(&acktr->objalloc); - return rv; } void ngtcp2_acktr_free(ngtcp2_acktr *acktr) { #ifdef NOMEMPOOL ngtcp2_ksl_it it; -#endif /* NOMEMPOOL */ +#endif /* defined(NOMEMPOOL) */ if (acktr == NULL) { return; @@ -101,12 +86,10 @@ void ngtcp2_acktr_free(ngtcp2_acktr *acktr) { ngtcp2_ksl_it_next(&it)) { ngtcp2_acktr_entry_objalloc_del(ngtcp2_ksl_it_get(&it), &acktr->objalloc); } -#endif /* NOMEMPOOL */ +#endif /* defined(NOMEMPOOL) */ ngtcp2_ksl_free(&acktr->ents); - ngtcp2_ringbuf_free(&acktr->acks); - ngtcp2_objalloc_free(&acktr->objalloc); } @@ -213,11 +196,11 @@ void ngtcp2_acktr_forget(ngtcp2_acktr *acktr, ngtcp2_acktr_entry *ent) { } } -ngtcp2_ksl_it ngtcp2_acktr_get(ngtcp2_acktr *acktr) { +ngtcp2_ksl_it ngtcp2_acktr_get(const ngtcp2_acktr *acktr) { return ngtcp2_ksl_begin(&acktr->ents); } -int ngtcp2_acktr_empty(ngtcp2_acktr *acktr) { +int ngtcp2_acktr_empty(const ngtcp2_acktr *acktr) { ngtcp2_ksl_it it = ngtcp2_ksl_begin(&acktr->ents); return ngtcp2_ksl_it_end(&it); } @@ -225,7 +208,7 @@ int ngtcp2_acktr_empty(ngtcp2_acktr *acktr) { ngtcp2_acktr_ack_entry *ngtcp2_acktr_add_ack(ngtcp2_acktr *acktr, int64_t pkt_num, int64_t largest_ack) { - ngtcp2_acktr_ack_entry *ent = ngtcp2_ringbuf_push_front(&acktr->acks); + ngtcp2_acktr_ack_entry *ent = ngtcp2_ringbuf_push_front(&acktr->acks.rb); ent->largest_ack = largest_ack; ent->pkt_num = pkt_num; @@ -266,8 +249,10 @@ static void acktr_on_ack(ngtcp2_acktr *acktr, ngtcp2_ringbuf *rb, ngtcp2_ksl_it_prev(&it); ent = ngtcp2_ksl_it_get(&it); - if (ent->pkt_num > ack_ent->largest_ack && - ack_ent->largest_ack >= ent->pkt_num - (int64_t)(ent->len - 1)) { + + assert(ent->pkt_num > ack_ent->largest_ack); + + if (ack_ent->largest_ack + (int64_t)ent->len > ent->pkt_num) { ent->len = 
(size_t)(ent->pkt_num - ack_ent->largest_ack); } } @@ -279,7 +264,7 @@ void ngtcp2_acktr_recv_ack(ngtcp2_acktr *acktr, const ngtcp2_ack *fr) { ngtcp2_acktr_ack_entry *ent; int64_t largest_ack = fr->largest_ack, min_ack; size_t i, j; - ngtcp2_ringbuf *rb = &acktr->acks; + ngtcp2_ringbuf *rb = &acktr->acks.rb; size_t nacks = ngtcp2_ringbuf_len(rb); /* Assume that ngtcp2_pkt_validate_ack(fr) returns 0 */ @@ -295,7 +280,7 @@ void ngtcp2_acktr_recv_ack(ngtcp2_acktr *acktr, const ngtcp2_ack *fr) { min_ack = largest_ack - (int64_t)fr->first_ack_range; - if (min_ack <= ent->pkt_num && ent->pkt_num <= largest_ack) { + if (min_ack <= ent->pkt_num) { acktr_on_ack(acktr, rb, j); return; } @@ -306,8 +291,7 @@ void ngtcp2_acktr_recv_ack(ngtcp2_acktr *acktr, const ngtcp2_ack *fr) { for (;;) { if (ent->pkt_num > largest_ack) { - ++j; - if (j == nacks) { + if (++j == nacks) { return; } ent = ngtcp2_ringbuf_get(rb, j); @@ -330,7 +314,7 @@ void ngtcp2_acktr_commit_ack(ngtcp2_acktr *acktr) { acktr->rx_npkt = 0; } -int ngtcp2_acktr_require_active_ack(ngtcp2_acktr *acktr, +int ngtcp2_acktr_require_active_ack(const ngtcp2_acktr *acktr, ngtcp2_duration max_ack_delay, ngtcp2_tstamp ts) { return ngtcp2_tstamp_elapsed(acktr->first_unacked_ts, max_ack_delay, ts); diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_acktr.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_acktr.h index 809fb692adc3c8..16aee42f275f67 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_acktr.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_acktr.h @@ -27,7 +27,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -39,7 +39,7 @@ /* NGTCP2_ACKTR_MAX_ENT is the maximum number of ngtcp2_acktr_entry which ngtcp2_acktr stores. */ -#define NGTCP2_ACKTR_MAX_ENT 1024 +#define NGTCP2_ACKTR_MAX_ENT (NGTCP2_MAX_ACK_RANGES + 1) typedef struct ngtcp2_log ngtcp2_log; @@ -65,7 +65,7 @@ typedef struct ngtcp2_acktr_entry { }; } ngtcp2_acktr_entry; -ngtcp2_objalloc_decl(acktr_entry, ngtcp2_acktr_entry, oplent); +ngtcp2_objalloc_decl(acktr_entry, ngtcp2_acktr_entry, oplent) /* * ngtcp2_acktr_entry_objalloc_new allocates memory for ent, and @@ -108,17 +108,18 @@ typedef struct ngtcp2_acktr_ack_entry { expired and canceled. */ #define NGTCP2_ACKTR_FLAG_CANCEL_TIMER 0x0100u +ngtcp2_static_ringbuf_def(acks, 32, sizeof(ngtcp2_acktr_ack_entry)) + /* * ngtcp2_acktr tracks received packets which we have to send ack. */ typedef struct ngtcp2_acktr { ngtcp2_objalloc objalloc; - ngtcp2_ringbuf acks; + ngtcp2_static_ringbuf_acks acks; /* ents includes ngtcp2_acktr_entry sorted by decreasing order of packet number. */ ngtcp2_ksl ents; ngtcp2_log *log; - const ngtcp2_mem *mem; /* flags is bitwise OR of zero, or more of NGTCP2_ACKTR_FLAG_*. */ uint16_t flags; /* first_unacked_ts is timestamp when ngtcp2_acktr_entry is added @@ -131,15 +132,9 @@ typedef struct ngtcp2_acktr { /* * ngtcp2_acktr_init initializes |acktr|. - * - * This function returns 0 if it succeeds, or one of the following - * negative error codes: - * - * NGTCP2_ERR_NOMEM - * Out of memory. */ -int ngtcp2_acktr_init(ngtcp2_acktr *acktr, ngtcp2_log *log, - const ngtcp2_mem *mem); +void ngtcp2_acktr_init(ngtcp2_acktr *acktr, ngtcp2_log *log, + const ngtcp2_mem *mem); /* * ngtcp2_acktr_free frees resources allocated for |acktr|. It frees @@ -174,13 +169,13 @@ void ngtcp2_acktr_forget(ngtcp2_acktr *acktr, ngtcp2_acktr_entry *ent); * has the largest packet number to be acked. If there is no entry, * returned value satisfies ngtcp2_ksl_it_end(&it) != 0. 
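 *
 * (Editor's sketch, not part of the patch: walking the tracked ranges
 * with the returned iterator; the pattern mirrors the cleanup loop in
 * ngtcp2_acktr_free above.  ent->pkt_num is the largest packet number
 * of a range, and ent->len the number of consecutive packets covered.)
 *
 *     ngtcp2_ksl_it it;
 *
 *     for (it = ngtcp2_acktr_get(acktr); !ngtcp2_ksl_it_end(&it);
 *          ngtcp2_ksl_it_next(&it)) {
 *       ngtcp2_acktr_entry *ent = ngtcp2_ksl_it_get(&it);
 *       ...
 *     }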
*/ -ngtcp2_ksl_it ngtcp2_acktr_get(ngtcp2_acktr *acktr); +ngtcp2_ksl_it ngtcp2_acktr_get(const ngtcp2_acktr *acktr); /* * ngtcp2_acktr_empty returns nonzero if it has no packet to * acknowledge. */ -int ngtcp2_acktr_empty(ngtcp2_acktr *acktr); +int ngtcp2_acktr_empty(const ngtcp2_acktr *acktr); /* * ngtcp2_acktr_add_ack records outgoing ACK frame whose largest @@ -208,7 +203,7 @@ void ngtcp2_acktr_commit_ack(ngtcp2_acktr *acktr); * ngtcp2_acktr_require_active_ack returns nonzero if ACK frame should * be generated actively. */ -int ngtcp2_acktr_require_active_ack(ngtcp2_acktr *acktr, +int ngtcp2_acktr_require_active_ack(const ngtcp2_acktr *acktr, ngtcp2_duration max_ack_delay, ngtcp2_tstamp ts); @@ -218,4 +213,4 @@ int ngtcp2_acktr_require_active_ack(ngtcp2_acktr *acktr, */ void ngtcp2_acktr_immediate_ack(ngtcp2_acktr *acktr); -#endif /* NGTCP2_ACKTR_H */ +#endif /* !defined(NGTCP2_ACKTR_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_addr.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_addr.h index 8e3a9f591d9977..65ee7cd9f3006b 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_addr.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_addr.h @@ -27,7 +27,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -68,4 +68,4 @@ uint32_t ngtcp2_addr_compare(const ngtcp2_addr *a, const ngtcp2_addr *b); */ int ngtcp2_addr_empty(const ngtcp2_addr *addr); -#endif /* NGTCP2_ADDR_H */ +#endif /* !defined(NGTCP2_ADDR_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_balloc.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_balloc.c index 5cc39ee3b03da4..4a6797689fcc01 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_balloc.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_balloc.c @@ -66,7 +66,7 @@ int ngtcp2_balloc_get(ngtcp2_balloc *balloc, void **pbuf, size_t n) { if (ngtcp2_buf_left(&balloc->buf) < n) { p = ngtcp2_mem_malloc(balloc->mem, - sizeof(ngtcp2_memblock_hd) + 0x10u + balloc->blklen); + sizeof(ngtcp2_memblock_hd) + 0x8u + balloc->blklen); if (p == NULL) { return NGTCP2_ERR_NOMEM; } @@ -75,10 +75,10 @@ int ngtcp2_balloc_get(ngtcp2_balloc *balloc, void **pbuf, size_t n) { hd->next = balloc->head; balloc->head = hd; ngtcp2_buf_init( - &balloc->buf, - (uint8_t *)(((uintptr_t)p + sizeof(ngtcp2_memblock_hd) + 0xfu) & - ~(uintptr_t)0xfu), - balloc->blklen); + &balloc->buf, + (uint8_t *)(((uintptr_t)p + sizeof(ngtcp2_memblock_hd) + 0xfu) & + ~(uintptr_t)0xfu), + balloc->blklen); } assert(((uintptr_t)balloc->buf.last & 0xfu) == 0); diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_balloc.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_balloc.h index 1fb16325d9953d..c0e2a3f7562968 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_balloc.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_balloc.h @@ -27,7 +27,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -39,7 +39,10 @@ typedef struct ngtcp2_memblock_hd ngtcp2_memblock_hd; * ngtcp2_memblock_hd is the header of memory block. */ struct ngtcp2_memblock_hd { - ngtcp2_memblock_hd *next; + union { + ngtcp2_memblock_hd *next; + uint64_t pad; + }; }; /* @@ -60,7 +63,7 @@ typedef struct ngtcp2_balloc { /* * ngtcp2_balloc_init initializes |balloc| with |blklen| which is the - * size of memory block. + * size of memory block. |blklen| must be divisible by 16. 
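 *
 * (Editor's note, not part of the patch: the divisibility requirement
 * matches ngtcp2_balloc_get above, which rounds each block start up to
 * a 16-byte boundary with
 *
 *     (uint8_t *)(((uintptr_t)p + sizeof(ngtcp2_memblock_hd) + 0xfu) &
 *                 ~(uintptr_t)0xfu)
 *
 * so a 16-byte-aligned |blklen| keeps every buffer handed out of the
 * block aligned.  The union with a uint64_t pad appears to fix
 * sizeof(ngtcp2_memblock_hd) at 8 bytes even on 32-bit targets, which
 * is why the allocation slack could shrink from 0x10u to 0x8u: header
 * plus slack still covers the worst-case rounding.)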
*/ void ngtcp2_balloc_init(ngtcp2_balloc *balloc, size_t blklen, const ngtcp2_mem *mem); @@ -88,4 +91,4 @@ int ngtcp2_balloc_get(ngtcp2_balloc *balloc, void **pbuf, size_t n); */ void ngtcp2_balloc_clear(ngtcp2_balloc *balloc); -#endif /* NGTCP2_BALLOC_H */ +#endif /* !defined(NGTCP2_BALLOC_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_bbr.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_bbr.c index 27c4667c03924a..8777ca4c8a1632 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_bbr.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_bbr.c @@ -39,8 +39,9 @@ #define NGTCP2_BBR_EXTRA_ACKED_FILTERLEN 10 #define NGTCP2_BBR_STARTUP_PACING_GAIN_H 277 +#define NGTCP2_BBR_DRAIN_PACING_GAIN_H 35 -#define NGTCP2_BBR_STARTUP_CWND_GAIN_H 200 +#define NGTCP2_BBR_DEFAULT_CWND_GAIN_H 200 #define NGTCP2_BBR_PROBE_RTT_CWND_GAIN_H 50 @@ -72,7 +73,7 @@ static void bbr_reset_lower_bounds(ngtcp2_cc_bbr *bbr); static void bbr_init_round_counting(ngtcp2_cc_bbr *bbr); -static void bbr_init_full_pipe(ngtcp2_cc_bbr *bbr); +static void bbr_reset_full_bw(ngtcp2_cc_bbr *bbr); static void bbr_init_pacing_rate(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat); @@ -84,8 +85,7 @@ static void bbr_set_pacing_rate(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat); static void bbr_enter_startup(ngtcp2_cc_bbr *bbr); -static void bbr_check_startup_done(ngtcp2_cc_bbr *bbr, - const ngtcp2_cc_ack *ack); +static void bbr_check_startup_done(ngtcp2_cc_bbr *bbr); static void bbr_update_on_ack(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, const ngtcp2_cc_ack *ack, ngtcp2_tstamp ts); @@ -148,16 +148,17 @@ static void bbr_start_probe_bw_cruise(ngtcp2_cc_bbr *bbr); static void bbr_start_probe_bw_refill(ngtcp2_cc_bbr *bbr); -static void bbr_start_probe_bw_up(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, - ngtcp2_tstamp ts); +static void bbr_start_probe_bw_up(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat); static void bbr_update_probe_bw_cycle_phase(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, const ngtcp2_cc_ack *ack, ngtcp2_tstamp ts); -static int bbr_check_time_to_cruise(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, - ngtcp2_tstamp ts); +static int bbr_is_time_to_cruise(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, + ngtcp2_tstamp ts); + +static int bbr_is_time_to_go_down(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat); static int bbr_has_elapsed_in_phase(ngtcp2_cc_bbr *bbr, ngtcp2_duration interval, ngtcp2_tstamp ts); @@ -175,9 +176,8 @@ static void bbr_probe_inflight_hi_upward(ngtcp2_cc_bbr *bbr, static void bbr_adapt_upper_bounds(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, const ngtcp2_cc_ack *ack, ngtcp2_tstamp ts); -static int bbr_check_time_to_probe_bw(ngtcp2_cc_bbr *bbr, - ngtcp2_conn_stat *cstat, - ngtcp2_tstamp ts); +static int bbr_is_time_to_probe_bw(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, + ngtcp2_tstamp ts); static void bbr_pick_probe_wait(ngtcp2_cc_bbr *bbr); @@ -197,6 +197,8 @@ static void bbr_handle_inflight_too_high(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, const ngtcp2_rs *rs, ngtcp2_tstamp ts); +static void bbr_note_loss(ngtcp2_cc_bbr *bbr); + static void bbr_handle_lost_packet(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, const ngtcp2_cc_pkt *pkt, ngtcp2_tstamp ts); @@ -227,15 +229,14 @@ static void bbr_handle_restart_from_idle(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, ngtcp2_tstamp ts); -static uint64_t bbr_bdp_multiple(ngtcp2_cc_bbr *bbr, uint64_t bw, - uint64_t gain_h); +static uint64_t bbr_bdp_multiple(ngtcp2_cc_bbr *bbr, uint64_t gain_h); static uint64_t bbr_quantization_budget(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, uint64_t inflight); static 
uint64_t bbr_inflight(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, - uint64_t bw, uint64_t gain_h); + uint64_t gain_h); static void bbr_update_max_inflight(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat); @@ -247,10 +248,6 @@ static uint64_t min_pipe_cwnd(size_t max_udp_payload_size); static void bbr_advance_max_bw_filter(ngtcp2_cc_bbr *bbr); -static void bbr_modulate_cwnd_for_recovery(ngtcp2_cc_bbr *bbr, - ngtcp2_conn_stat *cstat, - const ngtcp2_cc_ack *ack); - static void bbr_save_cwnd(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat); static void bbr_restore_cwnd(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat); @@ -272,7 +269,7 @@ static int in_congestion_recovery(const ngtcp2_conn_stat *cstat, ngtcp2_tstamp sent_time); static void bbr_handle_recovery(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, - const ngtcp2_cc_ack *ack, ngtcp2_tstamp ts); + const ngtcp2_cc_ack *ack); static void bbr_on_init(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, ngtcp2_tstamp initial_ts) { @@ -289,11 +286,12 @@ static void bbr_on_init(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, bbr->idle_restart = 0; bbr->extra_acked_interval_start = initial_ts; bbr->extra_acked_delivered = 0; + bbr->full_bw_reached = 0; bbr_reset_congestion_signals(bbr); bbr_reset_lower_bounds(bbr); bbr_init_round_counting(bbr); - bbr_init_full_pipe(bbr); + bbr_reset_full_bw(bbr); bbr_init_pacing_rate(bbr, cstat); bbr_enter_startup(bbr); @@ -326,23 +324,17 @@ static void bbr_on_init(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, bbr->bw_probe_up_acks = 0; bbr->inflight_hi = UINT64_MAX; - bbr->bw_hi = UINT64_MAX; bbr->probe_rtt_expired = 0; bbr->probe_rtt_min_delay = UINT64_MAX; bbr->probe_rtt_min_stamp = initial_ts; bbr->in_loss_recovery = 0; - bbr->packet_conservation = 0; + bbr->round_count_at_recovery = UINT64_MAX; bbr->max_inflight = 0; bbr->congestion_recovery_start_ts = UINT64_MAX; - bbr->congestion_recovery_next_round_delivered = 0; - - bbr->prior_inflight_lo = 0; - bbr->prior_inflight_hi = 0; - bbr->prior_bw_lo = 0; } static void bbr_reset_congestion_signals(ngtcp2_cc_bbr *bbr) { @@ -362,48 +354,53 @@ static void bbr_init_round_counting(ngtcp2_cc_bbr *bbr) { bbr->round_count = 0; } -static void bbr_init_full_pipe(ngtcp2_cc_bbr *bbr) { - bbr->filled_pipe = 0; +static void bbr_reset_full_bw(ngtcp2_cc_bbr *bbr) { bbr->full_bw = 0; bbr->full_bw_count = 0; + bbr->full_bw_now = 0; } -static void bbr_check_startup_full_bandwidth(ngtcp2_cc_bbr *bbr) { - if (bbr->filled_pipe || !bbr->round_start || bbr->rst->rs.is_app_limited) { +static void bbr_check_full_bw_reached(ngtcp2_cc_bbr *bbr, + ngtcp2_conn_stat *cstat) { + if (bbr->full_bw_now || bbr->rst->rs.is_app_limited) { return; } - if (bbr->max_bw * 100 >= bbr->full_bw * 125) { - bbr->full_bw = bbr->max_bw; - bbr->full_bw_count = 0; - } - - ++bbr->full_bw_count; - - if (bbr->full_bw_count >= 3) { - bbr->filled_pipe = 1; + if (cstat->delivery_rate_sec * 100 >= bbr->full_bw * 125) { + bbr_reset_full_bw(bbr); + bbr->full_bw = cstat->delivery_rate_sec; - ngtcp2_log_info(bbr->cc.log, NGTCP2_LOG_EVENT_CCA, - "bbr filled pipe, full_bw=%" PRIu64, bbr->full_bw); + return; } -} -static void bbr_check_startup_high_loss(ngtcp2_cc_bbr *bbr, - const ngtcp2_cc_ack *ack) { - if (bbr->filled_pipe || !bbr->round_start || bbr->rst->rs.is_app_limited) { + if (!bbr->round_start) { return; } - if (bbr->loss_events_in_round <= 3) { + ++bbr->full_bw_count; + + bbr->full_bw_now = bbr->full_bw_count >= 3; + if (!bbr->full_bw_now) { return; } - /* loss_thresh = 2% */ - if (bbr->bytes_lost_in_round * 100 <= 
ack->prior_bytes_in_flight * 2) { + bbr->full_bw_reached = 1; + + ngtcp2_log_info(bbr->cc.log, NGTCP2_LOG_EVENT_CCA, + "bbr reached full bandwidth, full_bw=%" PRIu64, bbr->full_bw); +} + +static void bbr_check_startup_high_loss(ngtcp2_cc_bbr *bbr) { + if (bbr->full_bw_reached || bbr->loss_events_in_round <= 6 || + (bbr->in_loss_recovery && + bbr->round_count <= bbr->round_count_at_recovery) || + !is_inflight_too_high(&bbr->rst->rs)) { return; } - bbr->filled_pipe = 1; + bbr->full_bw_reached = 1; + bbr->inflight_hi = ngtcp2_max_uint64(bbr_bdp_multiple(bbr, bbr->cwnd_gain_h), + bbr->inflight_latest); } static void bbr_init_pacing_rate(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat) { @@ -423,7 +420,7 @@ static void bbr_set_pacing_rate_with_gain(ngtcp2_cc_bbr *bbr, interval = NGTCP2_SECONDS * 100 * 100 / pacing_gain_h / bbr->bw / (100 - NGTCP2_BBR_PACING_MARGIN_PERCENT); - if (bbr->filled_pipe || interval < cstat->pacing_interval) { + if (bbr->full_bw_reached || interval < cstat->pacing_interval) { cstat->pacing_interval = interval; } } @@ -437,15 +434,13 @@ static void bbr_enter_startup(ngtcp2_cc_bbr *bbr) { bbr->state = NGTCP2_BBR_STATE_STARTUP; bbr->pacing_gain_h = NGTCP2_BBR_STARTUP_PACING_GAIN_H; - bbr->cwnd_gain_h = NGTCP2_BBR_STARTUP_CWND_GAIN_H; + bbr->cwnd_gain_h = NGTCP2_BBR_DEFAULT_CWND_GAIN_H; } -static void bbr_check_startup_done(ngtcp2_cc_bbr *bbr, - const ngtcp2_cc_ack *ack) { - bbr_check_startup_full_bandwidth(bbr); - bbr_check_startup_high_loss(bbr, ack); +static void bbr_check_startup_done(ngtcp2_cc_bbr *bbr) { + bbr_check_startup_high_loss(bbr); - if (bbr->state == NGTCP2_BBR_STATE_STARTUP && bbr->filled_pipe) { + if (bbr->state == NGTCP2_BBR_STATE_STARTUP && bbr->full_bw_reached) { bbr_enter_drain(bbr); } } @@ -468,7 +463,8 @@ static void bbr_update_model_and_state(ngtcp2_cc_bbr *bbr, bbr_update_latest_delivery_signals(bbr, cstat); bbr_update_congestion_signals(bbr, cstat, ack); bbr_update_ack_aggregation(bbr, cstat, ack, ts); - bbr_check_startup_done(bbr, ack); + bbr_check_full_bw_reached(bbr, cstat); + bbr_check_startup_done(bbr); bbr_check_drain(bbr, cstat, ts); bbr_update_probe_bw_cycle_phase(bbr, cstat, ack, ts); bbr_update_min_rtt(bbr, ack, ts); @@ -493,9 +489,9 @@ static void bbr_update_on_loss(ngtcp2_cc_bbr *cc, ngtcp2_conn_stat *cstat, static void bbr_update_latest_delivery_signals(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat) { bbr->loss_round_start = 0; - bbr->bw_latest = ngtcp2_max(bbr->bw_latest, cstat->delivery_rate_sec); + bbr->bw_latest = ngtcp2_max_uint64(bbr->bw_latest, cstat->delivery_rate_sec); bbr->inflight_latest = - ngtcp2_max(bbr->inflight_latest, bbr->rst->rs.delivered); + ngtcp2_max_uint64(bbr->inflight_latest, bbr->rst->rs.delivered); if (bbr->rst->rs.prior_delivered >= bbr->loss_round_delivered) { bbr->loss_round_delivered = bbr->rst->delivered; @@ -519,11 +515,6 @@ static void bbr_update_congestion_signals(ngtcp2_cc_bbr *bbr, if (ack->bytes_lost) { bbr->bytes_lost_in_round += ack->bytes_lost; ++bbr->loss_events_in_round; - - if (!bbr->loss_in_round) { - bbr->loss_in_round = 1; - bbr->loss_round_delivered = bbr->rst->delivered; - } } if (!bbr->loss_round_start) { @@ -558,16 +549,15 @@ static void bbr_init_lower_bounds(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat) { } static void bbr_loss_lower_bounds(ngtcp2_cc_bbr *bbr) { - bbr->bw_lo = ngtcp2_max(bbr->bw_latest, bbr->bw_lo * NGTCP2_BBR_BETA_NUMER / - NGTCP2_BBR_BETA_DENOM); - bbr->inflight_lo = ngtcp2_max(bbr->inflight_latest, - bbr->inflight_lo * NGTCP2_BBR_BETA_NUMER / - NGTCP2_BBR_BETA_DENOM); + 
bbr->bw_lo = ngtcp2_max_uint64( + bbr->bw_latest, bbr->bw_lo * NGTCP2_BBR_BETA_NUMER / NGTCP2_BBR_BETA_DENOM); + bbr->inflight_lo = ngtcp2_max_uint64( + bbr->inflight_latest, + bbr->inflight_lo * NGTCP2_BBR_BETA_NUMER / NGTCP2_BBR_BETA_DENOM); } static void bbr_bound_bw_for_model(ngtcp2_cc_bbr *bbr) { - bbr->bw = ngtcp2_min(bbr->max_bw, bbr->bw_lo); - bbr->bw = ngtcp2_min(bbr->bw, bbr->bw_hi); + bbr->bw = ngtcp2_min_uint64(bbr->max_bw, bbr->bw_lo); } static void bbr_update_max_bw(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, @@ -633,7 +623,13 @@ static void bbr_update_ack_aggregation(ngtcp2_cc_bbr *bbr, bbr->extra_acked_delivered += ack->bytes_delivered; extra = bbr->extra_acked_delivered - expected_delivered; - extra = ngtcp2_min(extra, cstat->cwnd); + extra = ngtcp2_min_uint64(extra, cstat->cwnd); + + if (bbr->full_bw_reached) { + bbr->extra_acked_filter.window_length = NGTCP2_BBR_EXTRA_ACKED_FILTERLEN; + } else { + bbr->extra_acked_filter.window_length = 1; + } ngtcp2_window_filter_update(&bbr->extra_acked_filter, extra, bbr->round_count); @@ -645,14 +641,14 @@ static void bbr_enter_drain(ngtcp2_cc_bbr *bbr) { ngtcp2_log_info(bbr->cc.log, NGTCP2_LOG_EVENT_CCA, "bbr enter Drain"); bbr->state = NGTCP2_BBR_STATE_DRAIN; - bbr->pacing_gain_h = 100 * 100 / NGTCP2_BBR_STARTUP_CWND_GAIN_H; - bbr->cwnd_gain_h = NGTCP2_BBR_STARTUP_CWND_GAIN_H; + bbr->pacing_gain_h = NGTCP2_BBR_DRAIN_PACING_GAIN_H; + bbr->cwnd_gain_h = NGTCP2_BBR_DEFAULT_CWND_GAIN_H; } static void bbr_check_drain(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, ngtcp2_tstamp ts) { if (bbr->state == NGTCP2_BBR_STATE_DRAIN && - cstat->bytes_in_flight <= bbr_inflight(bbr, cstat, bbr->bw, 100)) { + cstat->bytes_in_flight <= bbr_inflight(bbr, cstat, 100)) { bbr_enter_probe_bw(bbr, ts); } } @@ -677,7 +673,7 @@ static void bbr_start_probe_bw_down(ngtcp2_cc_bbr *bbr, ngtcp2_tstamp ts) { bbr->state = NGTCP2_BBR_STATE_PROBE_BW_DOWN; bbr->pacing_gain_h = 90; - bbr->cwnd_gain_h = 200; + bbr->cwnd_gain_h = NGTCP2_BBR_DEFAULT_CWND_GAIN_H; } static void bbr_start_probe_bw_cruise(ngtcp2_cc_bbr *bbr) { @@ -686,7 +682,7 @@ static void bbr_start_probe_bw_cruise(ngtcp2_cc_bbr *bbr) { bbr->state = NGTCP2_BBR_STATE_PROBE_BW_CRUISE; bbr->pacing_gain_h = 100; - bbr->cwnd_gain_h = 200; + bbr->cwnd_gain_h = NGTCP2_BBR_DEFAULT_CWND_GAIN_H; } static void bbr_start_probe_bw_refill(ngtcp2_cc_bbr *bbr) { @@ -703,18 +699,18 @@ static void bbr_start_probe_bw_refill(ngtcp2_cc_bbr *bbr) { bbr->state = NGTCP2_BBR_STATE_PROBE_BW_REFILL; bbr->pacing_gain_h = 100; - bbr->cwnd_gain_h = 200; + bbr->cwnd_gain_h = NGTCP2_BBR_DEFAULT_CWND_GAIN_H; } -static void bbr_start_probe_bw_up(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, - ngtcp2_tstamp ts) { +static void bbr_start_probe_bw_up(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat) { ngtcp2_log_info(bbr->cc.log, NGTCP2_LOG_EVENT_CCA, "bbr start ProbeBW_UP"); bbr->ack_phase = NGTCP2_BBR_ACK_PHASE_ACKS_PROBE_STARTING; bbr_start_round(bbr); + bbr_reset_full_bw(bbr); - bbr->cycle_stamp = ts; + bbr->full_bw = cstat->delivery_rate_sec; bbr->state = NGTCP2_BBR_STATE_PROBE_BW_UP; bbr->pacing_gain_h = 125; bbr->cwnd_gain_h = 225; @@ -726,7 +722,7 @@ static void bbr_update_probe_bw_cycle_phase(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, const ngtcp2_cc_ack *ack, ngtcp2_tstamp ts) { - if (!bbr->filled_pipe) { + if (!bbr->full_bw_reached) { return; } @@ -738,17 +734,17 @@ static void bbr_update_probe_bw_cycle_phase(ngtcp2_cc_bbr *bbr, switch (bbr->state) { case NGTCP2_BBR_STATE_PROBE_BW_DOWN: - if (bbr_check_time_to_probe_bw(bbr, cstat, ts)) { 
+ if (bbr_is_time_to_probe_bw(bbr, cstat, ts)) { return; } - if (bbr_check_time_to_cruise(bbr, cstat, ts)) { + if (bbr_is_time_to_cruise(bbr, cstat, ts)) { bbr_start_probe_bw_cruise(bbr); } break; case NGTCP2_BBR_STATE_PROBE_BW_CRUISE: - if (bbr_check_time_to_probe_bw(bbr, cstat, ts)) { + if (bbr_is_time_to_probe_bw(bbr, cstat, ts)) { return; } @@ -756,13 +752,12 @@ static void bbr_update_probe_bw_cycle_phase(ngtcp2_cc_bbr *bbr, case NGTCP2_BBR_STATE_PROBE_BW_REFILL: if (bbr->round_start) { bbr->bw_probe_samples = 1; - bbr_start_probe_bw_up(bbr, cstat, ts); + bbr_start_probe_bw_up(bbr, cstat); } break; case NGTCP2_BBR_STATE_PROBE_BW_UP: - if (bbr_has_elapsed_in_phase(bbr, bbr->min_rtt, ts) && - cstat->bytes_in_flight > bbr_inflight(bbr, cstat, bbr->max_bw, 125)) { + if (bbr_is_time_to_go_down(bbr, cstat)) { bbr_start_probe_bw_down(bbr, ts); } @@ -772,15 +767,26 @@ static void bbr_update_probe_bw_cycle_phase(ngtcp2_cc_bbr *bbr, } } -static int bbr_check_time_to_cruise(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, - ngtcp2_tstamp ts) { +static int bbr_is_time_to_cruise(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, + ngtcp2_tstamp ts) { (void)ts; if (cstat->bytes_in_flight > bbr_inflight_with_headroom(bbr, cstat)) { return 0; } - if (cstat->bytes_in_flight <= bbr_inflight(bbr, cstat, bbr->max_bw, 100)) { + if (cstat->bytes_in_flight <= bbr_inflight(bbr, cstat, 100)) { + return 1; + } + + return 0; +} + +static int bbr_is_time_to_go_down(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat) { + if (bbr->rst->is_cwnd_limited && cstat->cwnd >= bbr->inflight_hi) { + bbr_reset_full_bw(bbr); + bbr->full_bw = cstat->delivery_rate_sec; + } else if (bbr->full_bw_now) { return 1; } @@ -801,13 +807,13 @@ static uint64_t bbr_inflight_with_headroom(ngtcp2_cc_bbr *bbr, return UINT64_MAX; } - headroom = ngtcp2_max(cstat->max_tx_udp_payload_size, - bbr->inflight_hi * NGTCP2_BBR_HEADROOM_NUMER / - NGTCP2_BBR_HEADROOM_DENOM); + headroom = ngtcp2_max_uint64(cstat->max_tx_udp_payload_size, + bbr->inflight_hi * NGTCP2_BBR_HEADROOM_NUMER / + NGTCP2_BBR_HEADROOM_DENOM); mpcwnd = min_pipe_cwnd(cstat->max_tx_udp_payload_size); if (bbr->inflight_hi > headroom) { - return ngtcp2_max(bbr->inflight_hi - headroom, mpcwnd); + return ngtcp2_max_uint64(bbr->inflight_hi - headroom, mpcwnd); } return mpcwnd; @@ -818,8 +824,8 @@ static void bbr_raise_inflight_hi_slope(ngtcp2_cc_bbr *bbr, uint64_t growth_this_round = cstat->max_tx_udp_payload_size << bbr->bw_probe_up_rounds; - bbr->bw_probe_up_rounds = ngtcp2_min(bbr->bw_probe_up_rounds + 1, 30); - bbr->probe_up_cnt = ngtcp2_max(cstat->cwnd / growth_this_round, 1) * + bbr->bw_probe_up_rounds = ngtcp2_min_size(bbr->bw_probe_up_rounds + 1, 30); + bbr->probe_up_cnt = ngtcp2_max_uint64(cstat->cwnd / growth_this_round, 1) * cstat->max_tx_udp_payload_size; } @@ -860,8 +866,7 @@ static void bbr_adapt_upper_bounds(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, } if (!bbr_check_inflight_too_high(bbr, cstat, ts)) { - /* bbr->bw_hi never be updated */ - if (bbr->inflight_hi == UINT64_MAX /* || bbr->bw_hi == UINT64_MAX */) { + if (bbr->inflight_hi == UINT64_MAX) { return; } @@ -869,19 +874,14 @@ static void bbr_adapt_upper_bounds(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, bbr->inflight_hi = bbr->rst->rs.tx_in_flight; } - if (cstat->delivery_rate_sec > bbr->bw_hi) { - bbr->bw_hi = cstat->delivery_rate_sec; - } - if (bbr->state == NGTCP2_BBR_STATE_PROBE_BW_UP) { bbr_probe_inflight_hi_upward(bbr, cstat, ack); } } } -static int bbr_check_time_to_probe_bw(ngtcp2_cc_bbr *bbr, - ngtcp2_conn_stat *cstat, - 
ngtcp2_tstamp ts) { +static int bbr_is_time_to_probe_bw(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, + ngtcp2_tstamp ts) { if (bbr_has_elapsed_in_phase(bbr, bbr->bw_probe_wait, ts) || bbr_is_reno_coexistence_probe_time(bbr, cstat)) { bbr_start_probe_bw_refill(bbr); @@ -902,22 +902,22 @@ static void bbr_pick_probe_wait(ngtcp2_cc_bbr *bbr) { bbr->rand(&rand, 1, &bbr->rand_ctx); bbr->bw_probe_wait = - 2 * NGTCP2_SECONDS + (ngtcp2_tstamp)(NGTCP2_SECONDS * rand / 255); + 2 * NGTCP2_SECONDS + (ngtcp2_tstamp)(NGTCP2_SECONDS * rand / 255); } static int bbr_is_reno_coexistence_probe_time(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat) { uint64_t reno_rounds = - bbr_target_inflight(bbr, cstat) / cstat->max_tx_udp_payload_size; + bbr_target_inflight(bbr, cstat) / cstat->max_tx_udp_payload_size; - return bbr->rounds_since_bw_probe >= ngtcp2_min(reno_rounds, 63); + return bbr->rounds_since_bw_probe >= ngtcp2_min_uint64(reno_rounds, 63); } static uint64_t bbr_target_inflight(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat) { - uint64_t bdp = bbr_inflight(bbr, cstat, bbr->bw, 100); + uint64_t bdp = bbr_bdp_multiple(bbr, bbr->cwnd_gain_h); - return ngtcp2_min(bdp, cstat->cwnd); + return ngtcp2_min_uint64(bdp, cstat->cwnd); } static int bbr_check_inflight_too_high(ngtcp2_cc_bbr *bbr, @@ -946,9 +946,9 @@ static void bbr_handle_inflight_too_high(ngtcp2_cc_bbr *bbr, bbr->bw_probe_samples = 0; if (!rs->is_app_limited) { - bbr->inflight_hi = ngtcp2_max( - rs->tx_in_flight, bbr_target_inflight(bbr, cstat) * - NGTCP2_BBR_BETA_NUMER / NGTCP2_BBR_BETA_DENOM); + bbr->inflight_hi = ngtcp2_max_uint64( + rs->tx_in_flight, bbr_target_inflight(bbr, cstat) * + NGTCP2_BBR_BETA_NUMER / NGTCP2_BBR_BETA_DENOM); } if (bbr->state == NGTCP2_BBR_STATE_PROBE_BW_UP) { @@ -956,10 +956,21 @@ static void bbr_handle_inflight_too_high(ngtcp2_cc_bbr *bbr, } } +static void bbr_note_loss(ngtcp2_cc_bbr *bbr) { + if (bbr->loss_in_round) { + return; + } + + bbr->loss_in_round = 1; + bbr->loss_round_delivered = bbr->rst->delivered; +} + static void bbr_handle_lost_packet(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, const ngtcp2_cc_pkt *pkt, ngtcp2_tstamp ts) { ngtcp2_rs rs = {0}; + bbr_note_loss(bbr); + if (!bbr->bw_probe_samples) { return; } @@ -1007,7 +1018,7 @@ static void bbr_update_min_rtt(ngtcp2_cc_bbr *bbr, const ngtcp2_cc_ack *ack, int min_rtt_expired; bbr->probe_rtt_expired = - ts > bbr->probe_rtt_min_stamp + NGTCP2_BBR_PROBE_RTT_INTERVAL; + ts > bbr->probe_rtt_min_stamp + NGTCP2_BBR_PROBE_RTT_INTERVAL; if (ack->rtt != UINT64_MAX && (ack->rtt < bbr->probe_rtt_min_delay || bbr->probe_rtt_expired)) { @@ -1106,7 +1117,7 @@ static void bbr_mark_connection_app_limited(ngtcp2_cc_bbr *bbr, static void bbr_exit_probe_rtt(ngtcp2_cc_bbr *bbr, ngtcp2_tstamp ts) { bbr_reset_lower_bounds(bbr); - if (bbr->filled_pipe) { + if (bbr->full_bw_reached) { bbr_start_probe_bw_down(bbr, ts); bbr_start_probe_bw_cruise(bbr); } else { @@ -1131,15 +1142,14 @@ static void bbr_handle_restart_from_idle(ngtcp2_cc_bbr *bbr, } } -static uint64_t bbr_bdp_multiple(ngtcp2_cc_bbr *bbr, uint64_t bw, - uint64_t gain_h) { +static uint64_t bbr_bdp_multiple(ngtcp2_cc_bbr *bbr, uint64_t gain_h) { uint64_t bdp; if (bbr->min_rtt == UINT64_MAX) { return bbr->initial_cwnd; } - bdp = bw * bbr->min_rtt / NGTCP2_SECONDS; + bdp = ngtcp2_max_uint64(bbr->bw * bbr->min_rtt / NGTCP2_SECONDS, 1); return (uint64_t)(bdp * gain_h / 100); } @@ -1153,9 +1163,9 @@ static uint64_t bbr_quantization_budget(ngtcp2_cc_bbr *bbr, uint64_t inflight) { bbr_update_offload_budget(bbr, cstat); - inflight = 
ngtcp2_max(inflight, bbr->offload_budget); + inflight = ngtcp2_max_uint64(inflight, bbr->offload_budget); inflight = - ngtcp2_max(inflight, min_pipe_cwnd(cstat->max_tx_udp_payload_size)); + ngtcp2_max_uint64(inflight, min_pipe_cwnd(cstat->max_tx_udp_payload_size)); if (bbr->state == NGTCP2_BBR_STATE_PROBE_BW_UP) { inflight += 2 * cstat->max_tx_udp_payload_size; @@ -1165,8 +1175,8 @@ static uint64_t bbr_quantization_budget(ngtcp2_cc_bbr *bbr, } static uint64_t bbr_inflight(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, - uint64_t bw, uint64_t gain_h) { - uint64_t inflight = bbr_bdp_multiple(bbr, bw, gain_h); + uint64_t gain_h) { + uint64_t inflight = bbr_bdp_multiple(bbr, gain_h); return bbr_quantization_budget(bbr, cstat, inflight); } @@ -1178,8 +1188,7 @@ static void bbr_update_max_inflight(ngtcp2_cc_bbr *bbr, /* Not documented */ /* bbr_update_aggregation_budget(bbr); */ - inflight = - bbr_bdp_multiple(bbr, bbr->bw, bbr->cwnd_gain_h) + bbr->extra_acked; + inflight = bbr_bdp_multiple(bbr, bbr->cwnd_gain_h) + bbr->extra_acked; bbr->max_inflight = bbr_quantization_budget(bbr, cstat, inflight); } @@ -1192,44 +1201,26 @@ static void bbr_advance_max_bw_filter(ngtcp2_cc_bbr *bbr) { ++bbr->cycle_count; } -static void bbr_modulate_cwnd_for_recovery(ngtcp2_cc_bbr *bbr, - ngtcp2_conn_stat *cstat, - const ngtcp2_cc_ack *ack) { - if (ack->bytes_lost > 0) { - if (cstat->cwnd > ack->bytes_lost) { - cstat->cwnd -= ack->bytes_lost; - cstat->cwnd = ngtcp2_max(cstat->cwnd, 2 * cstat->max_tx_udp_payload_size); - } else { - cstat->cwnd = 2 * cstat->max_tx_udp_payload_size; - } - } - - if (bbr->packet_conservation) { - cstat->cwnd = - ngtcp2_max(cstat->cwnd, cstat->bytes_in_flight + ack->bytes_delivered); - } -} - static void bbr_save_cwnd(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat) { if (!bbr->in_loss_recovery && bbr->state != NGTCP2_BBR_STATE_PROBE_RTT) { bbr->prior_cwnd = cstat->cwnd; return; } - bbr->prior_cwnd = ngtcp2_max(bbr->prior_cwnd, cstat->cwnd); + bbr->prior_cwnd = ngtcp2_max_uint64(bbr->prior_cwnd, cstat->cwnd); } static void bbr_restore_cwnd(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat) { - cstat->cwnd = ngtcp2_max(cstat->cwnd, bbr->prior_cwnd); + cstat->cwnd = ngtcp2_max_uint64(cstat->cwnd, bbr->prior_cwnd); } static uint64_t bbr_probe_rtt_cwnd(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat) { uint64_t probe_rtt_cwnd = - bbr_bdp_multiple(bbr, bbr->bw, NGTCP2_BBR_PROBE_RTT_CWND_GAIN_H); + bbr_bdp_multiple(bbr, NGTCP2_BBR_PROBE_RTT_CWND_GAIN_H); uint64_t mpcwnd = min_pipe_cwnd(cstat->max_tx_udp_payload_size); - return ngtcp2_max(probe_rtt_cwnd, mpcwnd); + return ngtcp2_max_uint64(probe_rtt_cwnd, mpcwnd); } static void bbr_bound_cwnd_for_probe_rtt(ngtcp2_cc_bbr *bbr, @@ -1239,7 +1230,7 @@ static void bbr_bound_cwnd_for_probe_rtt(ngtcp2_cc_bbr *bbr, if (bbr->state == NGTCP2_BBR_STATE_PROBE_RTT) { probe_rtt_cwnd = bbr_probe_rtt_cwnd(bbr, cstat); - cstat->cwnd = ngtcp2_min(cstat->cwnd, probe_rtt_cwnd); + cstat->cwnd = ngtcp2_min_uint64(cstat->cwnd, probe_rtt_cwnd); } } @@ -1248,21 +1239,18 @@ static void bbr_set_cwnd(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, uint64_t mpcwnd; bbr_update_max_inflight(bbr, cstat); - bbr_modulate_cwnd_for_recovery(bbr, cstat, ack); - - if (!bbr->packet_conservation) { - if (bbr->filled_pipe) { - cstat->cwnd += ack->bytes_delivered; - cstat->cwnd = ngtcp2_min(cstat->cwnd, bbr->max_inflight); - } else if (cstat->cwnd < bbr->max_inflight || - bbr->rst->delivered < bbr->initial_cwnd) { - cstat->cwnd += ack->bytes_delivered; - } - mpcwnd = 
min_pipe_cwnd(cstat->max_tx_udp_payload_size); - cstat->cwnd = ngtcp2_max(cstat->cwnd, mpcwnd); + if (bbr->full_bw_reached) { + cstat->cwnd += ack->bytes_delivered; + cstat->cwnd = ngtcp2_min_uint64(cstat->cwnd, bbr->max_inflight); + } else if (cstat->cwnd < bbr->max_inflight || + bbr->rst->delivered < bbr->initial_cwnd) { + cstat->cwnd += ack->bytes_delivered; } + mpcwnd = min_pipe_cwnd(cstat->max_tx_udp_payload_size); + cstat->cwnd = ngtcp2_max_uint64(cstat->cwnd, mpcwnd); + bbr_bound_cwnd_for_probe_rtt(bbr, cstat); bbr_bound_cwnd_for_model(bbr, cstat); } @@ -1280,30 +1268,23 @@ static void bbr_bound_cwnd_for_model(ngtcp2_cc_bbr *bbr, cap = bbr_inflight_with_headroom(bbr, cstat); } - cap = ngtcp2_min(cap, bbr->inflight_lo); - cap = ngtcp2_max(cap, mpcwnd); + cap = ngtcp2_min_uint64(cap, bbr->inflight_lo); + cap = ngtcp2_max_uint64(cap, mpcwnd); - cstat->cwnd = ngtcp2_min(cstat->cwnd, cap); + cstat->cwnd = ngtcp2_min_uint64(cstat->cwnd, cap); } static void bbr_set_send_quantum(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat) { - size_t floor, send_quantum; + size_t send_quantum = 64 * 1024; (void)bbr; - if (cstat->pacing_interval > (NGTCP2_SECONDS * 8 * 10 / 12) >> 20) { - floor = cstat->max_tx_udp_payload_size; - } else { - floor = 2 * cstat->max_tx_udp_payload_size; - } - if (cstat->pacing_interval) { - send_quantum = (size_t)(NGTCP2_MILLISECONDS / cstat->pacing_interval); - send_quantum = ngtcp2_min(send_quantum, 64 * 1024); - } else { - send_quantum = 64 * 1024; + send_quantum = ngtcp2_min_size( + send_quantum, (size_t)(NGTCP2_MILLISECONDS / cstat->pacing_interval)); } - cstat->send_quantum = ngtcp2_max(send_quantum, floor); + cstat->send_quantum = + ngtcp2_max_size(send_quantum, 2 * cstat->max_tx_udp_payload_size); } static int in_congestion_recovery(const ngtcp2_conn_stat *cstat, @@ -1313,16 +1294,12 @@ static int in_congestion_recovery(const ngtcp2_conn_stat *cstat, } static void bbr_handle_recovery(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, - const ngtcp2_cc_ack *ack, ngtcp2_tstamp ts) { + const ngtcp2_cc_ack *ack) { if (bbr->in_loss_recovery) { - if (ts - cstat->congestion_recovery_start_ts >= cstat->smoothed_rtt) { - bbr->packet_conservation = 0; - } - if (ack->largest_pkt_sent_ts != UINT64_MAX && !in_congestion_recovery(cstat, ack->largest_pkt_sent_ts)) { bbr->in_loss_recovery = 0; - bbr->packet_conservation = 0; + bbr->round_count_at_recovery = UINT64_MAX; bbr_restore_cwnd(bbr, cstat); } @@ -1331,18 +1308,15 @@ static void bbr_handle_recovery(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, if (bbr->congestion_recovery_start_ts != UINT64_MAX) { bbr->in_loss_recovery = 1; + bbr->round_count_at_recovery = + bbr->round_start ? 
bbr->round_count : bbr->round_count + 1; bbr_save_cwnd(bbr, cstat); cstat->cwnd = - cstat->bytes_in_flight + - ngtcp2_max(ack->bytes_delivered, cstat->max_tx_udp_payload_size); + cstat->bytes_in_flight + + ngtcp2_max_uint64(ack->bytes_delivered, cstat->max_tx_udp_payload_size); cstat->congestion_recovery_start_ts = bbr->congestion_recovery_start_ts; bbr->congestion_recovery_start_ts = UINT64_MAX; - bbr->packet_conservation = 1; - bbr->congestion_recovery_next_round_delivered = bbr->rst->delivered; - bbr->prior_inflight_hi = bbr->inflight_hi; - bbr->prior_inflight_lo = bbr->inflight_lo; - bbr->prior_bw_lo = bbr->bw_lo; } } @@ -1350,12 +1324,18 @@ static void bbr_cc_on_pkt_lost(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, const ngtcp2_cc_pkt *pkt, ngtcp2_tstamp ts) { ngtcp2_cc_bbr *bbr = ngtcp2_struct_of(cc, ngtcp2_cc_bbr, cc); + if (bbr->state == NGTCP2_BBR_STATE_STARTUP) { + return; + } + bbr_update_on_loss(bbr, cstat, pkt, ts); } static void bbr_cc_congestion_event(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, - ngtcp2_tstamp sent_ts, ngtcp2_tstamp ts) { + ngtcp2_tstamp sent_ts, uint64_t bytes_lost, + ngtcp2_tstamp ts) { ngtcp2_cc_bbr *bbr = ngtcp2_struct_of(cc, ngtcp2_cc_bbr, cc); + (void)bytes_lost; if (bbr->in_loss_recovery || bbr->congestion_recovery_start_ts != UINT64_MAX || @@ -1377,13 +1357,8 @@ static void bbr_cc_on_spurious_congestion(ngtcp2_cc *cc, if (bbr->in_loss_recovery) { bbr->in_loss_recovery = 0; - bbr->packet_conservation = 0; + bbr->round_count_at_recovery = UINT64_MAX; bbr_restore_cwnd(bbr, cstat); - bbr->full_bw_count = 0; - bbr->loss_in_round = 0; - bbr->inflight_lo = ngtcp2_max(bbr->inflight_lo, bbr->prior_inflight_lo); - bbr->inflight_hi = ngtcp2_max(bbr->inflight_hi, bbr->prior_inflight_hi); - bbr->bw_lo = ngtcp2_max(bbr->bw_lo, bbr->prior_bw_lo); } } @@ -1396,19 +1371,19 @@ static void bbr_cc_on_persistent_congestion(ngtcp2_cc *cc, cstat->congestion_recovery_start_ts = UINT64_MAX; bbr->congestion_recovery_start_ts = UINT64_MAX; bbr->in_loss_recovery = 0; - bbr->packet_conservation = 0; + bbr->round_count_at_recovery = UINT64_MAX; bbr_save_cwnd(bbr, cstat); cstat->cwnd = cstat->bytes_in_flight + cstat->max_tx_udp_payload_size; - cstat->cwnd = - ngtcp2_max(cstat->cwnd, min_pipe_cwnd(cstat->max_tx_udp_payload_size)); + cstat->cwnd = ngtcp2_max_uint64( + cstat->cwnd, min_pipe_cwnd(cstat->max_tx_udp_payload_size)); } static void bbr_cc_on_ack_recv(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, const ngtcp2_cc_ack *ack, ngtcp2_tstamp ts) { ngtcp2_cc_bbr *bbr = ngtcp2_struct_of(cc, ngtcp2_cc_bbr, cc); - bbr_handle_recovery(bbr, cstat, ack, ts); + bbr_handle_recovery(bbr, cstat, ack); bbr_update_on_ack(bbr, cstat, ack, ts); } diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_bbr.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_bbr.h index 0017be35010e66..f266ec5d71e4e4 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_bbr.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_bbr.h @@ -27,7 +27,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -95,9 +95,10 @@ typedef struct ngtcp2_cc_bbr { uint64_t round_count; /* Full pipe */ - int filled_pipe; uint64_t full_bw; size_t full_bw_count; + int full_bw_reached; + int full_bw_now; /* Pacing rate */ uint64_t pacing_gain_h; @@ -123,19 +124,13 @@ typedef struct ngtcp2_cc_bbr { size_t bw_probe_up_rounds; uint64_t bw_probe_up_acks; uint64_t inflight_hi; - uint64_t bw_hi; int probe_rtt_expired; ngtcp2_duration probe_rtt_min_delay; ngtcp2_tstamp probe_rtt_min_stamp; int in_loss_recovery; - int packet_conservation; + uint64_t 
round_count_at_recovery; uint64_t max_inflight; ngtcp2_tstamp congestion_recovery_start_ts; - uint64_t congestion_recovery_next_round_delivered; - - uint64_t prior_inflight_lo; - uint64_t prior_inflight_hi; - uint64_t prior_bw_lo; } ngtcp2_cc_bbr; void ngtcp2_cc_bbr_init(ngtcp2_cc_bbr *bbr, ngtcp2_log *log, @@ -143,4 +138,4 @@ void ngtcp2_cc_bbr_init(ngtcp2_cc_bbr *bbr, ngtcp2_log *log, ngtcp2_tstamp initial_ts, ngtcp2_rand rand, const ngtcp2_rand_ctx *rand_ctx); -#endif /* NGTCP2_BBR_H */ +#endif /* !defined(NGTCP2_BBR_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_buf.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_buf.h index 85b5f4ddf0464a..e87adb119916ca 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_buf.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_buf.h @@ -27,7 +27,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -105,4 +105,4 @@ int ngtcp2_buf_chain_new(ngtcp2_buf_chain **pbufchain, size_t len, */ void ngtcp2_buf_chain_del(ngtcp2_buf_chain *bufchain, const ngtcp2_mem *mem); -#endif /* NGTCP2_BUF_H */ +#endif /* !defined(NGTCP2_BUF_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_cc.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_cc.c index 9ad37fbdb6395a..1ff59f315c5b66 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_cc.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_cc.c @@ -32,16 +32,13 @@ #include "ngtcp2_mem.h" #include "ngtcp2_rcvry.h" #include "ngtcp2_conn_stat.h" +#include "ngtcp2_rst.h" #include "ngtcp2_unreachable.h" -/* NGTCP2_CC_DELIVERY_RATE_SEC_FILTERLEN is the window length of - delivery rate filter driven by ACK clocking. */ -#define NGTCP2_CC_DELIVERY_RATE_SEC_FILTERLEN 10 - uint64_t ngtcp2_cc_compute_initcwnd(size_t max_udp_payload_size) { uint64_t n = 2 * max_udp_payload_size; - n = ngtcp2_max(n, 14720); - return ngtcp2_min(10 * max_udp_payload_size, n); + n = ngtcp2_max_uint64(n, 14720); + return ngtcp2_min_uint64(10 * max_udp_payload_size, n); } ngtcp2_cc_pkt *ngtcp2_cc_pkt_init(ngtcp2_cc_pkt *pkt, int64_t pkt_num, @@ -59,13 +56,7 @@ ngtcp2_cc_pkt *ngtcp2_cc_pkt_init(ngtcp2_cc_pkt *pkt, int64_t pkt_num, return pkt; } -static void reno_cc_reset(ngtcp2_cc_reno *reno) { - ngtcp2_window_filter_init(&reno->delivery_rate_sec_filter, - NGTCP2_CC_DELIVERY_RATE_SEC_FILTERLEN); - reno->ack_count = 0; - reno->target_cwnd = 0; - reno->pending_add = 0; -} +static void reno_cc_reset(ngtcp2_cc_reno *reno) { reno->pending_add = 0; } void ngtcp2_cc_reno_init(ngtcp2_cc_reno *reno, ngtcp2_log *log) { memset(reno, 0, sizeof(*reno)); @@ -74,8 +65,7 @@ void ngtcp2_cc_reno_init(ngtcp2_cc_reno *reno, ngtcp2_log *log) { reno->cc.on_pkt_acked = ngtcp2_cc_reno_cc_on_pkt_acked; reno->cc.congestion_event = ngtcp2_cc_reno_cc_congestion_event; reno->cc.on_persistent_congestion = - ngtcp2_cc_reno_cc_on_persistent_congestion; - reno->cc.on_ack_recv = ngtcp2_cc_reno_cc_on_ack_recv; + ngtcp2_cc_reno_cc_on_persistent_congestion; reno->cc.reset = ngtcp2_cc_reno_cc_reset; reno_cc_reset(reno); @@ -94,11 +84,7 @@ void ngtcp2_cc_reno_cc_on_pkt_acked(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, uint64_t m; (void)ts; - if (in_congestion_recovery(cstat, pkt->sent_ts)) { - return; - } - - if (reno->target_cwnd && reno->target_cwnd < cstat->cwnd) { + if (in_congestion_recovery(cstat, pkt->sent_ts) || pkt->is_app_limited) { return; } @@ -118,9 +104,10 @@ void ngtcp2_cc_reno_cc_on_pkt_acked(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, void ngtcp2_cc_reno_cc_congestion_event(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, ngtcp2_tstamp sent_ts, - ngtcp2_tstamp ts) { + uint64_t bytes_lost, ngtcp2_tstamp ts) { 
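  /* Editor's note, not part of the patch: in this update the
     congestion_event callback gains a bytes_lost parameter.  Reno and
     BBR ignore it via their (void)bytes_lost casts, while CUBIC adds
     it to bytes_in_flight to form flight_size and uses that to clamp
     ssthresh when fewer than half a cwnd's worth of bytes was
     delivered in the current rate sample. */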
ngtcp2_cc_reno *reno = ngtcp2_struct_of(cc, ngtcp2_cc_reno, cc); uint64_t min_cwnd; + (void)bytes_lost; if (in_congestion_recovery(cstat, sent_ts)) { return; @@ -129,7 +116,7 @@ void ngtcp2_cc_reno_cc_congestion_event(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, cstat->congestion_recovery_start_ts = ts; cstat->cwnd >>= NGTCP2_LOSS_REDUCTION_FACTOR_BITS; min_cwnd = 2 * cstat->max_tx_udp_payload_size; - cstat->cwnd = ngtcp2_max(cstat->cwnd, min_cwnd); + cstat->cwnd = ngtcp2_max_uint64(cstat->cwnd, min_cwnd); cstat->ssthresh = cstat->cwnd; reno->pending_add = 0; @@ -149,35 +136,6 @@ void ngtcp2_cc_reno_cc_on_persistent_congestion(ngtcp2_cc *cc, cstat->congestion_recovery_start_ts = UINT64_MAX; } -void ngtcp2_cc_reno_cc_on_ack_recv(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, - const ngtcp2_cc_ack *ack, ngtcp2_tstamp ts) { - ngtcp2_cc_reno *reno = ngtcp2_struct_of(cc, ngtcp2_cc_reno, cc); - uint64_t target_cwnd, initcwnd; - uint64_t max_delivery_rate_sec; - (void)ack; - (void)ts; - - ++reno->ack_count; - - ngtcp2_window_filter_update(&reno->delivery_rate_sec_filter, - cstat->delivery_rate_sec, reno->ack_count); - - max_delivery_rate_sec = - ngtcp2_window_filter_get_best(&reno->delivery_rate_sec_filter); - - if (cstat->min_rtt != UINT64_MAX && max_delivery_rate_sec) { - target_cwnd = max_delivery_rate_sec * cstat->smoothed_rtt / NGTCP2_SECONDS; - initcwnd = ngtcp2_cc_compute_initcwnd(cstat->max_tx_udp_payload_size); - reno->target_cwnd = ngtcp2_max(initcwnd, target_cwnd) * 289 / 100; - - ngtcp2_log_info(reno->cc.log, NGTCP2_LOG_EVENT_CCA, - "target_cwnd=%" PRIu64 " max_delivery_rate_sec=%" PRIu64 - " smoothed_rtt=%" PRIu64, - reno->target_cwnd, max_delivery_rate_sec, - cstat->smoothed_rtt); - } -} - void ngtcp2_cc_reno_cc_reset(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, ngtcp2_tstamp ts) { ngtcp2_cc_reno *reno = ngtcp2_struct_of(cc, ngtcp2_cc_reno, cc); @@ -187,45 +145,49 @@ void ngtcp2_cc_reno_cc_reset(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, reno_cc_reset(reno); } +static void cubic_vars_reset(ngtcp2_cubic_vars *v) { + v->cwnd_prior = 0; + v->w_max = 0; + v->k = 0; + v->epoch_start = UINT64_MAX; + v->w_est = 0; + + v->state = NGTCP2_CUBIC_STATE_INITIAL; + v->app_limited_start_ts = UINT64_MAX; + v->app_limited_duration = 0; + v->pending_bytes_delivered = 0; + v->pending_est_bytes_delivered = 0; +} + static void cubic_cc_reset(ngtcp2_cc_cubic *cubic) { - ngtcp2_window_filter_init(&cubic->delivery_rate_sec_filter, - NGTCP2_CC_DELIVERY_RATE_SEC_FILTERLEN); - cubic->ack_count = 0; - cubic->target_cwnd = 0; - cubic->w_last_max = 0; - cubic->w_tcp = 0; - cubic->origin_point = 0; - cubic->epoch_start = UINT64_MAX; - cubic->k = 0; - - cubic->prior.cwnd = 0; - cubic->prior.ssthresh = 0; - cubic->prior.w_last_max = 0; - cubic->prior.w_tcp = 0; - cubic->prior.origin_point = 0; - cubic->prior.epoch_start = UINT64_MAX; - cubic->prior.k = 0; - - cubic->rtt_sample_count = 0; - cubic->current_round_min_rtt = UINT64_MAX; - cubic->last_round_min_rtt = UINT64_MAX; - cubic->window_end = -1; + cubic_vars_reset(&cubic->current); + cubic_vars_reset(&cubic->undo.v); + cubic->undo.cwnd = 0; + cubic->undo.ssthresh = 0; + + cubic->hs.current_round_min_rtt = UINT64_MAX; + cubic->hs.last_round_min_rtt = UINT64_MAX; + cubic->hs.curr_rtt = UINT64_MAX; + cubic->hs.rtt_sample_count = 0; + cubic->hs.css_baseline_min_rtt = UINT64_MAX; + cubic->hs.css_round = 0; + + cubic->next_round_delivered = 0; } -void ngtcp2_cc_cubic_init(ngtcp2_cc_cubic *cubic, ngtcp2_log *log) { +void ngtcp2_cc_cubic_init(ngtcp2_cc_cubic *cubic, ngtcp2_log *log, + 
ngtcp2_rst *rst) { memset(cubic, 0, sizeof(*cubic)); cubic->cc.log = log; - cubic->cc.on_pkt_acked = ngtcp2_cc_cubic_cc_on_pkt_acked; + cubic->cc.on_ack_recv = ngtcp2_cc_cubic_cc_on_ack_recv; cubic->cc.congestion_event = ngtcp2_cc_cubic_cc_congestion_event; cubic->cc.on_spurious_congestion = ngtcp2_cc_cubic_cc_on_spurious_congestion; cubic->cc.on_persistent_congestion = - ngtcp2_cc_cubic_cc_on_persistent_congestion; - cubic->cc.on_ack_recv = ngtcp2_cc_cubic_cc_on_ack_recv; - cubic->cc.on_pkt_sent = ngtcp2_cc_cubic_cc_on_pkt_sent; - cubic->cc.new_rtt_sample = ngtcp2_cc_cubic_cc_new_rtt_sample; + ngtcp2_cc_cubic_cc_on_persistent_congestion; cubic->cc.reset = ngtcp2_cc_cubic_cc_reset; - cubic->cc.event = ngtcp2_cc_cubic_cc_event; + + cubic->rst = rst; cubic_cc_reset(cubic); } @@ -254,191 +216,250 @@ uint64_t ngtcp2_cbrt(uint64_t n) { return y; } -/* HyStart++ constants */ -#define NGTCP2_HS_MIN_SSTHRESH 16 +/* RFC 9406 HyStart++ constants */ +#define NGTCP2_HS_MIN_RTT_THRESH (4 * NGTCP2_MILLISECONDS) +#define NGTCP2_HS_MAX_RTT_THRESH (16 * NGTCP2_MILLISECONDS) +#define NGTCP2_HS_MIN_RTT_DIVISOR 8 #define NGTCP2_HS_N_RTT_SAMPLE 8 -#define NGTCP2_HS_MIN_ETA (4 * NGTCP2_MILLISECONDS) -#define NGTCP2_HS_MAX_ETA (16 * NGTCP2_MILLISECONDS) +#define NGTCP2_HS_CSS_GROWTH_DIVISOR 4 +#define NGTCP2_HS_CSS_ROUNDS 5 + +static uint64_t cubic_cc_compute_w_cubic(ngtcp2_cc_cubic *cubic, + const ngtcp2_conn_stat *cstat, + ngtcp2_tstamp ts) { + ngtcp2_duration t = ts - cubic->current.epoch_start; + uint64_t delta; + uint64_t tx = (t << 10) / NGTCP2_SECONDS; + uint64_t kx = (cubic->current.k << 10) / NGTCP2_SECONDS; + uint64_t time_delta; + + if (tx < kx) { + return UINT64_MAX; + } + + time_delta = tx - kx; + + delta = cstat->max_tx_udp_payload_size * + ((((time_delta * time_delta) >> 10) * time_delta) >> 10) * 4 / 10; -void ngtcp2_cc_cubic_cc_on_pkt_acked(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, - const ngtcp2_cc_pkt *pkt, - ngtcp2_tstamp ts) { + return cubic->current.w_max + (delta >> 10); +} + +void ngtcp2_cc_cubic_cc_on_ack_recv(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, + const ngtcp2_cc_ack *ack, + ngtcp2_tstamp ts) { ngtcp2_cc_cubic *cubic = ngtcp2_struct_of(cc, ngtcp2_cc_cubic, cc); - ngtcp2_duration t, eta; - uint64_t target, cwnd_thres; - uint64_t tx, kx, time_delta, delta; - uint64_t add, tcp_add; - uint64_t m; + uint64_t w_cubic, w_cubic_next, target, m; + ngtcp2_duration rtt_thresh; + int round_start; - if (pkt->pktns_id == NGTCP2_PKTNS_ID_APPLICATION && cubic->window_end != -1 && - cubic->window_end <= pkt->pkt_num) { - cubic->window_end = -1; + if (in_congestion_recovery(cstat, ack->largest_pkt_sent_ts)) { + return; } - if (in_congestion_recovery(cstat, pkt->sent_ts)) { + if (cubic->current.state == NGTCP2_CUBIC_STATE_CONGESTION_AVOIDANCE) { + if (cubic->rst->rs.is_app_limited && !cubic->rst->is_cwnd_limited) { + if (cubic->current.app_limited_start_ts == UINT64_MAX) { + cubic->current.app_limited_start_ts = ts; + } + + return; + } + + if (cubic->current.app_limited_start_ts != UINT64_MAX) { + cubic->current.app_limited_duration += + ts - cubic->current.app_limited_start_ts; + cubic->current.app_limited_start_ts = UINT64_MAX; + } + } else if (cubic->rst->rs.is_app_limited && !cubic->rst->is_cwnd_limited) { return; } + round_start = ack->pkt_delivered >= cubic->next_round_delivered; + if (round_start) { + cubic->next_round_delivered = cubic->rst->delivered; + + cubic->rst->is_cwnd_limited = 0; + } + if (cstat->cwnd < cstat->ssthresh) { /* slow-start */ - if (cubic->target_cwnd == 0 || 
cubic->target_cwnd > cstat->cwnd) { - cstat->cwnd += pkt->pktlen; + if (cubic->hs.css_round) { + cstat->cwnd += ack->bytes_delivered / NGTCP2_HS_CSS_GROWTH_DIVISOR; + } else { + cstat->cwnd += ack->bytes_delivered; } ngtcp2_log_info(cubic->cc.log, NGTCP2_LOG_EVENT_CCA, - "pkn=%" PRId64 " acked, slow start cwnd=%" PRIu64, - pkt->pkt_num, cstat->cwnd); + "%" PRIu64 " bytes acked, slow start cwnd=%" PRIu64, + ack->bytes_delivered, cstat->cwnd); + + if (round_start) { + cubic->hs.last_round_min_rtt = cubic->hs.current_round_min_rtt; + cubic->hs.current_round_min_rtt = UINT64_MAX; + cubic->hs.rtt_sample_count = 0; - if (cubic->last_round_min_rtt != UINT64_MAX && - cubic->current_round_min_rtt != UINT64_MAX && - cstat->cwnd >= - NGTCP2_HS_MIN_SSTHRESH * cstat->max_tx_udp_payload_size && - cubic->rtt_sample_count >= NGTCP2_HS_N_RTT_SAMPLE) { - eta = cubic->last_round_min_rtt / 8; - - if (eta < NGTCP2_HS_MIN_ETA) { - eta = NGTCP2_HS_MIN_ETA; - } else if (eta > NGTCP2_HS_MAX_ETA) { - eta = NGTCP2_HS_MAX_ETA; + if (cubic->hs.css_round) { + ++cubic->hs.css_round; } + } + + cubic->hs.current_round_min_rtt = + ngtcp2_min_uint64(cubic->hs.current_round_min_rtt, ack->rtt); + ++cubic->hs.rtt_sample_count; - if (cubic->current_round_min_rtt >= cubic->last_round_min_rtt + eta) { + if (cubic->hs.css_round) { + if (cubic->hs.current_round_min_rtt < cubic->hs.css_baseline_min_rtt) { + cubic->hs.css_baseline_min_rtt = UINT64_MAX; + cubic->hs.css_round = 0; + return; + } + + if (cubic->hs.css_round >= NGTCP2_HS_CSS_ROUNDS) { ngtcp2_log_info(cubic->cc.log, NGTCP2_LOG_EVENT_CCA, "HyStart++ exit slow start"); - cubic->w_last_max = cstat->cwnd; cstat->ssthresh = cstat->cwnd; } - } - - return; - } - /* congestion avoidance */ - - if (cubic->epoch_start == UINT64_MAX) { - cubic->epoch_start = ts; - if (cstat->cwnd < cubic->w_last_max) { - cubic->k = ngtcp2_cbrt((cubic->w_last_max - cstat->cwnd) * 10 / 4 / - cstat->max_tx_udp_payload_size); - cubic->origin_point = cubic->w_last_max; - } else { - cubic->k = 0; - cubic->origin_point = cstat->cwnd; + return; } - cubic->w_tcp = cstat->cwnd; - - ngtcp2_log_info(cubic->cc.log, NGTCP2_LOG_EVENT_CCA, - "cubic-ca epoch_start=%" PRIu64 " k=%" PRIu64 - " origin_point=%" PRIu64, - cubic->epoch_start, cubic->k, cubic->origin_point); + if (cubic->hs.rtt_sample_count >= NGTCP2_HS_N_RTT_SAMPLE && + cubic->hs.current_round_min_rtt != UINT64_MAX && + cubic->hs.last_round_min_rtt != UINT64_MAX) { + rtt_thresh = + ngtcp2_max_uint64(NGTCP2_HS_MIN_RTT_THRESH, + ngtcp2_min_uint64(cubic->hs.last_round_min_rtt / + NGTCP2_HS_MIN_RTT_DIVISOR, + NGTCP2_HS_MAX_RTT_THRESH)); + + if (cubic->hs.current_round_min_rtt >= + cubic->hs.last_round_min_rtt + rtt_thresh) { + cubic->hs.css_baseline_min_rtt = cubic->hs.current_round_min_rtt; + cubic->hs.css_round = 1; + } + } - cubic->pending_add = 0; - cubic->pending_w_add = 0; + return; } - t = ts - cubic->epoch_start; - - tx = (t << 10) / NGTCP2_SECONDS; - kx = (cubic->k << 10); + /* congestion avoidance */ - if (tx > kx) { - time_delta = tx - kx; - } else { - time_delta = kx - tx; + switch (cubic->current.state) { + case NGTCP2_CUBIC_STATE_INITIAL: + m = cstat->max_tx_udp_payload_size * ack->bytes_delivered + + cubic->current.pending_bytes_delivered; + cstat->cwnd += m / cstat->cwnd; + cubic->current.pending_bytes_delivered = m % cstat->cwnd; + return; + case NGTCP2_CUBIC_STATE_RECOVERY: + cubic->current.state = NGTCP2_CUBIC_STATE_CONGESTION_AVOIDANCE; + cubic->current.epoch_start = ts; + break; + default: + break; } - delta = 
cstat->max_tx_udp_payload_size * - ((((time_delta * time_delta) >> 10) * time_delta) >> 10) * 4 / 10; - delta >>= 10; - - if (tx > kx) { - target = cubic->origin_point + delta; - } else { - target = cubic->origin_point - delta; - } + w_cubic = cubic_cc_compute_w_cubic(cubic, cstat, + ts - cubic->current.app_limited_duration); + w_cubic_next = cubic_cc_compute_w_cubic( + cubic, cstat, + ts - cubic->current.app_limited_duration + cstat->smoothed_rtt); - cwnd_thres = - (target * (((t + cstat->smoothed_rtt) << 10) / NGTCP2_SECONDS)) >> 10; - if (cwnd_thres < cstat->cwnd) { + if (w_cubic_next == UINT64_MAX || w_cubic_next < cstat->cwnd) { target = cstat->cwnd; - } else if (2 * cwnd_thres > 3 * cstat->cwnd) { + } else if (2 * w_cubic_next > 3 * cstat->cwnd) { target = cstat->cwnd * 3 / 2; } else { - target = cwnd_thres; + target = w_cubic_next; } - if (target > cstat->cwnd) { - m = cubic->pending_add + - cstat->max_tx_udp_payload_size * (target - cstat->cwnd); - add = m / cstat->cwnd; - cubic->pending_add = m % cstat->cwnd; - } else { - m = cubic->pending_add + cstat->max_tx_udp_payload_size; - add = m / (100 * cstat->cwnd); - cubic->pending_add = m % (100 * cstat->cwnd); - } - - m = cubic->pending_w_add + cstat->max_tx_udp_payload_size * pkt->pktlen; - - cubic->w_tcp += m / cstat->cwnd; - cubic->pending_w_add = m % cstat->cwnd; + m = ack->bytes_delivered * cstat->max_tx_udp_payload_size + + cubic->current.pending_est_bytes_delivered; + cubic->current.pending_est_bytes_delivered = m % cstat->cwnd; - if (cubic->w_tcp > cstat->cwnd) { - tcp_add = cstat->max_tx_udp_payload_size * (cubic->w_tcp - cstat->cwnd) / - cstat->cwnd; - if (tcp_add > add) { - add = tcp_add; - } + if (cubic->current.w_est < cubic->current.cwnd_prior) { + cubic->current.w_est += m * 9 / 17 / cstat->cwnd; + } else { + cubic->current.w_est += m / cstat->cwnd; } - if (cubic->target_cwnd == 0 || cubic->target_cwnd > cstat->cwnd) { - cstat->cwnd += add; + if (w_cubic == UINT64_MAX || cubic->current.w_est > w_cubic) { + cstat->cwnd = cubic->current.w_est; + } else { + m = (target - cstat->cwnd) * cstat->max_tx_udp_payload_size + + cubic->current.pending_bytes_delivered; + cstat->cwnd += m / cstat->cwnd; + cubic->current.pending_bytes_delivered = m % cstat->cwnd; } ngtcp2_log_info(cubic->cc.log, NGTCP2_LOG_EVENT_CCA, - "pkn=%" PRId64 " acked, cubic-ca cwnd=%" PRIu64 " t=%" PRIu64 - " k=%" PRIi64 " time_delta=%" PRIu64 " delta=%" PRIu64 - " target=%" PRIu64 " w_tcp=%" PRIu64, - pkt->pkt_num, cstat->cwnd, t, cubic->k, time_delta >> 4, - delta, target, cubic->w_tcp); + "%" PRIu64 " bytes acked, cubic-ca cwnd=%" PRIu64 + " k=%" PRIi64 " target=%" PRIu64 " w_est=%" PRIu64, + ack->bytes_delivered, cstat->cwnd, cubic->current.k, target, + cubic->current.w_est); } void ngtcp2_cc_cubic_cc_congestion_event(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, ngtcp2_tstamp sent_ts, + uint64_t bytes_lost, ngtcp2_tstamp ts) { ngtcp2_cc_cubic *cubic = ngtcp2_struct_of(cc, ngtcp2_cc_cubic, cc); - uint64_t min_cwnd; + uint64_t flight_size; if (in_congestion_recovery(cstat, sent_ts)) { return; } - if (cubic->prior.cwnd < cstat->cwnd) { - cubic->prior.cwnd = cstat->cwnd; - cubic->prior.ssthresh = cstat->ssthresh; - cubic->prior.w_last_max = cubic->w_last_max; - cubic->prior.w_tcp = cubic->w_tcp; - cubic->prior.origin_point = cubic->origin_point; - cubic->prior.epoch_start = cubic->epoch_start; - cubic->prior.k = cubic->k; + if (cubic->undo.cwnd < cstat->cwnd) { + cubic->undo.v = cubic->current; + cubic->undo.cwnd = cstat->cwnd; + cubic->undo.ssthresh = 
cstat->ssthresh; } cstat->congestion_recovery_start_ts = ts; - cubic->epoch_start = UINT64_MAX; - if (cstat->cwnd < cubic->w_last_max) { - cubic->w_last_max = cstat->cwnd * 17 / 10 / 2; + cubic->current.state = NGTCP2_CUBIC_STATE_RECOVERY; + cubic->current.epoch_start = UINT64_MAX; + cubic->current.app_limited_start_ts = UINT64_MAX; + cubic->current.app_limited_duration = 0; + cubic->current.pending_bytes_delivered = 0; + cubic->current.pending_est_bytes_delivered = 0; + + if (cstat->cwnd < cubic->current.w_max) { + cubic->current.w_max = cstat->cwnd * 17 / 20; } else { - cubic->w_last_max = cstat->cwnd; + cubic->current.w_max = cstat->cwnd; } - min_cwnd = 2 * cstat->max_tx_udp_payload_size; cstat->ssthresh = cstat->cwnd * 7 / 10; - cstat->ssthresh = ngtcp2_max(cstat->ssthresh, min_cwnd); + + if (cubic->rst->rs.delivered * 2 < cstat->cwnd) { + flight_size = cstat->bytes_in_flight + bytes_lost; + cstat->ssthresh = ngtcp2_min_uint64( + cstat->ssthresh, + ngtcp2_max_uint64(cubic->rst->rs.delivered, flight_size) * 7 / 10); + } + + cstat->ssthresh = + ngtcp2_max_uint64(cstat->ssthresh, 2 * cstat->max_tx_udp_payload_size); + + cubic->current.cwnd_prior = cstat->cwnd; cstat->cwnd = cstat->ssthresh; + cubic->current.w_est = cstat->cwnd; + + if (cstat->cwnd < cubic->current.w_max) { + cubic->current.k = + ngtcp2_cbrt(((cubic->current.w_max - cstat->cwnd) << 10) * 10 / 4 / + cstat->max_tx_udp_payload_size) * + NGTCP2_SECONDS; + cubic->current.k >>= 10; + } else { + cubic->current.k = 0; + } + ngtcp2_log_info(cubic->cc.log, NGTCP2_LOG_EVENT_CCA, "reduce cwnd because of packet loss cwnd=%" PRIu64, cstat->cwnd); @@ -450,101 +471,34 @@ void ngtcp2_cc_cubic_cc_on_spurious_congestion(ngtcp2_cc *cc, ngtcp2_cc_cubic *cubic = ngtcp2_struct_of(cc, ngtcp2_cc_cubic, cc); (void)ts; - if (cstat->cwnd >= cubic->prior.cwnd) { - return; - } - - cstat->congestion_recovery_start_ts = UINT64_MAX; - - cstat->cwnd = cubic->prior.cwnd; - cstat->ssthresh = cubic->prior.ssthresh; - cubic->w_last_max = cubic->prior.w_last_max; - cubic->w_tcp = cubic->prior.w_tcp; - cubic->origin_point = cubic->prior.origin_point; - cubic->epoch_start = cubic->prior.epoch_start; - cubic->k = cubic->prior.k; - - cubic->prior.cwnd = 0; - cubic->prior.ssthresh = 0; - cubic->prior.w_last_max = 0; - cubic->prior.w_tcp = 0; - cubic->prior.origin_point = 0; - cubic->prior.epoch_start = UINT64_MAX; - cubic->prior.k = 0; - - ngtcp2_log_info(cubic->cc.log, NGTCP2_LOG_EVENT_CCA, - "spurious congestion is detected and congestion state is " - "restored cwnd=%" PRIu64, - cstat->cwnd); -} - -void ngtcp2_cc_cubic_cc_on_persistent_congestion(ngtcp2_cc *cc, - ngtcp2_conn_stat *cstat, - ngtcp2_tstamp ts) { - (void)cc; - (void)ts; - - cstat->cwnd = 2 * cstat->max_tx_udp_payload_size; cstat->congestion_recovery_start_ts = UINT64_MAX; -} - -void ngtcp2_cc_cubic_cc_on_ack_recv(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, - const ngtcp2_cc_ack *ack, - ngtcp2_tstamp ts) { - ngtcp2_cc_cubic *cubic = ngtcp2_struct_of(cc, ngtcp2_cc_cubic, cc); - uint64_t target_cwnd, initcwnd; - uint64_t max_delivery_rate_sec; - (void)ack; - (void)ts; - - ++cubic->ack_count; - - ngtcp2_window_filter_update(&cubic->delivery_rate_sec_filter, - cstat->delivery_rate_sec, cubic->ack_count); - max_delivery_rate_sec = - ngtcp2_window_filter_get_best(&cubic->delivery_rate_sec_filter); - - if (cstat->min_rtt != UINT64_MAX && max_delivery_rate_sec) { - target_cwnd = max_delivery_rate_sec * cstat->smoothed_rtt / NGTCP2_SECONDS; - initcwnd = ngtcp2_cc_compute_initcwnd(cstat->max_tx_udp_payload_size); - 
cubic->target_cwnd = ngtcp2_max(initcwnd, target_cwnd) * 289 / 100; + if (cstat->cwnd < cubic->undo.cwnd) { + cubic->current = cubic->undo.v; + cstat->cwnd = cubic->undo.cwnd; + cstat->ssthresh = cubic->undo.ssthresh; ngtcp2_log_info(cubic->cc.log, NGTCP2_LOG_EVENT_CCA, - "target_cwnd=%" PRIu64 " max_delivery_rate_sec=%" PRIu64 - " smoothed_rtt=%" PRIu64, - cubic->target_cwnd, max_delivery_rate_sec, - cstat->smoothed_rtt); + "spurious congestion is detected and congestion state is " + "restored cwnd=%" PRIu64, + cstat->cwnd); } -} - -void ngtcp2_cc_cubic_cc_on_pkt_sent(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, - const ngtcp2_cc_pkt *pkt) { - ngtcp2_cc_cubic *cubic = ngtcp2_struct_of(cc, ngtcp2_cc_cubic, cc); - (void)cstat; - if (pkt->pktns_id != NGTCP2_PKTNS_ID_APPLICATION || cubic->window_end != -1) { - return; - } - - cubic->window_end = pkt->pkt_num; - cubic->last_round_min_rtt = cubic->current_round_min_rtt; - cubic->current_round_min_rtt = UINT64_MAX; - cubic->rtt_sample_count = 0; + cubic_vars_reset(&cubic->undo.v); + cubic->undo.cwnd = 0; + cubic->undo.ssthresh = 0; } -void ngtcp2_cc_cubic_cc_new_rtt_sample(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, - ngtcp2_tstamp ts) { +void ngtcp2_cc_cubic_cc_on_persistent_congestion(ngtcp2_cc *cc, + ngtcp2_conn_stat *cstat, + ngtcp2_tstamp ts) { ngtcp2_cc_cubic *cubic = ngtcp2_struct_of(cc, ngtcp2_cc_cubic, cc); (void)ts; - if (cubic->window_end == -1) { - return; - } + cubic_cc_reset(cubic); - cubic->current_round_min_rtt = - ngtcp2_min(cubic->current_round_min_rtt, cstat->latest_rtt); - ++cubic->rtt_sample_count; + cstat->cwnd = 2 * cstat->max_tx_udp_payload_size; + cstat->congestion_recovery_start_ts = UINT64_MAX; } void ngtcp2_cc_cubic_cc_reset(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, @@ -555,23 +509,3 @@ void ngtcp2_cc_cubic_cc_reset(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, cubic_cc_reset(cubic); } - -void ngtcp2_cc_cubic_cc_event(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, - ngtcp2_cc_event_type event, ngtcp2_tstamp ts) { - ngtcp2_cc_cubic *cubic = ngtcp2_struct_of(cc, ngtcp2_cc_cubic, cc); - ngtcp2_tstamp last_ts; - - if (event != NGTCP2_CC_EVENT_TYPE_TX_START || - cubic->epoch_start == UINT64_MAX) { - return; - } - - last_ts = cstat->last_tx_pkt_ts[NGTCP2_PKTNS_ID_APPLICATION]; - if (last_ts == UINT64_MAX || last_ts <= cubic->epoch_start) { - return; - } - - assert(ts >= last_ts); - - cubic->epoch_start += ts - last_ts; -} diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_cc.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_cc.h index 524bcdb7e4bf86..e3c363a51bb85a 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_cc.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_cc.h @@ -27,18 +27,18 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include #include "ngtcp2_pktns_id.h" -#include "ngtcp2_window_filter.h" #define NGTCP2_LOSS_REDUCTION_FACTOR_BITS 1 #define NGTCP2_PERSISTENT_CONGESTION_THRESHOLD 3 typedef struct ngtcp2_log ngtcp2_log; typedef struct ngtcp2_conn_stat ngtcp2_conn_stat; +typedef struct ngtcp2_rst ngtcp2_rst; /** * @struct @@ -144,10 +144,12 @@ typedef void (*ngtcp2_cc_on_pkt_lost)(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, * * :type:`ngtcp2_cc_congestion_event` is a callback function which is * called when congestion event happens (e.g., when packet is lost). + * |bytes_lost| is the number of bytes lost in this congestion event. 
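+ * For example, if a loss event declares three 1200-byte packets lost
+ * at once, |bytes_lost| is 3600.  The CUBIC implementation feeds it
+ * into the flight size estimate (bytes_in_flight + bytes_lost) that
+ * caps ssthresh when less than half of cwnd has been delivered in the
+ * current rate sample.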
*/ typedef void (*ngtcp2_cc_congestion_event)(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, ngtcp2_tstamp sent_ts, + uint64_t bytes_lost, ngtcp2_tstamp ts); /** @@ -305,9 +307,6 @@ ngtcp2_cc_pkt *ngtcp2_cc_pkt_init(ngtcp2_cc_pkt *pkt, int64_t pkt_num, /* ngtcp2_cc_reno is the RENO congestion controller. */ typedef struct ngtcp2_cc_reno { ngtcp2_cc cc; - ngtcp2_window_filter delivery_rate_sec_filter; - uint64_t ack_count; - uint64_t target_cwnd; uint64_t pending_add; } ngtcp2_cc_reno; @@ -318,59 +317,81 @@ void ngtcp2_cc_reno_cc_on_pkt_acked(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, void ngtcp2_cc_reno_cc_congestion_event(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, ngtcp2_tstamp sent_ts, - ngtcp2_tstamp ts); + uint64_t bytes_lost, ngtcp2_tstamp ts); void ngtcp2_cc_reno_cc_on_persistent_congestion(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, ngtcp2_tstamp ts); -void ngtcp2_cc_reno_cc_on_ack_recv(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, - const ngtcp2_cc_ack *ack, ngtcp2_tstamp ts); - void ngtcp2_cc_reno_cc_reset(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, ngtcp2_tstamp ts); +typedef enum ngtcp2_cubic_state { + /* NGTCP2_CUBIC_STATE_INITIAL is the state where CUBIC is in slow + start phase, or congestion avoidance phase before congestion + events occur. */ + NGTCP2_CUBIC_STATE_INITIAL, + /* NGTCP2_CUBIC_STATE_RECOVERY is the state that a connection is in + recovery period. */ + NGTCP2_CUBIC_STATE_RECOVERY, + /* NGTCP2_CUBIC_STATE_CONGESTION_AVOIDANCE is the state where CUBIC + is in congestion avoidance phase after recovery period ends. */ + NGTCP2_CUBIC_STATE_CONGESTION_AVOIDANCE, +} ngtcp2_cubic_state; + +typedef struct ngtcp2_cubic_vars { + uint64_t cwnd_prior; + uint64_t w_max; + ngtcp2_duration k; + ngtcp2_tstamp epoch_start; + uint64_t w_est; + + ngtcp2_cubic_state state; + /* app_limited_start_ts is the timestamp where app limited period + started. */ + ngtcp2_tstamp app_limited_start_ts; + /* app_limited_duration is the cumulative duration where a + connection is under app limited when ACK is received. */ + ngtcp2_duration app_limited_duration; + uint64_t pending_bytes_delivered; + uint64_t pending_est_bytes_delivered; +} ngtcp2_cubic_vars; + /* ngtcp2_cc_cubic is CUBIC congestion controller. */ typedef struct ngtcp2_cc_cubic { ngtcp2_cc cc; - ngtcp2_window_filter delivery_rate_sec_filter; - uint64_t ack_count; - uint64_t target_cwnd; - uint64_t w_last_max; - uint64_t w_tcp; - uint64_t origin_point; - ngtcp2_tstamp epoch_start; - uint64_t k; - /* prior stores the congestion state when a congestion event occurs + ngtcp2_rst *rst; + /* current is a set of variables that are currently in effect. */ + ngtcp2_cubic_vars current; + /* undo stores the congestion state when a congestion event occurs in order to restore the state when it turns out that the event is spurious. 
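+     A congestion event is deemed spurious when the loss that
+     triggered it is later retracted (e.g., a packet once declared
+     lost is subsequently acknowledged);
+     ngtcp2_cc_cubic_cc_on_spurious_congestion then copies undo.v
+     back into current and restores cwnd and ssthresh.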
*/ struct { + ngtcp2_cubic_vars v; uint64_t cwnd; uint64_t ssthresh; - uint64_t w_last_max; - uint64_t w_tcp; - uint64_t origin_point; - ngtcp2_tstamp epoch_start; - uint64_t k; - } prior; + } undo; /* HyStart++ variables */ - size_t rtt_sample_count; - uint64_t current_round_min_rtt; - uint64_t last_round_min_rtt; - int64_t window_end; - uint64_t pending_add; - uint64_t pending_w_add; + struct { + ngtcp2_duration current_round_min_rtt; + ngtcp2_duration last_round_min_rtt; + ngtcp2_duration curr_rtt; + size_t rtt_sample_count; + ngtcp2_duration css_baseline_min_rtt; + size_t css_round; + } hs; + uint64_t next_round_delivered; } ngtcp2_cc_cubic; -void ngtcp2_cc_cubic_init(ngtcp2_cc_cubic *cc, ngtcp2_log *log); +void ngtcp2_cc_cubic_init(ngtcp2_cc_cubic *cc, ngtcp2_log *log, + ngtcp2_rst *rst); -void ngtcp2_cc_cubic_cc_on_pkt_acked(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, - const ngtcp2_cc_pkt *pkt, - ngtcp2_tstamp ts); +void ngtcp2_cc_cubic_cc_on_ack_recv(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, + const ngtcp2_cc_ack *ack, ngtcp2_tstamp ts); void ngtcp2_cc_cubic_cc_congestion_event(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, ngtcp2_tstamp sent_ts, - ngtcp2_tstamp ts); + uint64_t bytes_lost, ngtcp2_tstamp ts); void ngtcp2_cc_cubic_cc_on_spurious_congestion(ngtcp2_cc *ccx, ngtcp2_conn_stat *cstat, @@ -380,21 +401,9 @@ void ngtcp2_cc_cubic_cc_on_persistent_congestion(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, ngtcp2_tstamp ts); -void ngtcp2_cc_cubic_cc_on_ack_recv(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, - const ngtcp2_cc_ack *ack, ngtcp2_tstamp ts); - -void ngtcp2_cc_cubic_cc_on_pkt_sent(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, - const ngtcp2_cc_pkt *pkt); - -void ngtcp2_cc_cubic_cc_new_rtt_sample(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, - ngtcp2_tstamp ts); - void ngtcp2_cc_cubic_cc_reset(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, ngtcp2_tstamp ts); -void ngtcp2_cc_cubic_cc_event(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, - ngtcp2_cc_event_type event, ngtcp2_tstamp ts); - uint64_t ngtcp2_cbrt(uint64_t n); -#endif /* NGTCP2_CC_H */ +#endif /* !defined(NGTCP2_CC_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_cid.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_cid.c index f3b92b569ec928..181850cfcbc87a 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_cid.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_cid.c @@ -36,6 +36,7 @@ void ngtcp2_cid_init(ngtcp2_cid *cid, const uint8_t *data, size_t datalen) { assert(datalen <= NGTCP2_MAX_CIDLEN); cid->datalen = datalen; + if (datalen) { ngtcp2_cpymem(cid->data, data, datalen); } @@ -74,12 +75,14 @@ void ngtcp2_dcid_init(ngtcp2_dcid *dcid, uint64_t seq, const ngtcp2_cid *cid, const uint8_t *token) { dcid->seq = seq; dcid->cid = *cid; + if (token) { memcpy(dcid->token, token, NGTCP2_STATELESS_RESET_TOKENLEN); dcid->flags = NGTCP2_DCID_FLAG_TOKEN_PRESENT; } else { dcid->flags = NGTCP2_DCID_FLAG_NONE; } + ngtcp2_path_storage_zero(&dcid->ps); dcid->retired_ts = UINT64_MAX; dcid->bound_ts = UINT64_MAX; @@ -115,6 +118,7 @@ void ngtcp2_dcid_copy(ngtcp2_dcid *dest, const ngtcp2_dcid *src) { void ngtcp2_dcid_copy_cid_token(ngtcp2_dcid *dest, const ngtcp2_dcid *src) { dest->seq = src->seq; dest->cid = src->cid; + if (src->flags & NGTCP2_DCID_FLAG_TOKEN_PRESENT) { dest->flags |= NGTCP2_DCID_FLAG_TOKEN_PRESENT; memcpy(dest->token, src->token, NGTCP2_STATELESS_RESET_TOKENLEN); @@ -123,15 +127,14 @@ void ngtcp2_dcid_copy_cid_token(ngtcp2_dcid *dest, const ngtcp2_dcid *src) { } } -int ngtcp2_dcid_verify_uniqueness(ngtcp2_dcid *dcid, uint64_t seq, +int ngtcp2_dcid_verify_uniqueness(const ngtcp2_dcid *dcid, 
uint64_t seq, const ngtcp2_cid *cid, const uint8_t *token) { if (dcid->seq == seq) { return ngtcp2_cid_eq(&dcid->cid, cid) && - (dcid->flags & NGTCP2_DCID_FLAG_TOKEN_PRESENT) && - memcmp(dcid->token, token, - NGTCP2_STATELESS_RESET_TOKENLEN) == 0 - ? 0 - : NGTCP2_ERR_PROTO; + (dcid->flags & NGTCP2_DCID_FLAG_TOKEN_PRESENT) && + memcmp(dcid->token, token, NGTCP2_STATELESS_RESET_TOKENLEN) == 0 + ? 0 + : NGTCP2_ERR_PROTO; } return !ngtcp2_cid_eq(&dcid->cid, cid) ? 0 : NGTCP2_ERR_PROTO; @@ -140,8 +143,7 @@ int ngtcp2_dcid_verify_uniqueness(ngtcp2_dcid *dcid, uint64_t seq, int ngtcp2_dcid_verify_stateless_reset_token(const ngtcp2_dcid *dcid, const uint8_t *token) { return (dcid->flags & NGTCP2_DCID_FLAG_TOKEN_PRESENT) && - ngtcp2_cmemeq(dcid->token, token, - NGTCP2_STATELESS_RESET_TOKENLEN) - ? 0 - : NGTCP2_ERR_INVALID_ARGUMENT; + ngtcp2_cmemeq(dcid->token, token, NGTCP2_STATELESS_RESET_TOKENLEN) + ? 0 + : NGTCP2_ERR_INVALID_ARGUMENT; } diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_cid.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_cid.h index 0b37441178c72a..6372ef113d6454 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_cid.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_cid.h @@ -27,7 +27,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -37,20 +37,20 @@ /* NGTCP2_SCID_FLAG_NONE indicates that no flag is set. */ #define NGTCP2_SCID_FLAG_NONE 0x00u /* NGTCP2_SCID_FLAG_USED indicates that a local endpoint observed that - a remote endpoint uses a particular Connection ID. */ + a remote endpoint uses this particular Connection ID. */ #define NGTCP2_SCID_FLAG_USED 0x01u -/* NGTCP2_SCID_FLAG_RETIRED indicates that a particular Connection ID - is retired. */ +/* NGTCP2_SCID_FLAG_RETIRED indicates that this particular Connection + ID is retired. */ #define NGTCP2_SCID_FLAG_RETIRED 0x02u typedef struct ngtcp2_scid { ngtcp2_pq_entry pe; - /* seq is the sequence number associated to the CID. */ + /* seq is the sequence number associated to the Connection ID. */ uint64_t seq; /* cid is a connection ID */ ngtcp2_cid cid; - /* retired_ts is the timestamp when peer tells that this CID is - retired. */ + /* retired_ts is the timestamp when a remote endpoint tells that + this Connection ID is retired. */ ngtcp2_tstamp retired_ts; /* flags is the bitwise OR of zero or more of NGTCP2_SCID_FLAG_*. */ uint8_t flags; @@ -66,33 +66,33 @@ typedef struct ngtcp2_scid { #define NGTCP2_DCID_FLAG_TOKEN_PRESENT 0x02u typedef struct ngtcp2_dcid { - /* seq is the sequence number associated to the CID. */ + /* seq is the sequence number associated to the Connection ID. */ uint64_t seq; - /* cid is a connection ID */ + /* cid is a Connection ID */ ngtcp2_cid cid; /* path is a path which cid is bound to. The addresses are zero length if cid has not been bound to a particular path yet. */ ngtcp2_path_storage ps; - /* retired_ts is the timestamp when peer tells that this CID is + /* retired_ts is the timestamp when this Connection ID is retired. */ ngtcp2_tstamp retired_ts; - /* bound_ts is the timestamp when this connection ID is bound to a - particular path. It is only assigned when a connection ID is - used just for sending PATH_RESPONSE and is not zero-length. */ + /* bound_ts is the timestamp when this Connection ID is bound to a + particular path. It is only assigned when a Connection ID is + used just for sending PATH_RESPONSE, and is not zero-length. */ ngtcp2_tstamp bound_ts; /* bytes_sent is the number of bytes sent to an associated path. 
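+     Together with bytes_recv below, this is the kind of counter an
+     endpoint consults to enforce the anti-amplification limit
+     (sending no more than 3x the bytes received) while a path is
+     still unvalidated.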
*/ uint64_t bytes_sent; /* bytes_recv is the number of bytes received from an associated path. */ uint64_t bytes_recv; - /* max_udp_payload_size is the maximum size of UDP payload that is - allowed to send to this path. */ + /* max_udp_payload_size is the maximum size of UDP datagram payload + that is allowed to be sent to this path. */ size_t max_udp_payload_size; /* flags is bitwise OR of zero or more of NGTCP2_DCID_FLAG_*. */ uint8_t flags; - /* token is a stateless reset token associated to this CID. - Actually, the stateless reset token is tied to the connection, - not to the particular connection ID. */ + /* token is a stateless reset token received along with this + Connection ID. The stateless reset token is tied to the + connection, not to the particular Connection ID. */ uint8_t token[NGTCP2_STATELESS_RESET_TOKENLEN]; } ngtcp2_dcid; @@ -106,7 +106,7 @@ void ngtcp2_cid_zero(ngtcp2_cid *cid); int ngtcp2_cid_less(const ngtcp2_cid *lhs, const ngtcp2_cid *rhs); /* - * ngtcp2_cid_empty returns nonzero if |cid| includes empty connection + * ngtcp2_cid_empty returns nonzero if |cid| includes empty Connection * ID. */ int ngtcp2_cid_empty(const ngtcp2_cid *cid); @@ -123,7 +123,7 @@ void ngtcp2_scid_copy(ngtcp2_scid *dest, const ngtcp2_scid *src); /* * ngtcp2_dcid_init initializes |dcid| with the given parameters. If - * |token| is NULL, the function fills dcid->token it with 0. |token| + * |token| is NULL, the function fills dcid->token with 0. |token| * must be NGTCP2_STATELESS_RESET_TOKENLEN bytes long. */ void ngtcp2_dcid_init(ngtcp2_dcid *dcid, uint64_t seq, const ngtcp2_cid *cid, @@ -131,14 +131,14 @@ void ngtcp2_dcid_init(ngtcp2_dcid *dcid, uint64_t seq, const ngtcp2_cid *cid, /* * ngtcp2_dcid_set_token sets |token| to |dcid|. |token| must not be - * NULL and must be NGTCP2_STATELESS_RESET_TOKENLEN bytes long. + * NULL, and must be NGTCP2_STATELESS_RESET_TOKENLEN bytes long. */ void ngtcp2_dcid_set_token(ngtcp2_dcid *dcid, const uint8_t *token); /* * ngtcp2_dcid_set_path sets |path| to |dcid|. It sets - * max_udp_payload_size to the minimum UDP payload size supported - * by the IP protocol version. + * max_udp_payload_size to the minimum UDP datagram payload size + * supported by the IP protocol version. */ void ngtcp2_dcid_set_path(ngtcp2_dcid *dcid, const ngtcp2_path *path); @@ -149,7 +149,7 @@ void ngtcp2_dcid_copy(ngtcp2_dcid *dest, const ngtcp2_dcid *src); /* * ngtcp2_dcid_copy_cid_token behaves like ngtcp2_dcid_copy, but it - * only copies cid, seq, and path. + * only copies cid, seq, and token. */ void ngtcp2_dcid_copy_cid_token(ngtcp2_dcid *dest, const ngtcp2_dcid *src); @@ -157,14 +157,15 @@ void ngtcp2_dcid_copy_cid_token(ngtcp2_dcid *dest, const ngtcp2_dcid *src); * ngtcp2_dcid_verify_uniqueness verifies uniqueness of (|seq|, |cid|, * |token|) tuple against |dcid|. */ -int ngtcp2_dcid_verify_uniqueness(ngtcp2_dcid *dcid, uint64_t seq, +int ngtcp2_dcid_verify_uniqueness(const ngtcp2_dcid *dcid, uint64_t seq, const ngtcp2_cid *cid, const uint8_t *token); /* * ngtcp2_dcid_verify_stateless_reset_token verifies stateless reset - * token |token| against the one included in |dcid|. This function - * returns 0 if the verification succeeds, or one of the following - * negative error codes: + * token |token| against the one included in |dcid|. Tokens are + * compared in constant time. This function returns 0 if the + * verification succeeds, or one of the following negative error + * codes: * * NGTCP2_ERR_INVALID_ARGUMENT * Tokens do not match; or |dcid| does not contain a token. 
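+ *
+ * A minimal sketch of such a constant-time check, using a
+ * hypothetical cmemeq_sketch helper and assuming the usual
+ * accumulate-XOR approach (the real ngtcp2_cmemeq may differ in
+ * detail): OR together the XOR of every byte pair so the loop always
+ * runs to completion regardless of where a mismatch occurs.
+ *
+ *   static int cmemeq_sketch(const uint8_t *a, const uint8_t *b,
+ *                            size_t n) {
+ *     uint8_t diff = 0;
+ *     size_t i;
+ *
+ *     for (i = 0; i < n; ++i) {
+ *       diff |= (uint8_t)(a[i] ^ b[i]);
+ *     }
+ *
+ *     return diff == 0;
+ *   }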
@@ -172,4 +173,4 @@ int ngtcp2_dcid_verify_uniqueness(ngtcp2_dcid *dcid, uint64_t seq, int ngtcp2_dcid_verify_stateless_reset_token(const ngtcp2_dcid *dcid, const uint8_t *token); -#endif /* NGTCP2_CID_H */ +#endif /* !defined(NGTCP2_CID_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_conn.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_conn.c index c8caf47ea76232..8b60efabe126d0 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_conn.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_conn.c @@ -37,7 +37,8 @@ #include "ngtcp2_rcvry.h" #include "ngtcp2_unreachable.h" #include "ngtcp2_net.h" -#include "ngtcp2_conversion.h" +#include "ngtcp2_transport_params.h" +#include "ngtcp2_settings.h" #include "ngtcp2_tstamp.h" #include "ngtcp2_frame_chain.h" @@ -50,8 +51,12 @@ /* NGTCP2_MIN_COALESCED_PAYLOADLEN is the minimum length of QUIC packet payload that should be coalesced to a long packet. */ #define NGTCP2_MIN_COALESCED_PAYLOADLEN 128 +/* NGTCP2_MAX_ACK_PER_PKT is the maximum number of ACK frame per an + incoming QUIC packet to process. ACK frames that exceed this limit + are not processed. */ +#define NGTCP2_MAX_ACK_PER_PKT 1 -ngtcp2_objalloc_def(strm, ngtcp2_strm, oplent); +ngtcp2_objalloc_def(strm, ngtcp2_strm, oplent) /* * conn_local_stream returns nonzero if |stream_id| indicates that it @@ -323,8 +328,8 @@ static int conn_call_select_preferred_addr(ngtcp2_conn *conn, assert(conn->remote.transport_params->preferred_addr_present); rv = conn->callbacks.select_preferred_addr( - conn, dest, &conn->remote.transport_params->preferred_addr, - conn->user_data); + conn, dest, &conn->remote.transport_params->preferred_addr, + conn->user_data); if (rv != 0) { return NGTCP2_ERR_CALLBACK_FAILURE; } @@ -377,7 +382,7 @@ static int conn_call_extend_max_stream_data(ngtcp2_conn *conn, } rv = conn->callbacks.extend_max_stream_data( - conn, stream_id, datalen, conn->user_data, strm->stream_user_data); + conn, stream_id, datalen, conn->user_data, strm->stream_user_data); if (rv != 0) { return NGTCP2_ERR_CALLBACK_FAILURE; } @@ -395,9 +400,9 @@ static int conn_call_dcid_status(ngtcp2_conn *conn, } rv = conn->callbacks.dcid_status( - conn, type, dcid->seq, &dcid->cid, - (dcid->flags & NGTCP2_DCID_FLAG_TOKEN_PRESENT) ? dcid->token : NULL, - conn->user_data); + conn, type, dcid->seq, &dcid->cid, + (dcid->flags & NGTCP2_DCID_FLAG_TOKEN_PRESENT) ? 
dcid->token : NULL, + conn->user_data); if (rv != 0) { return NGTCP2_ERR_CALLBACK_FAILURE; } @@ -413,7 +418,7 @@ static int conn_call_activate_dcid(ngtcp2_conn *conn, const ngtcp2_dcid *dcid) { static int conn_call_deactivate_dcid(ngtcp2_conn *conn, const ngtcp2_dcid *dcid) { return conn_call_dcid_status( - conn, NGTCP2_CONNECTION_ID_STATUS_TYPE_DEACTIVATE, dcid); + conn, NGTCP2_CONNECTION_ID_STATUS_TYPE_DEACTIVATE, dcid); } static int conn_call_stream_stop_sending(ngtcp2_conn *conn, int64_t stream_id, @@ -587,8 +592,8 @@ static int conn_call_recv_datagram(ngtcp2_conn *conn, flags |= NGTCP2_DATAGRAM_FLAG_0RTT; } - rv = conn->callbacks.recv_datagram(conn, flags, data, datalen, - conn->user_data); + rv = + conn->callbacks.recv_datagram(conn, flags, data, datalen, conn->user_data); if (rv != 0) { return NGTCP2_ERR_CALLBACK_FAILURE; } @@ -607,8 +612,8 @@ conn_call_update_key(ngtcp2_conn *conn, uint8_t *rx_secret, uint8_t *tx_secret, assert(conn->callbacks.update_key); rv = conn->callbacks.update_key( - conn, rx_secret, tx_secret, rx_aead_ctx, rx_iv, tx_aead_ctx, tx_iv, - current_rx_secret, current_tx_secret, secretlen, conn->user_data); + conn, rx_secret, tx_secret, rx_aead_ctx, rx_iv, tx_aead_ctx, tx_iv, + current_rx_secret, current_tx_secret, secretlen, conn->user_data); if (rv != 0) { return NGTCP2_ERR_CALLBACK_FAILURE; } @@ -623,7 +628,7 @@ static int conn_call_version_negotiation(ngtcp2_conn *conn, uint32_t version, assert(conn->callbacks.version_negotiation); rv = - conn->callbacks.version_negotiation(conn, version, dcid, conn->user_data); + conn->callbacks.version_negotiation(conn, version, dcid, conn->user_data); if (rv != 0) { return NGTCP2_ERR_CALLBACK_FAILURE; } @@ -663,13 +668,11 @@ static int conn_call_recv_tx_key(ngtcp2_conn *conn, return 0; } -static int pktns_init(ngtcp2_pktns *pktns, ngtcp2_pktns_id pktns_id, - ngtcp2_rst *rst, ngtcp2_cc *cc, int64_t initial_pkt_num, - ngtcp2_log *log, ngtcp2_qlog *qlog, - ngtcp2_objalloc *rtb_entry_objalloc, - ngtcp2_objalloc *frc_objalloc, const ngtcp2_mem *mem) { - int rv; - +static void pktns_init(ngtcp2_pktns *pktns, ngtcp2_pktns_id pktns_id, + ngtcp2_rst *rst, ngtcp2_cc *cc, int64_t initial_pkt_num, + ngtcp2_log *log, ngtcp2_qlog *qlog, + ngtcp2_objalloc *rtb_entry_objalloc, + ngtcp2_objalloc *frc_objalloc, const ngtcp2_mem *mem) { memset(pktns, 0, sizeof(*pktns)); ngtcp2_gaptr_init(&pktns->rx.pngap, mem); @@ -678,25 +681,15 @@ static int pktns_init(ngtcp2_pktns *pktns, ngtcp2_pktns_id pktns_id, pktns->tx.non_ack_pkt_start_ts = UINT64_MAX; pktns->rx.max_pkt_num = -1; pktns->rx.max_ack_eliciting_pkt_num = -1; + pktns->id = pktns_id; - rv = ngtcp2_acktr_init(&pktns->acktr, log, mem); - if (rv != 0) { - goto fail_acktr_init; - } + ngtcp2_acktr_init(&pktns->acktr, log, mem); ngtcp2_strm_init(&pktns->crypto.strm, 0, NGTCP2_STRM_FLAG_NONE, 0, 0, NULL, frc_objalloc, mem); - ngtcp2_rtb_init(&pktns->rtb, pktns_id, &pktns->crypto.strm, rst, cc, - initial_pkt_num, log, qlog, rtb_entry_objalloc, frc_objalloc, - mem); - - return 0; - -fail_acktr_init: - ngtcp2_gaptr_free(&pktns->rx.pngap); - - return rv; + ngtcp2_rtb_init(&pktns->rtb, rst, cc, initial_pkt_num, log, qlog, + rtb_entry_objalloc, frc_objalloc, mem); } static int pktns_new(ngtcp2_pktns **ppktns, ngtcp2_pktns_id pktns_id, @@ -704,20 +697,15 @@ static int pktns_new(ngtcp2_pktns **ppktns, ngtcp2_pktns_id pktns_id, ngtcp2_log *log, ngtcp2_qlog *qlog, ngtcp2_objalloc *rtb_entry_objalloc, ngtcp2_objalloc *frc_objalloc, const ngtcp2_mem *mem) { - int rv; - *ppktns = ngtcp2_mem_malloc(mem, 
sizeof(ngtcp2_pktns)); if (*ppktns == NULL) { return NGTCP2_ERR_NOMEM; } - rv = pktns_init(*ppktns, pktns_id, rst, cc, initial_pkt_num, log, qlog, - rtb_entry_objalloc, frc_objalloc, mem); - if (rv != 0) { - ngtcp2_mem_free(mem, *ppktns); - } + pktns_init(*ppktns, pktns_id, rst, cc, initial_pkt_num, log, qlog, + rtb_entry_objalloc, frc_objalloc, mem); - return rv; + return 0; } static int cycle_less(const ngtcp2_pq_entry *lhs, const ngtcp2_pq_entry *rhs) { @@ -783,6 +771,8 @@ static int cid_less(const ngtcp2_ksl_key *lhs, const ngtcp2_ksl_key *rhs) { return ngtcp2_cid_less(lhs, rhs); } +ngtcp2_ksl_search_def(cid_less, cid_less) + static int retired_ts_less(const ngtcp2_pq_entry *lhs, const ngtcp2_pq_entry *rhs) { const ngtcp2_scid *a = ngtcp2_struct_of(lhs, ngtcp2_scid, pe); @@ -803,8 +793,9 @@ static void conn_reset_conn_stat_cc(ngtcp2_conn *conn, cstat->first_rtt_sample_ts = UINT64_MAX; cstat->pto_count = 0; cstat->loss_detection_timer = UINT64_MAX; - cstat->cwnd = - ngtcp2_cc_compute_initcwnd(conn->local.settings.max_tx_udp_payload_size); + cstat->max_tx_udp_payload_size = + ngtcp2_conn_get_path_max_tx_udp_payload_size(conn); + cstat->cwnd = ngtcp2_cc_compute_initcwnd(cstat->max_tx_udp_payload_size); cstat->ssthresh = UINT64_MAX; cstat->congestion_recovery_start_ts = UINT64_MAX; cstat->bytes_in_flight = 0; @@ -847,7 +838,7 @@ static void delete_scid(ngtcp2_ksl *scids, const ngtcp2_mem *mem) { static ngtcp2_duration compute_pto(ngtcp2_duration smoothed_rtt, ngtcp2_duration rttvar, ngtcp2_duration max_ack_delay) { - ngtcp2_duration var = ngtcp2_max(4 * rttvar, NGTCP2_GRANULARITY); + ngtcp2_duration var = ngtcp2_max_uint64(4 * rttvar, NGTCP2_GRANULARITY); return smoothed_rtt + var + max_ack_delay; } @@ -859,7 +850,7 @@ static ngtcp2_duration conn_compute_initial_pto(ngtcp2_conn *conn, ngtcp2_duration initial_rtt = conn->local.settings.initial_rtt; ngtcp2_duration max_ack_delay; - if (pktns->rtb.pktns_id == NGTCP2_PKTNS_ID_APPLICATION && + if (pktns->id == NGTCP2_PKTNS_ID_APPLICATION && conn->remote.transport_params) { max_ack_delay = conn->remote.transport_params->max_ack_delay; } else { @@ -876,7 +867,7 @@ static ngtcp2_duration conn_compute_pto(ngtcp2_conn *conn, ngtcp2_conn_stat *cstat = &conn->cstat; ngtcp2_duration max_ack_delay; - if (pktns->rtb.pktns_id == NGTCP2_PKTNS_ID_APPLICATION && + if (pktns->id == NGTCP2_PKTNS_ID_APPLICATION && conn->remote.transport_params) { max_ack_delay = conn->remote.transport_params->max_ack_delay; } else { @@ -898,7 +889,7 @@ static ngtcp2_duration conn_compute_pv_timeout_pto(ngtcp2_conn *conn, ngtcp2_duration pto) { ngtcp2_duration initial_pto = conn_compute_initial_pto(conn, &conn->pktns); - return 3 * ngtcp2_max(pto, initial_pto); + return 3 * ngtcp2_max_uint64(pto, initial_pto); } /* @@ -1007,28 +998,15 @@ static void conn_reset_ecn_validation_state(ngtcp2_conn *conn) { static uint8_t server_default_available_versions[] = {0, 0, 0, 1}; /* - * available_versions_new allocates new buffer, and writes |versions| - * of length |versionslen| in network byte order, suitable for sending - * in available_versions field of version_information QUIC transport - * parameter. The pointer to the allocated buffer is assigned to - * |*pbuf|. - * - * This function returns 0 if it succeeds, or one of the negative - * error codes: - * - * NGTCP2_ERR_NOMEM - * Out of memory. 
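+ * As an illustration of the in-place encoding described below:
+ * available_versions_init(buf, vs, 2) with
+ * vs = {0x00000001, 0x6b3343cf} (QUIC v1 and v2) writes the eight
+ * bytes 00 00 00 01 6b 33 43 cf into buf and returns buf + 8.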
+ * available_versions_init writes |versions| of length |versionslen| + * in network byte order to the buffer pointed by |buf|, suitable for + * sending in available_versions field of version_information QUIC + * transport parameter. This function returns the pointer to the one + * beyond the last byte written. */ -static int available_versions_new(uint8_t **pbuf, const uint32_t *versions, - size_t versionslen, const ngtcp2_mem *mem) { +static void *available_versions_init(void *buf, const uint32_t *versions, + size_t versionslen) { size_t i; - uint8_t *buf = ngtcp2_mem_malloc(mem, sizeof(uint32_t) * versionslen); - - if (buf == NULL) { - return NGTCP2_ERR_NOMEM; - } - - *pbuf = buf; for (i = 0; i < versionslen; ++i) { buf = ngtcp2_put_uint32be(buf, versions[i]); @@ -1055,6 +1033,16 @@ conn_set_local_transport_params(ngtcp2_conn *conn, p->version_info_present = 1; } +static size_t buflen_align(size_t buflen) { + return (buflen + 0x7) & (size_t)~0x7; +} + +static void *buf_align(void *buf) { + return (void *)((uintptr_t)((uint8_t *)buf + 0x7) & (uintptr_t)~0x7); +} + +static void *buf_advance(void *buf, size_t n) { return (uint8_t *)buf + n; } + static int conn_new(ngtcp2_conn **pconn, const ngtcp2_cid *dcid, const ngtcp2_cid *scid, const ngtcp2_path *path, uint32_t client_chosen_version, int callbacks_version, @@ -1065,16 +1053,20 @@ static int conn_new(ngtcp2_conn **pconn, const ngtcp2_cid *dcid, const ngtcp2_mem *mem, void *user_data, int server) { int rv; ngtcp2_scid *scident; - uint8_t *buf; + void *buf, *tokenbuf; + size_t buflen; uint8_t fixed_bit_byte; size_t i; uint32_t *preferred_versions; + ngtcp2_settings settingsbuf; ngtcp2_transport_params paramsbuf; (void)callbacks_version; (void)settings_version; + settings = + ngtcp2_settings_convert_to_latest(&settingsbuf, settings_version, settings); params = ngtcp2_transport_params_convert_to_latest( - ¶msbuf, transport_params_version, params); + ¶msbuf, transport_params_version, params); assert(settings->max_window <= NGTCP2_MAX_VARINT); assert(settings->max_stream_window <= NGTCP2_MAX_VARINT); @@ -1111,21 +1103,59 @@ static int conn_new(ngtcp2_conn **pconn, const ngtcp2_cid *dcid, assert(callbacks->get_path_challenge_data); assert(!server || !ngtcp2_is_reserved_version(client_chosen_version)); + for (i = 0; i < settings->pmtud_probeslen; ++i) { + assert(settings->pmtud_probes[i] > NGTCP2_MAX_UDP_PAYLOAD_SIZE); + } + if (mem == NULL) { mem = ngtcp2_mem_default(); } - *pconn = ngtcp2_mem_calloc(mem, 1, sizeof(ngtcp2_conn)); - if (*pconn == NULL) { - rv = NGTCP2_ERR_NOMEM; - goto fail_conn; + buflen = sizeof(ngtcp2_conn); + if (settings->qlog_write) { + buflen = buflen_align(buflen); + buflen += NGTCP2_QLOG_BUFLEN; + } + + if (settings->pmtud_probeslen) { + buflen = buflen_align(buflen); + buflen += sizeof(settings->pmtud_probes[0]) * settings->pmtud_probeslen; + } + + if (settings->preferred_versionslen) { + buflen = buflen_align(buflen); + buflen += + sizeof(settings->preferred_versions[0]) * settings->preferred_versionslen; + } + + if (settings->available_versionslen) { + buflen = buflen_align(buflen); + buflen += + sizeof(settings->available_versions[0]) * settings->available_versionslen; + } else if (server) { + if (settings->preferred_versionslen) { + buflen = buflen_align(buflen); + buflen += sizeof(settings->preferred_versions[0]) * + settings->preferred_versionslen; + } + } else if (!ngtcp2_is_reserved_version(client_chosen_version)) { + buflen = buflen_align(buflen); + buflen += sizeof(client_chosen_version); + } + + buf = 
ngtcp2_mem_calloc(mem, 1, buflen); + if (buf == NULL) { + return NGTCP2_ERR_NOMEM; } + *pconn = buf; + buf = buf_advance(buf, sizeof(ngtcp2_conn)); + (*pconn)->server = server; - ngtcp2_objalloc_frame_chain_init(&(*pconn)->frc_objalloc, 64, mem); - ngtcp2_objalloc_rtb_entry_init(&(*pconn)->rtb_entry_objalloc, 64, mem); - ngtcp2_objalloc_strm_init(&(*pconn)->strm_objalloc, 64, mem); + ngtcp2_objalloc_frame_chain_init(&(*pconn)->frc_objalloc, 16, mem); + ngtcp2_objalloc_rtb_entry_init(&(*pconn)->rtb_entry_objalloc, 16, mem); + ngtcp2_objalloc_strm_init(&(*pconn)->strm_objalloc, 16, mem); ngtcp2_static_ringbuf_dcid_bound_init(&(*pconn)->dcid.bound); @@ -1135,7 +1165,8 @@ static int conn_new(ngtcp2_conn **pconn, const ngtcp2_cid *dcid, ngtcp2_gaptr_init(&(*pconn)->dcid.seqgap, mem); - ngtcp2_ksl_init(&(*pconn)->scid.set, cid_less, sizeof(ngtcp2_cid), mem); + ngtcp2_ksl_init(&(*pconn)->scid.set, cid_less, ksl_cid_less_search, + sizeof(ngtcp2_cid), mem); ngtcp2_pq_init(&(*pconn)->scid.used, retired_ts_less, mem); @@ -1143,9 +1174,9 @@ static int conn_new(ngtcp2_conn **pconn, const ngtcp2_cid *dcid, ngtcp2_pq_init(&(*pconn)->tx.strmq, cycle_less, mem); - ngtcp2_idtr_init(&(*pconn)->remote.bidi.idtr, !server, mem); + ngtcp2_idtr_init(&(*pconn)->remote.bidi.idtr, mem); - ngtcp2_idtr_init(&(*pconn)->remote.uni.idtr, !server, mem); + ngtcp2_idtr_init(&(*pconn)->remote.uni.idtr, mem); ngtcp2_static_ringbuf_path_challenge_init(&(*pconn)->rx.path_challenge); @@ -1154,36 +1185,46 @@ static int conn_new(ngtcp2_conn **pconn, const ngtcp2_cid *dcid, ngtcp2_qlog_init(&(*pconn)->qlog, settings->qlog_write, settings->initial_ts, user_data); if ((*pconn)->qlog.write) { - buf = ngtcp2_mem_malloc(mem, NGTCP2_QLOG_BUFLEN); - if (buf == NULL) { - rv = NGTCP2_ERR_NOMEM; - goto fail_qlog_buf; - } + buf = buf_align(buf); ngtcp2_buf_init(&(*pconn)->qlog.buf, buf, NGTCP2_QLOG_BUFLEN); + buf = buf_advance(buf, NGTCP2_QLOG_BUFLEN); } (*pconn)->local.settings = *settings; if (settings->tokenlen) { - buf = ngtcp2_mem_malloc(mem, settings->tokenlen); - if (buf == NULL) { + tokenbuf = ngtcp2_mem_malloc(mem, settings->tokenlen); + if (tokenbuf == NULL) { rv = NGTCP2_ERR_NOMEM; goto fail_token; } - memcpy(buf, settings->token, settings->tokenlen); - (*pconn)->local.settings.token = buf; + memcpy(tokenbuf, settings->token, settings->tokenlen); + (*pconn)->local.settings.token = tokenbuf; } else { (*pconn)->local.settings.token = NULL; } + if (settings->pmtud_probeslen) { + (*pconn)->local.settings.pmtud_probes = buf_align(buf); + buf = ngtcp2_cpymem( + (uint16_t *)(*pconn)->local.settings.pmtud_probes, settings->pmtud_probes, + sizeof(settings->pmtud_probes[0]) * settings->pmtud_probeslen); + } + if (!(*pconn)->local.settings.original_version) { (*pconn)->local.settings.original_version = client_chosen_version; } + ngtcp2_dcid_init(&(*pconn)->dcid.current, 0, dcid, NULL); + ngtcp2_dcid_set_path(&(*pconn)->dcid.current, path); + + rv = ngtcp2_gaptr_push(&(*pconn)->dcid.seqgap, 0, 1); + if (rv != 0) { + goto fail_seqgap_push; + } + conn_reset_conn_stat(*pconn, &(*pconn)->cstat); (*pconn)->cstat.initial_rtt = settings->initial_rtt; - (*pconn)->cstat.max_tx_udp_payload_size = - (*pconn)->local.settings.max_tx_udp_payload_size; ngtcp2_rst_init(&(*pconn)->rst); @@ -1195,7 +1236,7 @@ static int conn_new(ngtcp2_conn **pconn, const ngtcp2_cid *dcid, break; case NGTCP2_CC_ALGO_CUBIC: - ngtcp2_cc_cubic_init(&(*pconn)->cubic, &(*pconn)->log); + ngtcp2_cc_cubic_init(&(*pconn)->cubic, &(*pconn)->log, &(*pconn)->rst); break; case 
NGTCP2_CC_ALGO_BBR: @@ -1224,13 +1265,10 @@ static int conn_new(ngtcp2_conn **pconn, const ngtcp2_cid *dcid, goto fail_hs_pktns_init; } - rv = pktns_init(&(*pconn)->pktns, NGTCP2_PKTNS_ID_APPLICATION, &(*pconn)->rst, - &(*pconn)->cc, settings->initial_pkt_num, &(*pconn)->log, - &(*pconn)->qlog, &(*pconn)->rtb_entry_objalloc, - &(*pconn)->frc_objalloc, mem); - if (rv != 0) { - goto fail_pktns_init; - } + pktns_init(&(*pconn)->pktns, NGTCP2_PKTNS_ID_APPLICATION, &(*pconn)->rst, + &(*pconn)->cc, settings->initial_pkt_num, &(*pconn)->log, + &(*pconn)->qlog, &(*pconn)->rtb_entry_objalloc, + &(*pconn)->frc_objalloc, mem); scident = ngtcp2_mem_malloc(mem, sizeof(*scident)); if (scident == NULL) { @@ -1249,14 +1287,6 @@ static int conn_new(ngtcp2_conn **pconn, const ngtcp2_cid *dcid, scident = NULL; - ngtcp2_dcid_init(&(*pconn)->dcid.current, 0, dcid, NULL); - ngtcp2_dcid_set_path(&(*pconn)->dcid.current, path); - - rv = ngtcp2_gaptr_push(&(*pconn)->dcid.seqgap, 0, 1); - if (rv != 0) { - goto fail_seqgap_push; - } - if (settings->preferred_versionslen) { if (!server && !ngtcp2_is_reserved_version(client_chosen_version)) { for (i = 0; i < settings->preferred_versionslen; ++i) { @@ -1268,12 +1298,9 @@ static int conn_new(ngtcp2_conn **pconn, const ngtcp2_cid *dcid, assert(i < settings->preferred_versionslen); } - preferred_versions = ngtcp2_mem_malloc( - mem, sizeof(uint32_t) * settings->preferred_versionslen); - if (preferred_versions == NULL) { - rv = NGTCP2_ERR_NOMEM; - goto fail_preferred_versions; - } + preferred_versions = buf_align(buf); + buf = buf_advance(preferred_versions, sizeof(preferred_versions[0]) * + settings->preferred_versionslen); for (i = 0; i < settings->preferred_versionslen; ++i) { assert(ngtcp2_is_supported_version(settings->preferred_versions[i])); @@ -1304,39 +1331,33 @@ static int conn_new(ngtcp2_conn **pconn, const ngtcp2_cid *dcid, ngtcp2_is_supported_version(settings->available_versions[i])); } - rv = available_versions_new(&buf, settings->available_versions, - settings->available_versionslen, mem); - if (rv != 0) { - goto fail_available_versions; - } - - (*pconn)->vneg.available_versions = buf; + (*pconn)->vneg.available_versions = buf_align(buf); (*pconn)->vneg.available_versionslen = - sizeof(uint32_t) * settings->available_versionslen; + sizeof(uint32_t) * settings->available_versionslen; + + buf = available_versions_init((*pconn)->vneg.available_versions, + settings->available_versions, + settings->available_versionslen); } else if (server) { if (settings->preferred_versionslen) { - rv = available_versions_new(&buf, settings->preferred_versions, - settings->preferred_versionslen, mem); - if (rv != 0) { - goto fail_available_versions; - } - - (*pconn)->vneg.available_versions = buf; + (*pconn)->vneg.available_versions = buf_align(buf); (*pconn)->vneg.available_versionslen = - sizeof(uint32_t) * settings->preferred_versionslen; + sizeof(uint32_t) * settings->preferred_versionslen; + + buf = available_versions_init((*pconn)->vneg.available_versions, + settings->preferred_versions, + settings->preferred_versionslen); } else { (*pconn)->vneg.available_versions = server_default_available_versions; (*pconn)->vneg.available_versionslen = - sizeof(server_default_available_versions); + sizeof(server_default_available_versions); } - } else if (!server && !ngtcp2_is_reserved_version(client_chosen_version)) { - rv = available_versions_new(&buf, &client_chosen_version, 1, mem); - if (rv != 0) { - goto fail_available_versions; - } - - (*pconn)->vneg.available_versions = buf; + } 
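+  /* Like the qlog buffer and pmtud probes earlier in conn_new, these
+     version arrays are carved out of the single block obtained from
+     ngtcp2_mem_calloc(mem, 1, buflen): buf_align keeps each region
+     8-byte aligned and buf_advance steps past it, which is why the
+     old fail_available_versions unwind path is gone. */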
else if (!ngtcp2_is_reserved_version(client_chosen_version)) { + (*pconn)->vneg.available_versions = buf_align(buf); (*pconn)->vneg.available_versionslen = sizeof(uint32_t); + + buf = available_versions_init((*pconn)->vneg.available_versions, + &client_chosen_version, 1); } (*pconn)->local.settings.available_versions = NULL; @@ -1367,54 +1388,39 @@ static int conn_new(ngtcp2_conn **pconn, const ngtcp2_cid *dcid, conn_reset_ecn_validation_state(*pconn); - ngtcp2_qlog_start( - &(*pconn)->qlog, - server ? ((*pconn)->local.transport_params.retry_scid_present - ? &(*pconn)->local.transport_params.retry_scid - : &(*pconn)->local.transport_params.original_dcid) - : dcid, - server); + ngtcp2_qlog_start(&(*pconn)->qlog, + server + ? ((*pconn)->local.transport_params.retry_scid_present + ? &(*pconn)->local.transport_params.retry_scid + : &(*pconn)->local.transport_params.original_dcid) + : dcid, + server); return 0; -fail_available_versions: - ngtcp2_mem_free(mem, (*pconn)->vneg.preferred_versions); -fail_preferred_versions: -fail_seqgap_push: fail_scid_set_insert: ngtcp2_mem_free(mem, scident); fail_scident: - pktns_free(&(*pconn)->pktns, mem); -fail_pktns_init: pktns_del((*pconn)->hs_pktns, mem); fail_hs_pktns_init: pktns_del((*pconn)->in_pktns, mem); fail_in_pktns_init: + ngtcp2_gaptr_free(&(*pconn)->dcid.seqgap); +fail_seqgap_push: ngtcp2_mem_free(mem, (uint8_t *)(*pconn)->local.settings.token); fail_token: - ngtcp2_mem_free(mem, (*pconn)->qlog.buf.begin); -fail_qlog_buf: - ngtcp2_idtr_free(&(*pconn)->remote.uni.idtr); - ngtcp2_idtr_free(&(*pconn)->remote.bidi.idtr); - ngtcp2_map_free(&(*pconn)->strms); - delete_scid(&(*pconn)->scid.set, mem); - ngtcp2_ksl_free(&(*pconn)->scid.set); - ngtcp2_gaptr_free(&(*pconn)->dcid.seqgap); - ngtcp2_objalloc_free(&(*pconn)->strm_objalloc); - ngtcp2_objalloc_free(&(*pconn)->rtb_entry_objalloc); - ngtcp2_objalloc_free(&(*pconn)->frc_objalloc); ngtcp2_mem_free(mem, *pconn); -fail_conn: + return rv; } int ngtcp2_conn_client_new_versioned( - ngtcp2_conn **pconn, const ngtcp2_cid *dcid, const ngtcp2_cid *scid, - const ngtcp2_path *path, uint32_t client_chosen_version, - int callbacks_version, const ngtcp2_callbacks *callbacks, - int settings_version, const ngtcp2_settings *settings, - int transport_params_version, const ngtcp2_transport_params *params, - const ngtcp2_mem *mem, void *user_data) { + ngtcp2_conn **pconn, const ngtcp2_cid *dcid, const ngtcp2_cid *scid, + const ngtcp2_path *path, uint32_t client_chosen_version, + int callbacks_version, const ngtcp2_callbacks *callbacks, + int settings_version, const ngtcp2_settings *settings, + int transport_params_version, const ngtcp2_transport_params *params, + const ngtcp2_mem *mem, void *user_data) { int rv; rv = conn_new(pconn, dcid, scid, path, client_chosen_version, @@ -1438,12 +1444,12 @@ int ngtcp2_conn_client_new_versioned( } int ngtcp2_conn_server_new_versioned( - ngtcp2_conn **pconn, const ngtcp2_cid *dcid, const ngtcp2_cid *scid, - const ngtcp2_path *path, uint32_t client_chosen_version, - int callbacks_version, const ngtcp2_callbacks *callbacks, - int settings_version, const ngtcp2_settings *settings, - int transport_params_version, const ngtcp2_transport_params *params, - const ngtcp2_mem *mem, void *user_data) { + ngtcp2_conn **pconn, const ngtcp2_cid *dcid, const ngtcp2_cid *scid, + const ngtcp2_path *path, uint32_t client_chosen_version, + int callbacks_version, const ngtcp2_callbacks *callbacks, + int settings_version, const ngtcp2_settings *settings, + int transport_params_version, const 
ngtcp2_transport_params *params, + const ngtcp2_mem *mem, void *user_data) { int rv; rv = conn_new(pconn, dcid, scid, path, client_chosen_version, @@ -1471,8 +1477,8 @@ int ngtcp2_conn_server_new_versioned( * credits are considered. */ static uint64_t conn_fc_credits(ngtcp2_conn *conn, ngtcp2_strm *strm) { - return ngtcp2_min(strm->tx.max_offset - strm->tx.offset, - conn->tx.max_offset - conn->tx.offset); + return ngtcp2_min_uint64(strm->tx.max_offset - strm->tx.offset, + conn->tx.max_offset - conn->tx.offset); } /* @@ -1483,7 +1489,7 @@ static uint64_t conn_fc_credits(ngtcp2_conn *conn, ngtcp2_strm *strm) { static uint64_t conn_enforce_flow_control(ngtcp2_conn *conn, ngtcp2_strm *strm, uint64_t len) { uint64_t fc_credits = conn_fc_credits(conn, strm); - return ngtcp2_min(len, fc_credits); + return ngtcp2_min_uint64(len, fc_credits); } static int delete_strms_each(void *data, void *ptr) { @@ -1525,15 +1531,15 @@ void ngtcp2_conn_del(ngtcp2_conn *conn) { if (conn->crypto.key_update.old_rx_ckm) { conn_call_delete_crypto_aead_ctx( - conn, &conn->crypto.key_update.old_rx_ckm->aead_ctx); + conn, &conn->crypto.key_update.old_rx_ckm->aead_ctx); } if (conn->crypto.key_update.new_rx_ckm) { conn_call_delete_crypto_aead_ctx( - conn, &conn->crypto.key_update.new_rx_ckm->aead_ctx); + conn, &conn->crypto.key_update.new_rx_ckm->aead_ctx); } if (conn->crypto.key_update.new_tx_ckm) { conn_call_delete_crypto_aead_ctx( - conn, &conn->crypto.key_update.new_tx_ckm->aead_ctx); + conn, &conn->crypto.key_update.new_tx_ckm->aead_ctx); } if (conn->pktns.crypto.rx.ckm) { @@ -1551,26 +1557,26 @@ void ngtcp2_conn_del(ngtcp2_conn *conn) { if (conn->hs_pktns) { if (conn->hs_pktns->crypto.rx.ckm) { conn_call_delete_crypto_aead_ctx( - conn, &conn->hs_pktns->crypto.rx.ckm->aead_ctx); + conn, &conn->hs_pktns->crypto.rx.ckm->aead_ctx); } conn_call_delete_crypto_cipher_ctx(conn, &conn->hs_pktns->crypto.rx.hp_ctx); if (conn->hs_pktns->crypto.tx.ckm) { conn_call_delete_crypto_aead_ctx( - conn, &conn->hs_pktns->crypto.tx.ckm->aead_ctx); + conn, &conn->hs_pktns->crypto.tx.ckm->aead_ctx); } conn_call_delete_crypto_cipher_ctx(conn, &conn->hs_pktns->crypto.tx.hp_ctx); } if (conn->in_pktns) { if (conn->in_pktns->crypto.rx.ckm) { conn_call_delete_crypto_aead_ctx( - conn, &conn->in_pktns->crypto.rx.ckm->aead_ctx); + conn, &conn->in_pktns->crypto.rx.ckm->aead_ctx); } conn_call_delete_crypto_cipher_ctx(conn, &conn->in_pktns->crypto.rx.hp_ctx); if (conn->in_pktns->crypto.tx.ckm) { conn_call_delete_crypto_aead_ctx( - conn, &conn->in_pktns->crypto.tx.ckm->aead_ctx); + conn, &conn->in_pktns->crypto.tx.ckm->aead_ctx); } conn_call_delete_crypto_cipher_ctx(conn, &conn->in_pktns->crypto.tx.hp_ctx); } @@ -1582,11 +1588,6 @@ void ngtcp2_conn_del(ngtcp2_conn *conn) { conn_vneg_crypto_free(conn); - ngtcp2_mem_free(conn->mem, conn->vneg.preferred_versions); - if (conn->vneg.available_versions != server_default_available_versions) { - ngtcp2_mem_free(conn->mem, conn->vneg.available_versions); - } - ngtcp2_mem_free(conn->mem, conn->crypto.decrypt_buf.base); ngtcp2_mem_free(conn->mem, conn->crypto.decrypt_hp_buf.base); ngtcp2_mem_free(conn->mem, (uint8_t *)conn->local.settings.token); @@ -1600,8 +1601,6 @@ void ngtcp2_conn_del(ngtcp2_conn *conn) { pktns_del(conn->hs_pktns, conn->mem); pktns_del(conn->in_pktns, conn->mem); - ngtcp2_mem_free(conn->mem, conn->qlog.buf.begin); - ngtcp2_pmtud_del(conn->pmtud); ngtcp2_pv_del(conn->pv); @@ -1611,7 +1610,7 @@ void ngtcp2_conn_del(ngtcp2_conn *conn) { ngtcp2_idtr_free(&conn->remote.bidi.idtr); 
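+  /* Teardown mirrors the single-allocation conn_new above: regions
+     carved from the connection's own block (qlog buffer, pmtud
+     probes, preferred/available version lists) are not freed
+     individually here; releasing the ngtcp2_conn object itself at
+     the end of this function reclaims them all at once. */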
ngtcp2_mem_free(conn->mem, conn->tx.ack); ngtcp2_pq_free(&conn->tx.strmq); - ngtcp2_map_each_free(&conn->strms, delete_strms_each, (void *)conn); + ngtcp2_map_each(&conn->strms, delete_strms_each, (void *)conn); ngtcp2_map_free(&conn->strms); ngtcp2_pq_free(&conn->scid.used); @@ -1665,8 +1664,9 @@ static int conn_ensure_ack_ranges(ngtcp2_conn *conn, size_t n) { * ACK. */ static ngtcp2_duration conn_compute_ack_delay(ngtcp2_conn *conn) { - return ngtcp2_min(conn->local.transport_params.max_ack_delay, - conn->cstat.smoothed_rtt / 8); + return ngtcp2_min_uint64( + conn->local.transport_params.max_ack_delay, + ngtcp2_max_uint64(conn->cstat.smoothed_rtt / 8, NGTCP2_NANOSECONDS)); } int ngtcp2_conn_create_ack_frame(ngtcp2_conn *conn, ngtcp2_frame **pfr, @@ -1702,8 +1702,8 @@ int ngtcp2_conn_create_ack_frame(ngtcp2_conn *conn, ngtcp2_frame **pfr, if (conn->tx.ack == NULL) { conn->tx.ack = ngtcp2_mem_malloc( - conn->mem, - sizeof(ngtcp2_ack) + sizeof(ngtcp2_ack_range) * initial_max_ack_ranges); + conn->mem, + sizeof(ngtcp2_ack) + sizeof(ngtcp2_ack_range) * initial_max_ack_ranges); if (conn->tx.ack == NULL) { return NGTCP2_ERR_NOMEM; } @@ -1844,8 +1844,9 @@ static int conn_ppe_write_frame(ngtcp2_conn *conn, ngtcp2_ppe *ppe, * NGTCP2_ERR_NOMEM * Out of memory */ -static int conn_on_pkt_sent(ngtcp2_conn *conn, ngtcp2_rtb *rtb, +static int conn_on_pkt_sent(ngtcp2_conn *conn, ngtcp2_pktns *pktns, ngtcp2_rtb_entry *ent) { + ngtcp2_rtb *rtb = &pktns->rtb; int rv; /* This function implements OnPacketSent, but it handles only @@ -1856,7 +1857,7 @@ static int conn_on_pkt_sent(ngtcp2_conn *conn, ngtcp2_rtb *rtb, } if (ent->flags & NGTCP2_RTB_ENTRY_FLAG_ACK_ELICITING) { - conn->cstat.last_tx_pkt_ts[rtb->pktns_id] = ent->ts; + conn->cstat.last_tx_pkt_ts[pktns->id] = ent->ts; } ngtcp2_conn_set_loss_detection_timer(conn, ent->ts); @@ -1898,8 +1899,8 @@ static size_t pktns_select_pkt_numlen(ngtcp2_pktns *pktns) { */ static uint64_t conn_get_cwnd(ngtcp2_conn *conn) { return conn->pv && (conn->pv->flags & NGTCP2_PV_FLAG_FALLBACK_ON_FAILURE) - ? ngtcp2_cc_compute_initcwnd(conn->cstat.max_tx_udp_payload_size) - : conn->cstat.cwnd; + ? ngtcp2_cc_compute_initcwnd(conn->cstat.max_tx_udp_payload_size) + : conn->cstat.cwnd; } /* @@ -1947,7 +1948,7 @@ static uint64_t conn_retry_early_payloadlen(ngtcp2_conn *conn) { /* Take the min because in conn_should_pad_pkt we take max in order to deal with unbreakable DATAGRAM. */ - return ngtcp2_min(len, NGTCP2_MIN_COALESCED_PAYLOADLEN); + return ngtcp2_min_uint64(len, NGTCP2_MIN_COALESCED_PAYLOADLEN); } return 0; @@ -2021,12 +2022,13 @@ static int conn_should_pad_pkt(ngtcp2_conn *conn, uint8_t type, size_t left, return 0; } - if (conn->hs_pktns->crypto.tx.ckm && - (conn->hs_pktns->rtb.probe_pkt_left || - !ngtcp2_strm_streamfrq_empty(&conn->hs_pktns->crypto.strm) || - !ngtcp2_acktr_empty(&conn->hs_pktns->acktr))) { - /* If we have something to send in Handshake packet, then add - PADDING in Handshake packet. */ + if ((conn->hs_pktns->crypto.tx.ckm && + (conn->hs_pktns->rtb.probe_pkt_left || + !ngtcp2_strm_streamfrq_empty(&conn->hs_pktns->crypto.strm) || + !ngtcp2_acktr_empty(&conn->hs_pktns->acktr))) || + conn->pktns.crypto.tx.ckm) { + /* If we have something to send in Handshake or 1RTT packet, + then add PADDING in that packet. */ min_payloadlen = NGTCP2_MIN_COALESCED_PAYLOADLEN; } else { return 1; @@ -2044,7 +2046,7 @@ static int conn_should_pad_pkt(ngtcp2_conn *conn, uint8_t type, size_t left, PADDING in that packet. 
Take maximum in case that write_datalen includes DATAGRAM which cannot be split. */ min_payloadlen = - ngtcp2_max(write_datalen, NGTCP2_MIN_COALESCED_PAYLOADLEN); + ngtcp2_max_uint64(write_datalen, NGTCP2_MIN_COALESCED_PAYLOADLEN); } else { return 1; } @@ -2067,8 +2069,8 @@ static int conn_should_pad_pkt(ngtcp2_conn *conn, uint8_t type, size_t left, return left < /* TODO Assuming that pkt_num is encoded in 1 byte. */ NGTCP2_MIN_LONG_HEADERLEN + conn->dcid.current.cid.datalen + - conn->oscid.datalen + NGTCP2_PKT_LENGTHLEN - 1 + min_payloadlen + - NGTCP2_MAX_AEAD_OVERHEAD; + conn->oscid.datalen + NGTCP2_PKT_LENGTHLEN - 1 + min_payloadlen + + NGTCP2_MAX_AEAD_OVERHEAD; } static void conn_restart_timer_on_write(ngtcp2_conn *conn, ngtcp2_tstamp ts) { @@ -2189,15 +2191,18 @@ static uint8_t conn_pkt_flags_long(ngtcp2_conn *conn) { static uint8_t conn_pkt_flags_short(ngtcp2_conn *conn) { return (uint8_t)(conn_pkt_flags(conn) | ((conn->pktns.crypto.tx.ckm->flags & NGTCP2_CRYPTO_KM_FLAG_KEY_PHASE_ONE) - ? NGTCP2_PKT_FLAG_KEY_PHASE - : NGTCP2_PKT_FLAG_NONE)); + ? NGTCP2_PKT_FLAG_KEY_PHASE + : NGTCP2_PKT_FLAG_NONE)); } +static size_t conn_min_pktlen(ngtcp2_conn *conn); + /* * conn_write_handshake_pkt writes handshake packet in the buffer - * pointed by |dest| whose length is |destlen|. |type| specifies long - * packet type. It should be either NGTCP2_PKT_INITIAL or - * NGTCP2_PKT_HANDSHAKE_PKT. + * pointed by |dest| whose length is |destlen|. |dgram_offset| is the + * offset in UDP datagram payload where this QUIC packet is positioned + * at. |type| specifies long packet type. It should be either + * NGTCP2_PKT_INITIAL or NGTCP2_PKT_HANDSHAKE_PKT. * * |write_datalen| is the minimum length of application data ready to * send in subsequent 0RTT packet. @@ -2212,8 +2217,9 @@ static uint8_t conn_pkt_flags_short(ngtcp2_conn *conn) { */ static ngtcp2_ssize conn_write_handshake_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, uint8_t *dest, - size_t destlen, uint8_t type, uint8_t flags, - uint64_t write_datalen, ngtcp2_tstamp ts) { + size_t destlen, size_t dgram_offset, uint8_t type, + uint8_t flags, uint64_t write_datalen, + ngtcp2_tstamp ts) { int rv; ngtcp2_ppe ppe; ngtcp2_pkt_hd hd; @@ -2228,6 +2234,7 @@ conn_write_handshake_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, uint8_t *dest, uint16_t rtb_entry_flags = NGTCP2_RTB_ENTRY_FLAG_NONE; int require_padding = (flags & NGTCP2_WRITE_PKT_FLAG_REQUIRE_PADDING) != 0; int pkt_empty = 1; + int min_padded = 0; int padded = 0; int hd_logged = 0; uint64_t crypto_offset; @@ -2271,10 +2278,9 @@ conn_write_handshake_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, uint8_t *dest, cc.encrypt = conn->callbacks.encrypt; cc.hp_mask = conn->callbacks.hp_mask; - ngtcp2_pkt_hd_init(&hd, conn_pkt_flags_long(conn), type, - &conn->dcid.current.cid, &conn->oscid, - pktns->tx.last_pkt_num + 1, pktns_select_pkt_numlen(pktns), - version, 0); + ngtcp2_pkt_hd_init( + &hd, conn_pkt_flags_long(conn), type, &conn->dcid.current.cid, &conn->oscid, + pktns->tx.last_pkt_num + 1, pktns_select_pkt_numlen(pktns), version, 0); if (!conn->server && type == NGTCP2_PKT_INITIAL && conn->local.settings.tokenlen) { @@ -2282,7 +2288,7 @@ conn_write_handshake_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, uint8_t *dest, hd.tokenlen = conn->local.settings.tokenlen; } - ngtcp2_ppe_init(&ppe, dest, destlen, &cc); + ngtcp2_ppe_init(&ppe, dest, destlen, dgram_offset, &cc); rv = ngtcp2_ppe_encode_hd(&ppe, &hd); if (rv != 0) { @@ -2395,7 +2401,7 @@ conn_write_handshake_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, uint8_t 
*dest, assert(rv == NGTCP2_ERR_NOBUF); } else { rtb_entry_flags |= - NGTCP2_RTB_ENTRY_FLAG_ACK_ELICITING | NGTCP2_RTB_ENTRY_FLAG_PROBE; + NGTCP2_RTB_ENTRY_FLAG_ACK_ELICITING | NGTCP2_RTB_ENTRY_FLAG_PROBE; pkt_empty = 0; } } @@ -2412,6 +2418,7 @@ conn_write_handshake_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, uint8_t *dest, } else { rtb_entry_flags |= NGTCP2_RTB_ENTRY_FLAG_ACK_ELICITING; pktns->tx.non_ack_pkt_start_ts = UINT64_MAX; + pkt_empty = 0; } } else if (pktns->tx.non_ack_pkt_start_ts == UINT64_MAX) { pktns->tx.non_ack_pkt_start_ts = ts; @@ -2429,20 +2436,24 @@ conn_write_handshake_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, uint8_t *dest, /* If we cannot write another packet, then we need to add padding to Initial here. */ if (conn_should_pad_pkt( - conn, type, ngtcp2_ppe_left(&ppe), write_datalen, - (rtb_entry_flags & NGTCP2_RTB_ENTRY_FLAG_ACK_ELICITING) != 0, - require_padding)) { + conn, type, ngtcp2_ppe_left(&ppe), write_datalen, + (rtb_entry_flags & NGTCP2_RTB_ENTRY_FLAG_ACK_ELICITING) != 0, + require_padding)) { lfr.type = NGTCP2_FRAME_PADDING; - lfr.padding.len = ngtcp2_ppe_padding(&ppe); + lfr.padding.len = ngtcp2_ppe_dgram_padding(&ppe); } else if (pkt_empty) { return 0; } else { lfr.type = NGTCP2_FRAME_PADDING; - lfr.padding.len = ngtcp2_ppe_padding_hp_sample(&ppe); + lfr.padding.len = ngtcp2_ppe_padding_size(&ppe, conn_min_pktlen(conn)); + min_padded = 1; } if (lfr.padding.len) { - padded = 1; + if (!min_padded || + (rtb_entry_flags & NGTCP2_RTB_ENTRY_FLAG_ACK_ELICITING)) { + padded = 1; + } ngtcp2_log_tx_fr(&conn->log, &hd, &lfr); ngtcp2_qlog_write_frame(&conn->qlog, &lfr); } @@ -2461,16 +2472,16 @@ conn_write_handshake_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, uint8_t *dest, conn_handle_tx_ecn(conn, pi, &rtb_entry_flags, pktns, &hd, ts); } - rv = ngtcp2_rtb_entry_objalloc_new(&rtbent, &hd, frq, ts, (size_t)spktlen, - rtb_entry_flags, - &conn->rtb_entry_objalloc); + rv = + ngtcp2_rtb_entry_objalloc_new(&rtbent, &hd, frq, ts, (size_t)spktlen, + rtb_entry_flags, &conn->rtb_entry_objalloc); if (rv != 0) { assert(ngtcp2_err_is_fatal(rv)); ngtcp2_frame_chain_list_objalloc_del(frq, &conn->frc_objalloc, conn->mem); return rv; } - rv = conn_on_pkt_sent(conn, &pktns->rtb, rtbent); + rv = conn_on_pkt_sent(conn, pktns, rtbent); if (rv != 0) { ngtcp2_rtb_entry_objalloc_del(rtbent, &conn->rtb_entry_objalloc, &conn->frc_objalloc, conn->mem); @@ -2565,8 +2576,8 @@ static ngtcp2_ssize conn_write_ack_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, } spktlen = ngtcp2_conn_write_single_frame_pkt( - conn, pi, dest, destlen, type, NGTCP2_WRITE_PKT_FLAG_NONE, - &conn->dcid.current.cid, ackfr, NGTCP2_RTB_ENTRY_FLAG_NONE, NULL, ts); + conn, pi, dest, destlen, type, NGTCP2_WRITE_PKT_FLAG_NONE, + &conn->dcid.current.cid, ackfr, NGTCP2_RTB_ENTRY_FLAG_NONE, NULL, ts); if (spktlen <= 0) { return spktlen; @@ -2588,8 +2599,8 @@ static void conn_discard_pktns(ngtcp2_conn *conn, ngtcp2_pktns **ppktns, conn->cstat.bytes_in_flight -= bytes_in_flight; conn->cstat.pto_count = 0; - conn->cstat.last_tx_pkt_ts[pktns->rtb.pktns_id] = UINT64_MAX; - conn->cstat.loss_time[pktns->rtb.pktns_id] = UINT64_MAX; + conn->cstat.last_tx_pkt_ts[pktns->id] = UINT64_MAX; + conn->cstat.loss_time[pktns->id] = UINT64_MAX; conn_call_delete_crypto_aead_ctx(conn, &pktns->crypto.rx.ckm->aead_ctx); conn_call_delete_crypto_cipher_ctx(conn, &pktns->crypto.rx.hp_ctx); @@ -2602,11 +2613,7 @@ static void conn_discard_pktns(ngtcp2_conn *conn, ngtcp2_pktns **ppktns, ngtcp2_conn_set_loss_detection_timer(conn, ts); } -/* - * 
conn_discard_initial_state discards state for Initial packet number - * space. - */ -static void conn_discard_initial_state(ngtcp2_conn *conn, ngtcp2_tstamp ts) { +void ngtcp2_conn_discard_initial_state(ngtcp2_conn *conn, ngtcp2_tstamp ts) { if (!conn->in_pktns) { return; } @@ -2622,11 +2629,7 @@ static void conn_discard_initial_state(ngtcp2_conn *conn, ngtcp2_tstamp ts) { memset(&conn->vneg.tx, 0, sizeof(conn->vneg.tx)); } -/* - * conn_discard_handshake_state discards state for Handshake packet - * number space. - */ -static void conn_discard_handshake_state(ngtcp2_conn *conn, ngtcp2_tstamp ts) { +void ngtcp2_conn_discard_handshake_state(ngtcp2_conn *conn, ngtcp2_tstamp ts) { if (!conn->hs_pktns) { return; } @@ -2672,7 +2675,7 @@ static ngtcp2_ssize conn_write_handshake_ack_pkts(ngtcp2_conn *conn, to send ACK is give server RTT measurement early. */ if (conn->server && conn->in_pktns) { nwrite = - conn_write_ack_pkt(conn, pi, dest, destlen, NGTCP2_PKT_INITIAL, ts); + conn_write_ack_pkt(conn, pi, dest, destlen, NGTCP2_PKT_INITIAL, ts); if (nwrite < 0) { assert(nwrite != NGTCP2_ERR_NOBUF); return nwrite; @@ -2685,7 +2688,7 @@ static ngtcp2_ssize conn_write_handshake_ack_pkts(ngtcp2_conn *conn, if (conn->hs_pktns->crypto.tx.ckm) { nwrite = - conn_write_ack_pkt(conn, pi, dest, destlen, NGTCP2_PKT_HANDSHAKE, ts); + conn_write_ack_pkt(conn, pi, dest, destlen, NGTCP2_PKT_HANDSHAKE, ts); if (nwrite < 0) { assert(nwrite != NGTCP2_ERR_NOBUF); return nwrite; @@ -2694,7 +2697,7 @@ static ngtcp2_ssize conn_write_handshake_ack_pkts(ngtcp2_conn *conn, res += nwrite; if (!conn->server && nwrite) { - conn_discard_initial_state(conn, ts); + ngtcp2_conn_discard_initial_state(conn, ts); } } @@ -2725,9 +2728,9 @@ static ngtcp2_ssize conn_write_client_initial(ngtcp2_conn *conn, return rv; } - return conn_write_handshake_pkt(conn, pi, dest, destlen, NGTCP2_PKT_INITIAL, - NGTCP2_WRITE_PKT_FLAG_NONE, early_datalen, - ts); + return conn_write_handshake_pkt( + conn, pi, dest, destlen, 0, NGTCP2_PKT_INITIAL, NGTCP2_WRITE_PKT_FLAG_NONE, + early_datalen, ts); } /* @@ -2807,11 +2810,11 @@ static ngtcp2_ssize conn_write_handshake_pkts(ngtcp2_conn *conn, conn->hs_pktns->rtb.probe_pkt_left)) { /* Discard Initial state here so that Handshake packet is not padded. 
*/ - conn_discard_initial_state(conn, ts); + ngtcp2_conn_discard_initial_state(conn, ts); } else if (conn->in_pktns) { nwrite = - conn_write_handshake_pkt(conn, pi, dest, destlen, NGTCP2_PKT_INITIAL, - NGTCP2_WRITE_PKT_FLAG_NONE, write_datalen, ts); + conn_write_handshake_pkt(conn, pi, dest, destlen, 0, NGTCP2_PKT_INITIAL, + NGTCP2_WRITE_PKT_FLAG_NONE, write_datalen, ts); if (nwrite < 0) { assert(nwrite != NGTCP2_ERR_NOBUF); return nwrite; @@ -2823,11 +2826,11 @@ static ngtcp2_ssize conn_write_handshake_pkts(ngtcp2_conn *conn, !ngtcp2_strm_streamfrq_empty(&conn->in_pktns->crypto.strm))) { if (cstat->loss_detection_timer != UINT64_MAX && conn_server_tx_left(conn, &conn->dcid.current) < - NGTCP2_MAX_UDP_PAYLOAD_SIZE) { + NGTCP2_MAX_UDP_PAYLOAD_SIZE) { ngtcp2_log_info( - &conn->log, NGTCP2_LOG_EVENT_LDC, - "loss detection timer canceled due to amplification limit"); - cstat->loss_detection_timer = UINT64_MAX; + &conn->log, NGTCP2_LOG_EVENT_LDC, + "loss detection timer canceled due to amplification limit"); + ngtcp2_conn_cancel_loss_detection_timer(conn); } return 0; @@ -2837,10 +2840,10 @@ static ngtcp2_ssize conn_write_handshake_pkts(ngtcp2_conn *conn, dest += nwrite; destlen -= (size_t)nwrite; - if (destlen) { - /* We might have already added padding to Initial, but in that - case, we should have destlen == 0 and no Handshake packet - will be written. */ + /* If initial packet size is at least + NGTCP2_MAX_UDP_PAYLOAD_SIZE, no extra padding is needed in a + subsequent packet. */ + if (nwrite < NGTCP2_MAX_UDP_PAYLOAD_SIZE) { if (conn->server) { it = ngtcp2_rtb_head(&conn->in_pktns->rtb); if (!ngtcp2_ksl_it_end(&it)) { @@ -2856,8 +2859,9 @@ static ngtcp2_ssize conn_write_handshake_pkts(ngtcp2_conn *conn, } } - nwrite = conn_write_handshake_pkt( - conn, pi, dest, destlen, NGTCP2_PKT_HANDSHAKE, wflags, write_datalen, ts); + nwrite = + conn_write_handshake_pkt(conn, pi, dest, destlen, (size_t)res, + NGTCP2_PKT_HANDSHAKE, wflags, write_datalen, ts); if (nwrite < 0) { assert(nwrite != NGTCP2_ERR_NOBUF); return nwrite; @@ -2869,7 +2873,7 @@ static ngtcp2_ssize conn_write_handshake_pkts(ngtcp2_conn *conn, /* We don't need to send further Initial packet if we have Handshake key and sent something with it. So discard initial state here. */ - conn_discard_initial_state(conn, ts); + ngtcp2_conn_discard_initial_state(conn, ts); } return res; @@ -2905,7 +2909,7 @@ static int conn_should_send_max_stream_data(ngtcp2_conn *conn, uint64_t inc = strm->rx.unsent_max_offset - strm->rx.max_offset; (void)conn; - return strm->rx.window < 2 * inc; + return strm->rx.window < 4 * inc; } /* @@ -2915,7 +2919,7 @@ static int conn_should_send_max_stream_data(ngtcp2_conn *conn, static int conn_should_send_max_data(ngtcp2_conn *conn) { uint64_t inc = conn->rx.unsent_max_offset - conn->rx.max_offset; - return conn->rx.window < 2 * inc; + return conn->rx.window < 4 * inc; } /* @@ -2948,9 +2952,9 @@ static size_t conn_required_num_new_connection_id(ngtcp2_conn *conn) { n = conn->remote.transport_params->active_connection_id_limit + conn->scid.num_retired; - n = ngtcp2_min(NGTCP2_MAX_SCID_POOL_SIZE, n) - len; + n = ngtcp2_min_uint64(NGTCP2_MAX_SCID_POOL_SIZE, n) - len; - return (size_t)ngtcp2_min(lim, n); + return (size_t)ngtcp2_min_uint64(lim, n); } /* @@ -3093,11 +3097,12 @@ static int conn_remove_retired_connection_id(ngtcp2_conn *conn, } /* - * conn_min_short_pktlen returns the minimum length of Short packet - * this endpoint sends. + * conn_min_pktlen returns the minimum length of packet this endpoint + * sends. 
It may underestimate the length because this does not take + * into account header protection sample. */ -static size_t conn_min_short_pktlen(ngtcp2_conn *conn) { - return conn->dcid.current.cid.datalen + NGTCP2_MIN_PKT_EXPANDLEN; +static size_t conn_min_pktlen(ngtcp2_conn *conn) { + return conn->oscid.datalen + NGTCP2_MIN_PKT_EXPANDLEN; } /* @@ -3155,8 +3160,10 @@ static void conn_reset_ppe_pending(ngtcp2_conn *conn) { /* * conn_write_pkt writes a protected packet in the buffer pointed by - * |dest| whose length if |destlen|. |type| specifies the type of - * packet. It can be NGTCP2_PKT_1RTT or NGTCP2_PKT_0RTT. + * |dest| whose length if |destlen|. |dgram_offset| is the offset in + * UDP datagram payload where this QUIC packet is positioned at. + * |type| specifies the type of packet. It can be NGTCP2_PKT_1RTT or + * NGTCP2_PKT_0RTT. * * This function can send new stream data. In order to send stream * data, specify the underlying stream and parameters to @@ -3183,8 +3190,9 @@ static void conn_reset_ppe_pending(ngtcp2_conn *conn) { */ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, uint8_t *dest, size_t destlen, - ngtcp2_vmsg *vmsg, uint8_t type, - uint8_t flags, ngtcp2_tstamp ts) { + size_t dgram_offset, ngtcp2_vmsg *vmsg, + uint8_t type, uint8_t flags, + ngtcp2_tstamp ts) { int rv = 0; ngtcp2_crypto_cc *cc = &conn->pkt.cc; ngtcp2_ppe *ppe = &conn->pkt.ppe; @@ -3211,7 +3219,8 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, int require_padding = (flags & NGTCP2_WRITE_PKT_FLAG_REQUIRE_PADDING) != 0; int write_more = (flags & NGTCP2_WRITE_PKT_FLAG_MORE) != 0; int ppe_pending = (conn->flags & NGTCP2_CONN_FLAG_PPE_PENDING) != 0; - size_t min_pktlen = conn_min_short_pktlen(conn); + size_t min_pktlen = conn_min_pktlen(conn); + int min_padded = 0; int padded = 0; ngtcp2_cc_pkt cc_pkt; uint64_t crypto_offset; @@ -3311,7 +3320,7 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, if (conn->local.settings.max_window && conn->tx.last_max_data_ts != UINT64_MAX && ts - conn->tx.last_max_data_ts < - NGTCP2_FLOW_WINDOW_RTT_FACTOR * cstat->smoothed_rtt && + NGTCP2_FLOW_WINDOW_RTT_FACTOR * cstat->smoothed_rtt && conn->local.settings.max_window > conn->rx.window) { target_max_data = NGTCP2_FLOW_WINDOW_SCALING_FACTOR * conn->rx.window; if (target_max_data > conn->local.settings.max_window) { @@ -3336,7 +3345,7 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, pktns->tx.frq = nfrc; conn->rx.max_offset = conn->rx.unsent_max_offset = - nfrc->fr.max_data.max_data; + nfrc->fr.max_data.max_data; } if (stream_blocked && conn_should_send_max_data(conn)) { @@ -3369,7 +3378,7 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, pktns->tx.last_pkt_num + 1, pktns_select_pkt_numlen(pktns), version, 0); - ngtcp2_ppe_init(ppe, dest, destlen, cc); + ngtcp2_ppe_init(ppe, dest, destlen, dgram_offset, cc); rv = ngtcp2_ppe_encode_hd(ppe, hd); if (rv != 0) { @@ -3407,8 +3416,8 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, } rv = ngtcp2_conn_create_ack_frame( - conn, &ackfr, pktns, type, ts, conn_compute_ack_delay(conn), - conn->local.transport_params.ack_delay_exponent); + conn, &ackfr, pktns, type, ts, conn_compute_ack_delay(conn), + conn->local.transport_params.ack_delay_exponent); if (rv != 0) { assert(ngtcp2_err_is_fatal(rv)); return rv; @@ -3422,10 +3431,9 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, 
ngtcp2_acktr_commit_ack(&pktns->acktr); ngtcp2_acktr_add_ack(&pktns->acktr, hd->pkt_num, ackfr->ack.largest_ack); - if (type == NGTCP2_PKT_1RTT) { - conn_handle_unconfirmed_key_update_from_remote( - conn, ackfr->ack.largest_ack, ts); - } + assert(NGTCP2_PKT_1RTT == type); + conn_handle_unconfirmed_key_update_from_remote( + conn, ackfr->ack.largest_ack, ts); pkt_empty = 0; } } @@ -3443,7 +3451,7 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, switch ((*pfrc)->fr.type) { case NGTCP2_FRAME_RESET_STREAM: strm = - ngtcp2_conn_find_stream(conn, (*pfrc)->fr.reset_stream.stream_id); + ngtcp2_conn_find_stream(conn, (*pfrc)->fr.reset_stream.stream_id); if (strm == NULL || !ngtcp2_strm_require_retransmit_reset_stream(strm)) { frc = *pfrc; @@ -3454,7 +3462,7 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, break; case NGTCP2_FRAME_STOP_SENDING: strm = - ngtcp2_conn_find_stream(conn, (*pfrc)->fr.stop_sending.stream_id); + ngtcp2_conn_find_stream(conn, (*pfrc)->fr.stop_sending.stream_id); if (strm == NULL || !ngtcp2_strm_require_retransmit_stop_sending(strm)) { frc = *pfrc; @@ -3484,10 +3492,10 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, } break; case NGTCP2_FRAME_MAX_STREAM_DATA: - strm = ngtcp2_conn_find_stream(conn, - (*pfrc)->fr.max_stream_data.stream_id); + strm = + ngtcp2_conn_find_stream(conn, (*pfrc)->fr.max_stream_data.stream_id); if (strm == NULL || !ngtcp2_strm_require_retransmit_max_stream_data( - strm, &(*pfrc)->fr.max_stream_data)) { + strm, &(*pfrc)->fr.max_stream_data)) { frc = *pfrc; *pfrc = (*pfrc)->next; ngtcp2_frame_chain_objalloc_del(frc, &conn->frc_objalloc, conn->mem); @@ -3504,9 +3512,9 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, break; case NGTCP2_FRAME_STREAM_DATA_BLOCKED: strm = ngtcp2_conn_find_stream( - conn, (*pfrc)->fr.stream_data_blocked.stream_id); + conn, (*pfrc)->fr.stream_data_blocked.stream_id); if (strm == NULL || !ngtcp2_strm_require_retransmit_stream_data_blocked( - strm, &(*pfrc)->fr.stream_data_blocked)) { + strm, &(*pfrc)->fr.stream_data_blocked)) { frc = *pfrc; *pfrc = (*pfrc)->next; ngtcp2_frame_chain_objalloc_del(frc, &conn->frc_objalloc, conn->mem); @@ -3543,7 +3551,7 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, left = ngtcp2_ppe_left(ppe); crypto_offset = - ngtcp2_strm_streamfrq_unacked_offset(&pktns->crypto.strm); + ngtcp2_strm_streamfrq_unacked_offset(&pktns->crypto.strm); if (crypto_offset == (uint64_t)-1) { ngtcp2_strm_streamfrq_clear(&pktns->crypto.strm); break; @@ -3593,14 +3601,14 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, nfrc->fr.type = NGTCP2_FRAME_RESET_STREAM; nfrc->fr.reset_stream.stream_id = strm->stream_id; nfrc->fr.reset_stream.app_error_code = - strm->tx.reset_stream_app_error_code; + strm->tx.reset_stream_app_error_code; nfrc->fr.reset_stream.final_size = strm->tx.offset; *pfrc = nfrc; strm->flags &= ~NGTCP2_STRM_FLAG_SEND_RESET_STREAM; rv = - conn_ppe_write_frame_hd_log(conn, ppe, &hd_logged, hd, &nfrc->fr); + conn_ppe_write_frame_hd_log(conn, ppe, &hd_logged, hd, &nfrc->fr); if (rv != 0) { assert(NGTCP2_ERR_NOBUF == rv); @@ -3620,8 +3628,8 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, strm->flags &= ~NGTCP2_STRM_FLAG_SEND_STOP_SENDING; } else { rv = conn_call_stream_stop_sending( - conn, strm->stream_id, strm->tx.stop_sending_app_error_code, - strm->stream_user_data); + conn, strm->stream_id, 
strm->tx.stop_sending_app_error_code, + strm->stream_user_data); if (rv != 0) { assert(ngtcp2_err_is_fatal(rv)); @@ -3636,13 +3644,13 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, nfrc->fr.type = NGTCP2_FRAME_STOP_SENDING; nfrc->fr.stop_sending.stream_id = strm->stream_id; nfrc->fr.stop_sending.app_error_code = - strm->tx.stop_sending_app_error_code; + strm->tx.stop_sending_app_error_code; *pfrc = nfrc; strm->flags &= ~NGTCP2_STRM_FLAG_SEND_STOP_SENDING; - rv = conn_ppe_write_frame_hd_log(conn, ppe, &hd_logged, hd, - &nfrc->fr); + rv = + conn_ppe_write_frame_hd_log(conn, ppe, &hd_logged, hd, &nfrc->fr); if (rv != 0) { assert(NGTCP2_ERR_NOBUF == rv); @@ -3672,7 +3680,7 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, strm->tx.last_blocked_offset = strm->tx.max_offset; rv = - conn_ppe_write_frame_hd_log(conn, ppe, &hd_logged, hd, &nfrc->fr); + conn_ppe_write_frame_hd_log(conn, ppe, &hd_logged, hd, &nfrc->fr); if (rv != 0) { assert(NGTCP2_ERR_NOBUF == rv); @@ -3698,10 +3706,10 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, if (conn->local.settings.max_stream_window && strm->tx.last_max_stream_data_ts != UINT64_MAX && ts - strm->tx.last_max_stream_data_ts < - NGTCP2_FLOW_WINDOW_RTT_FACTOR * cstat->smoothed_rtt && + NGTCP2_FLOW_WINDOW_RTT_FACTOR * cstat->smoothed_rtt && conn->local.settings.max_stream_window > strm->rx.window) { target_max_data = - NGTCP2_FLOW_WINDOW_SCALING_FACTOR * strm->rx.window; + NGTCP2_FLOW_WINDOW_SCALING_FACTOR * strm->rx.window; if (target_max_data > conn->local.settings.max_stream_window) { target_max_data = conn->local.settings.max_stream_window; } @@ -3721,14 +3729,14 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, nfrc->fr.type = NGTCP2_FRAME_MAX_STREAM_DATA; nfrc->fr.max_stream_data.stream_id = strm->stream_id; nfrc->fr.max_stream_data.max_stream_data = - strm->rx.unsent_max_offset + delta; + strm->rx.unsent_max_offset + delta; *pfrc = nfrc; strm->rx.max_offset = strm->rx.unsent_max_offset = - nfrc->fr.max_stream_data.max_stream_data; + nfrc->fr.max_stream_data.max_stream_data; rv = - conn_ppe_write_frame_hd_log(conn, ppe, &hd_logged, hd, &nfrc->fr); + conn_ppe_write_frame_hd_log(conn, ppe, &hd_logged, hd, &nfrc->fr); if (rv != 0) { assert(NGTCP2_ERR_NOBUF == rv); break; } @@ -3806,7 +3814,7 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, if (*pfrc == NULL && conn->remote.bidi.unsent_max_streams > conn->remote.bidi.max_streams) { rv = conn_call_extend_max_remote_streams_bidi( - conn, conn->remote.bidi.unsent_max_streams); + conn, conn->remote.bidi.unsent_max_streams); if (rv != 0) { assert(ngtcp2_err_is_fatal(rv)); return rv; } @@ -3838,7 +3846,7 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, if (*pfrc == NULL && conn->remote.uni.unsent_max_streams > conn->remote.uni.max_streams) { rv = conn_call_extend_max_remote_streams_uni( - conn, conn->remote.uni.unsent_max_streams); + conn, conn->remote.uni.unsent_max_streams); if (rv != 0) { assert(ngtcp2_err_is_fatal(rv)); return rv; } @@ -3901,8 +3909,8 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, if (*pfrc == NULL && send_stream && (ndatalen = ngtcp2_pkt_stream_max_datalen( - vmsg->stream.strm->stream_id, vmsg->stream.strm->tx.offset, ndatalen, - left)) != (size_t)-1 && + vmsg->stream.strm->stream_id, vmsg->stream.strm->tx.offset, ndatalen, + left)) != (size_t)-1 && (ndatalen || datalen == 0)) { datacnt =
ngtcp2_vec_copy_at_most(data, NGTCP2_MAX_STREAM_DATACNT, vmsg->stream.data, vmsg->stream.datacnt, @@ -3912,7 +3920,7 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, assert((datacnt == 0 && datalen == 0) || (datacnt && datalen)); rv = ngtcp2_frame_chain_stream_datacnt_objalloc_new( - &nfrc, datacnt, &conn->frc_objalloc, conn->mem); + &nfrc, datacnt, &conn->frc_objalloc, conn->mem); if (rv != 0) { assert(ngtcp2_err_is_fatal(rv)); return rv; } @@ -3944,6 +3952,7 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, vmsg->stream.strm->tx.offset += ndatalen; conn->tx.offset += ndatalen; + vmsg->stream.strm->flags |= NGTCP2_STRM_FLAG_ANY_SENT; if (fin) { ngtcp2_strm_shutdown(vmsg->stream.strm, NGTCP2_STRM_FLAG_SHUT_WR); @@ -4095,7 +4104,7 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, } keep_alive_expired = - type == NGTCP2_PKT_1RTT && conn_keep_alive_expired(conn, ts); + type == NGTCP2_PKT_1RTT && conn_keep_alive_expired(conn, ts); if (conn->pktns.rtb.probe_pkt_left == 0 && !keep_alive_expired && !require_padding) { @@ -4154,8 +4163,11 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, rtb_entry_flags |= NGTCP2_RTB_ENTRY_FLAG_ACK_ELICITING; if (conn->pktns.rtb.probe_pkt_left) { rtb_entry_flags |= NGTCP2_RTB_ENTRY_FLAG_PROBE; + } else { + rtb_entry_flags |= NGTCP2_RTB_ENTRY_FLAG_PTO_ELICITING; } pktns->tx.non_ack_pkt_start_ts = UINT64_MAX; + pkt_empty = 0; } } else if (pktns->tx.non_ack_pkt_start_ts == UINT64_MAX) { pktns->tx.non_ack_pkt_start_ts = ts; @@ -4166,19 +4178,19 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, /* TODO Push STREAM frame back to ngtcp2_strm if there is an error before ngtcp2_rtb_entry is safely created and added.
*/ - if (require_padding || - /* Making full sized packet will help GSO a bit */ - ngtcp2_ppe_left(ppe) < 10) { - lfr.padding.len = ngtcp2_ppe_padding(ppe); - } else if (type == NGTCP2_PKT_1RTT) { - lfr.padding.len = ngtcp2_ppe_padding_size(ppe, min_pktlen); + if (require_padding) { + lfr.padding.len = ngtcp2_ppe_dgram_padding(ppe); } else { - lfr.padding.len = ngtcp2_ppe_padding_hp_sample(ppe); + lfr.padding.len = ngtcp2_ppe_padding_size(ppe, min_pktlen); + min_padded = 1; } if (lfr.padding.len) { + if (!min_padded || + (rtb_entry_flags & NGTCP2_RTB_ENTRY_FLAG_ACK_ELICITING)) { + padded = 1; + } lfr.type = NGTCP2_FRAME_PADDING; - padded = 1; ngtcp2_log_tx_fr(&conn->log, hd, &lfr); ngtcp2_qlog_write_frame(&conn->qlog, &lfr); } @@ -4200,11 +4212,11 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, conn_handle_tx_ecn(conn, pi, &rtb_entry_flags, pktns, hd, ts); } - rv = ngtcp2_rtb_entry_objalloc_new(&ent, hd, NULL, ts, (size_t)nwrite, - rtb_entry_flags, - &conn->rtb_entry_objalloc); + rv = + ngtcp2_rtb_entry_objalloc_new(&ent, hd, NULL, ts, (size_t)nwrite, + rtb_entry_flags, &conn->rtb_entry_objalloc); if (rv != 0) { - assert(ngtcp2_err_is_fatal((int)nwrite)); + assert(ngtcp2_err_is_fatal(rv)); return rv; } @@ -4220,7 +4232,7 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, ts); } - rv = conn_on_pkt_sent(conn, &pktns->rtb, ent); + rv = conn_on_pkt_sent(conn, pktns, ent); if (rv != 0) { assert(ngtcp2_err_is_fatal(rv)); ngtcp2_rtb_entry_objalloc_del(ent, &conn->rtb_entry_objalloc, @@ -4231,10 +4243,10 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, if (rtb_entry_flags & NGTCP2_RTB_ENTRY_FLAG_ACK_ELICITING) { if (conn->cc.on_pkt_sent) { conn->cc.on_pkt_sent( - &conn->cc, &conn->cstat, - ngtcp2_cc_pkt_init(&cc_pkt, hd->pkt_num, (size_t)nwrite, - NGTCP2_PKTNS_ID_APPLICATION, ts, ent->rst.lost, - ent->rst.tx_in_flight, ent->rst.is_app_limited)); + &conn->cc, &conn->cstat, + ngtcp2_cc_pkt_init(&cc_pkt, hd->pkt_num, (size_t)nwrite, + NGTCP2_PKTNS_ID_APPLICATION, ts, ent->rst.lost, + ent->rst.tx_in_flight, ent->rst.is_app_limited)); } if (conn->flags & NGTCP2_CONN_FLAG_RESTART_IDLE_TIMER_ON_WRITE) { @@ -4269,9 +4281,9 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, } ngtcp2_ssize ngtcp2_conn_write_single_frame_pkt( - ngtcp2_conn *conn, ngtcp2_pkt_info *pi, uint8_t *dest, size_t destlen, - uint8_t type, uint8_t flags, const ngtcp2_cid *dcid, ngtcp2_frame *fr, - uint16_t rtb_entry_flags, const ngtcp2_path *path, ngtcp2_tstamp ts) { + ngtcp2_conn *conn, ngtcp2_pkt_info *pi, uint8_t *dest, size_t destlen, + uint8_t type, uint8_t flags, const ngtcp2_cid *dcid, ngtcp2_frame *fr, + uint16_t rtb_entry_flags, const ngtcp2_path *path, ngtcp2_tstamp ts) { int rv; ngtcp2_ppe ppe; ngtcp2_pkt_hd hd; @@ -4332,7 +4344,7 @@ ngtcp2_ssize ngtcp2_conn_write_single_frame_pkt( pktns->tx.last_pkt_num + 1, pktns_select_pkt_numlen(pktns), version, 0); - ngtcp2_ppe_init(&ppe, dest, destlen, &cc); + ngtcp2_ppe_init(&ppe, dest, destlen, 0, &cc); rv = ngtcp2_ppe_encode_hd(&ppe, &hd); if (rv != 0) { @@ -4354,25 +4366,22 @@ ngtcp2_ssize ngtcp2_conn_write_single_frame_pkt( } lfr.type = NGTCP2_FRAME_PADDING; - if (flags & NGTCP2_WRITE_PKT_FLAG_REQUIRE_PADDING) { - lfr.padding.len = ngtcp2_ppe_padding(&ppe); + if (flags & NGTCP2_WRITE_PKT_FLAG_REQUIRE_PADDING_FULL) { + lfr.padding.len = ngtcp2_ppe_dgram_padding_size(&ppe, destlen); + } else if (flags & NGTCP2_WRITE_PKT_FLAG_REQUIRE_PADDING) { + lfr.padding.len =
ngtcp2_ppe_dgram_padding(&ppe); } else { switch (fr->type) { case NGTCP2_FRAME_PATH_CHALLENGE: case NGTCP2_FRAME_PATH_RESPONSE: if (!conn->server || destlen >= NGTCP2_MAX_UDP_PAYLOAD_SIZE) { - lfr.padding.len = ngtcp2_ppe_padding(&ppe); + lfr.padding.len = ngtcp2_ppe_dgram_padding(&ppe); } else { lfr.padding.len = 0; } break; default: - if (type == NGTCP2_PKT_1RTT) { - lfr.padding.len = - ngtcp2_ppe_padding_size(&ppe, conn_min_short_pktlen(conn)); - } else { - lfr.padding.len = ngtcp2_ppe_padding_hp_sample(&ppe); - } + lfr.padding.len = ngtcp2_ppe_padding_size(&ppe, conn_min_pktlen(conn)); } } if (lfr.padding.len) { @@ -4402,6 +4411,12 @@ ngtcp2_ssize ngtcp2_conn_write_single_frame_pkt( conn_handle_unconfirmed_key_update_from_remote(conn, fr->ack.largest_ack, ts); } + + if (!(flags & (NGTCP2_WRITE_PKT_FLAG_REQUIRE_PADDING_FULL | + NGTCP2_WRITE_PKT_FLAG_REQUIRE_PADDING))) { + padded = 0; + } + break; } @@ -4412,14 +4427,14 @@ ngtcp2_ssize ngtcp2_conn_write_single_frame_pkt( conn_handle_tx_ecn(conn, pi, &rtb_entry_flags, pktns, &hd, ts); } - rv = ngtcp2_rtb_entry_objalloc_new(&rtbent, &hd, NULL, ts, (size_t)nwrite, - rtb_entry_flags, - &conn->rtb_entry_objalloc); + rv = + ngtcp2_rtb_entry_objalloc_new(&rtbent, &hd, NULL, ts, (size_t)nwrite, + rtb_entry_flags, &conn->rtb_entry_objalloc); if (rv != 0) { return rv; } - rv = conn_on_pkt_sent(conn, &pktns->rtb, rtbent); + rv = conn_on_pkt_sent(conn, pktns, rtbent); if (rv != 0) { ngtcp2_rtb_entry_objalloc_del(rtbent, &conn->rtb_entry_objalloc, &conn->frc_objalloc, conn->mem); @@ -4522,6 +4537,10 @@ static int conn_retire_dcid_seq(ngtcp2_conn *conn, uint64_t seq) { ngtcp2_frame_chain *nfrc; int rv; + if (ngtcp2_conn_check_retired_dcid_tracked(conn, seq)) { + return 0; + } + rv = ngtcp2_conn_track_retired_dcid_seq(conn, seq); if (rv != 0) { return rv; @@ -4658,13 +4677,15 @@ static int conn_start_pmtud(ngtcp2_conn *conn) { assert(conn->remote.transport_params->max_udp_payload_size >= NGTCP2_MAX_UDP_PAYLOAD_SIZE); - hard_max_udp_payload_size = (size_t)ngtcp2_min( - conn->remote.transport_params->max_udp_payload_size, - (uint64_t)conn->local.settings.max_tx_udp_payload_size); + hard_max_udp_payload_size = (size_t)ngtcp2_min_uint64( + conn->remote.transport_params->max_udp_payload_size, + (uint64_t)conn->local.settings.max_tx_udp_payload_size); - rv = ngtcp2_pmtud_new(&conn->pmtud, conn->dcid.current.max_udp_payload_size, - hard_max_udp_payload_size, - conn->pktns.tx.last_pkt_num + 1, conn->mem); + rv = + ngtcp2_pmtud_new(&conn->pmtud, conn->dcid.current.max_udp_payload_size, + hard_max_udp_payload_size, conn->pktns.tx.last_pkt_num + 1, + conn->local.settings.pmtud_probes, + conn->local.settings.pmtud_probeslen, conn->mem); if (rv != 0) { return rv; } @@ -4715,12 +4736,11 @@ static ngtcp2_ssize conn_write_pmtud_probe(ngtcp2_conn *conn, lfr.type = NGTCP2_FRAME_PING; nwrite = ngtcp2_conn_write_single_frame_pkt( - conn, pi, dest, probelen, NGTCP2_PKT_1RTT, - NGTCP2_WRITE_PKT_FLAG_REQUIRE_PADDING, &conn->dcid.current.cid, &lfr, - NGTCP2_RTB_ENTRY_FLAG_ACK_ELICITING | - NGTCP2_RTB_ENTRY_FLAG_PTO_ELICITING | - NGTCP2_RTB_ENTRY_FLAG_PMTUD_PROBE, - NULL, ts); + conn, pi, dest, probelen, NGTCP2_PKT_1RTT, + NGTCP2_WRITE_PKT_FLAG_REQUIRE_PADDING_FULL, &conn->dcid.current.cid, &lfr, + NGTCP2_RTB_ENTRY_FLAG_ACK_ELICITING | NGTCP2_RTB_ENTRY_FLAG_PTO_ELICITING | + NGTCP2_RTB_ENTRY_FLAG_PMTUD_PROBE, + NULL, ts); if (nwrite < 0) { return nwrite; } @@ -4813,19 +4833,19 @@ static size_t conn_shape_udp_payload(ngtcp2_conn *conn, const ngtcp2_dcid *dcid, 
assert(conn->remote.transport_params->max_udp_payload_size >= NGTCP2_MAX_UDP_PAYLOAD_SIZE); - payloadlen = - (size_t)ngtcp2_min((uint64_t)payloadlen, - conn->remote.transport_params->max_udp_payload_size); + payloadlen = (size_t)ngtcp2_min_uint64( + (uint64_t)payloadlen, + conn->remote.transport_params->max_udp_payload_size); } payloadlen = - ngtcp2_min(payloadlen, conn->local.settings.max_tx_udp_payload_size); + ngtcp2_min_size(payloadlen, conn->local.settings.max_tx_udp_payload_size); if (conn->local.settings.no_tx_udp_payload_size_shaping) { return payloadlen; } - return ngtcp2_min(payloadlen, dcid->max_udp_payload_size); + return ngtcp2_min_size(payloadlen, dcid->max_udp_payload_size); } static void conn_reset_congestion_state(ngtcp2_conn *conn, ngtcp2_tstamp ts); @@ -4920,15 +4940,15 @@ static ngtcp2_ssize conn_write_path_challenge(ngtcp2_conn *conn, initial_pto = conn_compute_initial_pto(conn, &conn->pktns); timeout = conn_compute_pto(conn, &conn->pktns); - timeout = ngtcp2_max(timeout, initial_pto); + timeout = ngtcp2_max_uint64(timeout, initial_pto); expiry = ts + timeout * (1ULL << pv->round); - destlen = ngtcp2_min(destlen, NGTCP2_MAX_UDP_PAYLOAD_SIZE); + destlen = ngtcp2_min_size(destlen, NGTCP2_MAX_UDP_PAYLOAD_SIZE); if (conn->server) { if (!(pv->dcid.flags & NGTCP2_DCID_FLAG_PATH_VALIDATED)) { tx_left = conn_server_tx_left(conn, &pv->dcid); - destlen = (size_t)ngtcp2_min((uint64_t)destlen, tx_left); + destlen = (size_t)ngtcp2_min_uint64((uint64_t)destlen, tx_left); if (destlen == 0) { return 0; } @@ -4946,10 +4966,10 @@ static ngtcp2_ssize conn_write_path_challenge(ngtcp2_conn *conn, ngtcp2_pv_add_entry(pv, lfr.path_challenge.data, expiry, flags, ts); nwrite = ngtcp2_conn_write_single_frame_pkt( - conn, pi, dest, destlen, NGTCP2_PKT_1RTT, NGTCP2_WRITE_PKT_FLAG_NONE, - &pv->dcid.cid, &lfr, - NGTCP2_RTB_ENTRY_FLAG_ACK_ELICITING | NGTCP2_RTB_ENTRY_FLAG_PTO_ELICITING, - &pv->dcid.ps.path, ts); + conn, pi, dest, destlen, NGTCP2_PKT_1RTT, NGTCP2_WRITE_PKT_FLAG_NONE, + &pv->dcid.cid, &lfr, + NGTCP2_RTB_ENTRY_FLAG_ACK_ELICITING | NGTCP2_RTB_ENTRY_FLAG_PTO_ELICITING, + &pv->dcid.ps.path, ts); if (nwrite <= 0) { return nwrite; } @@ -5039,11 +5059,11 @@ static ngtcp2_ssize conn_write_path_response(ngtcp2_conn *conn, } } - destlen = ngtcp2_min(destlen, NGTCP2_MAX_UDP_PAYLOAD_SIZE); + destlen = ngtcp2_min_size(destlen, NGTCP2_MAX_UDP_PAYLOAD_SIZE); if (conn->server && !(dcid->flags & NGTCP2_DCID_FLAG_PATH_VALIDATED)) { tx_left = conn_server_tx_left(conn, dcid); - destlen = (size_t)ngtcp2_min((uint64_t)destlen, tx_left); + destlen = (size_t)ngtcp2_min_uint64((uint64_t)destlen, tx_left); if (destlen == 0) { return 0; } @@ -5053,9 +5073,8 @@ static ngtcp2_ssize conn_write_path_response(ngtcp2_conn *conn, memcpy(lfr.path_response.data, pcent->data, sizeof(lfr.path_response.data)); nwrite = ngtcp2_conn_write_single_frame_pkt( - conn, pi, dest, destlen, NGTCP2_PKT_1RTT, NGTCP2_WRITE_PKT_FLAG_NONE, - &dcid->cid, &lfr, NGTCP2_RTB_ENTRY_FLAG_ACK_ELICITING, &pcent->ps.path, - ts); + conn, pi, dest, destlen, NGTCP2_PKT_1RTT, NGTCP2_WRITE_PKT_FLAG_NONE, + &dcid->cid, &lfr, NGTCP2_RTB_ENTRY_FLAG_ACK_ELICITING, &pcent->ps.path, ts); if (nwrite <= 0) { return nwrite; } @@ -5077,10 +5096,10 @@ ngtcp2_ssize ngtcp2_conn_write_pkt_versioned(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, uint8_t *dest, size_t destlen, ngtcp2_tstamp ts) { return ngtcp2_conn_writev_stream_versioned( - conn, path, pkt_info_version, pi, dest, destlen, - /* pdatalen = */ NULL, NGTCP2_WRITE_STREAM_FLAG_NONE, - /* stream_id = */ -1, - /* 
datav = */ NULL, /* datavcnt = */ 0, ts); + conn, path, pkt_info_version, pi, dest, destlen, + /* pdatalen = */ NULL, NGTCP2_WRITE_STREAM_FLAG_NONE, + /* stream_id = */ -1, + /* datav = */ NULL, /* datavcnt = */ 0, ts); } /* @@ -5225,8 +5244,8 @@ static int conn_on_retry(ngtcp2_conn *conn, const ngtcp2_pkt_hd *hd, retry.odcid = conn->dcid.current.cid; rv = ngtcp2_pkt_verify_retry_tag( - conn->client_chosen_version, &retry, pkt, pktlen, conn->callbacks.encrypt, - &conn->crypto.retry_aead, &conn->crypto.retry_aead_ctx); + conn->client_chosen_version, &retry, pkt, pktlen, conn->callbacks.encrypt, + &conn->crypto.retry_aead, &conn->crypto.retry_aead_ctx); if (rv != 0) { ngtcp2_log_info(&conn->log, NGTCP2_LOG_EVENT_PKT, "unable to verify Retry packet integrity"); @@ -5303,15 +5322,14 @@ int ngtcp2_conn_detect_lost_pkt(ngtcp2_conn *conn, ngtcp2_pktns *pktns, * conn_recv_ack processes received ACK frame |fr|. |pkt_ts| is the * timestamp when packet is received. |ts| should be the current * time. Usually they are the same, but for buffered packets, - * |pkt_ts| would be earlier than |ts|. + * |pkt_ts| would be earlier than |ts|. This function needs to be + * called after |fr| is validated by ngtcp2_pkt_validate_ack. * * This function returns 0 if it succeeds, or one of the following * negative error codes: * * NGTCP2_ERR_NOMEM * Out of memory - * NGTCP2_ERR_ACK_FRAME - * ACK frame is malformed. * NGTCP2_ERR_PROTO * |fr| acknowledges a packet this endpoint has not sent. * NGTCP2_ERR_CALLBACK_FAILURE @@ -5319,7 +5337,6 @@ int ngtcp2_conn_detect_lost_pkt(ngtcp2_conn *conn, ngtcp2_pktns *pktns, */ static int conn_recv_ack(ngtcp2_conn *conn, ngtcp2_pktns *pktns, ngtcp2_ack *fr, ngtcp2_tstamp pkt_ts, ngtcp2_tstamp ts) { - int rv; ngtcp2_ssize num_acked; ngtcp2_conn_stat *cstat = &conn->cstat; @@ -5327,15 +5344,10 @@ static int conn_recv_ack(ngtcp2_conn *conn, ngtcp2_pktns *pktns, ngtcp2_ack *fr, return NGTCP2_ERR_PROTO; } - rv = ngtcp2_pkt_validate_ack(fr, conn->local.settings.initial_pkt_num); - if (rv != 0) { - return rv; - } - ngtcp2_acktr_recv_ack(&pktns->acktr, fr); - num_acked = ngtcp2_rtb_recv_ack(&pktns->rtb, fr, &conn->cstat, conn, pktns, - pkt_ts, ts); + num_acked = + ngtcp2_rtb_recv_ack(&pktns->rtb, fr, &conn->cstat, conn, pktns, pkt_ts, ts); if (num_acked < 0) { assert(ngtcp2_err_is_fatal((int)num_acked)); return (int)num_acked; @@ -5351,7 +5363,7 @@ static int conn_recv_ack(ngtcp2_conn *conn, ngtcp2_pktns *pktns, ngtcp2_ack *fr, (conn->server || (conn->flags & NGTCP2_CONN_FLAG_SERVER_ADDR_VERIFIED))) { /* Reset PTO count but no less than 2 to avoid frequent probe packet transmission. */ - cstat->pto_count = ngtcp2_min(cstat->pto_count, 2); + cstat->pto_count = ngtcp2_min_size(cstat->pto_count, 2); } ngtcp2_conn_set_loss_detection_timer(conn, ts); @@ -5366,7 +5378,7 @@ static int conn_recv_ack(ngtcp2_conn *conn, ngtcp2_pktns *pktns, ngtcp2_ack *fr, static void assign_recved_ack_delay_unscaled(ngtcp2_ack *fr, uint64_t ack_delay_exponent) { fr->ack_delay_unscaled = - fr->ack_delay * (1ULL << ack_delay_exponent) * NGTCP2_MICROSECONDS; + fr->ack_delay * (1ULL << ack_delay_exponent) * NGTCP2_MICROSECONDS; } /* @@ -5467,7 +5479,7 @@ static int conn_recv_max_stream_data(ngtcp2_conn *conn, * conn_recv_max_data processes received MAX_DATA frame |fr|. 
*/ static void conn_recv_max_data(ngtcp2_conn *conn, const ngtcp2_max_data *fr) { - conn->tx.max_offset = ngtcp2_max(conn->tx.max_offset, fr->max_data); + conn->tx.max_offset = ngtcp2_max_uint64(conn->tx.max_offset, fr->max_data); } /* @@ -5496,7 +5508,7 @@ static int conn_buffer_pkt(ngtcp2_conn *conn, ngtcp2_pktns *pktns, } rv = - ngtcp2_pkt_chain_new(&pc, path, pi, pkt, pktlen, dgramlen, ts, conn->mem); + ngtcp2_pkt_chain_new(&pc, path, pi, pkt, pktlen, dgramlen, ts, conn->mem); if (rv != 0) { return rv; } @@ -5695,8 +5707,8 @@ conn_emit_pending_crypto_data(ngtcp2_conn *conn, offset = rx_offset; rx_offset += datalen; - rv = conn_call_recv_crypto_data(conn, encryption_level, offset, data, - datalen); + rv = + conn_call_recv_crypto_data(conn, encryption_level, offset, data, datalen); if (rv != 0) { return rv; } @@ -5735,7 +5747,7 @@ static int conn_recv_connection_close(ngtcp2_conn *conn, } } - ccerr->reasonlen = ngtcp2_min(fr->reasonlen, NGTCP2_CCERR_MAX_REASONLEN); + ccerr->reasonlen = ngtcp2_min_size(fr->reasonlen, NGTCP2_CCERR_MAX_REASONLEN); ngtcp2_cpymem((uint8_t *)ccerr->reason, fr->reason, ccerr->reasonlen); return 0; @@ -5907,9 +5919,9 @@ static int pktns_commit_recv_pkt_num(ngtcp2_pktns *pktns, int64_t pkt_num, if (pktns->rx.max_ack_eliciting_pkt_num != -1) { if (pkt_num < pktns->rx.max_ack_eliciting_pkt_num) { ngtcp2_acktr_immediate_ack(&pktns->acktr); - } else if (pkt_num > pktns->rx.max_ack_eliciting_pkt_num) { + } else if (pkt_num != pktns->rx.max_ack_eliciting_pkt_num + 1) { r = ngtcp2_gaptr_get_first_gap_after( - &pktns->rx.pngap, (uint64_t)pktns->rx.max_ack_eliciting_pkt_num); + &pktns->rx.pngap, (uint64_t)pktns->rx.max_ack_eliciting_pkt_num); if (r.begin < (uint64_t)pkt_num) { ngtcp2_acktr_immediate_ack(&pktns->acktr); @@ -5977,7 +5989,7 @@ static int vneg_available_versions_includes(const uint8_t *available_versions, } for (i = 0; i < available_versionslen; i += sizeof(uint32_t)) { - available_versions = ngtcp2_get_uint32(&v, available_versions); + available_versions = ngtcp2_get_uint32be(&v, available_versions); if (version == v) { return 1; @@ -6097,6 +6109,7 @@ conn_recv_handshake_pkt(ngtcp2_conn *conn, const ngtcp2_path *path, ngtcp2_strm *crypto; ngtcp2_encryption_level encryption_level; int invalid_reserved_bits = 0; + size_t num_ack_processed = 0; if (pktlen == 0) { return 0; @@ -6116,8 +6129,8 @@ conn_recv_handshake_pkt(ngtcp2_conn *conn, const ngtcp2_path *path, ngtcp2_log_info(&conn->log, NGTCP2_LOG_EVENT_CON, "buffering 1RTT packet len=%zu", pktlen); - rv = conn_buffer_pkt(conn, &conn->pktns, path, pi, pkt, pktlen, dgramlen, - ts); + rv = + conn_buffer_pkt(conn, &conn->pktns, path, pi, pkt, pktlen, dgramlen, ts); if (rv != 0) { assert(ngtcp2_err_is_fatal(rv)); return rv; @@ -6158,8 +6171,8 @@ conn_recv_handshake_pkt(ngtcp2_conn *conn, const ngtcp2_path *path, "packet was ignored because of mismatched SCID"); return NGTCP2_ERR_DISCARD_PKT; } - rv = conn_on_version_negotiation(conn, &hd, pkt + hdpktlen, - pktlen - hdpktlen); + rv = + conn_on_version_negotiation(conn, &hd, pkt + hdpktlen, pktlen - hdpktlen); if (rv != 0) { if (ngtcp2_err_is_fatal(rv)) { return rv; @@ -6250,7 +6263,7 @@ conn_recv_handshake_pkt(ngtcp2_conn *conn, const ngtcp2_path *path, ngtcp2_ssize nread2; /* TODO Avoid to parse header twice. 
*/ nread2 = - conn_recv_pkt(conn, path, pi, pkt, pktlen, dgramlen, pkt_ts, ts); + conn_recv_pkt(conn, path, pi, pkt, pktlen, dgramlen, pkt_ts, ts); if (nread2 < 0) { return nread2; } @@ -6275,8 +6288,8 @@ conn_recv_handshake_pkt(ngtcp2_conn *conn, const ngtcp2_path *path, case NGTCP2_PKT_INITIAL: if (!conn->in_pktns) { ngtcp2_log_info( - &conn->log, NGTCP2_LOG_EVENT_PKT, - "Initial packet is discarded because keys have been discarded"); + &conn->log, NGTCP2_LOG_EVENT_PKT, + "Initial packet is discarded because keys have been discarded"); return (ngtcp2_ssize)pktlen; } @@ -6285,10 +6298,10 @@ conn_recv_handshake_pkt(ngtcp2_conn *conn, const ngtcp2_path *path, if (conn->server) { if (dgramlen < NGTCP2_MAX_UDP_PAYLOAD_SIZE) { ngtcp2_log_info( - &conn->log, NGTCP2_LOG_EVENT_PKT, - "Initial packet was ignored because it is included in UDP datagram " - "less than %zu bytes: %zu bytes", - NGTCP2_MAX_UDP_PAYLOAD_SIZE, dgramlen); + &conn->log, NGTCP2_LOG_EVENT_PKT, + "Initial packet was ignored because it is included in UDP datagram " + "less than %zu bytes: %zu bytes", + NGTCP2_MAX_UDP_PAYLOAD_SIZE, dgramlen); return NGTCP2_ERR_DISCARD_PKT; } if (conn->local.settings.tokenlen) { @@ -6329,10 +6342,9 @@ conn_recv_handshake_pkt(ngtcp2_conn *conn, const ngtcp2_path *path, /* Install new Initial keys using QUIC version = hd.version */ rv = conn_call_version_negotiation( - conn, hd.version, - (conn->flags & NGTCP2_CONN_FLAG_RECV_RETRY) - ? &conn->dcid.current.cid - : &conn->rcid); + conn, hd.version, + (conn->flags & NGTCP2_CONN_FLAG_RECV_RETRY) ? &conn->dcid.current.cid + : &conn->rcid); if (rv != 0) { return rv; } @@ -6364,8 +6376,8 @@ conn_recv_handshake_pkt(ngtcp2_conn *conn, const ngtcp2_path *path, if (!conn->hs_pktns->crypto.rx.ckm) { if (conn->server) { ngtcp2_log_info( - &conn->log, NGTCP2_LOG_EVENT_PKT, - "Handshake packet at this point is unexpected and discarded"); + &conn->log, NGTCP2_LOG_EVENT_PKT, + "Handshake packet at this point is unexpected and discarded"); return (ngtcp2_ssize)pktlen; } ngtcp2_log_info(&conn->log, NGTCP2_LOG_EVENT_CON, @@ -6420,8 +6432,8 @@ conn_recv_handshake_pkt(ngtcp2_conn *conn, const ngtcp2_path *path, payload = pkt + hdpktlen; payloadlen = hd.len - hd.pkt_numlen; - hd.pkt_num = ngtcp2_pkt_adjust_pkt_num(pktns->rx.max_pkt_num, hd.pkt_num, - hd.pkt_numlen); + hd.pkt_num = + ngtcp2_pkt_adjust_pkt_num(pktns->rx.max_pkt_num, hd.pkt_num, hd.pkt_numlen); if (hd.pkt_num > NGTCP2_MAX_PKT_NUM) { ngtcp2_log_info(&conn->log, NGTCP2_LOG_EVENT_PKT, "pkn=%" PRId64 " is greater than maximum pkn", hd.pkt_num); @@ -6542,6 +6554,13 @@ conn_recv_handshake_pkt(ngtcp2_conn *conn, const ngtcp2_path *path, case NGTCP2_FRAME_ACK_ECN: fr->ack.ack_delay = 0; fr->ack.ack_delay_unscaled = 0; + + rv = + ngtcp2_pkt_validate_ack(&fr->ack, conn->local.settings.initial_pkt_num); + if (rv != 0) { + return rv; + } + break; } @@ -6550,6 +6569,9 @@ conn_recv_handshake_pkt(ngtcp2_conn *conn, const ngtcp2_path *path, switch (fr->type) { case NGTCP2_FRAME_ACK: case NGTCP2_FRAME_ACK_ECN: + if (num_ack_processed >= NGTCP2_MAX_ACK_PER_PKT) { + break; + } if (!conn->server && hd.type == NGTCP2_PKT_HANDSHAKE) { conn->flags |= NGTCP2_CONN_FLAG_SERVER_ADDR_VERIFIED; } @@ -6557,6 +6579,7 @@ conn_recv_handshake_pkt(ngtcp2_conn *conn, const ngtcp2_path *path, if (rv != 0) { return rv; } + ++num_ack_processed; break; case NGTCP2_FRAME_PADDING: break; @@ -6609,8 +6632,8 @@ conn_recv_handshake_pkt(ngtcp2_conn *conn, const ngtcp2_path *path, /* Initial and Handshake are always acknowledged without delay. 
No need to call ngtcp2_acktr_immediate_ack(). */ - rv = ngtcp2_conn_sched_ack(conn, &pktns->acktr, hd.pkt_num, require_ack, - pkt_ts); + rv = + ngtcp2_conn_sched_ack(conn, &pktns->acktr, hd.pkt_num, require_ack, pkt_ts); if (rv != 0) { return rv; } @@ -6661,7 +6684,7 @@ static ngtcp2_ssize conn_recv_handshake_cpkt(ngtcp2_conn *conn, while (pktlen) { nread = - conn_recv_handshake_pkt(conn, path, pi, pkt, pktlen, dgramlen, ts, ts); + conn_recv_handshake_pkt(conn, path, pi, pkt, pktlen, dgramlen, ts, ts); if (nread < 0) { if (ngtcp2_err_is_fatal((int)nread)) { return nread; @@ -6673,7 +6696,7 @@ static ngtcp2_ssize conn_recv_handshake_cpkt(ngtcp2_conn *conn, if ((pkt[0] & NGTCP2_HEADER_FORM_BIT) && pktlen > 4) { /* Not a Version Negotiation packet */ - ngtcp2_get_uint32(&version, &pkt[1]); + ngtcp2_get_uint32be(&version, &pkt[1]); if (ngtcp2_pkt_get_type_long(version, pkt[0]) == NGTCP2_PKT_INITIAL) { if (conn->server) { if (is_unrecoverable_error((int)nread)) { @@ -6738,14 +6761,14 @@ int ngtcp2_conn_init_stream(ngtcp2_conn *conn, ngtcp2_strm *strm, if (bidi_stream(stream_id)) { if (local_stream) { max_rx_offset = - conn->local.transport_params.initial_max_stream_data_bidi_local; + conn->local.transport_params.initial_max_stream_data_bidi_local; max_tx_offset = - conn->remote.transport_params->initial_max_stream_data_bidi_remote; + conn->remote.transport_params->initial_max_stream_data_bidi_remote; } else { max_rx_offset = - conn->local.transport_params.initial_max_stream_data_bidi_remote; + conn->local.transport_params.initial_max_stream_data_bidi_remote; max_tx_offset = - conn->remote.transport_params->initial_max_stream_data_bidi_local; + conn->remote.transport_params->initial_max_stream_data_bidi_local; } } else if (local_stream) { max_rx_offset = 0; @@ -6759,8 +6782,8 @@ int ngtcp2_conn_init_stream(ngtcp2_conn *conn, ngtcp2_strm *strm, max_tx_offset, stream_user_data, &conn->frc_objalloc, conn->mem); - rv = ngtcp2_map_insert(&conn->strms, (ngtcp2_map_key_type)strm->stream_id, - strm); + rv = + ngtcp2_map_insert(&conn->strms, (ngtcp2_map_key_type)strm->stream_id, strm); if (rv != 0) { assert(rv != NGTCP2_ERR_INVALID_ARGUMENT); goto fail; @@ -6833,6 +6856,12 @@ static int conn_emit_pending_stream_data(ngtcp2_conn *conn, ngtcp2_strm *strm, return rv; } + /* ngtcp2_conn_shutdown_stream_read from a callback will free + strm->rx.rob. 
*/ + if (!strm->rx.rob) { + return 0; + } + ngtcp2_rob_pop(strm->rx.rob, rx_offset - datalen, datalen); } } @@ -6901,7 +6930,8 @@ static int conn_recv_crypto(ngtcp2_conn *conn, return 0; } - crypto->rx.last_offset = ngtcp2_max(crypto->rx.last_offset, fr_end_offset); + crypto->rx.last_offset = + ngtcp2_max_uint64(crypto->rx.last_offset, fr_end_offset); /* TODO Before dispatching incoming data to TLS stack, make sure that previous data in previous encryption level has been @@ -6917,14 +6947,14 @@ static int conn_recv_crypto(ngtcp2_conn *conn, rx_offset += datalen; ngtcp2_strm_update_rx_offset(crypto, rx_offset); - rv = conn_call_recv_crypto_data(conn, encryption_level, offset, data, - datalen); + rv = + conn_call_recv_crypto_data(conn, encryption_level, offset, data, datalen); if (rv != 0) { return rv; } - rv = conn_emit_pending_crypto_data(conn, encryption_level, crypto, - rx_offset); + rv = + conn_emit_pending_crypto_data(conn, encryption_level, crypto, rx_offset); if (rv != 0) { return rv; } @@ -7100,7 +7130,8 @@ static int conn_recv_stream(ngtcp2_conn *conn, const ngtcp2_stream *fr) { return NGTCP2_ERR_FINAL_SIZE; } - strm->rx.last_offset = ngtcp2_max(strm->rx.last_offset, fr_end_offset); + strm->rx.last_offset = + ngtcp2_max_uint64(strm->rx.last_offset, fr_end_offset); if (fr_end_offset <= rx_offset) { return 0; @@ -7220,9 +7251,9 @@ handle_max_remote_streams_extension(uint64_t *punsent_max_remote_streams, size_t n) { if ( #if SIZE_MAX > UINT32_MAX - NGTCP2_MAX_STREAMS < n || + NGTCP2_MAX_STREAMS < n || #endif /* SIZE_MAX > UINT32_MAX */ - *punsent_max_remote_streams > (uint64_t)(NGTCP2_MAX_STREAMS - n)) { + *punsent_max_remote_streams > (uint64_t)(NGTCP2_MAX_STREAMS - n)) { *punsent_max_remote_streams = NGTCP2_MAX_STREAMS; } else { *punsent_max_remote_streams += n; @@ -7360,7 +7391,7 @@ static int conn_recv_reset_stream(ngtcp2_conn *conn, which are not passed to application. 
*/ if (!(strm->flags & NGTCP2_STRM_FLAG_STOP_SENDING)) { ngtcp2_conn_extend_max_offset(conn, strm->rx.last_offset - - ngtcp2_strm_rx_offset(strm)); + ngtcp2_strm_rx_offset(strm)); } conn->rx.offset += datalen; @@ -7368,7 +7399,7 @@ static int conn_recv_reset_stream(ngtcp2_conn *conn, strm->rx.last_offset = fr->final_size; strm->flags |= - NGTCP2_STRM_FLAG_SHUT_RD | NGTCP2_STRM_FLAG_RESET_STREAM_RECVED; + NGTCP2_STRM_FLAG_SHUT_RD | NGTCP2_STRM_FLAG_RESET_STREAM_RECVED; ngtcp2_strm_set_app_error_code(strm, fr->app_error_code); @@ -7471,7 +7502,7 @@ static int conn_recv_stop_sending(ngtcp2_conn *conn, } strm->flags |= - NGTCP2_STRM_FLAG_SHUT_WR | NGTCP2_STRM_FLAG_STOP_SENDING_RECVED; + NGTCP2_STRM_FLAG_SHUT_WR | NGTCP2_STRM_FLAG_STOP_SENDING_RECVED; ngtcp2_strm_streamfrq_clear(strm); @@ -7487,7 +7518,7 @@ static int check_stateless_reset(const ngtcp2_dcid *dcid, const ngtcp2_pkt_stateless_reset *sr) { return ngtcp2_path_eq(&dcid->ps.path, path) && ngtcp2_dcid_verify_stateless_reset_token( - dcid, sr->stateless_reset_token) == 0; + dcid, sr->stateless_reset_token) == 0; } /* @@ -7576,7 +7607,7 @@ static int conn_recv_max_streams(ngtcp2_conn *conn, return NGTCP2_ERR_FRAME_ENCODING; } - n = ngtcp2_min(fr->max_streams, NGTCP2_MAX_STREAMS); + n = ngtcp2_min_uint64(fr->max_streams, NGTCP2_MAX_STREAMS); if (fr->type == NGTCP2_FRAME_MAX_STREAMS_BIDI) { if (conn->local.bidi.max_streams < n) { @@ -8221,7 +8252,7 @@ static int conn_recv_handshake_done(ngtcp2_conn *conn, ngtcp2_tstamp ts) { conn->pktns.rtb.persistent_congestion_start_ts = ts; - conn_discard_handshake_state(conn, ts); + ngtcp2_conn_discard_handshake_state(conn, ts); assert(conn->remote.transport_params); @@ -8323,9 +8354,9 @@ static int conn_prepare_key_update(ngtcp2_conn *conn, ngtcp2_tstamp ts) { new_tx_ckm = conn->crypto.key_update.new_tx_ckm; rv = conn_call_update_key( - conn, new_rx_ckm->secret.base, new_tx_ckm->secret.base, &rx_aead_ctx, - new_rx_ckm->iv.base, &tx_aead_ctx, new_tx_ckm->iv.base, - rx_ckm->secret.base, tx_ckm->secret.base, secretlen); + conn, new_rx_ckm->secret.base, new_tx_ckm->secret.base, &rx_aead_ctx, + new_rx_ckm->iv.base, &tx_aead_ctx, new_tx_ckm->iv.base, rx_ckm->secret.base, + tx_ckm->secret.base, secretlen); if (rv != 0) { return rv; } @@ -8340,7 +8371,7 @@ static int conn_prepare_key_update(ngtcp2_conn *conn, ngtcp2_tstamp ts) { if (conn->crypto.key_update.old_rx_ckm) { conn_call_delete_crypto_aead_ctx( - conn, &conn->crypto.key_update.old_rx_ckm->aead_ctx); + conn, &conn->crypto.key_update.old_rx_ckm->aead_ctx); ngtcp2_crypto_km_del(conn->crypto.key_update.old_rx_ckm, conn->mem); conn->crypto.key_update.old_rx_ckm = NULL; } @@ -8412,7 +8443,6 @@ static int conn_recv_non_probing_pkt_on_new_path(ngtcp2_conn *conn, size_t dgramlen, int new_cid_used, ngtcp2_tstamp ts) { - ngtcp2_dcid dcid, *bound_dcid, *last; ngtcp2_pv *pv; int rv; @@ -8448,9 +8478,9 @@ static int conn_recv_non_probing_pkt_on_new_path(ngtcp2_conn *conn, } remote_addr_cmp = - ngtcp2_addr_compare(&conn->dcid.current.ps.path.remote, &path->remote); + ngtcp2_addr_compare(&conn->dcid.current.ps.path.remote, &path->remote); local_addr_eq = - ngtcp2_addr_eq(&conn->dcid.current.ps.path.local, &path->local); + ngtcp2_addr_eq(&conn->dcid.current.ps.path.local, &path->local); /* * When to change DCID? 
RFC 9002 section 9.5 says: @@ -8480,8 +8510,8 @@ static int conn_recv_non_probing_pkt_on_new_path(ngtcp2_conn *conn, bound_dcid = ngtcp2_ringbuf_get(&conn->dcid.bound.rb, i); if (ngtcp2_path_eq(&bound_dcid->ps.path, path)) { ngtcp2_log_info( - &conn->log, NGTCP2_LOG_EVENT_CON, - "Found DCID which has already been bound to the new path"); + &conn->log, NGTCP2_LOG_EVENT_CON, + "Found DCID which has already been bound to the new path"); ngtcp2_dcid_copy(&dcid, bound_dcid); if (i == 0) { @@ -8550,21 +8580,21 @@ static int conn_recv_non_probing_pkt_on_new_path(ngtcp2_conn *conn, pv->fallback_pto = pto; } + ngtcp2_dcid_copy(&conn->dcid.current, &dcid); + if (!local_addr_eq || (remote_addr_cmp & (NGTCP2_ADDR_COMPARE_FLAG_ADDR | NGTCP2_ADDR_COMPARE_FLAG_FAMILY))) { conn_reset_congestion_state(conn, ts); } - ngtcp2_dcid_copy(&conn->dcid.current, &dcid); - conn_reset_ecn_validation_state(conn); ngtcp2_conn_stop_pmtud(conn); if (conn->pv) { ngtcp2_log_info( - &conn->log, NGTCP2_LOG_EVENT_PTV, - "path migration is aborted because new migration has started"); + &conn->log, NGTCP2_LOG_EVENT_PTV, + "path migration is aborted because new migration has started"); rv = conn_abort_pv(conn, ts); if (rv != 0) { return rv; @@ -8658,6 +8688,7 @@ conn_recv_delayed_handshake_pkt(ngtcp2_conn *conn, const ngtcp2_pkt_info *pi, int rv; int require_ack = 0; ngtcp2_pktns *pktns; + size_t num_ack_processed = 0; assert(hd->type == NGTCP2_PKT_HANDSHAKE); @@ -8684,6 +8715,13 @@ conn_recv_delayed_handshake_pkt(ngtcp2_conn *conn, const ngtcp2_pkt_info *pi, case NGTCP2_FRAME_ACK_ECN: fr->ack.ack_delay = 0; fr->ack.ack_delay_unscaled = 0; + + rv = + ngtcp2_pkt_validate_ack(&fr->ack, conn->local.settings.initial_pkt_num); + if (rv != 0) { + return rv; + } + break; } @@ -8692,6 +8730,9 @@ conn_recv_delayed_handshake_pkt(ngtcp2_conn *conn, const ngtcp2_pkt_info *pi, switch (fr->type) { case NGTCP2_FRAME_ACK: case NGTCP2_FRAME_ACK_ECN: + if (num_ack_processed >= NGTCP2_MAX_ACK_PER_PKT) { + break; + } if (!conn->server) { conn->flags |= NGTCP2_CONN_FLAG_SERVER_ADDR_VERIFIED; } @@ -8699,6 +8740,7 @@ conn_recv_delayed_handshake_pkt(ngtcp2_conn *conn, const ngtcp2_pkt_info *pi, if (rv != 0) { return rv; } + ++num_ack_processed; break; case NGTCP2_FRAME_PADDING: break; @@ -8762,7 +8804,7 @@ conn_allow_path_change_under_disable_active_migration(ngtcp2_conn *conn, (NAT rebinding). 
*/ if (ngtcp2_addr_eq(&conn->dcid.current.ps.path.local, &path->local)) { remote_addr_cmp = - ngtcp2_addr_compare(&conn->dcid.current.ps.path.remote, &path->remote); + ngtcp2_addr_compare(&conn->dcid.current.ps.path.remote, &path->remote); return (remote_addr_cmp | NGTCP2_ADDR_COMPARE_FLAG_PORT) == NGTCP2_ADDR_COMPARE_FLAG_PORT; @@ -8860,6 +8902,7 @@ static ngtcp2_ssize conn_recv_pkt(ngtcp2_conn *conn, const ngtcp2_path *path, int recv_ncid = 0; int new_cid_used = 0; int path_challenge_recved = 0; + size_t num_ack_processed = 0; if (conn->server && conn->local.transport_params.disable_active_migration && !ngtcp2_path_eq(&conn->dcid.current.ps.path, path) && @@ -8992,8 +9035,8 @@ static ngtcp2_ssize conn_recv_pkt(ngtcp2_conn *conn, const ngtcp2_path *path, payload = pkt + hdpktlen; payloadlen = pktlen - hdpktlen; - hd.pkt_num = ngtcp2_pkt_adjust_pkt_num(pktns->rx.max_pkt_num, hd.pkt_num, - hd.pkt_numlen); + hd.pkt_num = + ngtcp2_pkt_adjust_pkt_num(pktns->rx.max_pkt_num, hd.pkt_num, hd.pkt_numlen); if (hd.pkt_num > NGTCP2_MAX_PKT_NUM) { ngtcp2_log_info(&conn->log, NGTCP2_LOG_EVENT_PKT, "pkn=%" PRId64 " is greater than maximum pkn", hd.pkt_num); @@ -9057,7 +9100,7 @@ static ngtcp2_ssize conn_recv_pkt(ngtcp2_conn *conn, const ngtcp2_path *path, if (hd.type == NGTCP2_PKT_1RTT && ++conn->crypto.decryption_failure_count >= - pktns->crypto.ctx.max_decryption_failure) { + pktns->crypto.ctx.max_decryption_failure) { return NGTCP2_ERR_AEAD_LIMIT_REACHED; } @@ -9163,7 +9206,14 @@ static ngtcp2_ssize conn_recv_pkt(ngtcp2_conn *conn, const ngtcp2_path *path, } assert(conn->remote.transport_params); assign_recved_ack_delay_unscaled( - &fr->ack, conn->remote.transport_params->ack_delay_exponent); + &fr->ack, conn->remote.transport_params->ack_delay_exponent); + + rv = + ngtcp2_pkt_validate_ack(&fr->ack, conn->local.settings.initial_pkt_num); + if (rv != 0) { + return rv; + } + break; } @@ -9210,6 +9260,9 @@ static ngtcp2_ssize conn_recv_pkt(ngtcp2_conn *conn, const ngtcp2_path *path, switch (fr->type) { case NGTCP2_FRAME_ACK: case NGTCP2_FRAME_ACK_ECN: + if (num_ack_processed >= NGTCP2_MAX_ACK_PER_PKT) { + break; + } if (!conn->server) { conn->flags |= NGTCP2_CONN_FLAG_SERVER_ADDR_VERIFIED; } @@ -9218,6 +9271,7 @@ static ngtcp2_ssize conn_recv_pkt(ngtcp2_conn *conn, const ngtcp2_path *path, return rv; } non_probing_pkt = 1; + ++num_ack_processed; break; case NGTCP2_FRAME_STREAM: rv = conn_recv_stream(conn, &fr->stream); @@ -9429,8 +9483,8 @@ static ngtcp2_ssize conn_recv_pkt(ngtcp2_conn *conn, const ngtcp2_path *path, ngtcp2_acktr_immediate_ack(&pktns->acktr); } - rv = ngtcp2_conn_sched_ack(conn, &pktns->acktr, hd.pkt_num, require_ack, - pkt_ts); + rv = + ngtcp2_conn_sched_ack(conn, &pktns->acktr, hd.pkt_num, require_ack, pkt_ts); if (rv != 0) { return rv; } @@ -9602,8 +9656,8 @@ static int conn_handshake_completed(ngtcp2_conn *conn) { } } if (conn->local.uni.max_streams > 0) { - rv = conn_call_extend_max_local_streams_uni(conn, - conn->local.uni.max_streams); + rv = + conn_call_extend_max_local_streams_uni(conn, conn->local.uni.max_streams); if (rv != 0) { return rv; } @@ -9822,7 +9876,7 @@ static ngtcp2_ssize conn_read_handshake(ngtcp2_conn *conn, } if (conn->hs_pktns->rx.max_pkt_num != -1) { - conn_discard_initial_state(conn, ts); + ngtcp2_conn_discard_initial_state(conn, ts); } if (!conn_is_tls_handshake_completed(conn)) { @@ -9888,7 +9942,7 @@ static ngtcp2_ssize conn_read_handshake(ngtcp2_conn *conn, return rv; } - conn_discard_handshake_state(conn, ts); + ngtcp2_conn_discard_handshake_state(conn,
ts); rv = conn_enqueue_handshake_done(conn); if (rv != 0) { @@ -9936,7 +9990,7 @@ int ngtcp2_conn_read_pkt_versioned(ngtcp2_conn *conn, const ngtcp2_path *path, pktlen); if (pktlen == 0) { - return NGTCP2_ERR_INVALID_ARGUMENT; + return 0; } /* client does not expect a packet from unknown path. */ @@ -10035,13 +10089,12 @@ static int conn_check_pkt_num_exhausted(ngtcp2_conn *conn) { * conn_retransmit_retry_early retransmits 0RTT packet after Retry is * received from server. */ -static ngtcp2_ssize conn_retransmit_retry_early(ngtcp2_conn *conn, - ngtcp2_pkt_info *pi, - uint8_t *dest, size_t destlen, - uint8_t flags, - ngtcp2_tstamp ts) { - return conn_write_pkt(conn, pi, dest, destlen, NULL, NGTCP2_PKT_0RTT, flags, - ts); +static ngtcp2_ssize +conn_retransmit_retry_early(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, + uint8_t *dest, size_t destlen, size_t dgram_offset, + uint8_t flags, ngtcp2_tstamp ts) { + return conn_write_pkt(conn, pi, dest, destlen, dgram_offset, NULL, + NGTCP2_PKT_0RTT, flags, ts); } /* @@ -10067,21 +10120,21 @@ static int conn_validate_early_transport_params_limits(ngtcp2_conn *conn) { assert(params); if (conn->early.transport_params.active_connection_id_limit > - params->active_connection_id_limit || + params->active_connection_id_limit || conn->early.transport_params.initial_max_data > - params->initial_max_data || + params->initial_max_data || conn->early.transport_params.initial_max_stream_data_bidi_local > - params->initial_max_stream_data_bidi_local || + params->initial_max_stream_data_bidi_local || conn->early.transport_params.initial_max_stream_data_bidi_remote > - params->initial_max_stream_data_bidi_remote || + params->initial_max_stream_data_bidi_remote || conn->early.transport_params.initial_max_stream_data_uni > - params->initial_max_stream_data_uni || + params->initial_max_stream_data_uni || conn->early.transport_params.initial_max_streams_bidi > - params->initial_max_streams_bidi || + params->initial_max_streams_bidi || conn->early.transport_params.initial_max_streams_uni > - params->initial_max_streams_uni || + params->initial_max_streams_uni || conn->early.transport_params.max_datagram_frame_size > - params->max_datagram_frame_size) { + params->max_datagram_frame_size) { return NGTCP2_ERR_PROTO; } @@ -10131,14 +10184,14 @@ static ngtcp2_ssize conn_write_handshake(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, if (!(conn->flags & NGTCP2_CONN_FLAG_RECV_RETRY)) { nwrite = - conn_write_client_initial(conn, pi, dest, destlen, write_datalen, ts); + conn_write_client_initial(conn, pi, dest, destlen, write_datalen, ts); if (nwrite <= 0) { return nwrite; } } else { - nwrite = conn_write_handshake_pkt( - conn, pi, dest, destlen, NGTCP2_PKT_INITIAL, - NGTCP2_WRITE_PKT_FLAG_NONE, write_datalen, ts); + nwrite = + conn_write_handshake_pkt(conn, pi, dest, destlen, 0, NGTCP2_PKT_INITIAL, + NGTCP2_WRITE_PKT_FLAG_NONE, write_datalen, ts); if (nwrite < 0) { return nwrite; } @@ -10146,10 +10199,10 @@ static ngtcp2_ssize conn_write_handshake(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, if (pending_early_datalen) { early_spktlen = conn_retransmit_retry_early( - conn, pi, dest + nwrite, destlen - (size_t)nwrite, - nwrite ? NGTCP2_WRITE_PKT_FLAG_REQUIRE_PADDING - : NGTCP2_WRITE_PKT_FLAG_NONE, - ts); + conn, pi, dest + nwrite, destlen - (size_t)nwrite, (size_t)nwrite, + nwrite ?
NGTCP2_WRITE_PKT_FLAG_REQUIRE_PADDING + : NGTCP2_WRITE_PKT_FLAG_NONE, + ts); if (early_spktlen < 0) { assert(ngtcp2_err_is_fatal((int)early_spktlen)); @@ -10163,6 +10216,8 @@ static ngtcp2_ssize conn_write_handshake(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, return res; case NGTCP2_CS_CLIENT_WAIT_HANDSHAKE: + pending_early_datalen = 0; + if (!conn_handshake_probe_left(conn) && conn_cwnd_is_zero(conn)) { destlen = 0; } else { @@ -10174,7 +10229,7 @@ static ngtcp2_ssize conn_write_handshake(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, } nwrite = - conn_write_handshake_pkts(conn, pi, dest, destlen, write_datalen, ts); + conn_write_handshake_pkts(conn, pi, dest, destlen, write_datalen, ts); if (nwrite < 0) { return nwrite; } @@ -10185,9 +10240,10 @@ static ngtcp2_ssize conn_write_handshake(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, } if (!conn_is_tls_handshake_completed(conn)) { - if (!(conn->flags & NGTCP2_CONN_FLAG_EARLY_DATA_REJECTED)) { - nwrite = conn_retransmit_retry_early(conn, pi, dest, destlen, - NGTCP2_WRITE_PKT_FLAG_NONE, ts); + if (pending_early_datalen && + !(conn->flags & NGTCP2_CONN_FLAG_EARLY_DATA_REJECTED)) { + nwrite = conn_retransmit_retry_early( + conn, pi, dest, destlen, (size_t)res, NGTCP2_WRITE_PKT_FLAG_NONE, ts); if (nwrite < 0) { return nwrite; } @@ -10250,8 +10306,8 @@ static ngtcp2_ssize conn_write_handshake(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, assert(conn->dcid.current.seq == 0); assert(!(conn->dcid.current.flags & NGTCP2_DCID_FLAG_TOKEN_PRESENT)); ngtcp2_dcid_set_token( - &conn->dcid.current, - conn->remote.transport_params->stateless_reset_token); + &conn->dcid.current, + conn->remote.transport_params->stateless_reset_token); } rv = conn_call_activate_dcid(conn, &conn->dcid.current); @@ -10271,7 +10327,7 @@ static ngtcp2_ssize conn_write_handshake(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, return res; case NGTCP2_CS_SERVER_INITIAL: nwrite = - conn_write_handshake_pkts(conn, pi, dest, destlen, write_datalen, ts); + conn_write_handshake_pkts(conn, pi, dest, destlen, write_datalen, ts); if (nwrite < 0) { return nwrite; } @@ -10284,7 +10340,7 @@ static ngtcp2_ssize conn_write_handshake(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, case NGTCP2_CS_SERVER_WAIT_HANDSHAKE: if (conn_handshake_probe_left(conn) || !conn_cwnd_is_zero(conn)) { nwrite = - conn_write_handshake_pkts(conn, pi, dest, destlen, write_datalen, ts); + conn_write_handshake_pkts(conn, pi, dest, destlen, write_datalen, ts); if (nwrite < 0) { return nwrite; } @@ -10363,8 +10419,8 @@ static ngtcp2_ssize conn_client_write_handshake(ngtcp2_conn *conn, datalen = ngtcp2_vec_len(vmsg->stream.data, vmsg->stream.datacnt); send_stream = conn_retry_early_payloadlen(conn) == 0; if (send_stream) { - write_datalen = ngtcp2_min(datalen + NGTCP2_STREAM_OVERHEAD, - NGTCP2_MIN_COALESCED_PAYLOADLEN); + write_datalen = ngtcp2_min_uint64(datalen + NGTCP2_STREAM_OVERHEAD, + NGTCP2_MIN_COALESCED_PAYLOADLEN); if (vmsg->stream.flags & NGTCP2_WRITE_STREAM_FLAG_MORE) { wflags |= NGTCP2_WRITE_PKT_FLAG_MORE; @@ -10428,8 +10484,8 @@ static ngtcp2_ssize conn_client_write_handshake(ngtcp2_conn *conn, return spktlen; } - early_spktlen = conn_write_pkt(conn, pi, dest, destlen, vmsg, NGTCP2_PKT_0RTT, - wflags, ts); + early_spktlen = conn_write_pkt(conn, pi, dest, destlen, (size_t)spktlen, vmsg, + NGTCP2_PKT_0RTT, wflags, ts); if (early_spktlen < 0) { switch (early_spktlen) { case NGTCP2_ERR_STREAM_DATA_BLOCKED: @@ -10520,10 +10576,10 @@ int ngtcp2_accept(ngtcp2_pkt_hd *dest, const uint8_t *pkt, size_t pktlen) { } int ngtcp2_conn_install_initial_key( -
ngtcp2_conn *conn, const ngtcp2_crypto_aead_ctx *rx_aead_ctx, - const uint8_t *rx_iv, const ngtcp2_crypto_cipher_ctx *rx_hp_ctx, - const ngtcp2_crypto_aead_ctx *tx_aead_ctx, const uint8_t *tx_iv, - const ngtcp2_crypto_cipher_ctx *tx_hp_ctx, size_t ivlen) { + ngtcp2_conn *conn, const ngtcp2_crypto_aead_ctx *rx_aead_ctx, + const uint8_t *rx_iv, const ngtcp2_crypto_cipher_ctx *rx_hp_ctx, + const ngtcp2_crypto_aead_ctx *tx_aead_ctx, const uint8_t *tx_iv, + const ngtcp2_crypto_cipher_ctx *tx_hp_ctx, size_t ivlen) { ngtcp2_pktns *pktns = conn->in_pktns; int rv; @@ -10571,11 +10627,11 @@ int ngtcp2_conn_install_initial_key( } int ngtcp2_conn_install_vneg_initial_key( - ngtcp2_conn *conn, uint32_t version, - const ngtcp2_crypto_aead_ctx *rx_aead_ctx, const uint8_t *rx_iv, - const ngtcp2_crypto_cipher_ctx *rx_hp_ctx, - const ngtcp2_crypto_aead_ctx *tx_aead_ctx, const uint8_t *tx_iv, - const ngtcp2_crypto_cipher_ctx *tx_hp_ctx, size_t ivlen) { + ngtcp2_conn *conn, uint32_t version, + const ngtcp2_crypto_aead_ctx *rx_aead_ctx, const uint8_t *rx_iv, + const ngtcp2_crypto_cipher_ctx *rx_hp_ctx, + const ngtcp2_crypto_aead_ctx *tx_aead_ctx, const uint8_t *tx_iv, + const ngtcp2_crypto_cipher_ctx *tx_hp_ctx, size_t ivlen) { int rv; assert(ivlen >= 8); @@ -10622,8 +10678,8 @@ int ngtcp2_conn_install_vneg_initial_key( } int ngtcp2_conn_install_rx_handshake_key( - ngtcp2_conn *conn, const ngtcp2_crypto_aead_ctx *aead_ctx, - const uint8_t *iv, size_t ivlen, const ngtcp2_crypto_cipher_ctx *hp_ctx) { + ngtcp2_conn *conn, const ngtcp2_crypto_aead_ctx *aead_ctx, const uint8_t *iv, + size_t ivlen, const ngtcp2_crypto_cipher_ctx *hp_ctx) { ngtcp2_pktns *pktns = conn->hs_pktns; int rv; @@ -10654,8 +10710,8 @@ int ngtcp2_conn_install_rx_handshake_key( } int ngtcp2_conn_install_tx_handshake_key( - ngtcp2_conn *conn, const ngtcp2_crypto_aead_ctx *aead_ctx, - const uint8_t *iv, size_t ivlen, const ngtcp2_crypto_cipher_ctx *hp_ctx) { + ngtcp2_conn *conn, const ngtcp2_crypto_aead_ctx *aead_ctx, const uint8_t *iv, + size_t ivlen, const ngtcp2_crypto_cipher_ctx *hp_ctx) { ngtcp2_pktns *pktns = conn->hs_pktns; int rv; @@ -10905,7 +10961,7 @@ ngtcp2_tstamp ngtcp2_conn_loss_detection_expiry(ngtcp2_conn *conn) { } ngtcp2_tstamp ngtcp2_conn_internal_expiry(ngtcp2_conn *conn) { - ngtcp2_tstamp res = UINT64_MAX, t; + ngtcp2_tstamp res = UINT64_MAX; ngtcp2_duration pto = conn_compute_pto(conn, &conn->pktns); ngtcp2_scid *scid; ngtcp2_dcid *dcid; @@ -10916,21 +10972,19 @@ ngtcp2_tstamp ngtcp2_conn_internal_expiry(ngtcp2_conn *conn) { } if (conn->pmtud) { - res = ngtcp2_min(res, conn->pmtud->expiry); + res = ngtcp2_min_uint64(res, conn->pmtud->expiry); } if (!ngtcp2_pq_empty(&conn->scid.used)) { scid = ngtcp2_struct_of(ngtcp2_pq_top(&conn->scid.used), ngtcp2_scid, pe); if (scid->retired_ts != UINT64_MAX) { - t = scid->retired_ts + pto; - res = ngtcp2_min(res, t); + res = ngtcp2_min_uint64(res, scid->retired_ts + pto); } } if (ngtcp2_ringbuf_len(&conn->dcid.retired.rb)) { dcid = ngtcp2_ringbuf_get(&conn->dcid.retired.rb, 0); - t = dcid->retired_ts + pto; - res = ngtcp2_min(res, t); + res = ngtcp2_min_uint64(res, dcid->retired_ts + pto); } if (conn->dcid.current.cid.datalen) { @@ -10941,15 +10995,13 @@ ngtcp2_tstamp ngtcp2_conn_internal_expiry(ngtcp2_conn *conn) { assert(dcid->cid.datalen); assert(dcid->bound_ts != UINT64_MAX); - t = dcid->bound_ts + 3 * pto; - res = ngtcp2_min(res, t); + res = ngtcp2_min_uint64(res, dcid->bound_ts + 3 * pto); } } if (conn->server && conn->early.ckm && conn->early.discard_started_ts != UINT64_MAX) { - t 
= conn->early.discard_started_ts + 3 * pto; - res = ngtcp2_min(res, t); + res = ngtcp2_min_uint64(res, conn->early.discard_started_ts + 3 * pto); } return res; @@ -10969,7 +11021,7 @@ static ngtcp2_tstamp conn_handshake_expiry(ngtcp2_conn *conn) { if (conn_is_tls_handshake_completed(conn) || conn->local.settings.handshake_timeout == UINT64_MAX || conn->local.settings.initial_ts >= - UINT64_MAX - conn->local.settings.handshake_timeout) { + UINT64_MAX - conn->local.settings.handshake_timeout) { return UINT64_MAX; } @@ -10978,20 +11030,14 @@ static ngtcp2_tstamp conn_handshake_expiry(ngtcp2_conn *conn) { } ngtcp2_tstamp ngtcp2_conn_get_expiry(ngtcp2_conn *conn) { - ngtcp2_tstamp t1 = ngtcp2_conn_loss_detection_expiry(conn); - ngtcp2_tstamp t2 = ngtcp2_conn_ack_delay_expiry(conn); - ngtcp2_tstamp t3 = ngtcp2_conn_internal_expiry(conn); - ngtcp2_tstamp t4 = ngtcp2_conn_lost_pkt_expiry(conn); - ngtcp2_tstamp t5 = conn_keep_alive_expiry(conn); - ngtcp2_tstamp t6 = conn_handshake_expiry(conn); - ngtcp2_tstamp t7 = ngtcp2_conn_get_idle_expiry(conn); - ngtcp2_tstamp res = ngtcp2_min(t1, t2); - res = ngtcp2_min(res, t3); - res = ngtcp2_min(res, t4); - res = ngtcp2_min(res, t5); - res = ngtcp2_min(res, t6); - res = ngtcp2_min(res, t7); - return ngtcp2_min(res, conn->tx.pacing.next_ts); + ngtcp2_tstamp res = ngtcp2_min_uint64(ngtcp2_conn_loss_detection_expiry(conn), + ngtcp2_conn_ack_delay_expiry(conn)); + res = ngtcp2_min_uint64(res, ngtcp2_conn_internal_expiry(conn)); + res = ngtcp2_min_uint64(res, ngtcp2_conn_lost_pkt_expiry(conn)); + res = ngtcp2_min_uint64(res, conn_keep_alive_expiry(conn)); + res = ngtcp2_min_uint64(res, conn_handshake_expiry(conn)); + res = ngtcp2_min_uint64(res, ngtcp2_conn_get_idle_expiry(conn)); + return ngtcp2_min_uint64(res, conn->tx.pacing.next_ts); } int ngtcp2_conn_handle_expiry(ngtcp2_conn *conn, ngtcp2_tstamp ts) { @@ -11089,7 +11135,7 @@ ngtcp2_tstamp ngtcp2_conn_lost_pkt_expiry(ngtcp2_conn *conn) { ts = ngtcp2_rtb_lost_pkt_ts(&conn->in_pktns->rtb); if (ts != UINT64_MAX) { ts += conn_compute_pto(conn, conn->in_pktns); - res = ngtcp2_min(res, ts); + res = ngtcp2_min_uint64(res, ts); } } @@ -11097,14 +11143,14 @@ ngtcp2_tstamp ngtcp2_conn_lost_pkt_expiry(ngtcp2_conn *conn) { ts = ngtcp2_rtb_lost_pkt_ts(&conn->hs_pktns->rtb); if (ts != UINT64_MAX) { ts += conn_compute_pto(conn, conn->hs_pktns); - res = ngtcp2_min(res, ts); + res = ngtcp2_min_uint64(res, ts); } } ts = ngtcp2_rtb_lost_pkt_ts(&conn->pktns.rtb); if (ts != UINT64_MAX) { ts += conn_compute_pto(conn, &conn->pktns); - res = ngtcp2_min(res, ts); + res = ngtcp2_min_uint64(res, ts); } return res; @@ -11153,7 +11199,7 @@ static uint32_t select_preferred_version(const uint32_t *preferred_versions, } for (j = 0, p = available_versions; j < available_versionslen; j += sizeof(uint32_t)) { - p = ngtcp2_get_uint32(&v, p); + p = ngtcp2_get_uint32be(&v, p); if (preferred_versions[i] == v) { return v; @@ -11266,13 +11312,13 @@ ngtcp2_conn_server_negotiate_version(ngtcp2_conn *conn, assert(conn->client_chosen_version == version_info->chosen_version); return select_preferred_version( - conn->vneg.preferred_versions, conn->vneg.preferred_versionslen, - version_info->chosen_version, version_info->available_versions, - version_info->available_versionslen, version_info->chosen_version); + conn->vneg.preferred_versions, conn->vneg.preferred_versionslen, + version_info->chosen_version, version_info->available_versions, + version_info->available_versionslen, version_info->chosen_version); } int 
ngtcp2_conn_set_remote_transport_params( - ngtcp2_conn *conn, const ngtcp2_transport_params *params) { + ngtcp2_conn *conn, const ngtcp2_transport_params *params) { int rv; /* We expect this function is called once per QUIC connection, but @@ -11315,9 +11361,9 @@ int ngtcp2_conn_set_remote_transport_params( if (params->version_info_present) { if (!vneg_available_versions_includes( - params->version_info.available_versions, - params->version_info.available_versionslen, - params->version_info.chosen_version)) { + params->version_info.available_versions, + params->version_info.available_versionslen, + params->version_info.chosen_version)) { return NGTCP2_ERR_TRANSPORT_PARAM; } @@ -11326,7 +11372,7 @@ int ngtcp2_conn_set_remote_transport_params( } conn->negotiated_version = - ngtcp2_conn_server_negotiate_version(conn, ¶ms->version_info); + ngtcp2_conn_server_negotiate_version(conn, ¶ms->version_info); if (conn->negotiated_version != conn->client_chosen_version) { rv = conn_call_version_negotiation(conn, conn->negotiated_version, &conn->rcid); @@ -11339,7 +11385,7 @@ int ngtcp2_conn_set_remote_transport_params( } conn->local.transport_params.version_info.chosen_version = - conn->negotiated_version; + conn->negotiated_version; ngtcp2_log_info(&conn->log, NGTCP2_LOG_EVENT_CON, "the negotiated version is 0x%08x", @@ -11372,7 +11418,7 @@ int ngtcp2_conn_set_remote_transport_params( assert(!conn->remote.pending_transport_params); rv = ngtcp2_transport_params_copy_new( - &conn->remote.pending_transport_params, params, conn->mem); + &conn->remote.pending_transport_params, params, conn->mem); if (rv != 0) { return rv; } @@ -11424,9 +11470,9 @@ ngtcp2_ssize ngtcp2_conn_encode_0rtt_transport_params(ngtcp2_conn *conn, params.initial_max_streams_bidi = src->initial_max_streams_bidi; params.initial_max_streams_uni = src->initial_max_streams_uni; params.initial_max_stream_data_bidi_local = - src->initial_max_stream_data_bidi_local; + src->initial_max_stream_data_bidi_local; params.initial_max_stream_data_bidi_remote = - src->initial_max_stream_data_bidi_remote; + src->initial_max_stream_data_bidi_remote; params.initial_max_stream_data_uni = src->initial_max_stream_data_uni; params.initial_max_data = src->initial_max_data; params.active_connection_id_limit = src->active_connection_id_limit; @@ -11456,7 +11502,7 @@ int ngtcp2_conn_decode_and_set_0rtt_transport_params(ngtcp2_conn *conn, } int ngtcp2_conn_set_0rtt_remote_transport_params( - ngtcp2_conn *conn, const ngtcp2_transport_params *params) { + ngtcp2_conn *conn, const ngtcp2_transport_params *params) { ngtcp2_transport_params *p; assert(!conn->server); @@ -11475,41 +11521,41 @@ int ngtcp2_conn_set_0rtt_remote_transport_params( p->initial_max_streams_bidi = params->initial_max_streams_bidi; p->initial_max_streams_uni = params->initial_max_streams_uni; p->initial_max_stream_data_bidi_local = - params->initial_max_stream_data_bidi_local; + params->initial_max_stream_data_bidi_local; p->initial_max_stream_data_bidi_remote = - params->initial_max_stream_data_bidi_remote; + params->initial_max_stream_data_bidi_remote; p->initial_max_stream_data_uni = params->initial_max_stream_data_uni; p->initial_max_data = params->initial_max_data; /* we might hit garbage, then set the sane default. 
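Concretely, the guard below is a clamp from below: a remembered value smaller than the protocol minimum is replaced by the minimum, and anything larger is kept. A minimal sketch of the idiom (illustration only; the helper name is hypothetical):

    // Never let a remembered 0-RTT parameter fall below the sane floor.
    static uint64_t clamp_floor(uint64_t remembered, uint64_t floor_val) {
      return remembered < floor_val ? floor_val : remembered;
    }
    // clamp_floor(params->active_connection_id_limit,
    //             NGTCP2_DEFAULT_ACTIVE_CONNECTION_ID_LIMIT) behaves like
    // the ngtcp2_max_uint64() call below.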
*/ p->active_connection_id_limit = - ngtcp2_max(NGTCP2_DEFAULT_ACTIVE_CONNECTION_ID_LIMIT, - params->active_connection_id_limit); + ngtcp2_max_uint64(NGTCP2_DEFAULT_ACTIVE_CONNECTION_ID_LIMIT, + params->active_connection_id_limit); p->max_datagram_frame_size = params->max_datagram_frame_size; /* we might hit garbage, then set the sane default. */ if (params->max_udp_payload_size) { - p->max_udp_payload_size = - ngtcp2_max(NGTCP2_MAX_UDP_PAYLOAD_SIZE, params->max_udp_payload_size); + p->max_udp_payload_size = ngtcp2_max_uint64(NGTCP2_MAX_UDP_PAYLOAD_SIZE, + params->max_udp_payload_size); } /* These parameters are treated specially. If server accepts early data, it must not set values for these parameters that are smaller than these remembered values. */ conn->early.transport_params.initial_max_streams_bidi = - params->initial_max_streams_bidi; + params->initial_max_streams_bidi; conn->early.transport_params.initial_max_streams_uni = - params->initial_max_streams_uni; + params->initial_max_streams_uni; conn->early.transport_params.initial_max_stream_data_bidi_local = - params->initial_max_stream_data_bidi_local; + params->initial_max_stream_data_bidi_local; conn->early.transport_params.initial_max_stream_data_bidi_remote = - params->initial_max_stream_data_bidi_remote; + params->initial_max_stream_data_bidi_remote; conn->early.transport_params.initial_max_stream_data_uni = - params->initial_max_stream_data_uni; + params->initial_max_stream_data_uni; conn->early.transport_params.initial_max_data = params->initial_max_data; conn->early.transport_params.active_connection_id_limit = - params->active_connection_id_limit; + params->active_connection_id_limit; conn->early.transport_params.max_datagram_frame_size = - params->max_datagram_frame_size; + params->max_datagram_frame_size; conn_sync_stream_id_limit(conn); @@ -11522,12 +11568,12 @@ int ngtcp2_conn_set_0rtt_remote_transport_params( } int ngtcp2_conn_set_local_transport_params_versioned( - ngtcp2_conn *conn, int transport_params_version, - const ngtcp2_transport_params *params) { + ngtcp2_conn *conn, int transport_params_version, + const ngtcp2_transport_params *params) { ngtcp2_transport_params paramsbuf; params = ngtcp2_transport_params_convert_to_latest( - ¶msbuf, transport_params_version, params); + ¶msbuf, transport_params_version, params); assert(conn->server); assert(params->active_connection_id_limit >= @@ -11576,7 +11622,7 @@ int ngtcp2_conn_commit_local_transport_params(ngtcp2_conn *conn) { } conn->rx.window = conn->rx.unsent_max_offset = conn->rx.max_offset = - params->initial_max_data; + params->initial_max_data; conn->remote.bidi.unsent_max_streams = params->initial_max_streams_bidi; conn->remote.bidi.max_streams = params->initial_max_streams_bidi; conn->remote.uni.unsent_max_streams = params->initial_max_streams_uni; @@ -11662,10 +11708,10 @@ ngtcp2_strm *ngtcp2_conn_find_stream(ngtcp2_conn *conn, int64_t stream_id) { } ngtcp2_ssize ngtcp2_conn_write_stream_versioned( - ngtcp2_conn *conn, ngtcp2_path *path, int pkt_info_version, - ngtcp2_pkt_info *pi, uint8_t *dest, size_t destlen, ngtcp2_ssize *pdatalen, - uint32_t flags, int64_t stream_id, const uint8_t *data, size_t datalen, - ngtcp2_tstamp ts) { + ngtcp2_conn *conn, ngtcp2_path *path, int pkt_info_version, + ngtcp2_pkt_info *pi, uint8_t *dest, size_t destlen, ngtcp2_ssize *pdatalen, + uint32_t flags, int64_t stream_id, const uint8_t *data, size_t datalen, + ngtcp2_tstamp ts) { ngtcp2_vec datav; datav.len = datalen; @@ -11693,9 +11739,10 @@ static ngtcp2_ssize 
conn_write_vmsg_wrapper(ngtcp2_conn *conn, if (cstat->bytes_in_flight >= cstat->cwnd) { conn->rst.is_cwnd_limited = 1; - } - - if (nwrite == 0 && cstat->bytes_in_flight < cstat->cwnd) { + } else if ((cstat->cwnd >= cstat->ssthresh || + cstat->bytes_in_flight * 2 < cstat->cwnd) && + nwrite == 0 && conn_pacing_pkt_tx_allowed(conn, ts) && + (conn->flags & NGTCP2_CONN_FLAG_HANDSHAKE_COMPLETED)) { conn->rst.app_limited = conn->rst.delivered + cstat->bytes_in_flight; if (conn->rst.app_limited == 0) { @@ -11707,10 +11754,10 @@ static ngtcp2_ssize conn_write_vmsg_wrapper(ngtcp2_conn *conn, } ngtcp2_ssize ngtcp2_conn_writev_stream_versioned( - ngtcp2_conn *conn, ngtcp2_path *path, int pkt_info_version, - ngtcp2_pkt_info *pi, uint8_t *dest, size_t destlen, ngtcp2_ssize *pdatalen, - uint32_t flags, int64_t stream_id, const ngtcp2_vec *datav, size_t datavcnt, - ngtcp2_tstamp ts) { + ngtcp2_conn *conn, ngtcp2_path *path, int pkt_info_version, + ngtcp2_pkt_info *pi, uint8_t *dest, size_t destlen, ngtcp2_ssize *pdatalen, + uint32_t flags, int64_t stream_id, const ngtcp2_vec *datav, size_t datavcnt, + ngtcp2_tstamp ts) { ngtcp2_vmsg vmsg, *pvmsg; ngtcp2_strm *strm; int64_t datalen; @@ -11734,19 +11781,24 @@ ngtcp2_ssize ngtcp2_conn_writev_stream_versioned( return NGTCP2_ERR_INVALID_ARGUMENT; } - if ((uint64_t)datalen > NGTCP2_MAX_VARINT - strm->tx.offset || - (uint64_t)datalen > NGTCP2_MAX_VARINT - conn->tx.offset) { - return NGTCP2_ERR_INVALID_ARGUMENT; - } + if (datalen == 0 && !(flags & NGTCP2_WRITE_STREAM_FLAG_FIN) && + (strm->flags & NGTCP2_STRM_FLAG_ANY_SENT)) { + pvmsg = NULL; + } else { + if ((uint64_t)datalen > NGTCP2_MAX_VARINT - strm->tx.offset || + (uint64_t)datalen > NGTCP2_MAX_VARINT - conn->tx.offset) { + return NGTCP2_ERR_INVALID_ARGUMENT; + } - vmsg.type = NGTCP2_VMSG_TYPE_STREAM; - vmsg.stream.strm = strm; - vmsg.stream.flags = flags; - vmsg.stream.data = datav; - vmsg.stream.datacnt = datavcnt; - vmsg.stream.pdatalen = pdatalen; + vmsg.type = NGTCP2_VMSG_TYPE_STREAM; + vmsg.stream.strm = strm; + vmsg.stream.flags = flags; + vmsg.stream.data = datav; + vmsg.stream.datacnt = datavcnt; + vmsg.stream.pdatalen = pdatalen; - pvmsg = &vmsg; + pvmsg = &vmsg; + } } else { pvmsg = NULL; } @@ -11756,10 +11808,10 @@ ngtcp2_ssize ngtcp2_conn_writev_stream_versioned( } ngtcp2_ssize ngtcp2_conn_write_datagram_versioned( - ngtcp2_conn *conn, ngtcp2_path *path, int pkt_info_version, - ngtcp2_pkt_info *pi, uint8_t *dest, size_t destlen, int *paccepted, - uint32_t flags, uint64_t dgram_id, const uint8_t *data, size_t datalen, - ngtcp2_tstamp ts) { + ngtcp2_conn *conn, ngtcp2_path *path, int pkt_info_version, + ngtcp2_pkt_info *pi, uint8_t *dest, size_t destlen, int *paccepted, + uint32_t flags, uint64_t dgram_id, const uint8_t *data, size_t datalen, + ngtcp2_tstamp ts) { ngtcp2_vec datav; datav.len = datalen; @@ -11771,10 +11823,10 @@ ngtcp2_ssize ngtcp2_conn_write_datagram_versioned( } ngtcp2_ssize ngtcp2_conn_writev_datagram_versioned( - ngtcp2_conn *conn, ngtcp2_path *path, int pkt_info_version, - ngtcp2_pkt_info *pi, uint8_t *dest, size_t destlen, int *paccepted, - uint32_t flags, uint64_t dgram_id, const ngtcp2_vec *datav, size_t datavcnt, - ngtcp2_tstamp ts) { + ngtcp2_conn *conn, ngtcp2_path *path, int pkt_info_version, + ngtcp2_pkt_info *pi, uint8_t *dest, size_t destlen, int *paccepted, + uint32_t flags, uint64_t dgram_id, const ngtcp2_vec *datav, size_t datavcnt, + ngtcp2_tstamp ts) { ngtcp2_vmsg vmsg; int64_t datalen; @@ -11837,7 +11889,7 @@ ngtcp2_ssize ngtcp2_conn_write_vmsg(ngtcp2_conn 
*conn, ngtcp2_path *path, } origlen = destlen = - conn_shape_udp_payload(conn, &conn->dcid.current, destlen); + conn_shape_udp_payload(conn, &conn->dcid.current, destlen); if (!ppe_pending && pi) { pi->ecn = NGTCP2_ECN_NOT_ECT; @@ -11867,10 +11919,9 @@ ngtcp2_ssize ngtcp2_conn_write_vmsg(ngtcp2_conn *conn, ngtcp2_path *path, assert(dest[0] & NGTCP2_HEADER_FORM_BIT); assert(conn->negotiated_version); - if (ngtcp2_pkt_get_type_long(conn->negotiated_version, dest[0]) == - NGTCP2_PKT_INITIAL) { - /* We have added padding already, but in that case, there is no - space left to write 1RTT packet. */ + if (nwrite < NGTCP2_MAX_UDP_PAYLOAD_SIZE && + ngtcp2_pkt_get_type_long(conn->negotiated_version, dest[0]) == + NGTCP2_PKT_INITIAL) { wflags |= NGTCP2_WRITE_PKT_FLAG_REQUIRE_PADDING; } @@ -11890,7 +11941,7 @@ ngtcp2_ssize ngtcp2_conn_write_vmsg(ngtcp2_conn *conn, ngtcp2_path *path, return 0; } - origlen = (size_t)ngtcp2_min((uint64_t)origlen, server_tx_left); + origlen = (size_t)ngtcp2_min_uint64((uint64_t)origlen, server_tx_left); } return conn_write_handshake_ack_pkts(conn, pi, dest, origlen, ts); @@ -11902,15 +11953,15 @@ ngtcp2_ssize ngtcp2_conn_write_vmsg(ngtcp2_conn *conn, ngtcp2_path *path, if (server_tx_left == 0) { if (cstat->loss_detection_timer != UINT64_MAX) { ngtcp2_log_info( - &conn->log, NGTCP2_LOG_EVENT_LDC, - "loss detection timer canceled due to amplification limit"); - cstat->loss_detection_timer = UINT64_MAX; + &conn->log, NGTCP2_LOG_EVENT_LDC, + "loss detection timer canceled due to amplification limit"); + ngtcp2_conn_cancel_loss_detection_timer(conn); } return 0; } - destlen = (size_t)ngtcp2_min((uint64_t)destlen, server_tx_left); + destlen = (size_t)ngtcp2_min_uint64((uint64_t)destlen, server_tx_left); } if (conn->in_pktns) { @@ -11931,14 +11982,12 @@ ngtcp2_ssize ngtcp2_conn_write_vmsg(ngtcp2_conn *conn, ngtcp2_path *path, dest += nwrite; destlen -= (size_t)nwrite; - if (conn->in_pktns && nwrite > 0) { + if (res < NGTCP2_MAX_UDP_PAYLOAD_SIZE && conn->in_pktns && nwrite > 0) { it = ngtcp2_rtb_head(&conn->in_pktns->rtb); if (!ngtcp2_ksl_it_end(&it)) { rtbent = ngtcp2_ksl_it_get(&it); if (rtbent->hd.pkt_num != prev_in_pkt_num && (rtbent->flags & NGTCP2_RTB_ENTRY_FLAG_ACK_ELICITING)) { - /* We have added padding already, but in that case, there - is no space left to write 1RTT packet. */ wflags |= NGTCP2_WRITE_PKT_FLAG_REQUIRE_PADDING; } } @@ -11959,7 +12008,7 @@ ngtcp2_ssize ngtcp2_conn_write_vmsg(ngtcp2_conn *conn, ngtcp2_path *path, return 0; } - origlen = (size_t)ngtcp2_min((uint64_t)origlen, server_tx_left); + origlen = (size_t)ngtcp2_min_uint64((uint64_t)origlen, server_tx_left); } return conn_write_ack_pkt(conn, pi, dest, origlen, NGTCP2_PKT_1RTT, ts); @@ -12004,12 +12053,12 @@ ngtcp2_ssize ngtcp2_conn_write_vmsg(ngtcp2_conn *conn, ngtcp2_path *path, } /* dest and destlen have already been adjusted in ppe in the first run. They are adjusted for probe packet later. 
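Note that this revision also threads a datagram offset, (size_t)res below, through conn_write_pkt: res counts the bytes of packets already coalesced into this UDP datagram, so any minimum-size padding is measured against the datagram as a whole rather than the current packet alone. A rough sketch of that arithmetic (illustrative helper, not library code):

    // Padding still needed so the whole datagram reaches min_dgram_len,
    // given bytes already written (dgram_offset) plus the current packet.
    static size_t padding_needed(size_t dgram_offset, size_t pktlen,
                                 size_t min_dgram_len) {
      size_t total = dgram_offset + pktlen;
      return total >= min_dgram_len ? 0 : min_dgram_len - total;
    }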
*/ - nwrite = conn_write_pkt(conn, pi, dest, destlen, vmsg, NGTCP2_PKT_1RTT, - wflags, ts); + nwrite = conn_write_pkt(conn, pi, dest, destlen, (size_t)res, vmsg, + NGTCP2_PKT_1RTT, wflags, ts); goto fin; } else { conn->pkt.require_padding = - (wflags & NGTCP2_WRITE_PKT_FLAG_REQUIRE_PADDING); + (wflags & NGTCP2_WRITE_PKT_FLAG_REQUIRE_PADDING); if (conn->state == NGTCP2_CS_POST_HANDSHAKE) { rv = conn_prepare_key_update(conn, ts); @@ -12023,14 +12072,14 @@ ngtcp2_ssize ngtcp2_conn_write_vmsg(ngtcp2_conn *conn, ngtcp2_path *path, } else { if (res == 0) { nwrite = - conn_write_path_response(conn, path, pi, dest, origdestlen, ts); + conn_write_path_response(conn, path, pi, dest, origdestlen, ts); if (nwrite) { goto fin; } if (conn->pv) { nwrite = - conn_write_path_challenge(conn, path, pi, dest, origdestlen, ts); + conn_write_path_challenge(conn, path, pi, dest, origdestlen, ts); if (nwrite) { goto fin; } @@ -12051,15 +12100,15 @@ ngtcp2_ssize ngtcp2_conn_write_vmsg(ngtcp2_conn *conn, ngtcp2_path *path, if (conn->server && !(conn->dcid.current.flags & NGTCP2_DCID_FLAG_PATH_VALIDATED)) { server_tx_left = conn_server_tx_left(conn, &conn->dcid.current); - origlen = (size_t)ngtcp2_min((uint64_t)origlen, server_tx_left); - destlen = (size_t)ngtcp2_min((uint64_t)destlen, server_tx_left); + origlen = (size_t)ngtcp2_min_uint64((uint64_t)origlen, server_tx_left); + destlen = (size_t)ngtcp2_min_uint64((uint64_t)destlen, server_tx_left); if (server_tx_left == 0 && conn->cstat.loss_detection_timer != UINT64_MAX) { ngtcp2_log_info( - &conn->log, NGTCP2_LOG_EVENT_LDC, - "loss detection timer canceled due to amplification limit"); - conn->cstat.loss_detection_timer = UINT64_MAX; + &conn->log, NGTCP2_LOG_EVENT_LDC, + "loss detection timer canceled due to amplification limit"); + ngtcp2_conn_cancel_loss_detection_timer(conn); } } } @@ -12100,14 +12149,14 @@ ngtcp2_ssize ngtcp2_conn_write_vmsg(ngtcp2_conn *conn, ngtcp2_path *path, "transmit probe pkt left=%zu", conn->pktns.rtb.probe_pkt_left); - nwrite = conn_write_pkt(conn, pi, dest, destlen, vmsg, NGTCP2_PKT_1RTT, - wflags, ts); + nwrite = conn_write_pkt(conn, pi, dest, destlen, (size_t)res, vmsg, + NGTCP2_PKT_1RTT, wflags, ts); goto fin; } - nwrite = conn_write_pkt(conn, pi, dest, destlen, vmsg, NGTCP2_PKT_1RTT, - wflags, ts); + nwrite = conn_write_pkt(conn, pi, dest, destlen, (size_t)res, vmsg, + NGTCP2_PKT_1RTT, wflags, ts); if (nwrite) { assert(nwrite != NGTCP2_ERR_NOBUF); goto fin; @@ -12162,9 +12211,8 @@ conn_write_connection_close(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, pkt_type != NGTCP2_PKT_INITIAL) { if (in_pktns && conn->server) { nwrite = ngtcp2_conn_write_single_frame_pkt( - conn, pi, dest, destlen, NGTCP2_PKT_INITIAL, - NGTCP2_WRITE_PKT_FLAG_NONE, &conn->dcid.current.cid, &fr, - NGTCP2_RTB_ENTRY_FLAG_NONE, NULL, ts); + conn, pi, dest, destlen, NGTCP2_PKT_INITIAL, NGTCP2_WRITE_PKT_FLAG_NONE, + &conn->dcid.current.cid, &fr, NGTCP2_RTB_ENTRY_FLAG_NONE, NULL, ts); if (nwrite < 0) { return nwrite; } @@ -12177,9 +12225,9 @@ conn_write_connection_close(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, if (pkt_type != NGTCP2_PKT_HANDSHAKE && hs_pktns && hs_pktns->crypto.tx.ckm) { nwrite = ngtcp2_conn_write_single_frame_pkt( - conn, pi, dest, destlen, NGTCP2_PKT_HANDSHAKE, - NGTCP2_WRITE_PKT_FLAG_NONE, &conn->dcid.current.cid, &fr, - NGTCP2_RTB_ENTRY_FLAG_NONE, NULL, ts); + conn, pi, dest, destlen, NGTCP2_PKT_HANDSHAKE, + NGTCP2_WRITE_PKT_FLAG_NONE, &conn->dcid.current.cid, &fr, + NGTCP2_RTB_ENTRY_FLAG_NONE, NULL, ts); if (nwrite < 0) { return nwrite; } @@ -12195,8 
+12243,8 @@ conn_write_connection_close(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, } nwrite = ngtcp2_conn_write_single_frame_pkt( - conn, pi, dest, destlen, pkt_type, flags, &conn->dcid.current.cid, &fr, - NGTCP2_RTB_ENTRY_FLAG_NONE, NULL, ts); + conn, pi, dest, destlen, pkt_type, flags, &conn->dcid.current.cid, &fr, + NGTCP2_RTB_ENTRY_FLAG_NONE, NULL, ts); if (nwrite < 0) { return nwrite; @@ -12212,9 +12260,9 @@ conn_write_connection_close(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, } ngtcp2_ssize ngtcp2_conn_write_connection_close_pkt( - ngtcp2_conn *conn, ngtcp2_path *path, ngtcp2_pkt_info *pi, uint8_t *dest, - size_t destlen, uint64_t error_code, const uint8_t *reason, - size_t reasonlen, ngtcp2_tstamp ts) { + ngtcp2_conn *conn, ngtcp2_path *path, ngtcp2_pkt_info *pi, uint8_t *dest, + size_t destlen, uint64_t error_code, const uint8_t *reason, size_t reasonlen, + ngtcp2_tstamp ts) { ngtcp2_pktns *in_pktns = conn->in_pktns; ngtcp2_pktns *hs_pktns = conn->hs_pktns; uint8_t pkt_type; @@ -12247,7 +12295,7 @@ ngtcp2_ssize ngtcp2_conn_write_connection_close_pkt( if (conn->server) { server_tx_left = conn_server_tx_left(conn, &conn->dcid.current); - destlen = (size_t)ngtcp2_min((uint64_t)destlen, server_tx_left); + destlen = (size_t)ngtcp2_min_uint64((uint64_t)destlen, server_tx_left); } if (conn->state == NGTCP2_CS_POST_HANDSHAKE || @@ -12275,9 +12323,9 @@ ngtcp2_ssize ngtcp2_conn_write_connection_close_pkt( } ngtcp2_ssize ngtcp2_conn_write_application_close_pkt( - ngtcp2_conn *conn, ngtcp2_path *path, ngtcp2_pkt_info *pi, uint8_t *dest, - size_t destlen, uint64_t app_error_code, const uint8_t *reason, - size_t reasonlen, ngtcp2_tstamp ts) { + ngtcp2_conn *conn, ngtcp2_path *path, ngtcp2_pkt_info *pi, uint8_t *dest, + size_t destlen, uint64_t app_error_code, const uint8_t *reason, + size_t reasonlen, ngtcp2_tstamp ts) { ngtcp2_ssize nwrite; ngtcp2_ssize res = 0; ngtcp2_frame fr; @@ -12309,15 +12357,14 @@ ngtcp2_ssize ngtcp2_conn_write_application_close_pkt( if (conn->server) { server_tx_left = conn_server_tx_left(conn, &conn->dcid.current); - destlen = (size_t)ngtcp2_min((uint64_t)destlen, server_tx_left); + destlen = (size_t)ngtcp2_min_uint64((uint64_t)destlen, server_tx_left); } if (!(conn->flags & NGTCP2_CONN_FLAG_HANDSHAKE_CONFIRMED)) { - nwrite = conn_write_connection_close(conn, pi, dest, destlen, - conn->hs_pktns->crypto.tx.ckm - ? NGTCP2_PKT_HANDSHAKE - : NGTCP2_PKT_INITIAL, - NGTCP2_APPLICATION_ERROR, NULL, 0, ts); + nwrite = conn_write_connection_close( + conn, pi, dest, destlen, + conn->hs_pktns->crypto.tx.ckm ? 
NGTCP2_PKT_HANDSHAKE : NGTCP2_PKT_INITIAL, + NGTCP2_APPLICATION_ERROR, NULL, 0, ts); if (nwrite < 0) { return nwrite; } @@ -12326,12 +12373,9 @@ ngtcp2_ssize ngtcp2_conn_write_application_close_pkt( destlen -= (size_t)nwrite; } - if (conn->state != NGTCP2_CS_POST_HANDSHAKE) { - assert(res); - - if (!conn->server || !conn->pktns.crypto.tx.ckm) { - return res; - } + if (conn->state != NGTCP2_CS_POST_HANDSHAKE && + (!conn->server || !conn->pktns.crypto.tx.ckm)) { + return res; } assert(conn->pktns.crypto.tx.ckm); @@ -12343,8 +12387,8 @@ ngtcp2_ssize ngtcp2_conn_write_application_close_pkt( fr.connection_close.reason = (uint8_t *)reason; nwrite = ngtcp2_conn_write_single_frame_pkt( - conn, pi, dest, destlen, NGTCP2_PKT_1RTT, NGTCP2_WRITE_PKT_FLAG_NONE, - &conn->dcid.current.cid, &fr, NGTCP2_RTB_ENTRY_FLAG_NONE, NULL, ts); + conn, pi, dest, destlen, NGTCP2_PKT_1RTT, NGTCP2_WRITE_PKT_FLAG_NONE, + &conn->dcid.current.cid, &fr, NGTCP2_RTB_ENTRY_FLAG_NONE, NULL, ts); if (nwrite < 0) { return nwrite; @@ -12392,12 +12436,22 @@ void ngtcp2_ccerr_set_liberr(ngtcp2_ccerr *ccerr, int liberr, ccerr_init(ccerr, NGTCP2_CCERR_TYPE_IDLE_CLOSE, NGTCP2_NO_ERROR, reason, reasonlen); + return; + case NGTCP2_ERR_DROP_CONN: + ccerr_init(ccerr, NGTCP2_CCERR_TYPE_DROP_CONN, NGTCP2_NO_ERROR, reason, + reasonlen); + + return; + case NGTCP2_ERR_RETRY: + ccerr_init(ccerr, NGTCP2_CCERR_TYPE_RETRY, NGTCP2_NO_ERROR, reason, + reasonlen); + return; }; ngtcp2_ccerr_set_transport_error( - ccerr, ngtcp2_err_infer_quic_transport_error_code(liberr), reason, - reasonlen); + ccerr, ngtcp2_err_infer_quic_transport_error_code(liberr), reason, + reasonlen); } void ngtcp2_ccerr_set_tls_alert(ngtcp2_ccerr *ccerr, uint8_t tls_alert, @@ -12415,9 +12469,9 @@ void ngtcp2_ccerr_set_application_error(ngtcp2_ccerr *ccerr, } ngtcp2_ssize ngtcp2_conn_write_connection_close_versioned( - ngtcp2_conn *conn, ngtcp2_path *path, int pkt_info_version, - ngtcp2_pkt_info *pi, uint8_t *dest, size_t destlen, - const ngtcp2_ccerr *ccerr, ngtcp2_tstamp ts) { + ngtcp2_conn *conn, ngtcp2_path *path, int pkt_info_version, + ngtcp2_pkt_info *pi, uint8_t *dest, size_t destlen, const ngtcp2_ccerr *ccerr, + ngtcp2_tstamp ts) { (void)pkt_info_version; conn_update_timestamp(conn, ts); @@ -12425,12 +12479,12 @@ ngtcp2_ssize ngtcp2_conn_write_connection_close_versioned( switch (ccerr->type) { case NGTCP2_CCERR_TYPE_TRANSPORT: return ngtcp2_conn_write_connection_close_pkt( - conn, path, pi, dest, destlen, ccerr->error_code, ccerr->reason, - ccerr->reasonlen, ts); + conn, path, pi, dest, destlen, ccerr->error_code, ccerr->reason, + ccerr->reasonlen, ts); case NGTCP2_CCERR_TYPE_APPLICATION: return ngtcp2_conn_write_application_close_pkt( - conn, path, pi, dest, destlen, ccerr->error_code, ccerr->reason, - ccerr->reasonlen, ts); + conn, path, pi, dest, destlen, ccerr->error_code, ccerr->reason, + ccerr->reasonlen, ts); default: return 0; } @@ -12471,7 +12525,7 @@ int ngtcp2_conn_close_stream(ngtcp2_conn *conn, ngtcp2_strm *strm) { int ngtcp2_conn_close_stream_if_shut_rdwr(ngtcp2_conn *conn, ngtcp2_strm *strm) { if ((strm->flags & NGTCP2_STRM_FLAG_SHUT_RDWR) == - NGTCP2_STRM_FLAG_SHUT_RDWR && + NGTCP2_STRM_FLAG_SHUT_RDWR && ((strm->flags & NGTCP2_STRM_FLAG_RESET_STREAM_RECVED) || ngtcp2_strm_rx_offset(strm) == strm->rx.last_offset) && (((strm->flags & NGTCP2_STRM_FLAG_RESET_STREAM) && @@ -12537,7 +12591,7 @@ static int conn_shutdown_stream_read(ngtcp2_conn *conn, ngtcp2_strm *strm, which are not passed to application. 
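That is, once the receive side is shut down, everything between the highest received offset and what was actually consumed will never reach the application, so its connection-level flow-control credit is returned in one step. A sketch of the arithmetic (variable names illustrative):

    // Credit to hand back when abandoning a stream's receive side.
    uint64_t credit = rx_last_offset - rx_consumed_offset;
    // ngtcp2_conn_extend_max_offset(conn, credit) then widens the
    // connection-level window so a larger MAX_DATA can be advertised.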
*/ if (!(strm->flags & NGTCP2_STRM_FLAG_RESET_STREAM_RECVED)) { ngtcp2_conn_extend_max_offset(conn, strm->rx.last_offset - - ngtcp2_strm_rx_offset(strm)); + ngtcp2_strm_rx_offset(strm)); } strm->flags |= NGTCP2_STRM_FLAG_STOP_SENDING; @@ -12720,20 +12774,20 @@ static void conn_discard_early_data_state(ngtcp2_conn *conn) { ngtcp2_rtb_remove_early_data(&conn->pktns.rtb, &conn->cstat); - ngtcp2_map_each_free(&conn->strms, delete_strms_pq_each, conn); + ngtcp2_map_each(&conn->strms, delete_strms_pq_each, conn); ngtcp2_map_clear(&conn->strms); conn->tx.offset = 0; conn->tx.last_blocked_offset = UINT64_MAX; conn->rx.unsent_max_offset = conn->rx.max_offset = - conn->local.transport_params.initial_max_data; + conn->local.transport_params.initial_max_data; conn->remote.bidi.unsent_max_streams = conn->remote.bidi.max_streams = - conn->local.transport_params.initial_max_streams_bidi; + conn->local.transport_params.initial_max_streams_bidi; conn->remote.uni.unsent_max_streams = conn->remote.uni.max_streams = - conn->local.transport_params.initial_max_streams_uni; + conn->local.transport_params.initial_max_streams_uni; if (conn->server) { conn->local.bidi.next_stream_id = 1; @@ -12778,6 +12832,8 @@ int ngtcp2_conn_update_rtt(ngtcp2_conn *conn, ngtcp2_duration rtt, ngtcp2_duration ack_delay, ngtcp2_tstamp ts) { ngtcp2_conn_stat *cstat = &conn->cstat; + assert(rtt > 0); + if (cstat->min_rtt == UINT64_MAX) { cstat->latest_rtt = rtt; cstat->min_rtt = rtt; @@ -12788,43 +12844,43 @@ int ngtcp2_conn_update_rtt(ngtcp2_conn *conn, ngtcp2_duration rtt, if (conn->flags & NGTCP2_CONN_FLAG_HANDSHAKE_CONFIRMED) { assert(conn->remote.transport_params); - ack_delay = - ngtcp2_min(ack_delay, conn->remote.transport_params->max_ack_delay); + ack_delay = ngtcp2_min_uint64( + ack_delay, conn->remote.transport_params->max_ack_delay); } else if (ack_delay > 0 && rtt >= cstat->min_rtt && rtt < cstat->min_rtt + ack_delay) { /* Ignore RTT sample if adjusting ack_delay causes the sample less than min_rtt before handshake confirmation. */ ngtcp2_log_info( - &conn->log, NGTCP2_LOG_EVENT_LDC, - "ignore rtt sample because ack_delay is too large latest_rtt=%" PRIu64 - " min_rtt=%" PRIu64 " ack_delay=%" PRIu64, - rtt / NGTCP2_MILLISECONDS, cstat->min_rtt / NGTCP2_MILLISECONDS, - ack_delay / NGTCP2_MILLISECONDS); + &conn->log, NGTCP2_LOG_EVENT_LDC, + "ignore rtt sample because ack_delay is too large latest_rtt=%" PRIu64 + " min_rtt=%" PRIu64 " ack_delay=%" PRIu64, + rtt / NGTCP2_MILLISECONDS, cstat->min_rtt / NGTCP2_MILLISECONDS, + ack_delay / NGTCP2_MILLISECONDS); return NGTCP2_ERR_INVALID_ARGUMENT; } cstat->latest_rtt = rtt; - cstat->min_rtt = ngtcp2_min(cstat->min_rtt, rtt); + cstat->min_rtt = ngtcp2_min_uint64(cstat->min_rtt, rtt); if (rtt >= cstat->min_rtt + ack_delay) { rtt -= ack_delay; } cstat->rttvar = (cstat->rttvar * 3 + (cstat->smoothed_rtt < rtt - ? rtt - cstat->smoothed_rtt - : cstat->smoothed_rtt - rtt)) / + ? 
rtt - cstat->smoothed_rtt + : cstat->smoothed_rtt - rtt)) / 4; cstat->smoothed_rtt = (cstat->smoothed_rtt * 7 + rtt) / 8; } ngtcp2_log_info( - &conn->log, NGTCP2_LOG_EVENT_LDC, - "latest_rtt=%" PRIu64 " min_rtt=%" PRIu64 " smoothed_rtt=%" PRIu64 - " rttvar=%" PRIu64 " ack_delay=%" PRIu64, - cstat->latest_rtt / NGTCP2_MILLISECONDS, - cstat->min_rtt / NGTCP2_MILLISECONDS, - cstat->smoothed_rtt / NGTCP2_MILLISECONDS, - cstat->rttvar / NGTCP2_MILLISECONDS, ack_delay / NGTCP2_MILLISECONDS); + &conn->log, NGTCP2_LOG_EVENT_LDC, + "latest_rtt=%" PRIu64 " min_rtt=%" PRIu64 " smoothed_rtt=%" PRIu64 + " rttvar=%" PRIu64 " ack_delay=%" PRIu64, + cstat->latest_rtt / NGTCP2_MILLISECONDS, + cstat->min_rtt / NGTCP2_MILLISECONDS, + cstat->smoothed_rtt / NGTCP2_MILLISECONDS, + cstat->rttvar / NGTCP2_MILLISECONDS, ack_delay / NGTCP2_MILLISECONDS); return 0; } @@ -12879,8 +12935,8 @@ static ngtcp2_tstamp conn_get_earliest_pto_expiry(ngtcp2_conn *conn, ngtcp2_conn_stat *cstat = &conn->cstat; ngtcp2_tstamp *times = cstat->last_tx_pkt_ts; ngtcp2_duration duration = - compute_pto(cstat->smoothed_rtt, cstat->rttvar, /* max_ack_delay = */ 0) * - (1ULL << cstat->pto_count); + compute_pto(cstat->smoothed_rtt, cstat->rttvar, /* max_ack_delay = */ 0) * + (1ULL << cstat->pto_count); for (i = NGTCP2_PKTNS_ID_INITIAL; i < NGTCP2_PKTNS_ID_MAX; ++i) { if (ns[i] == NULL || ns[i]->rtb.num_pto_eliciting == 0 || @@ -12939,8 +12995,7 @@ void ngtcp2_conn_set_loss_detection_timer(ngtcp2_conn *conn, ngtcp2_tstamp ts) { if (cstat->loss_detection_timer != UINT64_MAX) { ngtcp2_log_info(&conn->log, NGTCP2_LOG_EVENT_LDC, "loss detection timer canceled"); - cstat->loss_detection_timer = UINT64_MAX; - cstat->pto_count = 0; + ngtcp2_conn_cancel_loss_detection_timer(conn); } return; } @@ -12948,13 +13003,20 @@ void ngtcp2_conn_set_loss_detection_timer(ngtcp2_conn *conn, ngtcp2_tstamp ts) { cstat->loss_detection_timer = conn_get_earliest_pto_expiry(conn, ts); timeout = - cstat->loss_detection_timer > ts ? cstat->loss_detection_timer - ts : 0; + cstat->loss_detection_timer > ts ? 
cstat->loss_detection_timer - ts : 0; ngtcp2_log_info(&conn->log, NGTCP2_LOG_EVENT_LDC, "loss_detection_timer=%" PRIu64 " timeout=%" PRIu64, cstat->loss_detection_timer, timeout / NGTCP2_MILLISECONDS); } +void ngtcp2_conn_cancel_loss_detection_timer(ngtcp2_conn *conn) { + ngtcp2_conn_stat *cstat = &conn->cstat; + + cstat->loss_detection_timer = UINT64_MAX; + cstat->pto_count = 0; +} + int ngtcp2_conn_on_loss_detection_timer(ngtcp2_conn *conn, ngtcp2_tstamp ts) { ngtcp2_conn_stat *cstat = &conn->cstat; int rv; @@ -12966,8 +13028,7 @@ int ngtcp2_conn_on_loss_detection_timer(ngtcp2_conn *conn, ngtcp2_tstamp ts) { switch (conn->state) { case NGTCP2_CS_CLOSING: case NGTCP2_CS_DRAINING: - cstat->loss_detection_timer = UINT64_MAX; - cstat->pto_count = 0; + ngtcp2_conn_cancel_loss_detection_timer(conn); return 0; default: break; @@ -13042,7 +13103,8 @@ static int conn_buffer_crypto_data(ngtcp2_conn *conn, const uint8_t **pdata, } if (!*pbufchain) { - rv = ngtcp2_buf_chain_new(pbufchain, ngtcp2_max(1024, datalen), conn->mem); + rv = ngtcp2_buf_chain_new(pbufchain, ngtcp2_max_size(1024, datalen), + conn->mem); if (rv != 0) { return rv; } @@ -13125,7 +13187,7 @@ int ngtcp2_conn_submit_new_token(ngtcp2_conn *conn, const uint8_t *token, assert(tokenlen); rv = ngtcp2_frame_chain_new_token_objalloc_new( - &nfrc, token, tokenlen, &conn->frc_objalloc, conn->mem); + &nfrc, token, tokenlen, &conn->frc_objalloc, conn->mem); if (rv != 0) { return rv; } @@ -13208,7 +13270,7 @@ static void copy_dcid_to_cid_token(ngtcp2_cid_token *dest, dest->cid = src->cid; ngtcp2_path_storage_init2(&dest->ps, &src->ps.path); if ((dest->token_present = - (src->flags & NGTCP2_DCID_FLAG_TOKEN_PRESENT) != 0)) { + (src->flags & NGTCP2_DCID_FLAG_TOKEN_PRESENT) != 0)) { memcpy(dest->token, src->token, NGTCP2_STATELESS_RESET_TOKENLEN); } } @@ -13411,8 +13473,8 @@ uint64_t ngtcp2_conn_get_streams_bidi_left(ngtcp2_conn *conn) { uint64_t n = ngtcp2_ord_stream_id(conn->local.bidi.next_stream_id); return n > conn->local.bidi.max_streams - ? 0 - : conn->local.bidi.max_streams - n + 1; + ? 0 + : conn->local.bidi.max_streams - n + 1; } uint64_t ngtcp2_conn_get_streams_uni_left(ngtcp2_conn *conn) { @@ -13444,7 +13506,7 @@ ngtcp2_tstamp ngtcp2_conn_get_idle_expiry(ngtcp2_conn *conn) { conn->remote.transport_params->max_idle_timeout == 0 || (conn->local.transport_params.max_idle_timeout && conn->local.transport_params.max_idle_timeout < - conn->remote.transport_params->max_idle_timeout)) { + conn->remote.transport_params->max_idle_timeout)) { idle_timeout = conn->local.transport_params.max_idle_timeout; } else { idle_timeout = conn->remote.transport_params->max_idle_timeout; @@ -13455,10 +13517,10 @@ ngtcp2_tstamp ngtcp2_conn_get_idle_expiry(ngtcp2_conn *conn) { } trpto = 3 * conn_compute_pto(conn, conn_is_tls_handshake_completed(conn) - ? &conn->pktns - : conn->hs_pktns); + ? &conn->pktns + : conn->hs_pktns); - idle_timeout = ngtcp2_max(idle_timeout, trpto); + idle_timeout = ngtcp2_max_uint64(idle_timeout, trpto); if (conn->idle_ts >= UINT64_MAX - idle_timeout) { return UINT64_MAX; @@ -13469,8 +13531,8 @@ ngtcp2_tstamp ngtcp2_conn_get_idle_expiry(ngtcp2_conn *conn) { ngtcp2_duration ngtcp2_conn_get_pto(ngtcp2_conn *conn) { return conn_compute_pto(conn, conn_is_tls_handshake_completed(conn) - ? &conn->pktns - : conn->hs_pktns); + ? 
&conn->pktns + : conn->hs_pktns); } void ngtcp2_conn_set_initial_crypto_ctx(ngtcp2_conn *conn, @@ -13581,8 +13643,8 @@ void ngtcp2_conn_update_pkt_tx_time(ngtcp2_conn *conn, ngtcp2_tstamp ts) { /* 1.25 is the under-utilization avoidance factor described in https://datatracker.ietf.org/doc/html/rfc9002#section-7.7 */ pacing_interval = (conn->cstat.first_rtt_sample_ts == UINT64_MAX - ? NGTCP2_MILLISECONDS - : conn->cstat.smoothed_rtt) * + ? NGTCP2_MILLISECONDS + : conn->cstat.smoothed_rtt) * 100 / 125 / conn->cstat.cwnd; } @@ -13597,20 +13659,11 @@ size_t ngtcp2_conn_get_send_quantum(ngtcp2_conn *conn) { } int ngtcp2_conn_track_retired_dcid_seq(ngtcp2_conn *conn, uint64_t seq) { - size_t i; - if (conn->dcid.retire_unacked.len >= ngtcp2_arraylen(conn->dcid.retire_unacked.seqs)) { return NGTCP2_ERR_CONNECTION_ID_LIMIT; } - /* Make sure that we do not have a duplicate */ - for (i = 0; i < conn->dcid.retire_unacked.len; ++i) { - if (conn->dcid.retire_unacked.seqs[i] == seq) { - ngtcp2_unreachable(); - } - } - conn->dcid.retire_unacked.seqs[conn->dcid.retire_unacked.len++] = seq; return 0; @@ -13626,7 +13679,7 @@ void ngtcp2_conn_untrack_retired_dcid_seq(ngtcp2_conn *conn, uint64_t seq) { if (i != conn->dcid.retire_unacked.len - 1) { conn->dcid.retire_unacked.seqs[i] = - conn->dcid.retire_unacked.seqs[conn->dcid.retire_unacked.len - 1]; + conn->dcid.retire_unacked.seqs[conn->dcid.retire_unacked.len - 1]; } --conn->dcid.retire_unacked.len; @@ -13635,6 +13688,18 @@ void ngtcp2_conn_untrack_retired_dcid_seq(ngtcp2_conn *conn, uint64_t seq) { } } +int ngtcp2_conn_check_retired_dcid_tracked(ngtcp2_conn *conn, uint64_t seq) { + size_t i; + + for (i = 0; i < conn->dcid.retire_unacked.len; ++i) { + if (conn->dcid.retire_unacked.seqs[i] == seq) { + return 1; + } + } + + return 0; +} + size_t ngtcp2_conn_get_stream_loss_count(ngtcp2_conn *conn, int64_t stream_id) { ngtcp2_strm *strm = ngtcp2_conn_find_stream(conn, stream_id); @@ -13652,56 +13717,17 @@ void ngtcp2_path_challenge_entry_init(ngtcp2_path_challenge_entry *pcent, memcpy(pcent->data, data, sizeof(pcent->data)); } -void ngtcp2_settings_default_versioned(int settings_version, - ngtcp2_settings *settings) { - (void)settings_version; - - memset(settings, 0, sizeof(*settings)); - settings->cc_algo = NGTCP2_CC_ALGO_CUBIC; - settings->initial_rtt = NGTCP2_DEFAULT_INITIAL_RTT; - settings->ack_thresh = 2; - settings->max_tx_udp_payload_size = 1500 - 48; - settings->handshake_timeout = UINT64_MAX; -} - -void ngtcp2_transport_params_default_versioned( - int transport_params_version, ngtcp2_transport_params *params) { - size_t len; - - switch (transport_params_version) { - case NGTCP2_TRANSPORT_PARAMS_VERSION: - len = sizeof(*params); - - break; - default: - ngtcp2_unreachable(); - } - - memset(params, 0, len); - - switch (transport_params_version) { - case NGTCP2_TRANSPORT_PARAMS_VERSION: - params->max_udp_payload_size = NGTCP2_DEFAULT_MAX_RECV_UDP_PAYLOAD_SIZE; - params->active_connection_id_limit = - NGTCP2_DEFAULT_ACTIVE_CONNECTION_ID_LIMIT; - params->ack_delay_exponent = NGTCP2_DEFAULT_ACK_DELAY_EXPONENT; - params->max_ack_delay = NGTCP2_DEFAULT_MAX_ACK_DELAY; - - break; - } -} - /* The functions prefixed with ngtcp2_pkt_ are usually put inside ngtcp2_pkt.c. This function uses encryption construct and uses test data defined only in ngtcp2_conn_test.c, so it is written here. 
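Note in passing that ngtcp2_ppe_init below now takes a fourth argument; judging from the rest of this change it appears to be the datagram offset, and this helper passes 0 because it always starts a fresh datagram. A sketch of the assumed convention (not a verbatim API description):

    // First packet of a datagram: nothing written yet.
    ngtcp2_ppe_init(&ppe, dest, destlen, 0, &cc);
    // A packet coalesced after nwrite bytes would pass nwrite instead,
    // letting the encoder pad relative to the whole datagram.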
*/ ngtcp2_ssize ngtcp2_pkt_write_connection_close( - uint8_t *dest, size_t destlen, uint32_t version, const ngtcp2_cid *dcid, - const ngtcp2_cid *scid, uint64_t error_code, const uint8_t *reason, - size_t reasonlen, ngtcp2_encrypt encrypt, const ngtcp2_crypto_aead *aead, - const ngtcp2_crypto_aead_ctx *aead_ctx, const uint8_t *iv, - ngtcp2_hp_mask hp_mask, const ngtcp2_crypto_cipher *hp, - const ngtcp2_crypto_cipher_ctx *hp_ctx) { + uint8_t *dest, size_t destlen, uint32_t version, const ngtcp2_cid *dcid, + const ngtcp2_cid *scid, uint64_t error_code, const uint8_t *reason, + size_t reasonlen, ngtcp2_encrypt encrypt, const ngtcp2_crypto_aead *aead, + const ngtcp2_crypto_aead_ctx *aead_ctx, const uint8_t *iv, + ngtcp2_hp_mask hp_mask, const ngtcp2_crypto_cipher *hp, + const ngtcp2_crypto_cipher_ctx *hp_ctx) { ngtcp2_pkt_hd hd; ngtcp2_crypto_km ckm; ngtcp2_crypto_cc cc; @@ -13726,7 +13752,7 @@ ngtcp2_ssize ngtcp2_pkt_write_connection_close( cc.encrypt = encrypt; cc.hp_mask = hp_mask; - ngtcp2_ppe_init(&ppe, dest, destlen, &cc); + ngtcp2_ppe_init(&ppe, dest, destlen, 0, &cc); rv = ngtcp2_ppe_encode_hd(&ppe, &hd); if (rv != 0) { diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_conn.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_conn.h index 4ed67876bc3749..55073fcc828d73 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_conn.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_conn.h @@ -27,7 +27,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -65,9 +65,6 @@ typedef enum { NGTCP2_CS_DRAINING, } ngtcp2_conn_state; -/* NGTCP2_MAX_STREAMS is the maximum number of streams. */ -#define NGTCP2_MAX_STREAMS (1LL << 60) - /* NGTCP2_MAX_NUM_BUFFED_RX_PKTS is the maximum number of buffered reordered packets. */ #define NGTCP2_MAX_NUM_BUFFED_RX_PKTS 4 @@ -77,15 +74,6 @@ typedef enum { unreceived data. */ #define NGTCP2_MAX_REORDERED_CRYPTO_DATA 65536 -/* NGTCP2_MAX_RX_INITIAL_CRYPTO_DATA is the maximum offset of received - crypto stream in Initial packet. We set this hard limit here - because crypto stream is unbounded. */ -#define NGTCP2_MAX_RX_INITIAL_CRYPTO_DATA 65536 -/* NGTCP2_MAX_RX_HANDSHAKE_CRYPTO_DATA is the maximum offset of - received crypto stream in Handshake packet. We set this hard limit - here because crypto stream is unbounded. */ -#define NGTCP2_MAX_RX_HANDSHAKE_CRYPTO_DATA 65536 - /* NGTCP2_MAX_RETRIES is the number of Retry packet which client can accept. */ #define NGTCP2_MAX_RETRIES 3 @@ -121,12 +109,18 @@ typedef enum { /* NGTCP2_WRITE_PKT_FLAG_NONE indicates that no flag is set. */ #define NGTCP2_WRITE_PKT_FLAG_NONE 0x00u /* NGTCP2_WRITE_PKT_FLAG_REQUIRE_PADDING indicates that packet other - than Initial packet should be padded. Initial packet might be - padded based on QUIC requirement regardless of this flag. */ + than Initial packet should be padded so that UDP datagram payload + is at least NGTCP2_MAX_UDP_PAYLOAD_SIZE bytes. Initial packet + might be padded based on QUIC requirement regardless of this + flag. */ #define NGTCP2_WRITE_PKT_FLAG_REQUIRE_PADDING 0x01u /* NGTCP2_WRITE_PKT_FLAG_MORE indicates that more frames might come and it should be encoded into the current packet. */ #define NGTCP2_WRITE_PKT_FLAG_MORE 0x02u +/* NGTCP2_WRITE_PKT_FLAG_REQUIRE_PADDING_FULL is just like + NGTCP2_WRITE_PKT_FLAG_REQUIRE_PADDING, but it requests to add + padding to the full UDP datagram payload size. 
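A plausible reading of the two flags, as a sketch (the dispatch below is illustrative, not the library's actual writer):

    size_t pad_target = 0; // no padding requested
    if (flags & NGTCP2_WRITE_PKT_FLAG_REQUIRE_PADDING_FULL) {
      pad_target = destlen;                     // fill the datagram payload
    } else if (flags & NGTCP2_WRITE_PKT_FLAG_REQUIRE_PADDING) {
      pad_target = NGTCP2_MAX_UDP_PAYLOAD_SIZE; // reach the minimum size
    }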
*/ +#define NGTCP2_WRITE_PKT_FLAG_REQUIRE_PADDING_FULL 0x04u /* * ngtcp2_max_frame is defined so that it covers the largest ACK @@ -319,6 +313,7 @@ typedef struct ngtcp2_pktns { ngtcp2_acktr acktr; ngtcp2_rtb rtb; + ngtcp2_pktns_id id; } ngtcp2_pktns; typedef enum ngtcp2_ecn_state { @@ -342,15 +337,15 @@ typedef struct ngtcp2_early_transport_params { } ngtcp2_early_transport_params; ngtcp2_static_ringbuf_def(dcid_bound, NGTCP2_MAX_BOUND_DCID_POOL_SIZE, - sizeof(ngtcp2_dcid)); + sizeof(ngtcp2_dcid)) ngtcp2_static_ringbuf_def(dcid_unused, NGTCP2_MAX_DCID_POOL_SIZE, - sizeof(ngtcp2_dcid)); + sizeof(ngtcp2_dcid)) ngtcp2_static_ringbuf_def(dcid_retired, NGTCP2_MAX_DCID_RETIRED_SIZE, - sizeof(ngtcp2_dcid)); + sizeof(ngtcp2_dcid)) ngtcp2_static_ringbuf_def(path_challenge, 4, - sizeof(ngtcp2_path_challenge_entry)); + sizeof(ngtcp2_path_challenge_entry)) -ngtcp2_objalloc_decl(strm, ngtcp2_strm, oplent); +ngtcp2_objalloc_decl(strm, ngtcp2_strm, oplent) struct ngtcp2_conn { ngtcp2_objalloc frc_objalloc; @@ -813,6 +808,8 @@ int ngtcp2_conn_update_rtt(ngtcp2_conn *conn, ngtcp2_duration rtt, void ngtcp2_conn_set_loss_detection_timer(ngtcp2_conn *conn, ngtcp2_tstamp ts); +void ngtcp2_conn_cancel_loss_detection_timer(ngtcp2_conn *conn); + int ngtcp2_conn_on_loss_detection_timer(ngtcp2_conn *conn, ngtcp2_tstamp ts); /* @@ -878,9 +875,9 @@ ngtcp2_ssize ngtcp2_conn_write_vmsg(ngtcp2_conn *conn, ngtcp2_path *path, * User-defined callback function failed. */ ngtcp2_ssize ngtcp2_conn_write_single_frame_pkt( - ngtcp2_conn *conn, ngtcp2_pkt_info *pi, uint8_t *dest, size_t destlen, - uint8_t type, uint8_t flags, const ngtcp2_cid *dcid, ngtcp2_frame *fr, - uint16_t rtb_entry_flags, const ngtcp2_path *path, ngtcp2_tstamp ts); + ngtcp2_conn *conn, ngtcp2_pkt_info *pi, uint8_t *dest, size_t destlen, + uint8_t type, uint8_t flags, const ngtcp2_cid *dcid, ngtcp2_frame *fr, + uint16_t rtb_entry_flags, const ngtcp2_path *path, ngtcp2_tstamp ts); /* * ngtcp2_conn_commit_local_transport_params commits the local @@ -973,6 +970,12 @@ int ngtcp2_conn_track_retired_dcid_seq(ngtcp2_conn *conn, uint64_t seq); */ void ngtcp2_conn_untrack_retired_dcid_seq(ngtcp2_conn *conn, uint64_t seq); +/* + * ngtcp2_conn_check_retired_dcid_tracked returns nonzero if |seq| has + * already been tracked. + */ +int ngtcp2_conn_check_retired_dcid_tracked(ngtcp2_conn *conn, uint64_t seq); + /* * ngtcp2_conn_server_negotiate_version negotiates QUIC version. It * is compatible version negotiation. 
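Roughly: walk the local preference list in order and return the first version the peer also offers. A simplified sketch consistent with select_preferred_version above (host-byte-order arrays instead of the wire encoding):

    // First locally-preferred version the peer offers, else fallback.
    static uint32_t pick_version(const uint32_t *preferred, size_t preferredlen,
                                 const uint32_t *offered, size_t offeredlen,
                                 uint32_t fallback) {
      size_t i, j;
      for (i = 0; i < preferredlen; ++i) {
        for (j = 0; j < offeredlen; ++j) {
          if (preferred[i] == offered[j]) {
            return preferred[i];
          }
        }
      }
      return fallback;
    }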
It returns the negotiated QUIC @@ -1020,9 +1023,9 @@ ngtcp2_conn_server_negotiate_version(ngtcp2_conn *conn, * User callback failed */ ngtcp2_ssize ngtcp2_conn_write_connection_close_pkt( - ngtcp2_conn *conn, ngtcp2_path *path, ngtcp2_pkt_info *pi, uint8_t *dest, - size_t destlen, uint64_t error_code, const uint8_t *reason, - size_t reasonlen, ngtcp2_tstamp ts); + ngtcp2_conn *conn, ngtcp2_path *path, ngtcp2_pkt_info *pi, uint8_t *dest, + size_t destlen, uint64_t error_code, const uint8_t *reason, size_t reasonlen, + ngtcp2_tstamp ts); /** * @function @@ -1066,9 +1069,9 @@ ngtcp2_ssize ngtcp2_conn_write_connection_close_pkt( * User callback failed */ ngtcp2_ssize ngtcp2_conn_write_application_close_pkt( - ngtcp2_conn *conn, ngtcp2_path *path, ngtcp2_pkt_info *pi, uint8_t *dest, - size_t destlen, uint64_t app_error_code, const uint8_t *reason, - size_t reasonlen, ngtcp2_tstamp ts); + ngtcp2_conn *conn, ngtcp2_path *path, ngtcp2_pkt_info *pi, uint8_t *dest, + size_t destlen, uint64_t app_error_code, const uint8_t *reason, + size_t reasonlen, ngtcp2_tstamp ts); int ngtcp2_conn_start_pmtud(ngtcp2_conn *conn); @@ -1093,7 +1096,7 @@ void ngtcp2_conn_stop_pmtud(ngtcp2_conn *conn); * Out of memory. */ int ngtcp2_conn_set_remote_transport_params( - ngtcp2_conn *conn, const ngtcp2_transport_params *params); + ngtcp2_conn *conn, const ngtcp2_transport_params *params); /** * @function @@ -1132,7 +1135,7 @@ int ngtcp2_conn_set_remote_transport_params( * Out of memory. */ int ngtcp2_conn_set_0rtt_remote_transport_params( - ngtcp2_conn *conn, const ngtcp2_transport_params *params); + ngtcp2_conn *conn, const ngtcp2_transport_params *params); /* * ngtcp2_conn_create_ack_frame creates ACK frame, and assigns its @@ -1156,4 +1159,16 @@ int ngtcp2_conn_create_ack_frame(ngtcp2_conn *conn, ngtcp2_frame **pfr, ngtcp2_tstamp ts, ngtcp2_duration ack_delay, uint64_t ack_delay_exponent); -#endif /* NGTCP2_CONN_H */ +/* + * ngtcp2_conn_discard_initial_state discards state for Initial packet + * number space. + */ +void ngtcp2_conn_discard_initial_state(ngtcp2_conn *conn, ngtcp2_tstamp ts); + +/* + * ngtcp2_conn_discard_handshake_state discards state for Handshake + * packet number space. + */ +void ngtcp2_conn_discard_handshake_state(ngtcp2_conn *conn, ngtcp2_tstamp ts); + +#endif /* !defined(NGTCP2_CONN_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_conn_stat.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_conn_stat.h index 1a93867aab3cae..ad2b7329f48df2 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_conn_stat.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_conn_stat.h @@ -27,7 +27,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -105,8 +105,9 @@ typedef struct ngtcp2_conn_stat { uint64_t bytes_in_flight; /** * :member:`max_tx_udp_payload_size` is the maximum size of UDP - * datagram payload that this endpoint transmits. It is used by - * congestion controller to compute congestion window. + * datagram payload that this endpoint transmits to the current + * path. It is used by congestion controller to compute congestion + * window. 
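For example, RFC 9002 (Section 7.2) derives the initial congestion window from this size; a sketch of that formula (illustrative):

    // initial cwnd = min(10 * max_datagram_size,
    //                    max(2 * max_datagram_size, 14720))  [RFC 9002]
    static uint64_t initial_cwnd(size_t max_tx_udp_payload_size) {
      uint64_t mds = max_tx_udp_payload_size;
      uint64_t cap = 2 * mds > 14720 ? 2 * mds : 14720;
      return 10 * mds < cap ? 10 * mds : cap;
    }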
*/ size_t max_tx_udp_payload_size; /** @@ -129,4 +130,4 @@ typedef struct ngtcp2_conn_stat { size_t send_quantum; } ngtcp2_conn_stat; -#endif /* NGTCP2_CONN_STAT_H */ +#endif /* !defined(NGTCP2_CONN_STAT_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_conv.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_conv.c index 336721772b4e4c..6528011cc0edf4 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_conv.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_conv.c @@ -32,47 +32,37 @@ #include "ngtcp2_net.h" #include "ngtcp2_unreachable.h" -const uint8_t *ngtcp2_get_uint64(uint64_t *dest, const uint8_t *p) { - uint64_t n; - memcpy(&n, p, sizeof(n)); - *dest = ngtcp2_ntohl64(n); - return p + sizeof(n); -} - -const uint8_t *ngtcp2_get_uint48(uint64_t *dest, const uint8_t *p) { - uint64_t n = 0; - memcpy(((uint8_t *)&n) + 2, p, 6); - *dest = ngtcp2_ntohl64(n); - return p + 6; +const uint8_t *ngtcp2_get_uint64be(uint64_t *dest, const uint8_t *p) { + memcpy(dest, p, sizeof(*dest)); + *dest = ngtcp2_ntohl64(*dest); + return p + sizeof(*dest); } -const uint8_t *ngtcp2_get_uint32(uint32_t *dest, const uint8_t *p) { - uint32_t n; - memcpy(&n, p, sizeof(n)); - *dest = ngtcp2_ntohl(n); - return p + sizeof(n); +const uint8_t *ngtcp2_get_uint32be(uint32_t *dest, const uint8_t *p) { + memcpy(dest, p, sizeof(*dest)); + *dest = ngtcp2_ntohl(*dest); + return p + sizeof(*dest); } -const uint8_t *ngtcp2_get_uint24(uint32_t *dest, const uint8_t *p) { - uint32_t n = 0; - memcpy(((uint8_t *)&n) + 1, p, 3); - *dest = ngtcp2_ntohl(n); +const uint8_t *ngtcp2_get_uint24be(uint32_t *dest, const uint8_t *p) { + *dest = 0; + memcpy(((uint8_t *)dest) + 1, p, 3); + *dest = ngtcp2_ntohl(*dest); return p + 3; } -const uint8_t *ngtcp2_get_uint16(uint16_t *dest, const uint8_t *p) { - uint16_t n; - memcpy(&n, p, sizeof(n)); - *dest = ngtcp2_ntohs(n); - return p + sizeof(n); +const uint8_t *ngtcp2_get_uint16be(uint16_t *dest, const uint8_t *p) { + memcpy(dest, p, sizeof(*dest)); + *dest = ngtcp2_ntohs(*dest); + return p + sizeof(*dest); } -const uint8_t *ngtcp2_get_uint16be(uint16_t *dest, const uint8_t *p) { +const uint8_t *ngtcp2_get_uint16(uint16_t *dest, const uint8_t *p) { memcpy(dest, p, sizeof(*dest)); return p + sizeof(*dest); } -static uint64_t get_uvarint(size_t *plen, const uint8_t *p) { +static const uint8_t *get_uvarint(uint64_t *dest, const uint8_t *p) { union { uint8_t n8; uint16_t n16; @@ -80,42 +70,39 @@ static uint64_t get_uvarint(size_t *plen, const uint8_t *p) { uint64_t n64; } n; - *plen = (size_t)(1u << (*p >> 6)); - - switch (*plen) { + switch (*p >> 6) { + case 0: + *dest = *p++; + return p; case 1: - return *p; - case 2: memcpy(&n, p, 2); n.n8 &= 0x3f; - return ngtcp2_ntohs(n.n16); - case 4: + *dest = ngtcp2_ntohs(n.n16); + + return p + 2; + case 2: memcpy(&n, p, 4); n.n8 &= 0x3f; - return ngtcp2_ntohl(n.n32); - case 8: + *dest = ngtcp2_ntohl(n.n32); + + return p + 4; + case 3: memcpy(&n, p, 8); n.n8 &= 0x3f; - return ngtcp2_ntohl64(n.n64); + *dest = ngtcp2_ntohl64(n.n64); + + return p + 8; default: ngtcp2_unreachable(); } } const uint8_t *ngtcp2_get_uvarint(uint64_t *dest, const uint8_t *p) { - size_t len; - - *dest = get_uvarint(&len, p); - - return p + len; + return get_uvarint(dest, p); } const uint8_t *ngtcp2_get_varint(int64_t *dest, const uint8_t *p) { - size_t len; - - *dest = (int64_t)get_uvarint(&len, p); - - return p + len; + return get_uvarint((uint64_t *)dest, p); } int64_t ngtcp2_get_pkt_num(const uint8_t *p, size_t pkt_numlen) { @@ -126,13 +113,13 @@ int64_t ngtcp2_get_pkt_num(const uint8_t *p, size_t pkt_numlen) { case 1: 
return *p; case 2: - ngtcp2_get_uint16(&s, p); + ngtcp2_get_uint16be(&s, p); return (int64_t)s; case 3: - ngtcp2_get_uint24(&l, p); + ngtcp2_get_uint24be(&l, p); return (int64_t)l; case 4: - ngtcp2_get_uint32(&l, p); + ngtcp2_get_uint32be(&l, p); return (int64_t)l; default: ngtcp2_unreachable(); @@ -144,11 +131,6 @@ uint8_t *ngtcp2_put_uint64be(uint8_t *p, uint64_t n) { return ngtcp2_cpymem(p, (const uint8_t *)&n, sizeof(n)); } -uint8_t *ngtcp2_put_uint48be(uint8_t *p, uint64_t n) { - n = ngtcp2_htonl64(n); - return ngtcp2_cpymem(p, ((const uint8_t *)&n) + 2, 6); -} - uint8_t *ngtcp2_put_uint32be(uint8_t *p, uint32_t n) { n = ngtcp2_htonl(n); return ngtcp2_cpymem(p, (const uint8_t *)&n, sizeof(n)); @@ -207,14 +189,11 @@ uint8_t *ngtcp2_put_pkt_num(uint8_t *p, int64_t pkt_num, size_t len) { *p++ = (uint8_t)pkt_num; return p; case 2: - ngtcp2_put_uint16be(p, (uint16_t)pkt_num); - return p + 2; + return ngtcp2_put_uint16be(p, (uint16_t)pkt_num); case 3: - ngtcp2_put_uint24be(p, (uint32_t)pkt_num); - return p + 3; + return ngtcp2_put_uint24be(p, (uint32_t)pkt_num); case 4: - ngtcp2_put_uint32be(p, (uint32_t)pkt_num); - return p + 4; + return ngtcp2_put_uint32be(p, (uint32_t)pkt_num); default: ngtcp2_unreachable(); } @@ -238,54 +217,6 @@ size_t ngtcp2_put_uvarintlen(uint64_t n) { return 8; } -int64_t ngtcp2_nth_server_bidi_id(uint64_t n) { - if (n == 0) { - return 0; - } - - if ((NGTCP2_MAX_VARINT >> 2) < n - 1) { - return NGTCP2_MAX_SERVER_STREAM_ID_BIDI; - } - - return (int64_t)(((n - 1) << 2) | 0x01); -} - -int64_t ngtcp2_nth_client_bidi_id(uint64_t n) { - if (n == 0) { - return 0; - } - - if ((NGTCP2_MAX_VARINT >> 2) < n - 1) { - return NGTCP2_MAX_CLIENT_STREAM_ID_BIDI; - } - - return (int64_t)((n - 1) << 2); -} - -int64_t ngtcp2_nth_server_uni_id(uint64_t n) { - if (n == 0) { - return 0; - } - - if ((NGTCP2_MAX_VARINT >> 2) < n - 1) { - return NGTCP2_MAX_SERVER_STREAM_ID_UNI; - } - - return (int64_t)(((n - 1) << 2) | 0x03); -} - -int64_t ngtcp2_nth_client_uni_id(uint64_t n) { - if (n == 0) { - return 0; - } - - if ((NGTCP2_MAX_VARINT >> 2) < n - 1) { - return NGTCP2_MAX_CLIENT_STREAM_ID_UNI; - } - - return (int64_t)(((n - 1) << 2) | 0x02); -} - uint64_t ngtcp2_ord_stream_id(int64_t stream_id) { return (uint64_t)(stream_id >> 2) + 1; } diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_conv.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_conv.h index ef089a971a37f1..ad924683b8dc10 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_conv.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_conv.h @@ -27,51 +27,44 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include /* - * ngtcp2_get_uint64 reads 8 bytes from |p| as 64 bits unsigned + * ngtcp2_get_uint64be reads 8 bytes from |p| as 64 bits unsigned * integer encoded as network byte order, and stores it in the buffer * pointed by |dest| in host byte order. It returns |p| + 8. */ -const uint8_t *ngtcp2_get_uint64(uint64_t *dest, const uint8_t *p); +const uint8_t *ngtcp2_get_uint64be(uint64_t *dest, const uint8_t *p); /* - * ngtcp2_get_uint48 reads 6 bytes from |p| as 48 bits unsigned - * integer encoded as network byte order, and stores it in the buffer - * pointed by |dest| in host byte order. It returns |p| + 6. - */ -const uint8_t *ngtcp2_get_uint48(uint64_t *dest, const uint8_t *p); - -/* - * ngtcp2_get_uint32 reads 4 bytes from |p| as 32 bits unsigned + * ngtcp2_get_uint32be reads 4 bytes from |p| as 32 bits unsigned * integer encoded as network byte order, and stores it in the buffer * pointed by |dest| in host byte order. 
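A reference point for the uvarint rewrite in ngtcp2_conv.c above: QUIC variable-length integers (RFC 9000, Section 16) carry their own length in the two most significant bits of the first byte (00, 01, 10, 11 for 1, 2, 4, 8 bytes), which is exactly what the new switch (*p >> 6) dispatches on. Worked decodes, using the test vectors from RFC 9000 Appendix A:

/* Sketch only; the byte sequences are the RFC 9000 Appendix A examples. */
static const uint8_t one[] = {0x25};                    /* prefix 00 -> 37        */
static const uint8_t two[] = {0x7b, 0xbd};              /* prefix 01 -> 15293     */
static const uint8_t four[] = {0x9d, 0x7f, 0x3e, 0x7d}; /* prefix 10 -> 494878333 */
uint64_t v;
ngtcp2_get_uvarint(&v, two); /* masks off the 01 length prefix: v == 15293 */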
It returns |p| + 4. */ -const uint8_t *ngtcp2_get_uint32(uint32_t *dest, const uint8_t *p); +const uint8_t *ngtcp2_get_uint32be(uint32_t *dest, const uint8_t *p); /* - * ngtcp2_get_uint24 reads 3 bytes from |p| as 24 bits unsigned + * ngtcp2_get_uint24be reads 3 bytes from |p| as 24 bits unsigned * integer encoded as network byte order, and stores it in the buffer * pointed by |dest| in host byte order. It returns |p| + 3. */ -const uint8_t *ngtcp2_get_uint24(uint32_t *dest, const uint8_t *p); +const uint8_t *ngtcp2_get_uint24be(uint32_t *dest, const uint8_t *p); /* - * ngtcp2_get_uint16 reads 2 bytes from |p| as 16 bits unsigned + * ngtcp2_get_uint16be reads 2 bytes from |p| as 16 bits unsigned * integer encoded as network byte order, and stores it in the buffer * pointed by |dest| in host byte order. It returns |p| + 2. */ -const uint8_t *ngtcp2_get_uint16(uint16_t *dest, const uint8_t *p); +const uint8_t *ngtcp2_get_uint16be(uint16_t *dest, const uint8_t *p); /* - * ngtcp2_get_uint16be reads 2 bytes from |p| as 16 bits unsigned + * ngtcp2_get_uint16 reads 2 bytes from |p| as 16 bits unsigned * integer encoded as network byte order, and stores it in the buffer * pointed by |dest| as is. It returns |p| + 2. */ -const uint8_t *ngtcp2_get_uint16be(uint16_t *dest, const uint8_t *p); +const uint8_t *ngtcp2_get_uint16(uint16_t *dest, const uint8_t *p); /* * ngtcp2_get_uvarint reads variable-length unsigned integer from |p|, @@ -102,13 +95,6 @@ int64_t ngtcp2_get_pkt_num(const uint8_t *p, size_t pkt_numlen); */ uint8_t *ngtcp2_put_uint64be(uint8_t *p, uint64_t n); -/* - * ngtcp2_put_uint48be writes |n| in host byte order in |p| in network - * byte order. It writes only least significant 48 bits. It returns - * the one beyond of the last written position. - */ -uint8_t *ngtcp2_put_uint48be(uint8_t *p, uint64_t n); - /* * ngtcp2_put_uint32be writes |n| in host byte order in |p| in network * byte order. It returns the one beyond of the last written @@ -168,41 +154,9 @@ size_t ngtcp2_get_uvarintlen(const uint8_t *p); */ size_t ngtcp2_put_uvarintlen(uint64_t n); -/* - * ngtcp2_nth_server_bidi_id returns |n|-th server bidirectional - * stream ID. If |n| is 0, it returns 0. If the |n|-th stream ID is - * larger than NGTCP2_MAX_SERVER_STREAM_ID_BIDI, this function returns - * NGTCP2_MAX_SERVER_STREAM_ID_BIDI. - */ -int64_t ngtcp2_nth_server_bidi_id(uint64_t n); - -/* - * ngtcp2_nth_client_bidi_id returns |n|-th client bidirectional - * stream ID. If |n| is 0, it returns 0. If the |n|-th stream ID is - * larger than NGTCP2_MAX_CLIENT_STREAM_ID_BIDI, this function returns - * NGTCP2_MAX_CLIENT_STREAM_ID_BIDI. - */ -int64_t ngtcp2_nth_client_bidi_id(uint64_t n); - -/* - * ngtcp2_nth_server_uni_id returns |n|-th server unidirectional - * stream ID. If |n| is 0, it returns 0. If the |n|-th stream ID is - * larger than NGTCP2_MAX_SERVER_STREAM_ID_UNI, this function returns - * NGTCP2_MAX_SERVER_STREAM_ID_UNI. - */ -int64_t ngtcp2_nth_server_uni_id(uint64_t n); - -/* - * ngtcp2_nth_client_uni_id returns |n|-th client unidirectional - * stream ID. If |n| is 0, it returns 0. If the |n|-th stream ID is - * larger than NGTCP2_MAX_CLIENT_STREAM_ID_UNI, this function returns - * NGTCP2_MAX_CLIENT_STREAM_ID_UNI. - */ -int64_t ngtcp2_nth_client_uni_id(uint64_t n); - /* * ngtcp2_ord_stream_id returns the ordinal number of |stream_id|. 
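Background for the deleted ngtcp2_nth_*_id helpers and for ngtcp2_ord_stream_id, declared just below: RFC 9000, Section 2.1 encodes the stream type in the two low bits of the ID (0x0 client bidi, 0x1 server bidi, 0x2 client uni, 0x3 server uni), so successive streams of one type step by 4 and the ordinal falls out of a shift.

/* Worked example, not library code: server-initiated bidirectional
   streams are 1, 5, 9, ... (low bits 01). */
ngtcp2_ord_stream_id(0x01); /* == 1: first server bidi stream  */
ngtcp2_ord_stream_id(0x05); /* == 2: second server bidi stream */
ngtcp2_ord_stream_id(0x09); /* == 3: third server bidi stream  */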
*/ uint64_t ngtcp2_ord_stream_id(int64_t stream_id); -#endif /* NGTCP2_CONV_H */ +#endif /* !defined(NGTCP2_CONV_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_conversion.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_conversion.c deleted file mode 100644 index eb85687a068449..00000000000000 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_conversion.c +++ /dev/null @@ -1,66 +0,0 @@ -/* - * ngtcp2 - * - * Copyright (c) 2023 ngtcp2 contributors - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be - * included in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ -#include "ngtcp2_conversion.h" - -#include -#include - -static void transport_params_copy(int transport_params_version, - ngtcp2_transport_params *dest, - const ngtcp2_transport_params *src) { - assert(transport_params_version != NGTCP2_TRANSPORT_PARAMS_VERSION); - - switch (transport_params_version) { - case NGTCP2_TRANSPORT_PARAMS_V1: - memcpy(dest, src, - offsetof(ngtcp2_transport_params, version_info_present) + - sizeof(src->version_info_present)); - - break; - } -} - -const ngtcp2_transport_params * -ngtcp2_transport_params_convert_to_latest(ngtcp2_transport_params *dest, - int transport_params_version, - const ngtcp2_transport_params *src) { - if (transport_params_version == NGTCP2_TRANSPORT_PARAMS_VERSION) { - return src; - } - - ngtcp2_transport_params_default(dest); - - transport_params_copy(transport_params_version, dest, src); - - return dest; -} - -void ngtcp2_transport_params_convert_to_old( - int transport_params_version, ngtcp2_transport_params *dest, - const ngtcp2_transport_params *src) { - assert(transport_params_version != NGTCP2_TRANSPORT_PARAMS_VERSION); - - transport_params_copy(transport_params_version, dest, src); -} diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_crypto.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_crypto.c index 0a3ecf6a2440cb..1f74e8c58397b1 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_crypto.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_crypto.c @@ -27,11 +27,7 @@ #include #include -#include "ngtcp2_str.h" -#include "ngtcp2_conv.h" -#include "ngtcp2_conn.h" #include "ngtcp2_net.h" -#include "ngtcp2_conversion.h" int ngtcp2_crypto_km_new(ngtcp2_crypto_km **pckm, const uint8_t *secret, size_t secretlen, @@ -46,9 +42,11 @@ int ngtcp2_crypto_km_new(ngtcp2_crypto_km **pckm, const uint8_t *secret, if (secretlen) { memcpy((*pckm)->secret.base, secret, secretlen); } + if (aead_ctx) { (*pckm)->aead_ctx = *aead_ctx; } + memcpy((*pckm)->iv.base, iv, ivlen); return 0; @@ -85,825 +83,30 @@ void ngtcp2_crypto_km_del(ngtcp2_crypto_km *ckm, const ngtcp2_mem *mem) { return; } + if 
(ckm->secret.len) { +#ifdef WIN32 + SecureZeroMemory(ckm->secret.base, ckm->secret.len); +#elif defined(HAVE_EXPLICIT_BZERO) + explicit_bzero(ckm->secret.base, ckm->secret.len); +#elif defined(HAVE_MEMSET_S) + memset_s(ckm->secret.base, ckm->secret.len, 0, ckm->secret.len); +#endif /* defined(HAVE_MEMSET_S) */ + } + ngtcp2_mem_free(mem, ckm); } void ngtcp2_crypto_create_nonce(uint8_t *dest, const uint8_t *iv, size_t ivlen, int64_t pkt_num) { - size_t i; uint64_t n; - assert(ivlen >= 8); + assert(ivlen >= sizeof(n)); memcpy(dest, iv, ivlen); - n = ngtcp2_htonl64((uint64_t)pkt_num); - - for (i = 0; i < 8; ++i) { - dest[ivlen - 8 + i] ^= ((uint8_t *)&n)[i]; - } -} - -/* - * varint_paramlen returns the length of a single transport parameter - * which has variable integer in its parameter. - */ -static size_t varint_paramlen(ngtcp2_transport_param_id id, uint64_t param) { - size_t valuelen = ngtcp2_put_uvarintlen(param); - return ngtcp2_put_uvarintlen(id) + ngtcp2_put_uvarintlen(valuelen) + valuelen; -} - -/* - * write_varint_param writes parameter |id| of the given |value| in - * varint encoding. It returns p + the number of bytes written. - */ -static uint8_t *write_varint_param(uint8_t *p, ngtcp2_transport_param_id id, - uint64_t value) { - p = ngtcp2_put_uvarint(p, id); - p = ngtcp2_put_uvarint(p, ngtcp2_put_uvarintlen(value)); - return ngtcp2_put_uvarint(p, value); -} - -/* - * zero_paramlen returns the length of a single transport parameter - * which has zero length value in its parameter. - */ -static size_t zero_paramlen(ngtcp2_transport_param_id id) { - return ngtcp2_put_uvarintlen(id) + 1; -} - -/* - * write_zero_param writes parameter |id| that has zero length value. - * It returns p + the number of bytes written. - */ -static uint8_t *write_zero_param(uint8_t *p, ngtcp2_transport_param_id id) { - p = ngtcp2_put_uvarint(p, id); - *p++ = 0; - - return p; -} - -/* - * cid_paramlen returns the length of a single transport parameter - * which has |cid| as value. - */ -static size_t cid_paramlen(ngtcp2_transport_param_id id, - const ngtcp2_cid *cid) { - return ngtcp2_put_uvarintlen(id) + ngtcp2_put_uvarintlen(cid->datalen) + - cid->datalen; -} - -/* - * write_cid_param writes parameter |id| of the given |cid|. It - * returns p + the number of bytes written. - */ -static uint8_t *write_cid_param(uint8_t *p, ngtcp2_transport_param_id id, - const ngtcp2_cid *cid) { - assert(cid->datalen == 0 || cid->datalen >= NGTCP2_MIN_CIDLEN); - assert(cid->datalen <= NGTCP2_MAX_CIDLEN); - - p = ngtcp2_put_uvarint(p, id); - p = ngtcp2_put_uvarint(p, cid->datalen); - if (cid->datalen) { - p = ngtcp2_cpymem(p, cid->data, cid->datalen); - } - return p; -} - -static const uint8_t empty_address[16]; - -ngtcp2_ssize ngtcp2_transport_params_encode_versioned( - uint8_t *dest, size_t destlen, int transport_params_version, - const ngtcp2_transport_params *params) { - uint8_t *p; - size_t len = 0; - /* For some reason, gcc 7.3.0 requires this initialization. 
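The nonce loop deleted above survives in a faster form further down in this file: a single 8-byte load, XOR, and store instead of a byte-wise loop. Both compute the AEAD nonce of RFC 9001, Section 5.3, that is, the static IV with the big-endian packet number XORed into its trailing 8 bytes. A self-contained sketch of that computation, kept portable with shifts instead of an endian helper:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

static void make_nonce(uint8_t *dest, const uint8_t *iv, size_t ivlen,
                       int64_t pkt_num) {
  size_t i;
  memcpy(dest, iv, ivlen);  /* start from the static IV */
  for (i = 0; i < 8; ++i) { /* XOR big-endian pkt_num into the last 8 bytes */
    dest[ivlen - 8 + i] ^= (uint8_t)((uint64_t)pkt_num >> (8 * (7 - i)));
  }
}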
*/ - size_t preferred_addrlen = 0; - size_t version_infolen = 0; - const ngtcp2_sockaddr_in *sa_in; - const ngtcp2_sockaddr_in6 *sa_in6; - ngtcp2_transport_params paramsbuf; - - params = ngtcp2_transport_params_convert_to_latest( - ¶msbuf, transport_params_version, params); - - if (params->original_dcid_present) { - len += - cid_paramlen(NGTCP2_TRANSPORT_PARAM_ORIGINAL_DESTINATION_CONNECTION_ID, - ¶ms->original_dcid); - } - - if (params->stateless_reset_token_present) { - len += ngtcp2_put_uvarintlen(NGTCP2_TRANSPORT_PARAM_STATELESS_RESET_TOKEN) + - ngtcp2_put_uvarintlen(NGTCP2_STATELESS_RESET_TOKENLEN) + - NGTCP2_STATELESS_RESET_TOKENLEN; - } - - if (params->preferred_addr_present) { - assert(params->preferred_addr.cid.datalen >= NGTCP2_MIN_CIDLEN); - assert(params->preferred_addr.cid.datalen <= NGTCP2_MAX_CIDLEN); - preferred_addrlen = 4 /* ipv4Address */ + 2 /* ipv4Port */ + - 16 /* ipv6Address */ + 2 /* ipv6Port */ - + 1 + params->preferred_addr.cid.datalen /* CID */ + - NGTCP2_STATELESS_RESET_TOKENLEN; - len += ngtcp2_put_uvarintlen(NGTCP2_TRANSPORT_PARAM_PREFERRED_ADDRESS) + - ngtcp2_put_uvarintlen(preferred_addrlen) + preferred_addrlen; - } - if (params->retry_scid_present) { - len += cid_paramlen(NGTCP2_TRANSPORT_PARAM_RETRY_SOURCE_CONNECTION_ID, - ¶ms->retry_scid); - } - - if (params->initial_scid_present) { - len += cid_paramlen(NGTCP2_TRANSPORT_PARAM_INITIAL_SOURCE_CONNECTION_ID, - ¶ms->initial_scid); - } - - if (params->initial_max_stream_data_bidi_local) { - len += varint_paramlen( - NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAM_DATA_BIDI_LOCAL, - params->initial_max_stream_data_bidi_local); - } - if (params->initial_max_stream_data_bidi_remote) { - len += varint_paramlen( - NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAM_DATA_BIDI_REMOTE, - params->initial_max_stream_data_bidi_remote); - } - if (params->initial_max_stream_data_uni) { - len += varint_paramlen(NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAM_DATA_UNI, - params->initial_max_stream_data_uni); - } - if (params->initial_max_data) { - len += varint_paramlen(NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_DATA, - params->initial_max_data); - } - if (params->initial_max_streams_bidi) { - len += varint_paramlen(NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAMS_BIDI, - params->initial_max_streams_bidi); - } - if (params->initial_max_streams_uni) { - len += varint_paramlen(NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAMS_UNI, - params->initial_max_streams_uni); - } - if (params->max_udp_payload_size != - NGTCP2_DEFAULT_MAX_RECV_UDP_PAYLOAD_SIZE) { - len += varint_paramlen(NGTCP2_TRANSPORT_PARAM_MAX_UDP_PAYLOAD_SIZE, - params->max_udp_payload_size); - } - if (params->ack_delay_exponent != NGTCP2_DEFAULT_ACK_DELAY_EXPONENT) { - len += varint_paramlen(NGTCP2_TRANSPORT_PARAM_ACK_DELAY_EXPONENT, - params->ack_delay_exponent); - } - if (params->disable_active_migration) { - len += zero_paramlen(NGTCP2_TRANSPORT_PARAM_DISABLE_ACTIVE_MIGRATION); - } - if (params->max_ack_delay != NGTCP2_DEFAULT_MAX_ACK_DELAY) { - len += varint_paramlen(NGTCP2_TRANSPORT_PARAM_MAX_ACK_DELAY, - params->max_ack_delay / NGTCP2_MILLISECONDS); - } - if (params->max_idle_timeout) { - len += varint_paramlen(NGTCP2_TRANSPORT_PARAM_MAX_IDLE_TIMEOUT, - params->max_idle_timeout / NGTCP2_MILLISECONDS); - } - if (params->active_connection_id_limit && - params->active_connection_id_limit != - NGTCP2_DEFAULT_ACTIVE_CONNECTION_ID_LIMIT) { - len += varint_paramlen(NGTCP2_TRANSPORT_PARAM_ACTIVE_CONNECTION_ID_LIMIT, - params->active_connection_id_limit); - } - if (params->max_datagram_frame_size) { - len += 
varint_paramlen(NGTCP2_TRANSPORT_PARAM_MAX_DATAGRAM_FRAME_SIZE, - params->max_datagram_frame_size); - } - if (params->grease_quic_bit) { - len += zero_paramlen(NGTCP2_TRANSPORT_PARAM_GREASE_QUIC_BIT); - } - if (params->version_info_present) { - version_infolen = - sizeof(uint32_t) + params->version_info.available_versionslen; - len += ngtcp2_put_uvarintlen(NGTCP2_TRANSPORT_PARAM_VERSION_INFORMATION) + - ngtcp2_put_uvarintlen(version_infolen) + version_infolen; - } - - if (dest == NULL && destlen == 0) { - return (ngtcp2_ssize)len; - } - - if (destlen < len) { - return NGTCP2_ERR_NOBUF; - } - - p = dest; - - if (params->original_dcid_present) { - p = write_cid_param( - p, NGTCP2_TRANSPORT_PARAM_ORIGINAL_DESTINATION_CONNECTION_ID, - ¶ms->original_dcid); - } - - if (params->stateless_reset_token_present) { - p = ngtcp2_put_uvarint(p, NGTCP2_TRANSPORT_PARAM_STATELESS_RESET_TOKEN); - p = ngtcp2_put_uvarint(p, sizeof(params->stateless_reset_token)); - p = ngtcp2_cpymem(p, params->stateless_reset_token, - sizeof(params->stateless_reset_token)); - } - - if (params->preferred_addr_present) { - p = ngtcp2_put_uvarint(p, NGTCP2_TRANSPORT_PARAM_PREFERRED_ADDRESS); - p = ngtcp2_put_uvarint(p, preferred_addrlen); - - if (params->preferred_addr.ipv4_present) { - sa_in = ¶ms->preferred_addr.ipv4; - p = ngtcp2_cpymem(p, &sa_in->sin_addr, sizeof(sa_in->sin_addr)); - p = ngtcp2_put_uint16(p, sa_in->sin_port); - } else { - p = ngtcp2_cpymem(p, empty_address, sizeof(sa_in->sin_addr)); - p = ngtcp2_put_uint16(p, 0); - } - - if (params->preferred_addr.ipv6_present) { - sa_in6 = ¶ms->preferred_addr.ipv6; - p = ngtcp2_cpymem(p, &sa_in6->sin6_addr, sizeof(sa_in6->sin6_addr)); - p = ngtcp2_put_uint16(p, sa_in6->sin6_port); - } else { - p = ngtcp2_cpymem(p, empty_address, sizeof(sa_in6->sin6_addr)); - p = ngtcp2_put_uint16(p, 0); - } - - *p++ = (uint8_t)params->preferred_addr.cid.datalen; - if (params->preferred_addr.cid.datalen) { - p = ngtcp2_cpymem(p, params->preferred_addr.cid.data, - params->preferred_addr.cid.datalen); - } - p = ngtcp2_cpymem(p, params->preferred_addr.stateless_reset_token, - sizeof(params->preferred_addr.stateless_reset_token)); - } - - if (params->retry_scid_present) { - p = write_cid_param(p, NGTCP2_TRANSPORT_PARAM_RETRY_SOURCE_CONNECTION_ID, - ¶ms->retry_scid); - } - - if (params->initial_scid_present) { - p = write_cid_param(p, NGTCP2_TRANSPORT_PARAM_INITIAL_SOURCE_CONNECTION_ID, - ¶ms->initial_scid); - } - - if (params->initial_max_stream_data_bidi_local) { - p = write_varint_param( - p, NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAM_DATA_BIDI_LOCAL, - params->initial_max_stream_data_bidi_local); - } - - if (params->initial_max_stream_data_bidi_remote) { - p = write_varint_param( - p, NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAM_DATA_BIDI_REMOTE, - params->initial_max_stream_data_bidi_remote); - } - - if (params->initial_max_stream_data_uni) { - p = write_varint_param(p, - NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAM_DATA_UNI, - params->initial_max_stream_data_uni); - } - - if (params->initial_max_data) { - p = write_varint_param(p, NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_DATA, - params->initial_max_data); - } - - if (params->initial_max_streams_bidi) { - p = write_varint_param(p, NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAMS_BIDI, - params->initial_max_streams_bidi); - } - - if (params->initial_max_streams_uni) { - p = write_varint_param(p, NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAMS_UNI, - params->initial_max_streams_uni); - } - - if (params->max_udp_payload_size != - 
NGTCP2_DEFAULT_MAX_RECV_UDP_PAYLOAD_SIZE) { - p = write_varint_param(p, NGTCP2_TRANSPORT_PARAM_MAX_UDP_PAYLOAD_SIZE, - params->max_udp_payload_size); - } - - if (params->ack_delay_exponent != NGTCP2_DEFAULT_ACK_DELAY_EXPONENT) { - p = write_varint_param(p, NGTCP2_TRANSPORT_PARAM_ACK_DELAY_EXPONENT, - params->ack_delay_exponent); - } - - if (params->disable_active_migration) { - p = write_zero_param(p, NGTCP2_TRANSPORT_PARAM_DISABLE_ACTIVE_MIGRATION); - } - - if (params->max_ack_delay != NGTCP2_DEFAULT_MAX_ACK_DELAY) { - p = write_varint_param(p, NGTCP2_TRANSPORT_PARAM_MAX_ACK_DELAY, - params->max_ack_delay / NGTCP2_MILLISECONDS); - } - - if (params->max_idle_timeout) { - p = write_varint_param(p, NGTCP2_TRANSPORT_PARAM_MAX_IDLE_TIMEOUT, - params->max_idle_timeout / NGTCP2_MILLISECONDS); - } - - if (params->active_connection_id_limit && - params->active_connection_id_limit != - NGTCP2_DEFAULT_ACTIVE_CONNECTION_ID_LIMIT) { - p = write_varint_param(p, NGTCP2_TRANSPORT_PARAM_ACTIVE_CONNECTION_ID_LIMIT, - params->active_connection_id_limit); - } - - if (params->max_datagram_frame_size) { - p = write_varint_param(p, NGTCP2_TRANSPORT_PARAM_MAX_DATAGRAM_FRAME_SIZE, - params->max_datagram_frame_size); - } - - if (params->grease_quic_bit) { - p = write_zero_param(p, NGTCP2_TRANSPORT_PARAM_GREASE_QUIC_BIT); - } - - if (params->version_info_present) { - p = ngtcp2_put_uvarint(p, NGTCP2_TRANSPORT_PARAM_VERSION_INFORMATION); - p = ngtcp2_put_uvarint(p, version_infolen); - p = ngtcp2_put_uint32be(p, params->version_info.chosen_version); - if (params->version_info.available_versionslen) { - p = ngtcp2_cpymem(p, params->version_info.available_versions, - params->version_info.available_versionslen); - } - } - - assert((size_t)(p - dest) == len); - - return (ngtcp2_ssize)len; -} - -/* - * decode_varint decodes a single varint from the buffer pointed by - * |*pp| of length |end - *pp|. If it decodes an integer - * successfully, it stores the integer in |*pdest|, increment |*pp| by - * the number of bytes read from |*pp|, and returns 0. Otherwise it - * returns -1. - */ -static int decode_varint(uint64_t *pdest, const uint8_t **pp, - const uint8_t *end) { - const uint8_t *p = *pp; - size_t len; - - if (p == end) { - return -1; - } - - len = ngtcp2_get_uvarintlen(p); - if ((uint64_t)(end - p) < len) { - return -1; - } - - *pp = ngtcp2_get_uvarint(pdest, p); - - return 0; -} - -/* - * decode_varint_param decodes length prefixed value from the buffer - * pointed by |*pp| of length |end - *pp|. The length and value are - * encoded in varint form. If it decodes a value successfully, it - * stores the value in |*pdest|, increment |*pp| by the number of - * bytes read from |*pp|, and returns 0. Otherwise it returns -1. - */ -static int decode_varint_param(uint64_t *pdest, const uint8_t **pp, - const uint8_t *end) { - const uint8_t *p = *pp; - uint64_t valuelen; - - if (decode_varint(&valuelen, &p, end) != 0) { - return -1; - } - - if (p == end) { - return -1; - } - - if ((uint64_t)(end - p) < valuelen) { - return -1; - } - - if (ngtcp2_get_uvarintlen(p) != valuelen) { - return -1; - } - - *pp = ngtcp2_get_uvarint(pdest, p); - - return 0; -} - -/* - * decode_zero_param decodes zero length value from the buffer pointed - * by |*pp| of length |end - *pp|. The length is encoded in varint - * form. If it decodes zero length value successfully, it increments - * |*pp| by 1, and returns 0. Otherwise it returns -1. 
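The decode helpers being removed here mirror the wire format of RFC 9000, Section 18: each transport parameter is an (id, length, value) triplet with every field a varint. One worked encoding, for orientation:

/* initial_max_data (id 0x04) carrying 100000:
     id     -> 0x04                    (1-byte varint)
     value  -> 100000 = 0x000186a0, which needs the 4-byte varint form
               (prefix 10): bytes 80 01 86 a0
     length -> 4 -> 0x04
   wire bytes: 04 04 80 01 86 a0 */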
- */ -static int decode_zero_param(const uint8_t **pp, const uint8_t *end) { - if (*pp == end || **pp != 0) { - return -1; - } - - ++*pp; - - return 0; -} - -/* - * decode_cid_param decodes length prefixed ngtcp2_cid from the buffer - * pointed by |*pp| of length |end - *pp|. The length is encoded in - * varint form. If it decodes a value successfully, it stores the - * value in |*pdest|, increment |*pp| by the number of read from - * |*pp|, and returns the number of bytes read. Otherwise it returns - * the one of the negative error code: - * - * NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM - * Could not decode Connection ID. - */ -static int decode_cid_param(ngtcp2_cid *pdest, const uint8_t **pp, - const uint8_t *end) { - const uint8_t *p = *pp; - uint64_t valuelen; - - if (decode_varint(&valuelen, &p, end) != 0) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - - if ((valuelen != 0 && valuelen < NGTCP2_MIN_CIDLEN) || - valuelen > NGTCP2_MAX_CIDLEN || (size_t)(end - p) < valuelen) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - - ngtcp2_cid_init(pdest, p, (size_t)valuelen); - - p += valuelen; - - *pp = p; - - return 0; -} - -int ngtcp2_transport_params_decode_versioned(int transport_params_version, - ngtcp2_transport_params *dest, - const uint8_t *data, - size_t datalen) { - const uint8_t *p, *end, *lend; - size_t len; - uint64_t param_type; - uint64_t valuelen; - int rv; - ngtcp2_sockaddr_in *sa_in; - ngtcp2_sockaddr_in6 *sa_in6; - uint32_t version; - ngtcp2_transport_params *params, paramsbuf; - if (transport_params_version == NGTCP2_TRANSPORT_PARAMS_VERSION) { - params = dest; - } else { - params = ¶msbuf; - } - - /* Set default values */ - memset(params, 0, sizeof(*params)); - params->original_dcid_present = 0; - params->initial_scid_present = 0; - params->initial_max_streams_bidi = 0; - params->initial_max_streams_uni = 0; - params->initial_max_stream_data_bidi_local = 0; - params->initial_max_stream_data_bidi_remote = 0; - params->initial_max_stream_data_uni = 0; - params->max_udp_payload_size = NGTCP2_DEFAULT_MAX_RECV_UDP_PAYLOAD_SIZE; - params->ack_delay_exponent = NGTCP2_DEFAULT_ACK_DELAY_EXPONENT; - params->stateless_reset_token_present = 0; - params->preferred_addr_present = 0; - params->disable_active_migration = 0; - params->max_ack_delay = NGTCP2_DEFAULT_MAX_ACK_DELAY; - params->max_idle_timeout = 0; - params->active_connection_id_limit = - NGTCP2_DEFAULT_ACTIVE_CONNECTION_ID_LIMIT; - params->retry_scid_present = 0; - params->max_datagram_frame_size = 0; - memset(¶ms->retry_scid, 0, sizeof(params->retry_scid)); - memset(¶ms->initial_scid, 0, sizeof(params->initial_scid)); - memset(¶ms->original_dcid, 0, sizeof(params->original_dcid)); - params->version_info_present = 0; - - p = data; - end = data + datalen; - - for (; (size_t)(end - p) >= 2;) { - if (decode_varint(¶m_type, &p, end) != 0) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - - switch (param_type) { - case NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAM_DATA_BIDI_LOCAL: - if (decode_varint_param(¶ms->initial_max_stream_data_bidi_local, &p, - end) != 0) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - break; - case NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAM_DATA_BIDI_REMOTE: - if (decode_varint_param(¶ms->initial_max_stream_data_bidi_remote, &p, - end) != 0) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - break; - case NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAM_DATA_UNI: - if (decode_varint_param(¶ms->initial_max_stream_data_uni, &p, end) != - 0) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - 
break; - case NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_DATA: - if (decode_varint_param(¶ms->initial_max_data, &p, end) != 0) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - break; - case NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAMS_BIDI: - if (decode_varint_param(¶ms->initial_max_streams_bidi, &p, end) != - 0) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - if (params->initial_max_streams_bidi > NGTCP2_MAX_STREAMS) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - break; - case NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAMS_UNI: - if (decode_varint_param(¶ms->initial_max_streams_uni, &p, end) != 0) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - if (params->initial_max_streams_uni > NGTCP2_MAX_STREAMS) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - break; - case NGTCP2_TRANSPORT_PARAM_MAX_IDLE_TIMEOUT: - if (decode_varint_param(¶ms->max_idle_timeout, &p, end) != 0) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - params->max_idle_timeout *= NGTCP2_MILLISECONDS; - break; - case NGTCP2_TRANSPORT_PARAM_MAX_UDP_PAYLOAD_SIZE: - if (decode_varint_param(¶ms->max_udp_payload_size, &p, end) != 0) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - break; - case NGTCP2_TRANSPORT_PARAM_STATELESS_RESET_TOKEN: - if (decode_varint(&valuelen, &p, end) != 0) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - if ((size_t)valuelen != sizeof(params->stateless_reset_token)) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - if ((size_t)(end - p) < sizeof(params->stateless_reset_token)) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - - p = ngtcp2_get_bytes(params->stateless_reset_token, p, - sizeof(params->stateless_reset_token)); - params->stateless_reset_token_present = 1; - - break; - case NGTCP2_TRANSPORT_PARAM_ACK_DELAY_EXPONENT: - if (decode_varint_param(¶ms->ack_delay_exponent, &p, end) != 0) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - if (params->ack_delay_exponent > 20) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - break; - case NGTCP2_TRANSPORT_PARAM_PREFERRED_ADDRESS: - if (decode_varint(&valuelen, &p, end) != 0) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - if ((size_t)(end - p) < valuelen) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - len = 4 /* ipv4Address */ + 2 /* ipv4Port */ + 16 /* ipv6Address */ + - 2 /* ipv6Port */ - + 1 /* cid length */ + NGTCP2_STATELESS_RESET_TOKENLEN; - if (valuelen < len) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - - sa_in = ¶ms->preferred_addr.ipv4; - - p = ngtcp2_get_bytes(&sa_in->sin_addr, p, sizeof(sa_in->sin_addr)); - p = ngtcp2_get_uint16be(&sa_in->sin_port, p); - - if (sa_in->sin_port || memcmp(empty_address, &sa_in->sin_addr, - sizeof(sa_in->sin_addr)) != 0) { - sa_in->sin_family = NGTCP2_AF_INET; - params->preferred_addr.ipv4_present = 1; - } - - sa_in6 = ¶ms->preferred_addr.ipv6; - - p = ngtcp2_get_bytes(&sa_in6->sin6_addr, p, sizeof(sa_in6->sin6_addr)); - p = ngtcp2_get_uint16be(&sa_in6->sin6_port, p); - - if (sa_in6->sin6_port || memcmp(empty_address, &sa_in6->sin6_addr, - sizeof(sa_in6->sin6_addr)) != 0) { - sa_in6->sin6_family = NGTCP2_AF_INET6; - params->preferred_addr.ipv6_present = 1; - } - - /* cid */ - params->preferred_addr.cid.datalen = *p++; - len += params->preferred_addr.cid.datalen; - if (valuelen != len || - params->preferred_addr.cid.datalen > NGTCP2_MAX_CIDLEN || - params->preferred_addr.cid.datalen < NGTCP2_MIN_CIDLEN) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - if (params->preferred_addr.cid.datalen) { - p = 
ngtcp2_get_bytes(params->preferred_addr.cid.data, p, - params->preferred_addr.cid.datalen); - } - - /* stateless reset token */ - p = ngtcp2_get_bytes( - params->preferred_addr.stateless_reset_token, p, - sizeof(params->preferred_addr.stateless_reset_token)); - params->preferred_addr_present = 1; - break; - case NGTCP2_TRANSPORT_PARAM_DISABLE_ACTIVE_MIGRATION: - if (decode_zero_param(&p, end) != 0) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - params->disable_active_migration = 1; - break; - case NGTCP2_TRANSPORT_PARAM_ORIGINAL_DESTINATION_CONNECTION_ID: - rv = decode_cid_param(¶ms->original_dcid, &p, end); - if (rv != 0) { - return rv; - } - params->original_dcid_present = 1; - break; - case NGTCP2_TRANSPORT_PARAM_RETRY_SOURCE_CONNECTION_ID: - rv = decode_cid_param(¶ms->retry_scid, &p, end); - if (rv != 0) { - return rv; - } - params->retry_scid_present = 1; - break; - case NGTCP2_TRANSPORT_PARAM_INITIAL_SOURCE_CONNECTION_ID: - rv = decode_cid_param(¶ms->initial_scid, &p, end); - if (rv != 0) { - return rv; - } - params->initial_scid_present = 1; - break; - case NGTCP2_TRANSPORT_PARAM_MAX_ACK_DELAY: - if (decode_varint_param(¶ms->max_ack_delay, &p, end) != 0) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - if (params->max_ack_delay >= 16384) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - params->max_ack_delay *= NGTCP2_MILLISECONDS; - break; - case NGTCP2_TRANSPORT_PARAM_ACTIVE_CONNECTION_ID_LIMIT: - if (decode_varint_param(¶ms->active_connection_id_limit, &p, end) != - 0) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - break; - case NGTCP2_TRANSPORT_PARAM_MAX_DATAGRAM_FRAME_SIZE: - if (decode_varint_param(¶ms->max_datagram_frame_size, &p, end) != 0) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - break; - case NGTCP2_TRANSPORT_PARAM_GREASE_QUIC_BIT: - if (decode_zero_param(&p, end) != 0) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - params->grease_quic_bit = 1; - break; - case NGTCP2_TRANSPORT_PARAM_VERSION_INFORMATION: - if (decode_varint(&valuelen, &p, end) != 0) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - if ((size_t)(end - p) < valuelen) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - if (valuelen < sizeof(uint32_t) || (valuelen & 0x3)) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - p = ngtcp2_get_uint32(¶ms->version_info.chosen_version, p); - if (params->version_info.chosen_version == 0) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - if (valuelen > sizeof(uint32_t)) { - params->version_info.available_versions = (uint8_t *)p; - params->version_info.available_versionslen = - (size_t)valuelen - sizeof(uint32_t); - - for (lend = p + (valuelen - sizeof(uint32_t)); p != lend;) { - p = ngtcp2_get_uint32(&version, p); - if (version == 0) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - } - } - params->version_info_present = 1; - break; - default: - /* Ignore unknown parameter */ - if (decode_varint(&valuelen, &p, end) != 0) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - if ((size_t)(end - p) < valuelen) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - p += valuelen; - break; - } - } - - if (end - p != 0) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - - if (transport_params_version != NGTCP2_TRANSPORT_PARAMS_VERSION) { - ngtcp2_transport_params_convert_to_old(transport_params_version, dest, - params); - } - - return 0; -} - -static int transport_params_copy_new(ngtcp2_transport_params **pdest, - const ngtcp2_transport_params *src, - const ngtcp2_mem *mem) { - size_t len = 
sizeof(**pdest); - ngtcp2_transport_params *dest; - uint8_t *p; - - if (src->version_info_present) { - len += src->version_info.available_versionslen; - } - - dest = ngtcp2_mem_malloc(mem, len); - if (dest == NULL) { - return NGTCP2_ERR_NOMEM; - } - - *dest = *src; - - if (src->version_info_present && src->version_info.available_versionslen) { - p = (uint8_t *)dest + sizeof(*dest); - memcpy(p, src->version_info.available_versions, - src->version_info.available_versionslen); - dest->version_info.available_versions = p; - } - - *pdest = dest; - - return 0; -} - -int ngtcp2_transport_params_decode_new(ngtcp2_transport_params **pparams, - const uint8_t *data, size_t datalen, - const ngtcp2_mem *mem) { - int rv; - ngtcp2_transport_params params; - - rv = ngtcp2_transport_params_decode(¶ms, data, datalen); - if (rv < 0) { - return rv; - } - - if (mem == NULL) { - mem = ngtcp2_mem_default(); - } - - return transport_params_copy_new(pparams, ¶ms, mem); -} - -void ngtcp2_transport_params_del(ngtcp2_transport_params *params, - const ngtcp2_mem *mem) { - if (params == NULL) { - return; - } - - if (mem == NULL) { - mem = ngtcp2_mem_default(); - } - - ngtcp2_mem_free(mem, params); -} - -int ngtcp2_transport_params_copy_new(ngtcp2_transport_params **pdest, - const ngtcp2_transport_params *src, - const ngtcp2_mem *mem) { - if (src == NULL) { - *pdest = NULL; - return 0; - } + dest += ivlen - sizeof(n); - return transport_params_copy_new(pdest, src, mem); + memcpy(&n, dest, sizeof(n)); + n ^= ngtcp2_htonl64((uint64_t)pkt_num); + memcpy(dest, &n, sizeof(n)); } diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_crypto.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_crypto.h index b78429bb38f582..ca6d494846f324 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_crypto.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_crypto.h @@ -27,7 +27,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -38,36 +38,9 @@ bytes. */ #define NGTCP2_INITIAL_AEAD_OVERHEAD 16 -/* NGTCP2_MAX_AEAD_OVERHEAD is expected maximum AEAD overhead. */ +/* NGTCP2_MAX_AEAD_OVERHEAD is the maximum AEAD overhead. */ #define NGTCP2_MAX_AEAD_OVERHEAD 16 -/* ngtcp2_transport_param_id is the registry of QUIC transport - parameter ID. 
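One more worked byte sequence, since the registry deleted below names the odd-looking grease_quic_bit ID: a zero-length parameter is just the varint-encoded ID followed by a zero length byte.

/* grease_quic_bit, id 0x2ab2 = 10930 < 2^14, takes the 2-byte varint
   form (prefix 01): 0x4000 | 0x2ab2 = 0x6ab2.
   wire bytes: 6a b2 00   (two id bytes, then value length 0) */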
*/ -typedef uint64_t ngtcp2_transport_param_id; - -#define NGTCP2_TRANSPORT_PARAM_ORIGINAL_DESTINATION_CONNECTION_ID 0x00 -#define NGTCP2_TRANSPORT_PARAM_MAX_IDLE_TIMEOUT 0x01 -#define NGTCP2_TRANSPORT_PARAM_STATELESS_RESET_TOKEN 0x02 -#define NGTCP2_TRANSPORT_PARAM_MAX_UDP_PAYLOAD_SIZE 0x03 -#define NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_DATA 0x04 -#define NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAM_DATA_BIDI_LOCAL 0x05 -#define NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAM_DATA_BIDI_REMOTE 0x06 -#define NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAM_DATA_UNI 0x07 -#define NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAMS_BIDI 0x08 -#define NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAMS_UNI 0x09 -#define NGTCP2_TRANSPORT_PARAM_ACK_DELAY_EXPONENT 0x0a -#define NGTCP2_TRANSPORT_PARAM_MAX_ACK_DELAY 0x0b -#define NGTCP2_TRANSPORT_PARAM_DISABLE_ACTIVE_MIGRATION 0x0c -#define NGTCP2_TRANSPORT_PARAM_PREFERRED_ADDRESS 0x0d -#define NGTCP2_TRANSPORT_PARAM_ACTIVE_CONNECTION_ID_LIMIT 0x0e -#define NGTCP2_TRANSPORT_PARAM_INITIAL_SOURCE_CONNECTION_ID 0x0f -#define NGTCP2_TRANSPORT_PARAM_RETRY_SOURCE_CONNECTION_ID 0x10 -/* https://datatracker.ietf.org/doc/html/rfc9221 */ -#define NGTCP2_TRANSPORT_PARAM_MAX_DATAGRAM_FRAME_SIZE 0x20 -#define NGTCP2_TRANSPORT_PARAM_GREASE_QUIC_BIT 0x2ab2 -/* https://datatracker.ietf.org/doc/html/rfc9368 */ -#define NGTCP2_TRANSPORT_PARAM_VERSION_INFORMATION 0x11 - /* NGTCP2_CRYPTO_KM_FLAG_NONE indicates that no flag is set. */ #define NGTCP2_CRYPTO_KM_FLAG_NONE 0x00u /* NGTCP2_CRYPTO_KM_FLAG_KEY_PHASE_ONE is set if key phase bit is @@ -83,8 +56,7 @@ typedef struct ngtcp2_crypto_km { a packet. For decryption key, it is the lowest packet number of a packet which can be decrypted with this keying material. */ int64_t pkt_num; - /* use_count is the number of encryption applied with this key. - This field is only used for tx key. */ + /* use_count is the number of encryption applied with this key. */ uint64_t use_count; /* flags is the bitwise OR of zero or more of NGTCP2_CRYPTO_KM_FLAG_*. */ @@ -92,12 +64,12 @@ typedef struct ngtcp2_crypto_km { } ngtcp2_crypto_km; /* - * ngtcp2_crypto_km_new creates new ngtcp2_crypto_km object and + * ngtcp2_crypto_km_new creates new ngtcp2_crypto_km object, and * assigns its pointer to |*pckm|. The |secret| of length - * |secretlen|, the |key| of length |keylen| and the |iv| of length - * |ivlen| are copied to |*pckm|. If |secretlen| == 0, the function - * assumes no secret is given which is acceptable. The sole reason to - * store secret is update keys. Only 1RTT key can be updated. + * |secretlen|, |aead_ctx|, and the |iv| of length |ivlen| are copied + * to |*pckm|. If |secretlen| == 0, the function assumes no secret is + * given which is acceptable. The sole reason to store secret is + * update keys. Only 1RTT key can be updated. */ int ngtcp2_crypto_km_new(ngtcp2_crypto_km **pckm, const uint8_t *secret, size_t secretlen, @@ -107,7 +79,7 @@ int ngtcp2_crypto_km_new(ngtcp2_crypto_km **pckm, const uint8_t *secret, /* * ngtcp2_crypto_km_nocopy_new is similar to ngtcp2_crypto_km_new, but - * it does not copy secret, key and IV. + * it does not copy secret, aead context, and IV. */ int ngtcp2_crypto_km_nocopy_new(ngtcp2_crypto_km **pckm, size_t secretlen, size_t ivlen, const ngtcp2_mem *mem); @@ -127,21 +99,4 @@ typedef struct ngtcp2_crypto_cc { void ngtcp2_crypto_create_nonce(uint8_t *dest, const uint8_t *iv, size_t ivlen, int64_t pkt_num); -/* - * ngtcp2_transport_params_copy_new makes a copy of |src|, and assigns - * it to |*pdest|. 
If |src| is NULL, NULL is assigned to |*pdest|. - * - * Caller is responsible to call ngtcp2_transport_params_del to free - * the memory assigned to |*pdest|. - * - * This function returns 0 if it succeeds, or one of the following - * negative error codes: - * - * NGTCP2_ERR_NOMEM - * Out of memory. - */ -int ngtcp2_transport_params_copy_new(ngtcp2_transport_params **pdest, - const ngtcp2_transport_params *src, - const ngtcp2_mem *mem); - -#endif /* NGTCP2_CRYPTO_H */ +#endif /* !defined(NGTCP2_CRYPTO_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_err.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_err.h index 9229f5425a63cf..44527b11bdec47 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_err.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_err.h @@ -27,8 +27,8 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include -#endif /* NGTCP2_ERR_H */ +#endif /* !defined(NGTCP2_ERR_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_frame_chain.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_frame_chain.c index 41c2a6a755cc8a..6a8a22c3f0d010 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_frame_chain.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_frame_chain.c @@ -27,7 +27,7 @@ #include #include -ngtcp2_objalloc_def(frame_chain, ngtcp2_frame_chain, oplent); +ngtcp2_objalloc_def(frame_chain, ngtcp2_frame_chain, oplent) int ngtcp2_frame_chain_new(ngtcp2_frame_chain **pfrc, const ngtcp2_mem *mem) { *pfrc = ngtcp2_mem_malloc(mem, sizeof(ngtcp2_frame_chain)); @@ -68,14 +68,11 @@ int ngtcp2_frame_chain_stream_datacnt_objalloc_new(ngtcp2_frame_chain **pfrc, size_t datacnt, ngtcp2_objalloc *objalloc, const ngtcp2_mem *mem) { - size_t need, avail = sizeof(ngtcp2_frame) - sizeof(ngtcp2_stream); - - if (datacnt > 1) { - need = sizeof(ngtcp2_vec) * (datacnt - 1); - - if (need > avail) { - return ngtcp2_frame_chain_extralen_new(pfrc, need - avail, mem); - } + if (datacnt > NGTCP2_FRAME_CHAIN_STREAM_DATACNT_THRES) { + return ngtcp2_frame_chain_extralen_new(pfrc, + sizeof(ngtcp2_vec) * (datacnt - 1) - + NGTCP2_FRAME_CHAIN_STREAM_AVAIL, + mem); } return ngtcp2_frame_chain_objalloc_new(pfrc, objalloc); @@ -139,9 +136,7 @@ void ngtcp2_frame_chain_objalloc_del(ngtcp2_frame_chain *frc, switch (frc->fr.type) { case NGTCP2_FRAME_CRYPTO: case NGTCP2_FRAME_STREAM: - if (frc->fr.stream.datacnt && - sizeof(ngtcp2_vec) * (frc->fr.stream.datacnt - 1) > - sizeof(ngtcp2_frame) - sizeof(ngtcp2_stream)) { + if (frc->fr.stream.datacnt > NGTCP2_FRAME_CHAIN_STREAM_DATACNT_THRES) { ngtcp2_frame_chain_del(frc, mem); return; diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_frame_chain.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_frame_chain.h index 656fa5b799450e..e5b6779c0f03c2 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_frame_chain.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_frame_chain.h @@ -27,7 +27,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -74,7 +74,7 @@ struct ngtcp2_frame_chain { }; }; -ngtcp2_objalloc_decl(frame_chain, ngtcp2_frame_chain, oplent); +ngtcp2_objalloc_decl(frame_chain, ngtcp2_frame_chain, oplent) /* * ngtcp2_bind_frame_chains binds two frame chains |a| and |b| using @@ -121,12 +121,29 @@ int ngtcp2_frame_chain_objalloc_new(ngtcp2_frame_chain **pfrc, int ngtcp2_frame_chain_extralen_new(ngtcp2_frame_chain **pfrc, size_t extralen, const ngtcp2_mem *mem); +/* NGTCP2_FRAME_CHAIN_STREAM_AVAIL is the number of additional bytes + available after ngtcp2_stream when it is embedded in + ngtcp2_frame. 
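The threshold macro introduced below replaces the old inline need/avail arithmetic. A numeric illustration; the sizes are assumptions for the sake of the arithmetic, not real ngtcp2 values:

/* Suppose AVAIL = sizeof(ngtcp2_frame) - sizeof(ngtcp2_stream) = 24
   and sizeof(ngtcp2_vec) = 16. Then THRES = 24 / 16 + 1 = 2:
   datacnt <= 2 fits the fixed objalloc slot (one vec inside
   ngtcp2_stream plus the 24 spare bytes), while datacnt == 3 takes the
   heap path with extralen = 16 * (3 - 1) - 24 = 8 extra bytes. */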
*/ +#define NGTCP2_FRAME_CHAIN_STREAM_AVAIL \ + (sizeof(ngtcp2_frame) - sizeof(ngtcp2_stream)) + +/* NGTCP2_FRAME_CHAIN_STREAM_DATACNT_THRES is the number of datacnt + that changes allocation method. If datacnt is more than this + value, ngtcp2_frame_chain is allocated without ngtcp2_objalloc. + Otherwise, it is allocated using ngtcp2_objalloc. */ +#define NGTCP2_FRAME_CHAIN_STREAM_DATACNT_THRES \ + (NGTCP2_FRAME_CHAIN_STREAM_AVAIL / sizeof(ngtcp2_vec) + 1) + /* * ngtcp2_frame_chain_stream_datacnt_objalloc_new works like * ngtcp2_frame_chain_new, but it allocates enough data to store * additional |datacnt| - 1 ngtcp2_vec object after ngtcp2_stream - * object. If no additional space is required, - * ngtcp2_frame_chain_objalloc_new is called internally. + * object. If no additional space is required, in other words, + * |datacnt| <= NGTCP2_FRAME_CHAIN_STREAM_DATACNT_THRES, + * ngtcp2_frame_chain_objalloc_new is called internally. Otherwise, + * ngtcp2_frame_chain_extralen_new is used and objalloc is not used. + * Therefore, it is important to call ngtcp2_frame_chain_objalloc_del + * without changing datacnt field. */ int ngtcp2_frame_chain_stream_datacnt_objalloc_new(ngtcp2_frame_chain **pfrc, size_t datacnt, @@ -168,4 +185,4 @@ void ngtcp2_frame_chain_list_objalloc_del(ngtcp2_frame_chain *frc, ngtcp2_objalloc *objalloc, const ngtcp2_mem *mem); -#endif /* NGTCP2_FRAME_CHAIN_H */ +#endif /* !defined(NGTCP2_FRAME_CHAIN_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_gaptr.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_gaptr.c index 87c23898e8207d..3bfa398480c382 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_gaptr.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_gaptr.c @@ -28,22 +28,16 @@ #include void ngtcp2_gaptr_init(ngtcp2_gaptr *gaptr, const ngtcp2_mem *mem) { - ngtcp2_ksl_init(&gaptr->gap, ngtcp2_ksl_range_compar, sizeof(ngtcp2_range), - mem); + ngtcp2_ksl_init(&gaptr->gap, ngtcp2_ksl_range_compar, ngtcp2_ksl_range_search, + sizeof(ngtcp2_range), mem); gaptr->mem = mem; } static int gaptr_gap_init(ngtcp2_gaptr *gaptr) { ngtcp2_range range = {0, UINT64_MAX}; - int rv; - - rv = ngtcp2_ksl_insert(&gaptr->gap, NULL, &range, NULL); - if (rv != 0) { - return rv; - } - return 0; + return ngtcp2_ksl_insert(&gaptr->gap, NULL, &range, NULL); } void ngtcp2_gaptr_free(ngtcp2_gaptr *gaptr) { @@ -66,8 +60,8 @@ int ngtcp2_gaptr_push(ngtcp2_gaptr *gaptr, uint64_t offset, uint64_t datalen) { } } - it = ngtcp2_ksl_lower_bound_compar(&gaptr->gap, &q, - ngtcp2_ksl_range_exclusive_compar); + it = ngtcp2_ksl_lower_bound_search(&gaptr->gap, &q, + ngtcp2_ksl_range_exclusive_search); for (; !ngtcp2_ksl_it_end(&it);) { k = *(ngtcp2_range *)ngtcp2_ksl_it_key(&it); @@ -80,7 +74,9 @@ int ngtcp2_gaptr_push(ngtcp2_gaptr *gaptr, uint64_t offset, uint64_t datalen) { ngtcp2_ksl_remove_hint(&gaptr->gap, &it, &it, &k); continue; } + ngtcp2_range_cut(&l, &r, &k, &m); + if (ngtcp2_range_len(&l)) { ngtcp2_ksl_update_key(&gaptr->gap, &k, &l); @@ -93,26 +89,26 @@ int ngtcp2_gaptr_push(ngtcp2_gaptr *gaptr, uint64_t offset, uint64_t datalen) { } else if (ngtcp2_range_len(&r)) { ngtcp2_ksl_update_key(&gaptr->gap, &k, &r); } + ngtcp2_ksl_it_next(&it); } + return 0; } -uint64_t ngtcp2_gaptr_first_gap_offset(ngtcp2_gaptr *gaptr) { +uint64_t ngtcp2_gaptr_first_gap_offset(const ngtcp2_gaptr *gaptr) { ngtcp2_ksl_it it; - ngtcp2_range r; if (ngtcp2_ksl_len(&gaptr->gap) == 0) { return 0; } it = ngtcp2_ksl_begin(&gaptr->gap); - r = *(ngtcp2_range *)ngtcp2_ksl_it_key(&it); - return r.begin; + return ((ngtcp2_range *)ngtcp2_ksl_it_key(&it))->begin; } -ngtcp2_range 
ngtcp2_gaptr_get_first_gap_after(ngtcp2_gaptr *gaptr, +ngtcp2_range ngtcp2_gaptr_get_first_gap_after(const ngtcp2_gaptr *gaptr, uint64_t offset) { ngtcp2_range q = {offset, offset + 1}; ngtcp2_ksl_it it; @@ -122,29 +118,27 @@ ngtcp2_range ngtcp2_gaptr_get_first_gap_after(ngtcp2_gaptr *gaptr, return r; } - it = ngtcp2_ksl_lower_bound_compar(&gaptr->gap, &q, - ngtcp2_ksl_range_exclusive_compar); + it = ngtcp2_ksl_lower_bound_search(&gaptr->gap, &q, + ngtcp2_ksl_range_exclusive_search); assert(!ngtcp2_ksl_it_end(&it)); return *(ngtcp2_range *)ngtcp2_ksl_it_key(&it); } -int ngtcp2_gaptr_is_pushed(ngtcp2_gaptr *gaptr, uint64_t offset, +int ngtcp2_gaptr_is_pushed(const ngtcp2_gaptr *gaptr, uint64_t offset, uint64_t datalen) { ngtcp2_range q = {offset, offset + datalen}; ngtcp2_ksl_it it; - ngtcp2_range k; ngtcp2_range m; if (ngtcp2_ksl_len(&gaptr->gap) == 0) { return 0; } - it = ngtcp2_ksl_lower_bound_compar(&gaptr->gap, &q, - ngtcp2_ksl_range_exclusive_compar); - k = *(ngtcp2_range *)ngtcp2_ksl_it_key(&it); - m = ngtcp2_range_intersect(&q, &k); + it = ngtcp2_ksl_lower_bound_search(&gaptr->gap, &q, + ngtcp2_ksl_range_exclusive_search); + m = ngtcp2_range_intersect(&q, (ngtcp2_range *)ngtcp2_ksl_it_key(&it)); return ngtcp2_range_len(&m) == 0; } diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_gaptr.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_gaptr.h index 0f100a81c4286c..3120676cf849d4 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_gaptr.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_gaptr.h @@ -27,7 +27,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -39,8 +39,9 @@ * ngtcp2_gaptr maintains the gap in the range [0, UINT64_MAX). */ typedef struct ngtcp2_gaptr { - /* gap maintains the range of offset which is not received - yet. Initially, its range is [0, UINT64_MAX). */ + /* gap maintains the range of offset which is not pushed + yet. Initially, its range is [0, UINT64_MAX). "gap" is the range + that is not pushed yet. */ ngtcp2_ksl gap; /* mem is custom memory allocator */ const ngtcp2_mem *mem; @@ -57,8 +58,7 @@ void ngtcp2_gaptr_init(ngtcp2_gaptr *gaptr, const ngtcp2_mem *mem); void ngtcp2_gaptr_free(ngtcp2_gaptr *gaptr); /* - * ngtcp2_gaptr_push adds new data of length |datalen| at the stream - * offset |offset|. + * ngtcp2_gaptr_push pushes the range [offset, offset + datalen). * * This function returns 0 if it succeeds, or one of the following * negative error codes: @@ -72,20 +72,20 @@ int ngtcp2_gaptr_push(ngtcp2_gaptr *gaptr, uint64_t offset, uint64_t datalen); * ngtcp2_gaptr_first_gap_offset returns the offset to the first gap. * If there is no gap, it returns UINT64_MAX. */ -uint64_t ngtcp2_gaptr_first_gap_offset(ngtcp2_gaptr *gaptr); +uint64_t ngtcp2_gaptr_first_gap_offset(const ngtcp2_gaptr *gaptr); /* * ngtcp2_gaptr_get_first_gap_after returns the first gap which - * overlaps or comes after |offset|. + * includes or comes after |offset|. */ -ngtcp2_range ngtcp2_gaptr_get_first_gap_after(ngtcp2_gaptr *gaptr, +ngtcp2_range ngtcp2_gaptr_get_first_gap_after(const ngtcp2_gaptr *gaptr, uint64_t offset); /* * ngtcp2_gaptr_is_pushed returns nonzero if range [offset, offset + * datalen) is completely pushed into this object. 
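Taken together, the gaptr API works by carving pushed ranges out of a single initial gap [0, UINT64_MAX). A short usage sketch of the semantics documented in this header, with error handling elided:

ngtcp2_gaptr gaptr;
ngtcp2_gaptr_init(&gaptr, ngtcp2_mem_default());
ngtcp2_gaptr_push(&gaptr, 0, 100);       /* [0, 100) received          */
ngtcp2_gaptr_push(&gaptr, 150, 50);      /* [150, 200) received        */
ngtcp2_gaptr_first_gap_offset(&gaptr);   /* == 100, hole at [100, 150) */
ngtcp2_gaptr_is_pushed(&gaptr, 150, 50); /* nonzero, fully covered     */
ngtcp2_gaptr_free(&gaptr);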
*/ -int ngtcp2_gaptr_is_pushed(ngtcp2_gaptr *gaptr, uint64_t offset, +int ngtcp2_gaptr_is_pushed(const ngtcp2_gaptr *gaptr, uint64_t offset, uint64_t datalen); /* @@ -95,4 +95,4 @@ int ngtcp2_gaptr_is_pushed(ngtcp2_gaptr *gaptr, uint64_t offset, */ void ngtcp2_gaptr_drop_first_gap(ngtcp2_gaptr *gaptr); -#endif /* NGTCP2_GAPTR_H */ +#endif /* !defined(NGTCP2_GAPTR_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_idtr.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_idtr.c index d9880227690faf..2cf9d3cbfd1393 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_idtr.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_idtr.c @@ -26,10 +26,8 @@ #include -void ngtcp2_idtr_init(ngtcp2_idtr *idtr, int server, const ngtcp2_mem *mem) { +void ngtcp2_idtr_init(ngtcp2_idtr *idtr, const ngtcp2_mem *mem) { ngtcp2_gaptr_init(&idtr->gap, mem); - - idtr->server = server; } void ngtcp2_idtr_free(ngtcp2_idtr *idtr) { @@ -41,8 +39,7 @@ void ngtcp2_idtr_free(ngtcp2_idtr *idtr) { } /* - * id_from_stream_id translates |stream_id| to id space used by - * ngtcp2_idtr. + * id_from_stream_id translates |stream_id| to an internal ID. */ static uint64_t id_from_stream_id(int64_t stream_id) { return (uint64_t)(stream_id >> 2); @@ -51,9 +48,6 @@ static uint64_t id_from_stream_id(int64_t stream_id) { int ngtcp2_idtr_open(ngtcp2_idtr *idtr, int64_t stream_id) { uint64_t q; - assert((idtr->server && (stream_id % 2)) || - (!idtr->server && (stream_id % 2)) == 0); - q = id_from_stream_id(stream_id); if (ngtcp2_gaptr_is_pushed(&idtr->gap, q, 1)) { @@ -63,17 +57,10 @@ int ngtcp2_idtr_open(ngtcp2_idtr *idtr, int64_t stream_id) { return ngtcp2_gaptr_push(&idtr->gap, q, 1); } -int ngtcp2_idtr_is_open(ngtcp2_idtr *idtr, int64_t stream_id) { +int ngtcp2_idtr_is_open(const ngtcp2_idtr *idtr, int64_t stream_id) { uint64_t q; - assert((idtr->server && (stream_id % 2)) || - (!idtr->server && (stream_id % 2)) == 0); - q = id_from_stream_id(stream_id); return ngtcp2_gaptr_is_pushed(&idtr->gap, q, 1); } - -uint64_t ngtcp2_idtr_first_gap(ngtcp2_idtr *idtr) { - return ngtcp2_gaptr_first_gap_offset(&idtr->gap); -} diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_idtr.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_idtr.h index edb8c68c8db9b5..0671f5ed91a5cd 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_idtr.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_idtr.h @@ -27,7 +27,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -38,21 +38,17 @@ * ngtcp2_idtr tracks the usage of stream ID. */ typedef struct ngtcp2_idtr { - /* gap maintains the range of ID which is not used yet. Initially, - its range is [0, UINT64_MAX). */ + /* gap maintains the range of an internal ID which is not used yet. + Initially, its range is [0, UINT64_MAX). The internal ID and + stream ID are in the different number spaces. See + id_from_stream_id to convert a stream ID to an internal ID. */ ngtcp2_gaptr gap; - /* server is nonzero if this object records server initiated stream - ID. */ - int server; } ngtcp2_idtr; /* * ngtcp2_idtr_init initializes |idtr|. - * - * If this object records server initiated ID (even number), set - * |server| to nonzero. */ -void ngtcp2_idtr_init(ngtcp2_idtr *idtr, int server, const ngtcp2_mem *mem); +void ngtcp2_idtr_init(ngtcp2_idtr *idtr, const ngtcp2_mem *mem); /* * ngtcp2_idtr_free frees resources allocated for |idtr|. @@ -60,30 +56,21 @@ void ngtcp2_idtr_init(ngtcp2_idtr *idtr, int server, const ngtcp2_mem *mem); void ngtcp2_idtr_free(ngtcp2_idtr *idtr); /* - * ngtcp2_idtr_open claims that |stream_id| is in used. 
+ * ngtcp2_idtr_open claims that |stream_id| is in use. * * It returns 0 if it succeeds, or one of the following negative error * codes: * * NGTCP2_ERR_STREAM_IN_USE - * ID has already been used. + * |stream_id| has already been used. * NGTCP2_ERR_NOMEM * Out of memory. */ int ngtcp2_idtr_open(ngtcp2_idtr *idtr, int64_t stream_id); /* - * ngtcp2_idtr_open tells whether ID |stream_id| is in used or not. - * - * It returns nonzero if |stream_id| is used. - */ -int ngtcp2_idtr_is_open(ngtcp2_idtr *idtr, int64_t stream_id); - -/* - * ngtcp2_idtr_first_gap returns the first id of first gap. If there - * is no gap, it returns UINT64_MAX. The returned id is an id space - * used in this object internally, and not stream ID. + * ngtcp2_idtr_open returns nonzero if |stream_id| is in use. */ -uint64_t ngtcp2_idtr_first_gap(ngtcp2_idtr *idtr); +int ngtcp2_idtr_is_open(const ngtcp2_idtr *idtr, int64_t stream_id); -#endif /* NGTCP2_IDTR_H */ +#endif /* !defined(NGTCP2_IDTR_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_ksl.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_ksl.c index 0ccc048b5b16b1..5e74f647241816 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_ksl.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_ksl.c @@ -35,11 +35,13 @@ static ngtcp2_ksl_blk null_blk = {{{NULL, NULL, 0, 0, {0}}}}; -ngtcp2_objalloc_def(ksl_blk, ngtcp2_ksl_blk, oplent); +ngtcp2_objalloc_def(ksl_blk, ngtcp2_ksl_blk, oplent) static size_t ksl_nodelen(size_t keylen) { - return (sizeof(ngtcp2_ksl_node) + keylen - sizeof(uint64_t) + 0xfu) & - ~(uintptr_t)0xfu; + assert(keylen >= sizeof(uint64_t)); + + return (sizeof(ngtcp2_ksl_node) + keylen - sizeof(uint64_t) + 0x7u) & + ~(uintptr_t)0x7u; } static size_t ksl_blklen(size_t nodelen) { @@ -55,20 +57,21 @@ static void ksl_node_set_key(ngtcp2_ksl *ksl, ngtcp2_ksl_node *node, memcpy(node->key, key, ksl->keylen); } -void ngtcp2_ksl_init(ngtcp2_ksl *ksl, ngtcp2_ksl_compar compar, size_t keylen, +void ngtcp2_ksl_init(ngtcp2_ksl *ksl, ngtcp2_ksl_compar compar, + ngtcp2_ksl_search search, size_t keylen, const ngtcp2_mem *mem) { size_t nodelen = ksl_nodelen(keylen); ngtcp2_objalloc_init(&ksl->blkalloc, - ((ksl_blklen(nodelen) + 0xfu) & ~(uintptr_t)0xfu) * 8, - mem); + (ksl_blklen(nodelen) + 0xfu) & ~(uintptr_t)0xfu, mem); ksl->head = NULL; ksl->front = ksl->back = NULL; ksl->compar = compar; + ksl->search = search; + ksl->n = 0; ksl->keylen = keylen; ksl->nodelen = nodelen; - ksl->n = 0; } static ngtcp2_ksl_blk *ksl_blk_objalloc_new(ngtcp2_ksl *ksl) { @@ -82,6 +85,7 @@ static void ksl_blk_objalloc_del(ngtcp2_ksl *ksl, ngtcp2_ksl_blk *blk) { static int ksl_head_init(ngtcp2_ksl *ksl) { ngtcp2_ksl_blk *head = ksl_blk_objalloc_new(ksl); + if (!head) { return NGTCP2_ERR_NOMEM; } @@ -111,7 +115,7 @@ static void ksl_free_blk(ngtcp2_ksl *ksl, ngtcp2_ksl_blk *blk) { ksl_blk_objalloc_del(ksl, blk); } -#endif /* NOMEMPOOL */ +#endif /* defined(NOMEMPOOL) */ void ngtcp2_ksl_free(ngtcp2_ksl *ksl) { if (!ksl || !ksl->head) { @@ -120,7 +124,7 @@ void ngtcp2_ksl_free(ngtcp2_ksl *ksl) { #ifdef NOMEMPOOL ksl_free_blk(ksl, ksl->head); -#endif /* NOMEMPOOL */ +#endif /* defined(NOMEMPOOL) */ ngtcp2_objalloc_free(&ksl->blkalloc); } @@ -143,21 +147,22 @@ static ngtcp2_ksl_blk *ksl_split_blk(ngtcp2_ksl *ksl, ngtcp2_ksl_blk *blk) { rblk->next = blk->next; blk->next = rblk; + if (rblk->next) { rblk->next->prev = rblk; } else if (ksl->back == blk) { ksl->back = rblk; } + rblk->prev = blk; rblk->leaf = blk->leaf; rblk->n = blk->n / 2; + blk->n -= rblk->n; - memcpy(rblk->nodes, blk->nodes + ksl->nodelen * (blk->n - rblk->n), + 
memcpy(rblk->nodes, blk->nodes + ksl->nodelen * blk->n, ksl->nodelen * rblk->n); - blk->n -= rblk->n; - assert(blk->n >= NGTCP2_KSL_MIN_NBLK); assert(rblk->n >= NGTCP2_KSL_MIN_NBLK); @@ -173,7 +178,7 @@ static ngtcp2_ksl_blk *ksl_split_blk(ngtcp2_ksl *ksl, ngtcp2_ksl_blk *blk) { * codes: * * NGTCP2_ERR_NOMEM - * Out of memory. + * Out of memory. */ static int ksl_split_node(ngtcp2_ksl *ksl, ngtcp2_ksl_blk *blk, size_t i) { ngtcp2_ksl_node *node; @@ -207,7 +212,7 @@ static int ksl_split_node(ngtcp2_ksl *ksl, ngtcp2_ksl_blk *blk, size_t i) { * codes: * * NGTCP2_ERR_NOMEM - * Out of memory. + * Out of memory. */ static int ksl_split_head(ngtcp2_ksl *ksl) { ngtcp2_ksl_blk *rblk = NULL, *lblk, *nhead = NULL; @@ -221,10 +226,12 @@ static int ksl_split_head(ngtcp2_ksl *ksl) { lblk = ksl->head; nhead = ksl_blk_objalloc_new(ksl); + if (nhead == NULL) { ksl_blk_objalloc_del(ksl, rblk); return NGTCP2_ERR_NOMEM; } + nhead->next = nhead->prev = NULL; nhead->n = 2; nhead->leaf = 0; @@ -243,9 +250,9 @@ static int ksl_split_head(ngtcp2_ksl *ksl) { } /* - * insert_node inserts a node whose key is |key| with the associated - * |data| at the index of |i|. This function assumes that the number - * of nodes contained by |blk| is strictly less than + * ksl_insert_node inserts a node whose key is |key| with the + * associated |data| at the index of |i|. This function assumes that + * the number of nodes contained by |blk| is strictly less than * NGTCP2_KSL_MAX_NBLK. */ static void ksl_insert_node(ngtcp2_ksl *ksl, ngtcp2_ksl_blk *blk, size_t i, @@ -264,19 +271,6 @@ static void ksl_insert_node(ngtcp2_ksl *ksl, ngtcp2_ksl_blk *blk, size_t i, ++blk->n; } -static size_t ksl_bsearch(ngtcp2_ksl *ksl, ngtcp2_ksl_blk *blk, - const ngtcp2_ksl_key *key, ngtcp2_ksl_compar compar) { - size_t i; - ngtcp2_ksl_node *node; - - for (i = 0, node = (ngtcp2_ksl_node *)(void *)blk->nodes; - i < blk->n && compar((ngtcp2_ksl_key *)node->key, key); - ++i, node = (ngtcp2_ksl_node *)(void *)((uint8_t *)node + ksl->nodelen)) - ; - - return i; -} - int ngtcp2_ksl_insert(ngtcp2_ksl *ksl, ngtcp2_ksl_it *it, const ngtcp2_ksl_key *key, void *data) { ngtcp2_ksl_blk *blk; @@ -291,18 +285,17 @@ int ngtcp2_ksl_insert(ngtcp2_ksl *ksl, ngtcp2_ksl_it *it, } } - blk = ksl->head; - - if (blk->n == NGTCP2_KSL_MAX_NBLK) { + if (ksl->head->n == NGTCP2_KSL_MAX_NBLK) { rv = ksl_split_head(ksl); if (rv != 0) { return rv; } - blk = ksl->head; } + blk = ksl->head; + for (;;) { - i = ksl_bsearch(ksl, blk, key, ksl->compar); + i = ksl->search(ksl, blk, key); if (blk->leaf) { if (i < blk->n && @@ -310,13 +303,17 @@ int ngtcp2_ksl_insert(ngtcp2_ksl *ksl, ngtcp2_ksl_it *it, if (it) { *it = ngtcp2_ksl_end(ksl); } + return NGTCP2_ERR_INVALID_ARGUMENT; } + ksl_insert_node(ksl, blk, i, key, data); ++ksl->n; + if (it) { ngtcp2_ksl_it_init(it, ksl, blk, i); } + return 0; } @@ -329,16 +326,21 @@ int ngtcp2_ksl_insert(ngtcp2_ksl *ksl, ngtcp2_ksl_it *it, if (rv != 0) { return rv; } + node = ngtcp2_ksl_nth_node(ksl, blk, blk->n - 1); } + ksl_node_set_key(ksl, node, key); blk = node->blk; } + ksl_insert_node(ksl, blk, blk->n, key, data); ++ksl->n; + if (it) { ngtcp2_ksl_it_init(it, ksl, blk, blk->n - 1); } + return 0; } @@ -349,8 +351,10 @@ int ngtcp2_ksl_insert(ngtcp2_ksl *ksl, ngtcp2_ksl_it *it, if (rv != 0) { return rv; } + if (ksl->compar((ngtcp2_ksl_key *)node->key, key)) { node = ngtcp2_ksl_nth_node(ksl, blk, i + 1); + if (ksl->compar((ngtcp2_ksl_key *)node->key, key)) { ksl_node_set_key(ksl, node, key); } @@ -376,19 +380,22 @@ static void ksl_remove_node(ngtcp2_ksl 
*ksl, ngtcp2_ksl_blk *blk, size_t i) { * ksl_merge_node merges 2 nodes which are the nodes at the index of * |i| and |i + 1|. * - * If |blk| is the direct descendant of head (root) block and the head - * block contains just 2 nodes, the merged block becomes head block, - * which decreases the height of |ksl| by 1. + * If |blk| is the head (root) block and it contains just 2 nodes + * before merging nodes, the merged block becomes head block, which + * decreases the height of |ksl| by 1. * * This function returns the pointer to the merged block. */ static ngtcp2_ksl_blk *ksl_merge_node(ngtcp2_ksl *ksl, ngtcp2_ksl_blk *blk, size_t i) { + ngtcp2_ksl_node *lnode; ngtcp2_ksl_blk *lblk, *rblk; assert(i + 1 < blk->n); - lblk = ngtcp2_ksl_nth_node(ksl, blk, i)->blk; + lnode = ngtcp2_ksl_nth_node(ksl, blk, i); + + lblk = lnode->blk; rblk = ngtcp2_ksl_nth_node(ksl, blk, i + 1)->blk; assert(lblk->n + rblk->n < NGTCP2_KSL_MAX_NBLK); @@ -398,6 +405,7 @@ static ngtcp2_ksl_blk *ksl_merge_node(ngtcp2_ksl *ksl, ngtcp2_ksl_blk *blk, lblk->n += rblk->n; lblk->next = rblk->next; + if (lblk->next) { lblk->next->prev = lblk; } else if (ksl->back == rblk) { @@ -411,7 +419,7 @@ static ngtcp2_ksl_blk *ksl_merge_node(ngtcp2_ksl *ksl, ngtcp2_ksl_blk *blk, ksl->head = lblk; } else { ksl_remove_node(ksl, blk, i + 1); - ksl_node_set_key(ksl, ngtcp2_ksl_nth_node(ksl, blk, i), + ksl_node_set_key(ksl, lnode, ngtcp2_ksl_nth_node(ksl, lblk, lblk->n - 1)->key); } @@ -425,6 +433,7 @@ static ngtcp2_ksl_blk *ksl_merge_node(ngtcp2_ksl *ksl, ngtcp2_ksl_blk *blk, */ static void ksl_shift_left(ngtcp2_ksl *ksl, ngtcp2_ksl_blk *blk, size_t i) { ngtcp2_ksl_node *lnode, *rnode; + ngtcp2_ksl_blk *lblk, *rblk; size_t n; assert(i > 0); @@ -432,35 +441,37 @@ static void ksl_shift_left(ngtcp2_ksl *ksl, ngtcp2_ksl_blk *blk, size_t i) { lnode = ngtcp2_ksl_nth_node(ksl, blk, i - 1); rnode = ngtcp2_ksl_nth_node(ksl, blk, i); - assert(lnode->blk->n < NGTCP2_KSL_MAX_NBLK); - assert(rnode->blk->n > NGTCP2_KSL_MIN_NBLK); + lblk = lnode->blk; + rblk = rnode->blk; + + assert(lblk->n < NGTCP2_KSL_MAX_NBLK); + assert(rblk->n > NGTCP2_KSL_MIN_NBLK); - n = (lnode->blk->n + rnode->blk->n + 1) / 2 - lnode->blk->n; + n = (lblk->n + rblk->n + 1) / 2 - lblk->n; assert(n > 0); - assert(lnode->blk->n <= NGTCP2_KSL_MAX_NBLK - n); - assert(rnode->blk->n >= NGTCP2_KSL_MIN_NBLK + n); + assert(lblk->n <= NGTCP2_KSL_MAX_NBLK - n); + assert(rblk->n >= NGTCP2_KSL_MIN_NBLK + n); - memcpy(lnode->blk->nodes + ksl->nodelen * lnode->blk->n, rnode->blk->nodes, - ksl->nodelen * n); + memcpy(lblk->nodes + ksl->nodelen * lblk->n, rblk->nodes, ksl->nodelen * n); - lnode->blk->n += (uint32_t)n; - rnode->blk->n -= (uint32_t)n; + lblk->n += (uint32_t)n; + rblk->n -= (uint32_t)n; - ksl_node_set_key( - ksl, lnode, ngtcp2_ksl_nth_node(ksl, lnode->blk, lnode->blk->n - 1)->key); + ksl_node_set_key(ksl, lnode, + ngtcp2_ksl_nth_node(ksl, lblk, lblk->n - 1)->key); - memmove(rnode->blk->nodes, rnode->blk->nodes + ksl->nodelen * n, - ksl->nodelen * rnode->blk->n); + memmove(rblk->nodes, rblk->nodes + ksl->nodelen * n, ksl->nodelen * rblk->n); } /* * ksl_shift_right moves the last nodes in blk->nodes[i]->blk->nodes * to blk->nodes[i + 1]->blk->nodes in a manner that they have the - * same amount of nodes as much as possible.. + * same amount of nodes as much as possible. 
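A standalone sketch (not part of this patch) of the rebalancing arithmetic used by the ksl_shift_left/ksl_shift_right helpers: for ksl_shift_left, n = (lblk->n + rblk->n + 1) / 2 - lblk->n nodes move right-to-left (ksl_shift_right mirrors this), which leaves the two sibling blocks differing by at most one node.

/* Sketch: verify the node redistribution count used by the shift
   helpers.  l and r stand for lblk->n and rblk->n. */
#include <assert.h>
#include <stdio.h>

int main(void) {
  size_t l = 14, r = 31;          /* arbitrary example: right sibling is fuller */
  size_t n = (l + r + 1) / 2 - l; /* nodes moved from right to left */

  l += n;
  r -= n;

  /* After the shift the siblings are as balanced as possible. */
  assert(l >= r && l - r <= 1);
  printf("left=%zu right=%zu moved=%zu\n", l, r, n);
  return 0;
}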
*/ static void ksl_shift_right(ngtcp2_ksl *ksl, ngtcp2_ksl_blk *blk, size_t i) { ngtcp2_ksl_node *lnode, *rnode; + ngtcp2_ksl_blk *lblk, *rblk; size_t n; assert(i < blk->n - 1); @@ -468,26 +479,27 @@ static void ksl_shift_right(ngtcp2_ksl *ksl, ngtcp2_ksl_blk *blk, size_t i) { lnode = ngtcp2_ksl_nth_node(ksl, blk, i); rnode = ngtcp2_ksl_nth_node(ksl, blk, i + 1); - assert(lnode->blk->n > NGTCP2_KSL_MIN_NBLK); - assert(rnode->blk->n < NGTCP2_KSL_MAX_NBLK); + lblk = lnode->blk; + rblk = rnode->blk; - n = (lnode->blk->n + rnode->blk->n + 1) / 2 - rnode->blk->n; + assert(lblk->n > NGTCP2_KSL_MIN_NBLK); + assert(rblk->n < NGTCP2_KSL_MAX_NBLK); + + n = (lblk->n + rblk->n + 1) / 2 - rblk->n; assert(n > 0); - assert(lnode->blk->n >= NGTCP2_KSL_MIN_NBLK + n); - assert(rnode->blk->n <= NGTCP2_KSL_MAX_NBLK - n); + assert(lblk->n >= NGTCP2_KSL_MIN_NBLK + n); + assert(rblk->n <= NGTCP2_KSL_MAX_NBLK - n); - memmove(rnode->blk->nodes + ksl->nodelen * n, rnode->blk->nodes, - ksl->nodelen * rnode->blk->n); + memmove(rblk->nodes + ksl->nodelen * n, rblk->nodes, ksl->nodelen * rblk->n); - rnode->blk->n += (uint32_t)n; - lnode->blk->n -= (uint32_t)n; + rblk->n += (uint32_t)n; + lblk->n -= (uint32_t)n; - memcpy(rnode->blk->nodes, lnode->blk->nodes + ksl->nodelen * lnode->blk->n, - ksl->nodelen * n); + memcpy(rblk->nodes, lblk->nodes + ksl->nodelen * lblk->n, ksl->nodelen * n); - ksl_node_set_key( - ksl, lnode, ngtcp2_ksl_nth_node(ksl, lnode->blk, lnode->blk->n - 1)->key); + ksl_node_set_key(ksl, lnode, + ngtcp2_ksl_nth_node(ksl, lblk, lblk->n - 1)->key); } /* @@ -531,23 +543,24 @@ int ngtcp2_ksl_remove(ngtcp2_ksl *ksl, ngtcp2_ksl_it *it, ngtcp2_ksl_node *node; size_t i; - if (!ksl->head) { + if (!blk) { return NGTCP2_ERR_INVALID_ARGUMENT; } if (!blk->leaf && blk->n == 2 && ngtcp2_ksl_nth_node(ksl, blk, 0)->blk->n == NGTCP2_KSL_MIN_NBLK && ngtcp2_ksl_nth_node(ksl, blk, 1)->blk->n == NGTCP2_KSL_MIN_NBLK) { - blk = ksl_merge_node(ksl, ksl->head, 0); + blk = ksl_merge_node(ksl, blk, 0); } for (;;) { - i = ksl_bsearch(ksl, blk, key, ksl->compar); + i = ksl->search(ksl, blk, key); if (i == blk->n) { if (it) { *it = ngtcp2_ksl_end(ksl); } + return NGTCP2_ERR_INVALID_ARGUMENT; } @@ -556,10 +569,13 @@ int ngtcp2_ksl_remove(ngtcp2_ksl *ksl, ngtcp2_ksl_it *it, if (it) { *it = ngtcp2_ksl_end(ksl); } + return NGTCP2_ERR_INVALID_ARGUMENT; } + ksl_remove_node(ksl, blk, i); --ksl->n; + if (it) { if (blk->n == i && blk->next) { ngtcp2_ksl_it_init(it, ksl, blk->next, 0); @@ -567,6 +583,7 @@ int ngtcp2_ksl_remove(ngtcp2_ksl *ksl, ngtcp2_ksl_it *it, ngtcp2_ksl_it_init(it, ksl, blk, i); } } + return 0; } @@ -583,6 +600,7 @@ int ngtcp2_ksl_remove(ngtcp2_ksl *ksl, ngtcp2_ksl_it *it, ngtcp2_ksl_nth_node(ksl, blk, i + 1)->blk->n > NGTCP2_KSL_MIN_NBLK) { ksl_shift_left(ksl, blk, i + 1); blk = node->blk; + continue; } @@ -590,6 +608,7 @@ int ngtcp2_ksl_remove(ngtcp2_ksl *ksl, ngtcp2_ksl_it *it, ngtcp2_ksl_nth_node(ksl, blk, i - 1)->blk->n > NGTCP2_KSL_MIN_NBLK) { ksl_shift_right(ksl, blk, i - 1); blk = node->blk; + continue; } @@ -604,50 +623,14 @@ int ngtcp2_ksl_remove(ngtcp2_ksl *ksl, ngtcp2_ksl_it *it, } } -ngtcp2_ksl_it ngtcp2_ksl_lower_bound(ngtcp2_ksl *ksl, +ngtcp2_ksl_it ngtcp2_ksl_lower_bound(const ngtcp2_ksl *ksl, const ngtcp2_ksl_key *key) { - ngtcp2_ksl_blk *blk = ksl->head; - ngtcp2_ksl_it it; - size_t i; - - if (!blk) { - ngtcp2_ksl_it_init(&it, ksl, &null_blk, 0); - return it; - } - - for (;;) { - i = ksl_bsearch(ksl, blk, key, ksl->compar); - - if (blk->leaf) { - if (i == blk->n && blk->next) { - blk = blk->next; - i = 0; 
- } - ngtcp2_ksl_it_init(&it, ksl, blk, i); - return it; - } - - if (i == blk->n) { - /* This happens if descendant has smaller key. Fast forward to - find last node in this subtree. */ - for (; !blk->leaf; blk = ngtcp2_ksl_nth_node(ksl, blk, blk->n - 1)->blk) - ; - if (blk->next) { - blk = blk->next; - i = 0; - } else { - i = blk->n; - } - ngtcp2_ksl_it_init(&it, ksl, blk, i); - return it; - } - blk = ngtcp2_ksl_nth_node(ksl, blk, i)->blk; - } + return ngtcp2_ksl_lower_bound_search(ksl, key, ksl->search); } -ngtcp2_ksl_it ngtcp2_ksl_lower_bound_compar(ngtcp2_ksl *ksl, +ngtcp2_ksl_it ngtcp2_ksl_lower_bound_search(const ngtcp2_ksl *ksl, const ngtcp2_ksl_key *key, - ngtcp2_ksl_compar compar) { + ngtcp2_ksl_search search) { ngtcp2_ksl_blk *blk = ksl->head; ngtcp2_ksl_it it; size_t i; @@ -658,14 +641,16 @@ ngtcp2_ksl_it ngtcp2_ksl_lower_bound_compar(ngtcp2_ksl *ksl, } for (;;) { - i = ksl_bsearch(ksl, blk, key, compar); + i = search(ksl, blk, key); if (blk->leaf) { if (i == blk->n && blk->next) { blk = blk->next; i = 0; } + ngtcp2_ksl_it_init(&it, ksl, blk, i); + return it; } @@ -674,15 +659,19 @@ ngtcp2_ksl_it ngtcp2_ksl_lower_bound_compar(ngtcp2_ksl *ksl, find last node in this subtree. */ for (; !blk->leaf; blk = ngtcp2_ksl_nth_node(ksl, blk, blk->n - 1)->blk) ; + if (blk->next) { blk = blk->next; i = 0; } else { i = blk->n; } + ngtcp2_ksl_it_init(&it, ksl, blk, i); + return it; } + blk = ngtcp2_ksl_nth_node(ksl, blk, i)->blk; } } @@ -696,7 +685,7 @@ void ngtcp2_ksl_update_key(ngtcp2_ksl *ksl, const ngtcp2_ksl_key *old_key, assert(ksl->head); for (;;) { - i = ksl_bsearch(ksl, blk, old_key, ksl->compar); + i = ksl->search(ksl, blk, old_key); assert(i < blk->n); node = ngtcp2_ksl_nth_node(ksl, blk, i); @@ -704,6 +693,7 @@ void ngtcp2_ksl_update_key(ngtcp2_ksl *ksl, const ngtcp2_ksl_key *old_key, if (blk->leaf) { assert(key_equal(ksl->compar, (ngtcp2_ksl_key *)node->key, old_key)); ksl_node_set_key(ksl, node, new_key); + return; } @@ -716,7 +706,7 @@ void ngtcp2_ksl_update_key(ngtcp2_ksl *ksl, const ngtcp2_ksl_key *old_key, } } -size_t ngtcp2_ksl_len(ngtcp2_ksl *ksl) { return ksl->n; } +size_t ngtcp2_ksl_len(const ngtcp2_ksl *ksl) { return ksl->n; } void ngtcp2_ksl_clear(ngtcp2_ksl *ksl) { if (!ksl->head) { @@ -725,7 +715,7 @@ void ngtcp2_ksl_clear(ngtcp2_ksl *ksl) { #ifdef NOMEMPOOL ksl_free_blk(ksl, ksl->head); -#endif /* NOMEMPOOL */ +#endif /* defined(NOMEMPOOL) */ ksl->front = ksl->back = ksl->head = NULL; ksl->n = 0; @@ -734,7 +724,8 @@ void ngtcp2_ksl_clear(ngtcp2_ksl *ksl) { } #ifndef WIN32 -static void ksl_print(ngtcp2_ksl *ksl, ngtcp2_ksl_blk *blk, size_t level) { +static void ksl_print(const ngtcp2_ksl *ksl, ngtcp2_ksl_blk *blk, + size_t level) { size_t i; ngtcp2_ksl_node *node; @@ -745,7 +736,9 @@ static void ksl_print(ngtcp2_ksl *ksl, ngtcp2_ksl_blk *blk, size_t level) { node = ngtcp2_ksl_nth_node(ksl, blk, i); fprintf(stderr, " %" PRId64, *(int64_t *)(void *)node->key); } + fprintf(stderr, "\n"); + return; } @@ -754,14 +747,14 @@ static void ksl_print(ngtcp2_ksl *ksl, ngtcp2_ksl_blk *blk, size_t level) { } } -void ngtcp2_ksl_print(ngtcp2_ksl *ksl) { +void ngtcp2_ksl_print(const ngtcp2_ksl *ksl) { if (!ksl->head) { return; } ksl_print(ksl, ksl->head, 0); } -#endif /* !WIN32 */ +#endif /* !defined(WIN32) */ ngtcp2_ksl_it ngtcp2_ksl_begin(const ngtcp2_ksl *ksl) { ngtcp2_ksl_it it; @@ -815,9 +808,49 @@ int ngtcp2_ksl_range_compar(const ngtcp2_ksl_key *lhs, return a->begin < b->begin; } +ngtcp2_ksl_search_def(range, ngtcp2_ksl_range_compar) + +size_t ngtcp2_ksl_range_search(const 
ngtcp2_ksl *ksl, ngtcp2_ksl_blk *blk, + const ngtcp2_ksl_key *key) { + return ksl_range_search(ksl, blk, key); +} + int ngtcp2_ksl_range_exclusive_compar(const ngtcp2_ksl_key *lhs, const ngtcp2_ksl_key *rhs) { const ngtcp2_range *a = lhs, *b = rhs; - return a->begin < b->begin && - !(ngtcp2_max(a->begin, b->begin) < ngtcp2_min(a->end, b->end)); + return a->begin < b->begin && !(ngtcp2_max_uint64(a->begin, b->begin) < + ngtcp2_min_uint64(a->end, b->end)); +} + +ngtcp2_ksl_search_def(range_exclusive, ngtcp2_ksl_range_exclusive_compar) + +size_t ngtcp2_ksl_range_exclusive_search(const ngtcp2_ksl *ksl, + ngtcp2_ksl_blk *blk, + const ngtcp2_ksl_key *key) { + return ksl_range_exclusive_search(ksl, blk, key); +} + +int ngtcp2_ksl_uint64_less(const ngtcp2_ksl_key *lhs, + const ngtcp2_ksl_key *rhs) { + return *(uint64_t *)lhs < *(uint64_t *)rhs; +} + +ngtcp2_ksl_search_def(uint64_less, ngtcp2_ksl_uint64_less) + +size_t ngtcp2_ksl_uint64_less_search(const ngtcp2_ksl *ksl, ngtcp2_ksl_blk *blk, + const ngtcp2_ksl_key *key) { + return ksl_uint64_less_search(ksl, blk, key); +} + +int ngtcp2_ksl_int64_greater(const ngtcp2_ksl_key *lhs, + const ngtcp2_ksl_key *rhs) { + return *(int64_t *)lhs > *(int64_t *)rhs; +} + +ngtcp2_ksl_search_def(int64_greater, ngtcp2_ksl_int64_greater) + +size_t ngtcp2_ksl_int64_greater_search(const ngtcp2_ksl *ksl, + ngtcp2_ksl_blk *blk, + const ngtcp2_ksl_key *key) { + return ksl_int64_greater_search(ksl, blk, key); } diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_ksl.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_ksl.h index 7e08f15cdae6e8..de78bcb8070f53 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_ksl.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_ksl.h @@ -27,7 +27,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -35,16 +35,12 @@ #include "ngtcp2_objalloc.h" -/* - * Skip List using single key instead of range. - */ - #define NGTCP2_KSL_DEGR 16 /* NGTCP2_KSL_MAX_NBLK is the maximum number of nodes which a single block can contain. */ #define NGTCP2_KSL_MAX_NBLK (2 * NGTCP2_KSL_DEGR - 1) /* NGTCP2_KSL_MIN_NBLK is the minimum number of nodes which a single - block other than root must contains. */ + block other than root must contain. */ #define NGTCP2_KSL_MIN_NBLK (NGTCP2_KSL_DEGR - 1) /* @@ -85,7 +81,8 @@ struct ngtcp2_ksl_blk { struct { /* next points to the next block if leaf field is nonzero. */ ngtcp2_ksl_blk *next; - /* prev points to the previous block if leaf field is nonzero. */ + /* prev points to the previous block if leaf field is + nonzero. */ ngtcp2_ksl_blk *prev; /* n is the number of nodes this object contains in nodes. */ uint32_t n; @@ -106,7 +103,7 @@ struct ngtcp2_ksl_blk { }; }; -ngtcp2_objalloc_decl(ksl_blk, ngtcp2_ksl_blk, oplent); +ngtcp2_objalloc_decl(ksl_blk, ngtcp2_ksl_blk, oplent) /* * ngtcp2_ksl_compar is a function type which returns nonzero if key @@ -117,10 +114,37 @@ typedef int (*ngtcp2_ksl_compar)(const ngtcp2_ksl_key *lhs, typedef struct ngtcp2_ksl ngtcp2_ksl; +/* + * ngtcp2_ksl_search is a function to search for the first element in + * |blk|->nodes which is not ordered before |key|. It returns the + * index of such element. It returns |blk|->n if there is no such + * element. + */ +typedef size_t (*ngtcp2_ksl_search)(const ngtcp2_ksl *ksl, ngtcp2_ksl_blk *blk, + const ngtcp2_ksl_key *key); + +/* + * ngtcp2_ksl_search_def is a macro to implement ngtcp2_ksl_search + * with COMPAR which is supposed to be ngtcp2_ksl_compar. 
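For concreteness, instantiating the macro defined just below as ngtcp2_ksl_search_def(uint64_less, ngtcp2_ksl_uint64_less), as ngtcp2_ksl.c now does, expands to essentially the following linear scan (sketch; the node stride depends on ksl->nodelen):

static size_t ksl_uint64_less_search(const ngtcp2_ksl *ksl,
                                     ngtcp2_ksl_blk *blk,
                                     const ngtcp2_ksl_key *key) {
  size_t i;
  ngtcp2_ksl_node *node;

  /* Stop at the first node whose key is not ordered before |key|. */
  for (i = 0, node = (ngtcp2_ksl_node *)(void *)blk->nodes;
       i < blk->n &&
       ngtcp2_ksl_uint64_less((ngtcp2_ksl_key *)node->key, key);
       ++i,
       node = (ngtcp2_ksl_node *)(void *)((uint8_t *)node + ksl->nodelen))
    ;

  return i;
}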
+ */ +#define ngtcp2_ksl_search_def(NAME, COMPAR) \ + static size_t ksl_##NAME##_search( \ + const ngtcp2_ksl *ksl, ngtcp2_ksl_blk *blk, const ngtcp2_ksl_key *key) { \ + size_t i; \ + ngtcp2_ksl_node *node; \ + \ + for (i = 0, node = (ngtcp2_ksl_node *)(void *)blk->nodes; \ + i < blk->n && COMPAR((ngtcp2_ksl_key *)node->key, key); ++i, \ + node = (ngtcp2_ksl_node *)(void *)((uint8_t *)node + ksl->nodelen)) \ + ; \ + \ + return i; \ + } + typedef struct ngtcp2_ksl_it ngtcp2_ksl_it; /* - * ngtcp2_ksl_it is a forward iterator to iterate nodes. + * ngtcp2_ksl_it is a bidirectional iterator to iterate nodes. */ struct ngtcp2_ksl_it { const ngtcp2_ksl *ksl; @@ -140,6 +164,8 @@ struct ngtcp2_ksl { /* back points to the last leaf block. */ ngtcp2_ksl_blk *back; ngtcp2_ksl_compar compar; + ngtcp2_ksl_search search; + /* n is the number of elements stored. */ size_t n; /* keylen is the size of key */ size_t keylen; @@ -150,9 +176,12 @@ struct ngtcp2_ksl { /* * ngtcp2_ksl_init initializes |ksl|. |compar| specifies compare - * function. |keylen| is the length of key. + * function. |search| is a search function which must use |compar|. + * |keylen| is the length of key and must be at least + * sizeof(uint64_t). */ -void ngtcp2_ksl_init(ngtcp2_ksl *ksl, ngtcp2_ksl_compar compar, size_t keylen, +void ngtcp2_ksl_init(ngtcp2_ksl *ksl, ngtcp2_ksl_compar compar, + ngtcp2_ksl_search search, size_t keylen, const ngtcp2_mem *mem); /* @@ -165,15 +194,15 @@ void ngtcp2_ksl_free(ngtcp2_ksl *ksl); /* * ngtcp2_ksl_insert inserts |key| with its associated |data|. On * successful insertion, the iterator points to the inserted node is - * stored in |*it|. + * stored in |*it| if |it| is not NULL. * * This function returns 0 if it succeeds, or one of the following * negative error codes: * * NGTCP2_ERR_NOMEM - * Out of memory. + * Out of memory. * NGTCP2_ERR_INVALID_ARGUMENT - * |key| already exists. + * |key| already exists. */ int ngtcp2_ksl_insert(ngtcp2_ksl *ksl, ngtcp2_ksl_it *it, const ngtcp2_ksl_key *key, void *data); @@ -184,13 +213,14 @@ int ngtcp2_ksl_insert(ngtcp2_ksl *ksl, ngtcp2_ksl_it *it, * This function assigns the iterator to |*it|, which points to the * node which is located at the right next of the removed node if |it| * is not NULL. If |key| is not found, no deletion takes place and - * the return value of ngtcp2_ksl_end(ksl) is assigned to |*it|. + * the return value of ngtcp2_ksl_end(ksl) is assigned to |*it| if + * |it| is not NULL. * * This function returns 0 if it succeeds, or one of the following * negative error codes: * * NGTCP2_ERR_INVALID_ARGUMENT - * |key| does not exist. + * |key| does not exist. */ int ngtcp2_ksl_remove(ngtcp2_ksl *ksl, ngtcp2_ksl_it *it, const ngtcp2_ksl_key *key); @@ -213,16 +243,16 @@ int ngtcp2_ksl_remove_hint(ngtcp2_ksl *ksl, ngtcp2_ksl_it *it, * node, it returns the iterator which satisfies ngtcp2_ksl_it_end(it) * != 0. */ -ngtcp2_ksl_it ngtcp2_ksl_lower_bound(ngtcp2_ksl *ksl, +ngtcp2_ksl_it ngtcp2_ksl_lower_bound(const ngtcp2_ksl *ksl, const ngtcp2_ksl_key *key); /* - * ngtcp2_ksl_lower_bound_compar works like ngtcp2_ksl_lower_bound, - * but it takes custom function |compar| to do lower bound search. + * ngtcp2_ksl_lower_bound_search works like ngtcp2_ksl_lower_bound, + * but it takes custom function |search| to do lower bound search. 
*/ -ngtcp2_ksl_it ngtcp2_ksl_lower_bound_compar(ngtcp2_ksl *ksl, +ngtcp2_ksl_it ngtcp2_ksl_lower_bound_search(const ngtcp2_ksl *ksl, const ngtcp2_ksl_key *key, - ngtcp2_ksl_compar compar); + ngtcp2_ksl_search search); /* * ngtcp2_ksl_update_key replaces the key of nodes which has |old_key| @@ -235,7 +265,8 @@ void ngtcp2_ksl_update_key(ngtcp2_ksl *ksl, const ngtcp2_ksl_key *old_key, /* * ngtcp2_ksl_begin returns the iterator which points to the first * node. If there is no node in |ksl|, it returns the iterator which - * satisfies ngtcp2_ksl_it_end(it) != 0. + * satisfies both ngtcp2_ksl_it_begin(it) != 0 and + * ngtcp2_ksl_it_end(it) != 0. */ ngtcp2_ksl_it ngtcp2_ksl_begin(const ngtcp2_ksl *ksl); @@ -243,14 +274,15 @@ ngtcp2_ksl_it ngtcp2_ksl_begin(const ngtcp2_ksl *ksl); * ngtcp2_ksl_end returns the iterator which points to the node * following the last node. The returned object satisfies * ngtcp2_ksl_it_end(). If there is no node in |ksl|, it returns the - * iterator which satisfies ngtcp2_ksl_it_begin(it) != 0. + * iterator which satisfies ngtcp2_ksl_it_begin(it) != 0 and + * ngtcp2_ksl_it_end(it) != 0. */ ngtcp2_ksl_it ngtcp2_ksl_end(const ngtcp2_ksl *ksl); /* * ngtcp2_ksl_len returns the number of elements stored in |ksl|. */ -size_t ngtcp2_ksl_len(ngtcp2_ksl *ksl); +size_t ngtcp2_ksl_len(const ngtcp2_ksl *ksl); /* * ngtcp2_ksl_clear removes all elements stored in |ksl|. @@ -269,8 +301,8 @@ void ngtcp2_ksl_clear(ngtcp2_ksl *ksl); * that the key is of type int64_t. This function should be used for * the debugging purpose only. */ -void ngtcp2_ksl_print(ngtcp2_ksl *ksl); -#endif /* !WIN32 */ +void ngtcp2_ksl_print(const ngtcp2_ksl *ksl); +#endif /* !defined(WIN32) */ /* * ngtcp2_ksl_it_init initializes |it|. @@ -293,8 +325,8 @@ void ngtcp2_ksl_it_init(ngtcp2_ksl_it *it, const ngtcp2_ksl *ksl, */ #define ngtcp2_ksl_it_next(IT) \ (++(IT)->i == (IT)->blk->n && (IT)->blk->next \ - ? ((IT)->blk = (IT)->blk->next, (IT)->i = 0) \ - : 0) + ? ((IT)->blk = (IT)->blk->next, (IT)->i = 0) \ + : 0) /* * ngtcp2_ksl_it_prev moves backward the iterator by one. It is @@ -304,16 +336,16 @@ void ngtcp2_ksl_it_init(ngtcp2_ksl_it *it, const ngtcp2_ksl *ksl, void ngtcp2_ksl_it_prev(ngtcp2_ksl_it *it); /* - * ngtcp2_ksl_it_end returns nonzero if |it| points to the beyond the - * last node. + * ngtcp2_ksl_it_end returns nonzero if |it| points to the one beyond + * the last node. */ #define ngtcp2_ksl_it_end(IT) \ ((IT)->blk->n == (IT)->i && (IT)->blk->next == NULL) /* * ngtcp2_ksl_it_begin returns nonzero if |it| points to the first - * node. |it| might satisfy both ngtcp2_ksl_it_begin(&it) and - * ngtcp2_ksl_it_end(&it) if the skip list has no node. + * node. |it| might satisfy both ngtcp2_ksl_it_begin(it) != 0 and + * ngtcp2_ksl_it_end(it) != 0 if the skip list has no node. */ int ngtcp2_ksl_it_begin(const ngtcp2_ksl_it *it); @@ -327,21 +359,67 @@ int ngtcp2_ksl_it_begin(const ngtcp2_ksl_it *it); /* * ngtcp2_ksl_range_compar is an implementation of ngtcp2_ksl_compar. - * lhs->ptr and rhs->ptr must point to ngtcp2_range object and the - * function returns nonzero if (const ngtcp2_range *)(lhs->ptr)->begin - * < (const ngtcp2_range *)(rhs->ptr)->begin. + * |lhs| and |rhs| must point to ngtcp2_range object, and the function + * returns nonzero if ((const ngtcp2_range *)lhs)->begin < ((const + * ngtcp2_range *)rhs)->begin. 
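The ordering just described can be exercised in isolation; below is a standalone sketch (not ngtcp2 code) of the half-open range comparison, including the exclusive variant's overlap test max(begin) < min(end):

#include <stdint.h>
#include <stdio.h>

typedef struct { uint64_t begin, end; } range; /* stand-in for ngtcp2_range */

static uint64_t max64(uint64_t a, uint64_t b) { return a < b ? b : a; }
static uint64_t min64(uint64_t a, uint64_t b) { return a < b ? a : b; }

/* Mirrors ngtcp2_ksl_range_exclusive_compar: ordered by begin, and the
   two half-open ranges must not intersect. */
static int exclusive_less(range a, range b) {
  return a.begin < b.begin && !(max64(a.begin, b.begin) < min64(a.end, b.end));
}

int main(void) {
  range a = {0, 10}, b = {10, 20}, c = {5, 20};

  printf("%d\n", exclusive_less(a, b)); /* 1: disjoint, a ordered first */
  printf("%d\n", exclusive_less(a, c)); /* 0: a and c overlap */
  return 0;
}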
*/ int ngtcp2_ksl_range_compar(const ngtcp2_ksl_key *lhs, const ngtcp2_ksl_key *rhs); +/* + * ngtcp2_ksl_range_search is an implementation of ngtcp2_ksl_search + * that uses ngtcp2_ksl_range_compar. + */ +size_t ngtcp2_ksl_range_search(const ngtcp2_ksl *ksl, ngtcp2_ksl_blk *blk, + const ngtcp2_ksl_key *key); + /* * ngtcp2_ksl_range_exclusive_compar is an implementation of - * ngtcp2_ksl_compar. lhs->ptr and rhs->ptr must point to - * ngtcp2_range object and the function returns nonzero if (const - * ngtcp2_range *)(lhs->ptr)->begin < (const ngtcp2_range - * *)(rhs->ptr)->begin and the 2 ranges do not intersect. + * ngtcp2_ksl_compar. |lhs| and |rhs| must point to ngtcp2_range + * object, and the function returns nonzero if ((const ngtcp2_range + * *)lhs)->begin < ((const ngtcp2_range *)rhs)->begin, and the 2 + * ranges do not intersect. */ int ngtcp2_ksl_range_exclusive_compar(const ngtcp2_ksl_key *lhs, const ngtcp2_ksl_key *rhs); -#endif /* NGTCP2_KSL_H */ +/* + * ngtcp2_ksl_range_exclusive_search is an implementation of + * ngtcp2_ksl_search that uses ngtcp2_ksl_range_exclusive_compar. + */ +size_t ngtcp2_ksl_range_exclusive_search(const ngtcp2_ksl *ksl, + ngtcp2_ksl_blk *blk, + const ngtcp2_ksl_key *key); + +/* + * ngtcp2_ksl_uint64_less is an implementation of ngtcp2_ksl_compar. + * |lhs| and |rhs| must point to uint64_t objects, and the function + * returns nonzero if *(uint64_t *)|lhs| < *(uint64_t *)|rhs|. + */ +int ngtcp2_ksl_uint64_less(const ngtcp2_ksl_key *lhs, + const ngtcp2_ksl_key *rhs); + +/* + * ngtcp2_ksl_uint64_less_search is an implementation of + * ngtcp2_ksl_search that uses ngtcp2_ksl_uint64_less. + */ +size_t ngtcp2_ksl_uint64_less_search(const ngtcp2_ksl *ksl, ngtcp2_ksl_blk *blk, + const ngtcp2_ksl_key *key); + +/* + * ngtcp2_ksl_int64_greater is an implementation of ngtcp2_ksl_compar. + * |lhs| and |rhs| must point to int64_t objects, and the function + * returns nonzero if *(int64_t *)|lhs| > *(int64_t *)|rhs|. + */ +int ngtcp2_ksl_int64_greater(const ngtcp2_ksl_key *lhs, + const ngtcp2_ksl_key *rhs); + +/* + * ngtcp2_ksl_int64_greater_search is an implementation of + * ngtcp2_ksl_search that uses ngtcp2_ksl_int64_greater. 
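Taken together, a comparator and the search function built on it are what callers now pass to ngtcp2_ksl_init. A usage sketch, assuming the internal headers and the public ngtcp2_mem_default() are available (illustrative only, not part of the patch):

#include <stdint.h>

#include <ngtcp2/ngtcp2.h> /* ngtcp2_mem_default() (public API) */

#include "ngtcp2_ksl.h"

static size_t ksl_count_from(uint64_t start_key) {
  ngtcp2_ksl ksl;
  ngtcp2_ksl_it it;
  uint64_t key = 42;
  static int data = 1;
  size_t n = 0;

  /* The comparator must be paired with its matching search function,
     and keylen must be at least sizeof(uint64_t). */
  ngtcp2_ksl_init(&ksl, ngtcp2_ksl_uint64_less, ngtcp2_ksl_uint64_less_search,
                  sizeof(key), ngtcp2_mem_default());

  if (ngtcp2_ksl_insert(&ksl, NULL, &key, &data) == 0) {
    /* Walk forward from the first key not ordered before start_key. */
    for (it = ngtcp2_ksl_lower_bound(&ksl, &start_key);
         !ngtcp2_ksl_it_end(&it); ngtcp2_ksl_it_next(&it)) {
      ++n;
    }
  }

  ngtcp2_ksl_free(&ksl);

  return n;
}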
+ */ +size_t ngtcp2_ksl_int64_greater_search(const ngtcp2_ksl *ksl, + ngtcp2_ksl_blk *blk, + const ngtcp2_ksl_key *key); + +#endif /* !defined(NGTCP2_KSL_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_log.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_log.c index 93922a29c319f4..fc4eb443517d40 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_log.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_log.c @@ -27,7 +27,7 @@ #include #ifdef HAVE_UNISTD_H # include -#endif +#endif /* defined(HAVE_UNISTD_H) */ #include #include @@ -99,11 +99,11 @@ void ngtcp2_log_init(ngtcp2_log *log, const ngtcp2_cid *scid, #define NGTCP2_LOG_FRM_HD_FIELDS(DIR) \ timestamp_cast(log->last_ts - log->ts), (const char *)log->scid, "frm", \ - (DIR), hd->pkt_num, strpkttype(hd) + (DIR), hd->pkt_num, strpkttype(hd) #define NGTCP2_LOG_PKT_HD_FIELDS(DIR) \ timestamp_cast(log->last_ts - log->ts), (const char *)log->scid, "pkt", \ - (DIR), hd->pkt_num, strpkttype(hd) + (DIR), hd->pkt_num, strpkttype(hd) #define NGTCP2_LOG_TP_HD_FIELDS \ timestamp_cast(log->last_ts - log->ts), (const char *)log->scid, "cry" @@ -223,12 +223,12 @@ static uint64_t timestamp_cast(uint64_t ns) { return ns / NGTCP2_MILLISECONDS; } static void log_fr_stream(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, const ngtcp2_stream *fr, const char *dir) { log->log_printf( - log->user_data, - (NGTCP2_LOG_PKT " STREAM(0x%02" PRIx64 ") id=0x%" PRIx64 - " fin=%d offset=%" PRIu64 " len=%" PRIu64 " uni=%d"), - NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type | fr->flags, fr->stream_id, - fr->fin, fr->offset, ngtcp2_vec_len(fr->data, fr->datacnt), - (fr->stream_id & 0x2) != 0); + log->user_data, + (NGTCP2_LOG_PKT " STREAM(0x%02" PRIx64 ") id=0x%" PRIx64 + " fin=%d offset=%" PRIu64 " len=%" PRIu64 " uni=%d"), + NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type | fr->flags, fr->stream_id, fr->fin, + fr->offset, ngtcp2_vec_len(fr->data, fr->datacnt), + (fr->stream_id & 0x2) != 0); } static void log_fr_ack(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, @@ -236,13 +236,12 @@ static void log_fr_ack(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, int64_t largest_ack, min_ack; size_t i; - log->log_printf(log->user_data, - (NGTCP2_LOG_PKT " ACK(0x%02" PRIx64 ") largest_ack=%" PRId64 - " ack_delay=%" PRIu64 "(%" PRIu64 - ") ack_range_count=%zu"), - NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, fr->largest_ack, - fr->ack_delay_unscaled / NGTCP2_MILLISECONDS, fr->ack_delay, - fr->rangecnt); + log->log_printf( + log->user_data, + (NGTCP2_LOG_PKT " ACK(0x%02" PRIx64 ") largest_ack=%" PRId64 + " ack_delay=%" PRIu64 "(%" PRIu64 ") ack_range_count=%zu"), + NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, fr->largest_ack, + fr->ack_delay_unscaled / NGTCP2_MILLISECONDS, fr->ack_delay, fr->rangecnt); largest_ack = fr->largest_ack; min_ack = fr->largest_ack - (int64_t)fr->first_ack_range; @@ -285,38 +284,37 @@ static void log_fr_reset_stream(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, const ngtcp2_reset_stream *fr, const char *dir) { log->log_printf( - log->user_data, - (NGTCP2_LOG_PKT " RESET_STREAM(0x%02" PRIx64 ") id=0x%" PRIx64 - " app_error_code=%s(0x%" PRIx64 ") final_size=%" PRIu64), - NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, fr->stream_id, - strapperrorcode(fr->app_error_code), fr->app_error_code, fr->final_size); + log->user_data, + (NGTCP2_LOG_PKT " RESET_STREAM(0x%02" PRIx64 ") id=0x%" PRIx64 + " app_error_code=%s(0x%" PRIx64 ") final_size=%" PRIu64), + NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, fr->stream_id, + strapperrorcode(fr->app_error_code), fr->app_error_code, fr->final_size); } static void log_fr_connection_close(ngtcp2_log *log, const 
ngtcp2_pkt_hd *hd, const ngtcp2_connection_close *fr, const char *dir) { char reason[256]; - size_t reasonlen = ngtcp2_min(sizeof(reason) - 1, fr->reasonlen); + size_t reasonlen = ngtcp2_min_size(sizeof(reason) - 1, fr->reasonlen); - log->log_printf(log->user_data, - (NGTCP2_LOG_PKT " CONNECTION_CLOSE(0x%02" PRIx64 - ") error_code=%s(0x%" PRIx64 ") " - "frame_type=%" PRIx64 - " reason_len=%zu reason=[%s]"), - NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, - fr->type == NGTCP2_FRAME_CONNECTION_CLOSE - ? strerrorcode(fr->error_code) - : strapperrorcode(fr->error_code), - fr->error_code, fr->frame_type, fr->reasonlen, - ngtcp2_encode_printable_ascii(reason, fr->reason, reasonlen)); + log->log_printf( + log->user_data, + (NGTCP2_LOG_PKT " CONNECTION_CLOSE(0x%02" PRIx64 + ") error_code=%s(0x%" PRIx64 ") " + "frame_type=%" PRIx64 " reason_len=%zu reason=[%s]"), + NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, + fr->type == NGTCP2_FRAME_CONNECTION_CLOSE ? strerrorcode(fr->error_code) + : strapperrorcode(fr->error_code), + fr->error_code, fr->frame_type, fr->reasonlen, + ngtcp2_encode_printable_ascii(reason, fr->reason, reasonlen)); } static void log_fr_max_data(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, const ngtcp2_max_data *fr, const char *dir) { log->log_printf( - log->user_data, - (NGTCP2_LOG_PKT " MAX_DATA(0x%02" PRIx64 ") max_data=%" PRIu64), - NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, fr->max_data); + log->user_data, + (NGTCP2_LOG_PKT " MAX_DATA(0x%02" PRIx64 ") max_data=%" PRIu64), + NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, fr->max_data); } static void log_fr_max_stream_data(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, @@ -333,9 +331,9 @@ static void log_fr_max_stream_data(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, static void log_fr_max_streams(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, const ngtcp2_max_streams *fr, const char *dir) { log->log_printf( - log->user_data, - (NGTCP2_LOG_PKT " MAX_STREAMS(0x%02" PRIx64 ") max_streams=%" PRIu64), - NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, fr->max_streams); + log->user_data, + (NGTCP2_LOG_PKT " MAX_STREAMS(0x%02" PRIx64 ") max_streams=%" PRIu64), + NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, fr->max_streams); } static void log_fr_ping(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, @@ -348,9 +346,9 @@ static void log_fr_data_blocked(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, const ngtcp2_data_blocked *fr, const char *dir) { log->log_printf( - log->user_data, - (NGTCP2_LOG_PKT " DATA_BLOCKED(0x%02" PRIx64 ") offset=%" PRIu64), - NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, fr->offset); + log->user_data, + (NGTCP2_LOG_PKT " DATA_BLOCKED(0x%02" PRIx64 ") offset=%" PRIu64), + NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, fr->offset); } static void log_fr_stream_data_blocked(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, @@ -367,9 +365,9 @@ static void log_fr_streams_blocked(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, const ngtcp2_streams_blocked *fr, const char *dir) { log->log_printf( - log->user_data, - (NGTCP2_LOG_PKT " STREAMS_BLOCKED(0x%02" PRIx64 ") max_streams=%" PRIu64), - NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, fr->max_streams); + log->user_data, + (NGTCP2_LOG_PKT " STREAMS_BLOCKED(0x%02" PRIx64 ") max_streams=%" PRIu64), + NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, fr->max_streams); } static void log_fr_new_connection_id(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, @@ -379,15 +377,15 @@ static void log_fr_new_connection_id(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, uint8_t cid[sizeof(fr->cid.data) * 2 + 1]; log->log_printf( - log->user_data, - (NGTCP2_LOG_PKT " NEW_CONNECTION_ID(0x%02" 
PRIx64 ") seq=%" PRIu64 - " cid=0x%s retire_prior_to=%" PRIu64 - " stateless_reset_token=0x%s"), - NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, fr->seq, - (const char *)ngtcp2_encode_hex(cid, fr->cid.data, fr->cid.datalen), - fr->retire_prior_to, - (const char *)ngtcp2_encode_hex(buf, fr->stateless_reset_token, - sizeof(fr->stateless_reset_token))); + log->user_data, + (NGTCP2_LOG_PKT " NEW_CONNECTION_ID(0x%02" PRIx64 ") seq=%" PRIu64 + " cid=0x%s retire_prior_to=%" PRIu64 + " stateless_reset_token=0x%s"), + NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, fr->seq, + (const char *)ngtcp2_encode_hex(cid, fr->cid.data, fr->cid.datalen), + fr->retire_prior_to, + (const char *)ngtcp2_encode_hex(buf, fr->stateless_reset_token, + sizeof(fr->stateless_reset_token))); } static void log_fr_stop_sending(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, @@ -406,10 +404,10 @@ static void log_fr_path_challenge(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, uint8_t buf[sizeof(fr->data) * 2 + 1]; log->log_printf( - log->user_data, - (NGTCP2_LOG_PKT " PATH_CHALLENGE(0x%02" PRIx64 ") data=0x%s"), - NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, - (const char *)ngtcp2_encode_hex(buf, fr->data, sizeof(fr->data))); + log->user_data, + (NGTCP2_LOG_PKT " PATH_CHALLENGE(0x%02" PRIx64 ") data=0x%s"), + NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, + (const char *)ngtcp2_encode_hex(buf, fr->data, sizeof(fr->data))); } static void log_fr_path_response(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, @@ -418,19 +416,19 @@ static void log_fr_path_response(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, uint8_t buf[sizeof(fr->data) * 2 + 1]; log->log_printf( - log->user_data, - (NGTCP2_LOG_PKT " PATH_RESPONSE(0x%02" PRIx64 ") data=0x%s"), - NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, - (const char *)ngtcp2_encode_hex(buf, fr->data, sizeof(fr->data))); + log->user_data, + (NGTCP2_LOG_PKT " PATH_RESPONSE(0x%02" PRIx64 ") data=0x%s"), + NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, + (const char *)ngtcp2_encode_hex(buf, fr->data, sizeof(fr->data))); } static void log_fr_crypto(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, const ngtcp2_stream *fr, const char *dir) { - log->log_printf(log->user_data, - (NGTCP2_LOG_PKT " CRYPTO(0x%02" PRIx64 ") offset=%" PRIu64 - " len=%" PRIu64), - NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, fr->offset, - ngtcp2_vec_len(fr->data, fr->datacnt)); + log->log_printf( + log->user_data, + (NGTCP2_LOG_PKT " CRYPTO(0x%02" PRIx64 ") offset=%" PRIu64 " len=%" PRIu64), + NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, fr->offset, + ngtcp2_vec_len(fr->data, fr->datacnt)); } static void log_fr_new_token(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, @@ -448,9 +446,9 @@ static void log_fr_new_token(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, p = ngtcp2_encode_hex(buf, fr->token, fr->tokenlen); } log->log_printf( - log->user_data, - (NGTCP2_LOG_PKT " NEW_TOKEN(0x%02" PRIx64 ") token=0x%s len=%zu"), - NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, (const char *)p, fr->tokenlen); + log->user_data, + (NGTCP2_LOG_PKT " NEW_TOKEN(0x%02" PRIx64 ") token=0x%s len=%zu"), + NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, (const char *)p, fr->tokenlen); } static void log_fr_retire_connection_id(ngtcp2_log *log, @@ -458,9 +456,9 @@ static void log_fr_retire_connection_id(ngtcp2_log *log, const ngtcp2_retire_connection_id *fr, const char *dir) { log->log_printf( - log->user_data, - (NGTCP2_LOG_PKT " RETIRE_CONNECTION_ID(0x%02" PRIx64 ") seq=%" PRIu64), - NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, fr->seq); + log->user_data, + (NGTCP2_LOG_PKT " RETIRE_CONNECTION_ID(0x%02" PRIx64 ") seq=%" PRIu64), + 
NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, fr->seq); } static void log_fr_handshake_done(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, @@ -601,11 +599,11 @@ void ngtcp2_log_rx_sr(ngtcp2_log *log, const ngtcp2_pkt_stateless_reset *sr) { shd.type = NGTCP2_PKT_STATELESS_RESET; log->log_printf( - log->user_data, (NGTCP2_LOG_PKT " token=0x%s randlen=%zu"), - NGTCP2_LOG_PKT_HD_FIELDS("rx"), - (const char *)ngtcp2_encode_hex(buf, sr->stateless_reset_token, - sizeof(sr->stateless_reset_token)), - sr->randlen); + log->user_data, (NGTCP2_LOG_PKT " token=0x%s randlen=%zu"), + NGTCP2_LOG_PKT_HD_FIELDS("rx"), + (const char *)ngtcp2_encode_hex(buf, sr->stateless_reset_token, + sizeof(sr->stateless_reset_token)), + sr->randlen); } void ngtcp2_log_remote_tp(ngtcp2_log *log, @@ -625,10 +623,10 @@ void ngtcp2_log_remote_tp(ngtcp2_log *log, if (params->stateless_reset_token_present) { log->log_printf( - log->user_data, (NGTCP2_LOG_TP " stateless_reset_token=0x%s"), - NGTCP2_LOG_TP_HD_FIELDS, - (const char *)ngtcp2_encode_hex(token, params->stateless_reset_token, - sizeof(params->stateless_reset_token))); + log->user_data, (NGTCP2_LOG_TP " stateless_reset_token=0x%s"), + NGTCP2_LOG_TP_HD_FIELDS, + (const char *)ngtcp2_encode_hex(token, params->stateless_reset_token, + sizeof(params->stateless_reset_token))); } if (params->preferred_addr_present) { @@ -639,7 +637,7 @@ void ngtcp2_log_remote_tp(ngtcp2_log *log, (NGTCP2_LOG_TP " preferred_address.ipv4_addr=%s"), NGTCP2_LOG_TP_HD_FIELDS, (const char *)ngtcp2_encode_ipv4( - addr, (const uint8_t *)&sa_in->sin_addr)); + addr, (const uint8_t *)&sa_in->sin_addr)); log->log_printf(log->user_data, (NGTCP2_LOG_TP " preferred_address.ipv4_port=%u"), NGTCP2_LOG_TP_HD_FIELDS, ngtcp2_ntohs(sa_in->sin_port)); @@ -652,59 +650,59 @@ void ngtcp2_log_remote_tp(ngtcp2_log *log, (NGTCP2_LOG_TP " preferred_address.ipv6_addr=%s"), NGTCP2_LOG_TP_HD_FIELDS, (const char *)ngtcp2_encode_ipv6( - addr, (const uint8_t *)&sa_in6->sin6_addr)); + addr, (const uint8_t *)&sa_in6->sin6_addr)); log->log_printf(log->user_data, (NGTCP2_LOG_TP " preferred_address.ipv6_port=%u"), NGTCP2_LOG_TP_HD_FIELDS, ngtcp2_ntohs(sa_in6->sin6_port)); } log->log_printf( - log->user_data, (NGTCP2_LOG_TP " preferred_address.cid=0x%s"), - NGTCP2_LOG_TP_HD_FIELDS, - (const char *)ngtcp2_encode_hex(cid, params->preferred_addr.cid.data, - params->preferred_addr.cid.datalen)); + log->user_data, (NGTCP2_LOG_TP " preferred_address.cid=0x%s"), + NGTCP2_LOG_TP_HD_FIELDS, + (const char *)ngtcp2_encode_hex(cid, params->preferred_addr.cid.data, + params->preferred_addr.cid.datalen)); log->log_printf( - log->user_data, - (NGTCP2_LOG_TP " preferred_address.stateless_reset_token=0x%s"), - NGTCP2_LOG_TP_HD_FIELDS, - (const char *)ngtcp2_encode_hex( - token, params->preferred_addr.stateless_reset_token, - sizeof(params->preferred_addr.stateless_reset_token))); + log->user_data, + (NGTCP2_LOG_TP " preferred_address.stateless_reset_token=0x%s"), + NGTCP2_LOG_TP_HD_FIELDS, + (const char *)ngtcp2_encode_hex( + token, params->preferred_addr.stateless_reset_token, + sizeof(params->preferred_addr.stateless_reset_token))); } if (params->original_dcid_present) { log->log_printf( - log->user_data, - (NGTCP2_LOG_TP " original_destination_connection_id=0x%s"), - NGTCP2_LOG_TP_HD_FIELDS, - (const char *)ngtcp2_encode_hex(cid, params->original_dcid.data, - params->original_dcid.datalen)); + log->user_data, + (NGTCP2_LOG_TP " original_destination_connection_id=0x%s"), + NGTCP2_LOG_TP_HD_FIELDS, + (const char *)ngtcp2_encode_hex(cid, 
params->original_dcid.data, + params->original_dcid.datalen)); } if (params->retry_scid_present) { log->log_printf( - log->user_data, (NGTCP2_LOG_TP " retry_source_connection_id=0x%s"), - NGTCP2_LOG_TP_HD_FIELDS, - (const char *)ngtcp2_encode_hex(cid, params->retry_scid.data, - params->retry_scid.datalen)); + log->user_data, (NGTCP2_LOG_TP " retry_source_connection_id=0x%s"), + NGTCP2_LOG_TP_HD_FIELDS, + (const char *)ngtcp2_encode_hex(cid, params->retry_scid.data, + params->retry_scid.datalen)); } if (params->initial_scid_present) { log->log_printf( - log->user_data, (NGTCP2_LOG_TP " initial_source_connection_id=0x%s"), - NGTCP2_LOG_TP_HD_FIELDS, - (const char *)ngtcp2_encode_hex(cid, params->initial_scid.data, - params->initial_scid.datalen)); + log->user_data, (NGTCP2_LOG_TP " initial_source_connection_id=0x%s"), + NGTCP2_LOG_TP_HD_FIELDS, + (const char *)ngtcp2_encode_hex(cid, params->initial_scid.data, + params->initial_scid.datalen)); } log->log_printf( - log->user_data, - (NGTCP2_LOG_TP " initial_max_stream_data_bidi_local=%" PRIu64), - NGTCP2_LOG_TP_HD_FIELDS, params->initial_max_stream_data_bidi_local); + log->user_data, + (NGTCP2_LOG_TP " initial_max_stream_data_bidi_local=%" PRIu64), + NGTCP2_LOG_TP_HD_FIELDS, params->initial_max_stream_data_bidi_local); log->log_printf( - log->user_data, - (NGTCP2_LOG_TP " initial_max_stream_data_bidi_remote=%" PRIu64), - NGTCP2_LOG_TP_HD_FIELDS, params->initial_max_stream_data_bidi_remote); + log->user_data, + (NGTCP2_LOG_TP " initial_max_stream_data_bidi_remote=%" PRIu64), + NGTCP2_LOG_TP_HD_FIELDS, params->initial_max_stream_data_bidi_remote); log->log_printf(log->user_data, (NGTCP2_LOG_TP " initial_max_stream_data_uni=%" PRIu64), NGTCP2_LOG_TP_HD_FIELDS, params->initial_max_stream_data_uni); @@ -742,21 +740,21 @@ void ngtcp2_log_remote_tp(ngtcp2_log *log, if (params->version_info_present) { log->log_printf( - log->user_data, - (NGTCP2_LOG_TP " version_information.chosen_version=0x%08x"), - NGTCP2_LOG_TP_HD_FIELDS, params->version_info.chosen_version); + log->user_data, + (NGTCP2_LOG_TP " version_information.chosen_version=0x%08x"), + NGTCP2_LOG_TP_HD_FIELDS, params->version_info.chosen_version); assert(!(params->version_info.available_versionslen & 0x3)); for (i = 0, p = params->version_info.available_versions; i < params->version_info.available_versionslen; i += sizeof(uint32_t)) { - p = ngtcp2_get_uint32(&version, p); + p = ngtcp2_get_uint32be(&version, p); log->log_printf( - log->user_data, - (NGTCP2_LOG_TP " version_information.available_versions[%zu]=0x%08x"), - NGTCP2_LOG_TP_HD_FIELDS, i >> 2, version); + log->user_data, + (NGTCP2_LOG_TP " version_information.available_versions[%zu]=0x%08x"), + NGTCP2_LOG_TP_HD_FIELDS, i >> 2, version); } } } @@ -783,18 +781,18 @@ static void log_pkt_hd(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, if (hd->type == NGTCP2_PKT_1RTT) { ngtcp2_log_info( - log, NGTCP2_LOG_EVENT_PKT, "%s pkn=%" PRId64 " dcid=0x%s type=%s k=%d", - dir, hd->pkt_num, - (const char *)ngtcp2_encode_hex(dcid, hd->dcid.data, hd->dcid.datalen), - strpkttype(hd), (hd->flags & NGTCP2_PKT_FLAG_KEY_PHASE) != 0); + log, NGTCP2_LOG_EVENT_PKT, "%s pkn=%" PRId64 " dcid=0x%s type=%s k=%d", + dir, hd->pkt_num, + (const char *)ngtcp2_encode_hex(dcid, hd->dcid.data, hd->dcid.datalen), + strpkttype(hd), (hd->flags & NGTCP2_PKT_FLAG_KEY_PHASE) != 0); } else { ngtcp2_log_info( - log, NGTCP2_LOG_EVENT_PKT, - "%s pkn=%" PRId64 " dcid=0x%s scid=0x%s version=0x%08x type=%s len=%zu", - dir, hd->pkt_num, - (const char *)ngtcp2_encode_hex(dcid, 
hd->dcid.data, hd->dcid.datalen), - (const char *)ngtcp2_encode_hex(scid, hd->scid.data, hd->scid.datalen), - hd->version, strpkttype(hd), hd->len); + log, NGTCP2_LOG_EVENT_PKT, + "%s pkn=%" PRId64 " dcid=0x%s scid=0x%s version=0x%08x type=%s len=%zu", + dir, hd->pkt_num, + (const char *)ngtcp2_encode_hex(dcid, hd->dcid.data, hd->dcid.datalen), + (const char *)ngtcp2_encode_hex(scid, hd->scid.data, hd->scid.datalen), + hd->version, strpkttype(hd), hd->len); } } diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_log.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_log.h index 1280ce04d6385a..13fb81a72e1d51 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_log.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_log.h @@ -27,7 +27,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -129,4 +129,4 @@ void ngtcp2_log_tx_cancel(ngtcp2_log *log, const ngtcp2_pkt_hd *hd); void ngtcp2_log_info(ngtcp2_log *log, ngtcp2_log_event ev, const char *fmt, ...); -#endif /* NGTCP2_LOG_H */ +#endif /* !defined(NGTCP2_LOG_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_macro.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_macro.h index 28d3461bef9238..dfe5e0aed220f8 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_macro.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_macro.h @@ -27,17 +27,14 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include #include -#define ngtcp2_min(A, B) ((A) < (B) ? (A) : (B)) -#define ngtcp2_max(A, B) ((A) > (B) ? (A) : (B)) - #define ngtcp2_struct_of(ptr, type, member) \ - ((type *)(void *)((char *)(ptr)-offsetof(type, member))) + ((type *)(void *)((char *)(ptr) - offsetof(type, member))) /* ngtcp2_list_insert inserts |T| before |*PD|. The contract is that this is singly linked list, and the next element is pointed by next @@ -55,4 +52,30 @@ */ #define ngtcp2_arraylen(A) (sizeof(A) / sizeof(A[0])) -#endif /* NGTCP2_MACRO_H */ +#define ngtcp2_max_def(SUFFIX, T) \ + static inline T ngtcp2_max_##SUFFIX(T a, T b) { return a < b ? b : a; } + +ngtcp2_max_def(int8, int8_t) +ngtcp2_max_def(int16, int16_t) +ngtcp2_max_def(int32, int32_t) +ngtcp2_max_def(int64, int64_t) +ngtcp2_max_def(uint8, uint8_t) +ngtcp2_max_def(uint16, uint16_t) +ngtcp2_max_def(uint32, uint32_t) +ngtcp2_max_def(uint64, uint64_t) +ngtcp2_max_def(size, size_t) + +#define ngtcp2_min_def(SUFFIX, T) \ + static inline T ngtcp2_min_##SUFFIX(T a, T b) { return a < b ? 
a : b; } + +ngtcp2_min_def(int8, int8_t) +ngtcp2_min_def(int16, int16_t) +ngtcp2_min_def(int32, int32_t) +ngtcp2_min_def(int64, int64_t) +ngtcp2_min_def(uint8, uint8_t) +ngtcp2_min_def(uint16, uint16_t) +ngtcp2_min_def(uint32, uint32_t) +ngtcp2_min_def(uint64, uint64_t) +ngtcp2_min_def(size, size_t) + +#endif /* !defined(NGTCP2_MACRO_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_map.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_map.c index 33e9fcc018b5db..9eb102f16b32e2 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_map.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_map.c @@ -35,8 +35,7 @@ void ngtcp2_map_init(ngtcp2_map *map, const ngtcp2_mem *mem) { map->mem = mem; - map->tablelen = 0; - map->tablelenbits = 0; + map->hashbits = 0; map->table = NULL; map->size = 0; } @@ -49,33 +48,20 @@ void ngtcp2_map_free(ngtcp2_map *map) { ngtcp2_mem_free(map->mem, map->table); } -void ngtcp2_map_each_free(ngtcp2_map *map, int (*func)(void *data, void *ptr), - void *ptr) { - uint32_t i; - ngtcp2_map_bucket *bkt; - - for (i = 0; i < map->tablelen; ++i) { - bkt = &map->table[i]; - - if (bkt->data == NULL) { - continue; - } - - func(bkt->data, ptr); - } -} - -int ngtcp2_map_each(ngtcp2_map *map, int (*func)(void *data, void *ptr), +int ngtcp2_map_each(const ngtcp2_map *map, int (*func)(void *data, void *ptr), void *ptr) { int rv; - uint32_t i; + size_t i; ngtcp2_map_bucket *bkt; + size_t tablelen; if (map->size == 0) { return 0; } - for (i = 0; i < map->tablelen; ++i) { + tablelen = 1u << map->hashbits; + + for (i = 0; i < tablelen; ++i) { bkt = &map->table[i]; if (bkt->data == NULL) { @@ -91,82 +77,61 @@ int ngtcp2_map_each(ngtcp2_map *map, int (*func)(void *data, void *ptr), return 0; } -static uint32_t hash(ngtcp2_map_key_type key) { - return (uint32_t)((key * 11400714819323198485llu) >> 32); -} - -static size_t h2idx(uint32_t hash, uint32_t bits) { - return hash >> (32 - bits); -} - -static size_t distance(uint32_t tablelen, uint32_t tablelenbits, - ngtcp2_map_bucket *bkt, size_t idx) { - return (idx - h2idx(bkt->hash, tablelenbits)) & (tablelen - 1); +static size_t hash(ngtcp2_map_key_type key, size_t bits) { + return (size_t)((key * 11400714819323198485llu) >> (64 - bits)); } -static void map_bucket_swap(ngtcp2_map_bucket *bkt, uint32_t *phash, - ngtcp2_map_key_type *pkey, void **pdata) { - uint32_t h = bkt->hash; - ngtcp2_map_key_type key = bkt->key; - void *data = bkt->data; - - bkt->hash = *phash; - bkt->key = *pkey; - bkt->data = *pdata; +static void map_bucket_swap(ngtcp2_map_bucket *a, ngtcp2_map_bucket *b) { + ngtcp2_map_bucket c = *a; - *phash = h; - *pkey = key; - *pdata = data; -} - -static void map_bucket_set_data(ngtcp2_map_bucket *bkt, uint32_t hash, - ngtcp2_map_key_type key, void *data) { - bkt->hash = hash; - bkt->key = key; - bkt->data = data; + *a = *b; + *b = c; } #ifndef WIN32 -void ngtcp2_map_print_distance(ngtcp2_map *map) { - uint32_t i; +void ngtcp2_map_print_distance(const ngtcp2_map *map) { + size_t i; size_t idx; ngtcp2_map_bucket *bkt; + size_t tablelen; - for (i = 0; i < map->tablelen; ++i) { + if (map->size == 0) { + return; + } + + tablelen = 1u << map->hashbits; + + for (i = 0; i < tablelen; ++i) { bkt = &map->table[i]; if (bkt->data == NULL) { - fprintf(stderr, "@%u \n", i); + fprintf(stderr, "@%zu \n", i); continue; } - idx = h2idx(bkt->hash, map->tablelenbits); - fprintf(stderr, "@%u hash=%08x key=%" PRIu64 " base=%zu distance=%zu\n", i, - bkt->hash, bkt->key, idx, - distance(map->tablelen, map->tablelenbits, bkt, idx)); + idx = hash(bkt->key, map->hashbits); + fprintf(stderr, "@%zu 
hash=%zu key=%" PRIu64 " base=%zu distance=%u\n", i, + hash(bkt->key, map->hashbits), bkt->key, idx, bkt->psl); } } -#endif /* !WIN32 */ +#endif /* !defined(WIN32) */ -static int insert(ngtcp2_map_bucket *table, uint32_t tablelen, - uint32_t tablelenbits, uint32_t hash, ngtcp2_map_key_type key, - void *data) { - size_t idx = h2idx(hash, tablelenbits); - size_t d = 0, dd; - ngtcp2_map_bucket *bkt; +static int insert(ngtcp2_map_bucket *table, size_t hashbits, + ngtcp2_map_key_type key, void *data) { + size_t idx = hash(key, hashbits); + ngtcp2_map_bucket b = {0, key, data}, *bkt; + size_t mask = (1u << hashbits) - 1; for (;;) { bkt = &table[idx]; if (bkt->data == NULL) { - map_bucket_set_data(bkt, hash, key, data); + *bkt = b; return 0; } - dd = distance(tablelen, tablelenbits, bkt, idx); - if (d > dd) { - map_bucket_swap(bkt, &hash, &key, &data); - d = dd; + if (b.psl > bkt->psl) { + map_bucket_swap(bkt, &b); } else if (bkt->key == key) { /* TODO This check is just a waste after first swap or if this function is called from map_resize. That said, there is no @@ -175,41 +140,42 @@ static int insert(ngtcp2_map_bucket *table, uint32_t tablelen, return NGTCP2_ERR_INVALID_ARGUMENT; } - ++d; - idx = (idx + 1) & (tablelen - 1); + ++b.psl; + idx = (idx + 1) & mask; } } -/* new_tablelen must be power of 2 and new_tablelen == (1 << - new_tablelenbits) must hold. */ -static int map_resize(ngtcp2_map *map, uint32_t new_tablelen, - uint32_t new_tablelenbits) { - uint32_t i; +static int map_resize(ngtcp2_map *map, size_t new_hashbits) { + size_t i; ngtcp2_map_bucket *new_table; ngtcp2_map_bucket *bkt; + size_t tablelen; int rv; (void)rv; new_table = - ngtcp2_mem_calloc(map->mem, new_tablelen, sizeof(ngtcp2_map_bucket)); + ngtcp2_mem_calloc(map->mem, 1u << new_hashbits, sizeof(ngtcp2_map_bucket)); if (new_table == NULL) { return NGTCP2_ERR_NOMEM; } - for (i = 0; i < map->tablelen; ++i) { - bkt = &map->table[i]; - if (bkt->data == NULL) { - continue; - } - rv = insert(new_table, new_tablelen, new_tablelenbits, bkt->hash, bkt->key, - bkt->data); + if (map->size) { + tablelen = 1u << map->hashbits; - assert(0 == rv); + for (i = 0; i < tablelen; ++i) { + bkt = &map->table[i]; + if (bkt->data == NULL) { + continue; + } + + rv = insert(new_table, new_hashbits, bkt->key, bkt->data); + + assert(0 == rv); + } } ngtcp2_mem_free(map->mem, map->table); - map->tablelen = new_tablelen; - map->tablelenbits = new_tablelenbits; + map->hashbits = new_hashbits; map->table = new_table; return 0; @@ -221,48 +187,49 @@ int ngtcp2_map_insert(ngtcp2_map *map, ngtcp2_map_key_type key, void *data) { assert(data); /* Load factor is 0.75 */ - if ((map->size + 1) * 4 > map->tablelen * 3) { - if (map->tablelen) { - rv = map_resize(map, map->tablelen * 2, map->tablelenbits + 1); + /* Under the very initial condition, that is map->size == 0 and + map->hashbits == 0, 4 > 3 still holds nicely. 
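The growth rule can be read off in isolation: the table grows when (size + 1) * 4 > tablelen * 3, i.e. at a load factor of 0.75, and with size == 0 and hashbits == 0 the comparison degenerates to 4 > 3, which forces the first allocation. A standalone sketch follows, taking NGTCP2_INITIAL_TABLE_LENBITS as 4 purely for illustration (the constant's actual value lives elsewhere in ngtcp2_map.c):

#include <stdio.h>

int main(void) {
  size_t size = 0, hashbits = 0;

  for (size_t i = 0; i < 100; ++i) {
    /* Same check as ngtcp2_map_insert: tablelen == 1u << hashbits. */
    if ((size + 1) * 4 > (1u << hashbits) * 3) {
      hashbits = hashbits ? hashbits + 1 : 4; /* assumed initial lenbits */
    }
    ++size;
  }

  /* 100 entries end up in a 256-slot table: load factor just under 0.4. */
  printf("size=%zu tablelen=%u\n", size, 1u << hashbits);
  return 0;
}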
*/ + if ((map->size + 1) * 4 > (1u << map->hashbits) * 3) { + if (map->hashbits) { + rv = map_resize(map, map->hashbits + 1); if (rv != 0) { return rv; } } else { - rv = map_resize(map, 1 << NGTCP2_INITIAL_TABLE_LENBITS, - NGTCP2_INITIAL_TABLE_LENBITS); + rv = map_resize(map, NGTCP2_INITIAL_TABLE_LENBITS); if (rv != 0) { return rv; } } } - rv = insert(map->table, map->tablelen, map->tablelenbits, hash(key), key, - data); + rv = insert(map->table, map->hashbits, key, data); if (rv != 0) { return rv; } + ++map->size; + return 0; } -void *ngtcp2_map_find(ngtcp2_map *map, ngtcp2_map_key_type key) { - uint32_t h; +void *ngtcp2_map_find(const ngtcp2_map *map, ngtcp2_map_key_type key) { size_t idx; ngtcp2_map_bucket *bkt; - size_t d = 0; + size_t psl = 0; + size_t mask; if (map->size == 0) { return NULL; } - h = hash(key); - idx = h2idx(h, map->tablelenbits); + idx = hash(key, map->hashbits); + mask = (1u << map->hashbits) - 1; for (;;) { bkt = &map->table[idx]; - if (bkt->data == NULL || - d > distance(map->tablelen, map->tablelenbits, bkt, idx)) { + if (bkt->data == NULL || psl > bkt->psl) { return NULL; } @@ -270,50 +237,47 @@ void *ngtcp2_map_find(ngtcp2_map *map, ngtcp2_map_key_type key) { return bkt->data; } - ++d; - idx = (idx + 1) & (map->tablelen - 1); + ++psl; + idx = (idx + 1) & mask; } } int ngtcp2_map_remove(ngtcp2_map *map, ngtcp2_map_key_type key) { - uint32_t h; - size_t idx, didx; - ngtcp2_map_bucket *bkt; - size_t d = 0; + size_t idx; + ngtcp2_map_bucket *b, *bkt; + size_t psl = 0; + size_t mask; if (map->size == 0) { return NGTCP2_ERR_INVALID_ARGUMENT; } - h = hash(key); - idx = h2idx(h, map->tablelenbits); + idx = hash(key, map->hashbits); + mask = (1u << map->hashbits) - 1; for (;;) { bkt = &map->table[idx]; - if (bkt->data == NULL || - d > distance(map->tablelen, map->tablelenbits, bkt, idx)) { + if (bkt->data == NULL || psl > bkt->psl) { return NGTCP2_ERR_INVALID_ARGUMENT; } if (bkt->key == key) { - map_bucket_set_data(bkt, 0, 0, NULL); - - didx = idx; - idx = (idx + 1) & (map->tablelen - 1); + b = bkt; + idx = (idx + 1) & mask; for (;;) { bkt = &map->table[idx]; - if (bkt->data == NULL || - distance(map->tablelen, map->tablelenbits, bkt, idx) == 0) { + if (bkt->data == NULL || bkt->psl == 0) { + b->data = NULL; break; } - map->table[didx] = *bkt; - map_bucket_set_data(bkt, 0, 0, NULL); - didx = idx; + --bkt->psl; + *b = *bkt; + b = bkt; - idx = (idx + 1) & (map->tablelen - 1); + idx = (idx + 1) & mask; } --map->size; @@ -321,18 +285,18 @@ int ngtcp2_map_remove(ngtcp2_map *map, ngtcp2_map_key_type key) { return 0; } - ++d; - idx = (idx + 1) & (map->tablelen - 1); + ++psl; + idx = (idx + 1) & mask; } } void ngtcp2_map_clear(ngtcp2_map *map) { - if (map->tablelen == 0) { + if (map->size == 0) { return; } - memset(map->table, 0, sizeof(*map->table) * map->tablelen); + memset(map->table, 0, sizeof(*map->table) * (1u << map->hashbits)); map->size = 0; } -size_t ngtcp2_map_size(ngtcp2_map *map) { return map->size; } +size_t ngtcp2_map_size(const ngtcp2_map *map) { return map->size; } diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_map.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_map.h index d05b1657489e45..9d882fb20088d8 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_map.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_map.h @@ -28,7 +28,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -39,7 +39,7 @@ typedef uint64_t ngtcp2_map_key_type; typedef struct ngtcp2_map_bucket { - uint32_t hash; + uint32_t psl; ngtcp2_map_key_type key; void *data; } 
ngtcp2_map_bucket; @@ -48,33 +48,24 @@ typedef struct ngtcp2_map { ngtcp2_map_bucket *table; const ngtcp2_mem *mem; size_t size; - uint32_t tablelen; - uint32_t tablelenbits; + size_t hashbits; } ngtcp2_map; /* - * Initializes the map |map|. + * ngtcp2_map_init initializes the map |map|. */ void ngtcp2_map_init(ngtcp2_map *map, const ngtcp2_mem *mem); /* - * Deallocates any resources allocated for |map|. The stored entries - * are not freed by this function. Use ngtcp2_map_each_free() to free - * each entries. + * ngtcp2_map_free deallocates any resources allocated for |map|. The + * stored entries are not freed by this function. Use + * ngtcp2_map_each() to free each entry. */ void ngtcp2_map_free(ngtcp2_map *map); /* - * Deallocates each entries using |func| function and any resources - * allocated for |map|. The |func| function is responsible for freeing - * given the |data| object. The |ptr| will be passed to the |func| as - * send argument. The return value of the |func| will be ignored. - */ -void ngtcp2_map_each_free(ngtcp2_map *map, int (*func)(void *data, void *ptr), - void *ptr); - -/* - * Inserts the new |data| with the |key| to the map |map|. + * ngtcp2_map_insert inserts the new |data| with the |key| to the map + * |map|. * * This function returns 0 if it succeeds, or one of the following * negative error codes: @@ -82,57 +73,56 @@ void ngtcp2_map_each_free(ngtcp2_map *map, int (*func)(void *data, void *ptr), * NGTCP2_ERR_INVALID_ARGUMENT * The item associated by |key| already exists. * NGTCP2_ERR_NOMEM - * Out of memory + * Out of memory */ int ngtcp2_map_insert(ngtcp2_map *map, ngtcp2_map_key_type key, void *data); /* - * Returns the data associated by the key |key|. If there is no such - * data, this function returns NULL. + * ngtcp2_map_find returns the entry associated by the key |key|. If + * there is no such entry, this function returns NULL. */ -void *ngtcp2_map_find(ngtcp2_map *map, ngtcp2_map_key_type key); +void *ngtcp2_map_find(const ngtcp2_map *map, ngtcp2_map_key_type key); /* - * Removes the data associated by the key |key| from the |map|. The - * removed data is not freed by this function. + * ngtcp2_map_remove removes the entry associated by the key |key| + * from the |map|. The removed entry is not freed by this function. * * This function returns 0 if it succeeds, or one of the following * negative error codes: * * NGTCP2_ERR_INVALID_ARGUMENT - * The data associated by |key| does not exist. + * The entry associated by |key| does not exist. */ int ngtcp2_map_remove(ngtcp2_map *map, ngtcp2_map_key_type key); /* - * Removes all entries from |map|. + * ngtcp2_map_clear removes all entries from |map|. The removed entry + * is not freed by this function. */ void ngtcp2_map_clear(ngtcp2_map *map); /* - * Returns the number of items stored in the map |map|. + * ngtcp2_map_size returns the number of items stored in the map + * |map|. */ -size_t ngtcp2_map_size(ngtcp2_map *map); +size_t ngtcp2_map_size(const ngtcp2_map *map); /* - * Applies the function |func| to each data in the |map| with the - * optional user supplied pointer |ptr|. + * ngtcp2_map_each applies the function |func| to each entry in the + * |map| with the optional user supplied pointer |ptr|. * * If the |func| returns 0, this function calls the |func| with the - * next data. If the |func| returns nonzero, it will not call the + * next entry. If the |func| returns nonzero, it will not call the * |func| for further entries and return the return value of the * |func| immediately. 
Thus, this function returns 0 if all the * invocations of the |func| return 0, or nonzero value which the last * invocation of |func| returns. - * - * Don't use this function to free each data. Use - * ngtcp2_map_each_free() instead. */ -int ngtcp2_map_each(ngtcp2_map *map, int (*func)(void *data, void *ptr), +int ngtcp2_map_each(const ngtcp2_map *map, int (*func)(void *data, void *ptr), void *ptr); #ifndef WIN32 -void ngtcp2_map_print_distance(ngtcp2_map *map); -#endif /* !WIN32 */ +void ngtcp2_map_print_distance(const ngtcp2_map *map); +#endif /* !defined(WIN32) */ -#endif /* NGTCP2_MAP_H */ +#endif /* !defined(NGTCP2_MAP_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_mem.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_mem.c index bcce0b5cdfcf02..d30e1f986e97d3 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_mem.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_mem.c @@ -72,7 +72,7 @@ void *ngtcp2_mem_calloc(const ngtcp2_mem *mem, size_t nmemb, size_t size) { void *ngtcp2_mem_realloc(const ngtcp2_mem *mem, void *ptr, size_t size) { return mem->realloc(ptr, size, mem->user_data); } -#else /* MEMDEBUG */ +#else /* defined(MEMDEBUG) */ void *ngtcp2_mem_malloc_debug(const ngtcp2_mem *mem, size_t size, const char *func, const char *file, size_t line) { void *nptr = mem->malloc(size, mem->user_data); @@ -110,4 +110,4 @@ void *ngtcp2_mem_realloc_debug(const ngtcp2_mem *mem, void *ptr, size_t size, return nptr; } -#endif /* MEMDEBUG */ +#endif /* defined(MEMDEBUG) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_mem.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_mem.h index c99b6c59726891..9f818752125523 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_mem.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_mem.h @@ -28,7 +28,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -42,7 +42,7 @@ void ngtcp2_mem_free(const ngtcp2_mem *mem, void *ptr); void *ngtcp2_mem_calloc(const ngtcp2_mem *mem, size_t nmemb, size_t size); void *ngtcp2_mem_realloc(const ngtcp2_mem *mem, void *ptr, size_t size); -#else /* MEMDEBUG */ +#else /* defined(MEMDEBUG) */ void *ngtcp2_mem_malloc_debug(const ngtcp2_mem *mem, size_t size, const char *func, const char *file, size_t line); @@ -67,6 +67,6 @@ void *ngtcp2_mem_realloc_debug(const ngtcp2_mem *mem, void *ptr, size_t size, # define ngtcp2_mem_realloc(MEM, PTR, SIZE) \ ngtcp2_mem_realloc_debug((MEM), (PTR), (SIZE), __func__, __FILE__, __LINE__) -#endif /* MEMDEBUG */ +#endif /* defined(MEMDEBUG) */ -#endif /* NGTCP2_MEM_H */ +#endif /* !defined(NGTCP2_MEM_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_net.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_net.h index 4a2c4041d4d170..103a2fb2d80714 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_net.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_net.h @@ -30,70 +30,68 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #ifdef HAVE_ARPA_INET_H # include -#endif /* HAVE_ARPA_INET_H */ +#endif /* defined(HAVE_ARPA_INET_H) */ #ifdef HAVE_NETINET_IN_H # include -#endif /* HAVE_NETINET_IN_H */ +#endif /* defined(HAVE_NETINET_IN_H) */ #ifdef HAVE_BYTESWAP_H # include -#endif /* HAVE_BYTESWAP_H */ +#endif /* defined(HAVE_BYTESWAP_H) */ #ifdef HAVE_ENDIAN_H # include -#endif /* HAVE_ENDIAN_H */ +#endif /* defined(HAVE_ENDIAN_H) */ #ifdef HAVE_SYS_ENDIAN_H # include -#endif /* HAVE_SYS_ENDIAN_H */ +#endif /* defined(HAVE_SYS_ENDIAN_H) */ -#if defined(__APPLE__) +#ifdef __APPLE__ # include -#endif // __APPLE__ +#endif /* defined(__APPLE__) */ #include -#if defined(HAVE_BE64TOH) || \ - (defined(HAVE_DECL_BE64TOH) && 
HAVE_DECL_BE64TOH > 0) +#if HAVE_DECL_BE64TOH # define ngtcp2_ntohl64(N) be64toh(N) # define ngtcp2_htonl64(N) htobe64(N) -#else /* !HAVE_BE64TOH */ -# if defined(WORDS_BIGENDIAN) +#else /* !HAVE_DECL_BE64TOH */ +# ifdef WORDS_BIGENDIAN # define ngtcp2_ntohl64(N) (N) # define ngtcp2_htonl64(N) (N) -# else /* !WORDS_BIGENDIAN */ -# if defined(HAVE_BSWAP_64) || \ - (defined(HAVE_DECL_BSWAP_64) && HAVE_DECL_BSWAP_64 > 0) +# else /* !defined(WORDS_BIGENDIAN) */ +# if HAVE_DECL_BSWAP_64 # define ngtcp2_bswap64 bswap_64 # elif defined(WIN32) # define ngtcp2_bswap64 _byteswap_uint64 # elif defined(__APPLE__) # define ngtcp2_bswap64 OSSwapInt64 -# else /* !HAVE_BSWAP_64 && !WIN32 && !__APPLE__ */ +# else /* !(HAVE_DECL_BSWAP_64 || defined(WIN32) || defined(__APPLE__)) */ # define ngtcp2_bswap64(N) \ ((uint64_t)(ngtcp2_ntohl((uint32_t)(N))) << 32 | \ ngtcp2_ntohl((uint32_t)((N) >> 32))) -# endif /* !HAVE_BSWAP_64 && !WIN32 && !__APPLE__ */ +# endif /* !(HAVE_DECL_BSWAP_64 || defined(WIN32) || defined(__APPLE__)) */ # define ngtcp2_ntohl64(N) ngtcp2_bswap64(N) # define ngtcp2_htonl64(N) ngtcp2_bswap64(N) -# endif /* !WORDS_BIGENDIAN */ -#endif /* !HAVE_BE64TOH */ +# endif /* !defined(WORDS_BIGENDIAN) */ +#endif /* !HAVE_DECL_BE64TOH */ -#if defined(WIN32) +#ifdef WIN32 /* Windows requires ws2_32 library for ntonl family functions. We define inline functions for those function so that we don't have dependency on that lib. */ # ifdef _MSC_VER # define STIN static __inline -# else +# else /* !defined(_MSC_VER) */ # define STIN static inline -# endif +# endif /* !defined(_MSC_VER) */ STIN uint32_t ngtcp2_htonl(uint32_t hostlong) { uint32_t res; @@ -131,13 +129,13 @@ STIN uint16_t ngtcp2_ntohs(uint16_t netshort) { return res; } -#else /* !WIN32 */ +#else /* !defined(WIN32) */ # define ngtcp2_htonl htonl # define ngtcp2_htons htons # define ngtcp2_ntohl ntohl # define ngtcp2_ntohs ntohs -#endif /* !WIN32 */ +#endif /* !defined(WIN32) */ -#endif /* NGTCP2_NET_H */ +#endif /* !defined(NGTCP2_NET_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_objalloc.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_objalloc.h index ea73e788317681..cf23de7b2b7f20 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_objalloc.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_objalloc.h @@ -27,7 +27,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -67,9 +67,9 @@ void ngtcp2_objalloc_clear(ngtcp2_objalloc *objalloc); #ifndef NOMEMPOOL # define ngtcp2_objalloc_decl(NAME, TYPE, OPLENTFIELD) \ inline static void ngtcp2_objalloc_##NAME##_init( \ - ngtcp2_objalloc *objalloc, size_t nmemb, const ngtcp2_mem *mem) { \ + ngtcp2_objalloc *objalloc, size_t nmemb, const ngtcp2_mem *mem) { \ ngtcp2_objalloc_init( \ - objalloc, ((sizeof(TYPE) + 0xfu) & ~(uintptr_t)0xfu) * nmemb, mem); \ + objalloc, ((sizeof(TYPE) + 0xfu) & ~(uintptr_t)0xfu) * nmemb, mem); \ } \ \ TYPE *ngtcp2_objalloc_##NAME##_get(ngtcp2_objalloc *objalloc); \ @@ -78,7 +78,7 @@ void ngtcp2_objalloc_clear(ngtcp2_objalloc *objalloc); size_t len); \ \ inline static void ngtcp2_objalloc_##NAME##_release( \ - ngtcp2_objalloc *objalloc, TYPE *obj) { \ + ngtcp2_objalloc *objalloc, TYPE *obj) { \ ngtcp2_opl_push(&objalloc->opl, &obj->OPLENTFIELD); \ } @@ -90,7 +90,7 @@ void ngtcp2_objalloc_clear(ngtcp2_objalloc *objalloc); \ if (!oplent) { \ rv = \ - ngtcp2_balloc_get(&objalloc->balloc, (void **)&obj, sizeof(TYPE)); \ + ngtcp2_balloc_get(&objalloc->balloc, (void **)&obj, sizeof(TYPE)); \ if (rv != 0) { \ return NULL; \ } \ @@ -118,30 +118,30 @@ void 
ngtcp2_objalloc_clear(ngtcp2_objalloc *objalloc); \ return ngtcp2_struct_of(oplent, TYPE, OPLENTFIELD); \ } -#else /* NOMEMPOOL */ +#else /* defined(NOMEMPOOL) */ # define ngtcp2_objalloc_decl(NAME, TYPE, OPLENTFIELD) \ inline static void ngtcp2_objalloc_##NAME##_init( \ - ngtcp2_objalloc *objalloc, size_t nmemb, const ngtcp2_mem *mem) { \ + ngtcp2_objalloc *objalloc, size_t nmemb, const ngtcp2_mem *mem) { \ ngtcp2_objalloc_init( \ - objalloc, ((sizeof(TYPE) + 0xfu) & ~(uintptr_t)0xfu) * nmemb, mem); \ + objalloc, ((sizeof(TYPE) + 0xfu) & ~(uintptr_t)0xfu) * nmemb, mem); \ } \ \ inline static TYPE *ngtcp2_objalloc_##NAME##_get( \ - ngtcp2_objalloc *objalloc) { \ + ngtcp2_objalloc *objalloc) { \ return ngtcp2_mem_malloc(objalloc->balloc.mem, sizeof(TYPE)); \ } \ \ inline static TYPE *ngtcp2_objalloc_##NAME##_len_get( \ - ngtcp2_objalloc *objalloc, size_t len) { \ + ngtcp2_objalloc *objalloc, size_t len) { \ return ngtcp2_mem_malloc(objalloc->balloc.mem, len); \ } \ \ inline static void ngtcp2_objalloc_##NAME##_release( \ - ngtcp2_objalloc *objalloc, TYPE *obj) { \ + ngtcp2_objalloc *objalloc, TYPE *obj) { \ ngtcp2_mem_free(objalloc->balloc.mem, obj); \ } # define ngtcp2_objalloc_def(NAME, TYPE, OPLENTFIELD) -#endif /* NOMEMPOOL */ +#endif /* defined(NOMEMPOOL) */ -#endif /* NGTCP2_OBJALLOC_H */ +#endif /* !defined(NGTCP2_OBJALLOC_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_opl.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_opl.h index 714aa366304f0d..f2df3f6dccd45e 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_opl.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_opl.h @@ -27,7 +27,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -62,4 +62,4 @@ ngtcp2_opl_entry *ngtcp2_opl_pop(ngtcp2_opl *opl); void ngtcp2_opl_clear(ngtcp2_opl *opl); -#endif /* NGTCP2_OPL_H */ +#endif /* !defined(NGTCP2_OPL_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_path.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_path.h index 0c360e936231c8..a708378db32fbb 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_path.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_path.h @@ -27,7 +27,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -46,4 +46,4 @@ void ngtcp2_path_init(ngtcp2_path *path, const ngtcp2_addr *local, void ngtcp2_path_storage_init2(ngtcp2_path_storage *ps, const ngtcp2_path *path); -#endif /* NGTCP2_PATH_H */ +#endif /* !defined(NGTCP2_PATH_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_pkt.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_pkt.c index 1687ff254d94c7..5c82e1bd503f98 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_pkt.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_pkt.c @@ -83,16 +83,19 @@ int ngtcp2_pkt_decode_version_cid(ngtcp2_version_cid *dest, const uint8_t *data, dcidlen = data[5]; len += dcidlen; + if (datalen < len) { return NGTCP2_ERR_INVALID_ARGUMENT; } + scidlen = data[5 + 1 + dcidlen]; len += scidlen; + if (datalen < len) { return NGTCP2_ERR_INVALID_ARGUMENT; } - ngtcp2_get_uint32(&version, &data[1]); + ngtcp2_get_uint32be(&version, &data[1]); supported_version = ngtcp2_is_supported_version(version); @@ -120,6 +123,7 @@ int ngtcp2_pkt_decode_version_cid(ngtcp2_version_cid *dest, const uint8_t *data, if (!supported_version) { return NGTCP2_ERR_VERSION_NEGOTIATION; } + return 0; } @@ -145,16 +149,19 @@ void ngtcp2_pkt_hd_init(ngtcp2_pkt_hd *hd, uint8_t flags, uint8_t type, size_t len) { hd->flags = flags; hd->type = type; + if (dcid) { hd->dcid = *dcid; } else { ngtcp2_cid_zero(&hd->dcid); } + if (scid) { hd->scid = *scid; } else { 
ngtcp2_cid_zero(&hd->scid); } + hd->pkt_num = pkt_num; hd->token = NULL; hd->tokenlen = 0; @@ -170,10 +177,11 @@ ngtcp2_ssize ngtcp2_pkt_decode_hd_long(ngtcp2_pkt_hd *dest, const uint8_t *pkt, size_t dcil, scil; const uint8_t *p; size_t len = 0; - size_t n; size_t ntokenlen = 0; const uint8_t *token = NULL; size_t tokenlen = 0; + size_t nlonglen = 0; + size_t longlen = 0; uint64_t vi; uint8_t flags = NGTCP2_PKT_FLAG_LONG_FORM; @@ -185,7 +193,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_hd_long(ngtcp2_pkt_hd *dest, const uint8_t *pkt, return NGTCP2_ERR_INVALID_ARGUMENT; } - ngtcp2_get_uint32(&version, &pkt[1]); + ngtcp2_get_uint32be(&version, &pkt[1]); if (version == 0) { type = NGTCP2_PKT_VERSION_NEGOTIATION; @@ -227,6 +235,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_hd_long(ngtcp2_pkt_hd *dest, const uint8_t *pkt, p = &pkt[5]; dcil = *p; + if (dcil > NGTCP2_MAX_CIDLEN) { /* QUIC v1 implementation never expect to receive CID length more than NGTCP2_MAX_CIDLEN. */ @@ -240,9 +249,11 @@ ngtcp2_ssize ngtcp2_pkt_decode_hd_long(ngtcp2_pkt_hd *dest, const uint8_t *pkt, p += 1 + dcil; scil = *p; + if (scil > NGTCP2_MAX_CIDLEN) { return NGTCP2_ERR_INVALID_ARGUMENT; } + len += scil; if (pktlen < len) { @@ -264,6 +275,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_hd_long(ngtcp2_pkt_hd *dest, const uint8_t *pkt, if (pktlen - len < vi) { return NGTCP2_ERR_INVALID_ARGUMENT; } + tokenlen = (size_t)vi; len += tokenlen; @@ -285,12 +297,21 @@ ngtcp2_ssize ngtcp2_pkt_decode_hd_long(ngtcp2_pkt_hd *dest, const uint8_t *pkt, } /* Length */ - n = ngtcp2_get_uvarintlen(p); - len += n - 1; + nlonglen = ngtcp2_get_uvarintlen(p); + len += nlonglen - 1; if (pktlen < len) { return NGTCP2_ERR_INVALID_ARGUMENT; } + + ngtcp2_get_uvarint(&vi, p); +#if SIZE_MAX > UINT32_MAX + if (vi > SIZE_MAX) { + return NGTCP2_ERR_INVALID_ARGUMENT; + } +#endif /* SIZE_MAX > UINT32_MAX */ + + longlen = (size_t)vi; } dest->flags = flags; @@ -309,24 +330,11 @@ ngtcp2_ssize ngtcp2_pkt_decode_hd_long(ngtcp2_pkt_hd *dest, const uint8_t *pkt, dest->tokenlen = tokenlen; p += ntokenlen + tokenlen; - switch (type) { - case NGTCP2_PKT_RETRY: - dest->len = 0; - break; - default: - if (!(flags & NGTCP2_PKT_FLAG_LONG_FORM)) { - assert(type == NGTCP2_PKT_VERSION_NEGOTIATION); - /* Version Negotiation is not a long header packet. 
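
Note: the new Length handling above leans on ngtcp2_get_uvarintlen() and ngtcp2_get_uvarint(). For orientation, a self-contained sketch of RFC 9000 §16 variable-length integer decoding; these helpers are illustrative stand-ins, not the ngtcp2 implementations:

#include <stddef.h>
#include <stdint.h>

/* The two most significant bits of the first byte give the encoded
   length: 00 -> 1 byte, 01 -> 2, 10 -> 4, 11 -> 8 (RFC 9000 §16). */
static size_t uvarint_len(const uint8_t *p) {
  return (size_t)1 << (*p >> 6);
}

/* Decodes a variable-length integer starting at |p| and returns a
   pointer just past it.  The caller must already have verified that
   the buffer holds uvarint_len(p) bytes. */
static const uint8_t *get_uvarint(uint64_t *dest, const uint8_t *p) {
  size_t n = uvarint_len(p);
  size_t i;
  uint64_t v = *p & 0x3f; /* drop the 2 length bits */

  for (i = 1; i < n; ++i) {
    v = (v << 8) | p[i];
  }

  *dest = v;

  return p + n;
}
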
*/ - dest->len = 0; - break; - } + dest->len = longlen; - p = ngtcp2_get_uvarint(&vi, p); - if (vi > SIZE_MAX) { - return NGTCP2_ERR_INVALID_ARGUMENT; - } - dest->len = (size_t)vi; - } +#ifndef NDEBUG + p += nlonglen; +#endif /* !defined(NDEBUG) */ assert((size_t)(p - pkt) == len); @@ -401,6 +409,7 @@ ngtcp2_ssize ngtcp2_pkt_encode_hd_long(uint8_t *out, size_t outlen, *p = (uint8_t)(NGTCP2_HEADER_FORM_BIT | (ngtcp2_pkt_versioned_type(hd->version, hd->type) << 4) | (uint8_t)(hd->pkt_numlen - 1)); + if (!(hd->flags & NGTCP2_PKT_FLAG_FIXED_BIT_CLEAR)) { *p |= NGTCP2_FIXED_BIT_MASK; } @@ -409,16 +418,20 @@ ngtcp2_ssize ngtcp2_pkt_encode_hd_long(uint8_t *out, size_t outlen, p = ngtcp2_put_uint32be(p, hd->version); *p++ = (uint8_t)hd->dcid.datalen; + if (hd->dcid.datalen) { p = ngtcp2_cpymem(p, hd->dcid.data, hd->dcid.datalen); } + *p++ = (uint8_t)hd->scid.datalen; + if (hd->scid.datalen) { p = ngtcp2_cpymem(p, hd->scid.data, hd->scid.datalen); } if (hd->type == NGTCP2_PKT_INITIAL) { p = ngtcp2_put_uvarint(p, hd->tokenlen); + if (hd->tokenlen) { p = ngtcp2_cpymem(p, hd->token, hd->tokenlen); } @@ -446,9 +459,11 @@ ngtcp2_ssize ngtcp2_pkt_encode_hd_short(uint8_t *out, size_t outlen, p = out; *p = (uint8_t)(hd->pkt_numlen - 1); + if (!(hd->flags & NGTCP2_PKT_FLAG_FIXED_BIT_CLEAR)) { *p |= NGTCP2_FIXED_BIT_MASK; } + if (hd->flags & NGTCP2_PKT_FLAG_KEY_PHASE) { *p |= NGTCP2_SHORT_KEY_PHASE_BIT; } @@ -503,7 +518,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_frame(ngtcp2_frame *dest, const uint8_t *payload, payloadlen); case NGTCP2_FRAME_STREAM_DATA_BLOCKED: return ngtcp2_pkt_decode_stream_data_blocked_frame( - &dest->stream_data_blocked, payload, payloadlen); + &dest->stream_data_blocked, payload, payloadlen); case NGTCP2_FRAME_STREAMS_BLOCKED_BIDI: case NGTCP2_FRAME_STREAMS_BLOCKED_UNI: return ngtcp2_pkt_decode_streams_blocked_frame(&dest->streams_blocked, @@ -530,7 +545,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_frame(ngtcp2_frame *dest, const uint8_t *payload, payloadlen); case NGTCP2_FRAME_RETIRE_CONNECTION_ID: return ngtcp2_pkt_decode_retire_connection_id_frame( - &dest->retire_connection_id, payload, payloadlen); + &dest->retire_connection_id, payload, payloadlen); case NGTCP2_FRAME_HANDSHAKE_DONE: return ngtcp2_pkt_decode_handshake_done_frame(&dest->handshake_done, payload, payloadlen); @@ -613,6 +628,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_stream_frame(ngtcp2_stream *dest, if (payloadlen - len < vi) { return NGTCP2_ERR_FRAME_ENCODING; } + datalen = (size_t)vi; len += datalen; } else { @@ -752,7 +768,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_ack_frame(ngtcp2_ack *dest, } /* TODO We might not decode all ranges. It could be very large. 
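
Note: the ACK decoder below caps the ranges it materializes at NGTCP2_MAX_ACK_RANGES and merely skips over the rest. As a reminder of what those gap/len pairs encode, a sketch of expanding them into packet-number intervals per RFC 9000 §19.3.1; the types and report callback are illustrative, and inputs are assumed already validated so the subtractions cannot underflow:

#include <stddef.h>
#include <stdint.h>

typedef struct ack_range {
  uint64_t gap;
  uint64_t len;
} ack_range;

/* Walks decoded ACK ranges, reporting each [smallest, largest]
   interval of acknowledged packet numbers. */
static void walk_ack_ranges(uint64_t largest_ack, uint64_t first_range,
                            const ack_range *ranges, size_t rangecnt,
                            void (*report)(uint64_t smallest,
                                           uint64_t largest)) {
  uint64_t largest = largest_ack;
  uint64_t smallest = largest - first_range;
  size_t i;

  report(smallest, largest);

  for (i = 0; i < rangecnt; ++i) {
    /* Gap is two less than the distance from the previous range's
       smallest packet number down to this range's largest. */
    largest = smallest - ranges[i].gap - 2;
    smallest = largest - ranges[i].len;
    report(smallest, largest);
  }
}
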
*/ - max_rangecnt = ngtcp2_min(NGTCP2_MAX_ACK_RANGES, rangecnt); + max_rangecnt = ngtcp2_min_size(NGTCP2_MAX_ACK_RANGES, rangecnt); p = payload + 1; @@ -770,7 +786,8 @@ ngtcp2_ssize ngtcp2_pkt_decode_ack_frame(ngtcp2_ack *dest, p = ngtcp2_get_uvarint(&range->gap, p); p = ngtcp2_get_uvarint(&range->len, p); } - for (i = max_rangecnt; i < rangecnt; ++i) { + + for (; i < rangecnt; ++i) { p += ngtcp2_get_uvarintlen(p); p += ngtcp2_get_uvarintlen(p); } @@ -820,18 +837,23 @@ ngtcp2_ssize ngtcp2_pkt_decode_reset_stream_frame(ngtcp2_reset_stream *dest, n = ngtcp2_get_uvarintlen(p); len += n - 1; + if (payloadlen < len) { return NGTCP2_ERR_FRAME_ENCODING; } + p += n; n = ngtcp2_get_uvarintlen(p); len += n - 1; + if (payloadlen < len) { return NGTCP2_ERR_FRAME_ENCODING; } + p += n; n = ngtcp2_get_uvarintlen(p); len += n - 1; + if (payloadlen < len) { return NGTCP2_ERR_FRAME_ENCODING; } @@ -849,7 +871,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_reset_stream_frame(ngtcp2_reset_stream *dest, } ngtcp2_ssize ngtcp2_pkt_decode_connection_close_frame( - ngtcp2_connection_close *dest, const uint8_t *payload, size_t payloadlen) { + ngtcp2_connection_close *dest, const uint8_t *payload, size_t payloadlen) { size_t len = 1 + 1 + 1; const uint8_t *p; size_t reasonlen; @@ -868,6 +890,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_connection_close_frame( n = ngtcp2_get_uvarintlen(p); len += n - 1; + if (payloadlen < len) { return NGTCP2_ERR_FRAME_ENCODING; } @@ -879,6 +902,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_connection_close_frame( n = ngtcp2_get_uvarintlen(p); len += n - 1; + if (payloadlen < len) { return NGTCP2_ERR_FRAME_ENCODING; } @@ -888,6 +912,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_connection_close_frame( nreasonlen = ngtcp2_get_uvarintlen(p); len += nreasonlen - 1; + if (payloadlen < len) { return NGTCP2_ERR_FRAME_ENCODING; } @@ -896,6 +921,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_connection_close_frame( if (payloadlen - len < vi) { return NGTCP2_ERR_FRAME_ENCODING; } + reasonlen = (size_t)vi; len += reasonlen; @@ -903,13 +929,16 @@ ngtcp2_ssize ngtcp2_pkt_decode_connection_close_frame( dest->type = type; p = ngtcp2_get_uvarint(&dest->error_code, p); + if (type == NGTCP2_FRAME_CONNECTION_CLOSE) { p = ngtcp2_get_uvarint(&dest->frame_type, p); } else { dest->frame_type = 0; } + dest->reasonlen = reasonlen; p += nreasonlen; + if (reasonlen == 0) { dest->reason = NULL; } else { @@ -951,7 +980,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_max_data_frame(ngtcp2_max_data *dest, } ngtcp2_ssize ngtcp2_pkt_decode_max_stream_data_frame( - ngtcp2_max_stream_data *dest, const uint8_t *payload, size_t payloadlen) { + ngtcp2_max_stream_data *dest, const uint8_t *payload, size_t payloadlen) { size_t len = 1 + 1 + 1; const uint8_t *p; size_t n; @@ -1055,10 +1084,8 @@ ngtcp2_ssize ngtcp2_pkt_decode_data_blocked_frame(ngtcp2_data_blocked *dest, return (ngtcp2_ssize)len; } -ngtcp2_ssize -ngtcp2_pkt_decode_stream_data_blocked_frame(ngtcp2_stream_data_blocked *dest, - const uint8_t *payload, - size_t payloadlen) { +ngtcp2_ssize ngtcp2_pkt_decode_stream_data_blocked_frame( + ngtcp2_stream_data_blocked *dest, const uint8_t *payload, size_t payloadlen) { size_t len = 1 + 1 + 1; const uint8_t *p; size_t n; @@ -1097,7 +1124,7 @@ ngtcp2_pkt_decode_stream_data_blocked_frame(ngtcp2_stream_data_blocked *dest, } ngtcp2_ssize ngtcp2_pkt_decode_streams_blocked_frame( - ngtcp2_streams_blocked *dest, const uint8_t *payload, size_t payloadlen) { + ngtcp2_streams_blocked *dest, const uint8_t *payload, size_t payloadlen) { size_t len = 1 + 1; const uint8_t *p; size_t n; @@ 
-1124,7 +1151,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_streams_blocked_frame( } ngtcp2_ssize ngtcp2_pkt_decode_new_connection_id_frame( - ngtcp2_new_connection_id *dest, const uint8_t *payload, size_t payloadlen) { + ngtcp2_new_connection_id *dest, const uint8_t *payload, size_t payloadlen) { size_t len = 1 + 1 + 1 + 1 + 16; const uint8_t *p; size_t n; @@ -1138,6 +1165,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_new_connection_id_frame( n = ngtcp2_get_uvarintlen(p); len += n - 1; + if (payloadlen < len) { return NGTCP2_ERR_FRAME_ENCODING; } @@ -1146,6 +1174,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_new_connection_id_frame( n = ngtcp2_get_uvarintlen(p); len += n - 1; + if (payloadlen < len) { return NGTCP2_ERR_FRAME_ENCODING; } @@ -1197,6 +1226,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_stop_sending_frame(ngtcp2_stop_sending *dest, if (payloadlen < len) { return NGTCP2_ERR_FRAME_ENCODING; } + p += n; n = ngtcp2_get_uvarintlen(p); len += n - 1; @@ -1307,6 +1337,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_crypto_frame(ngtcp2_stream *dest, p = ngtcp2_get_uvarint(&dest->offset, p); dest->data[0].len = datalen; p += ndatalen; + if (dest->data[0].len) { dest->data[0].base = (uint8_t *)p; p += dest->data[0].len; @@ -1347,6 +1378,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_new_token_frame(ngtcp2_new_token *dest, if (payloadlen - len < vi) { return NGTCP2_ERR_FRAME_ENCODING; } + datalen = (size_t)vi; len += datalen; @@ -1442,6 +1474,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_datagram_frame(ngtcp2_datagram *dest, datalen = (size_t)vi; len += datalen; + break; default: ngtcp2_unreachable(); @@ -1496,7 +1529,7 @@ ngtcp2_ssize ngtcp2_pkt_encode_frame(uint8_t *out, size_t outlen, return ngtcp2_pkt_encode_data_blocked_frame(out, outlen, &fr->data_blocked); case NGTCP2_FRAME_STREAM_DATA_BLOCKED: return ngtcp2_pkt_encode_stream_data_blocked_frame( - out, outlen, &fr->stream_data_blocked); + out, outlen, &fr->stream_data_blocked); case NGTCP2_FRAME_STREAMS_BLOCKED_BIDI: case NGTCP2_FRAME_STREAMS_BLOCKED_UNI: return ngtcp2_pkt_encode_streams_blocked_frame(out, outlen, @@ -1518,7 +1551,7 @@ ngtcp2_ssize ngtcp2_pkt_encode_frame(uint8_t *out, size_t outlen, return ngtcp2_pkt_encode_new_token_frame(out, outlen, &fr->new_token); case NGTCP2_FRAME_RETIRE_CONNECTION_ID: return ngtcp2_pkt_encode_retire_connection_id_frame( - out, outlen, &fr->retire_connection_id); + out, outlen, &fr->retire_connection_id); case NGTCP2_FRAME_HANDSHAKE_DONE: return ngtcp2_pkt_encode_handshake_done_frame(out, outlen, &fr->handshake_done); @@ -1586,7 +1619,7 @@ ngtcp2_ssize ngtcp2_pkt_encode_stream_frame(uint8_t *out, size_t outlen, } ngtcp2_ssize ngtcp2_pkt_encode_ack_frame(uint8_t *out, size_t outlen, - ngtcp2_ack *fr) { + const ngtcp2_ack *fr) { size_t len = 1 + ngtcp2_put_uvarintlen((uint64_t)fr->largest_ack) + ngtcp2_put_uvarintlen(fr->ack_delay) + ngtcp2_put_uvarintlen(fr->rangecnt) + @@ -1676,8 +1709,8 @@ ngtcp2_pkt_encode_connection_close_frame(uint8_t *out, size_t outlen, const ngtcp2_connection_close *fr) { size_t len = 1 + ngtcp2_put_uvarintlen(fr->error_code) + (fr->type == NGTCP2_FRAME_CONNECTION_CLOSE - ? ngtcp2_put_uvarintlen(fr->frame_type) - : 0) + + ? 
ngtcp2_put_uvarintlen(fr->frame_type) + : 0) + ngtcp2_put_uvarintlen(fr->reasonlen) + fr->reasonlen; uint8_t *p; @@ -1689,10 +1722,13 @@ ngtcp2_pkt_encode_connection_close_frame(uint8_t *out, size_t outlen, *p++ = (uint8_t)fr->type; p = ngtcp2_put_uvarint(p, fr->error_code); + if (fr->type == NGTCP2_FRAME_CONNECTION_CLOSE) { p = ngtcp2_put_uvarint(p, fr->frame_type); } + p = ngtcp2_put_uvarint(p, fr->reasonlen); + if (fr->reasonlen) { p = ngtcp2_cpymem(p, fr->reason, fr->reasonlen); } @@ -1796,7 +1832,7 @@ ngtcp2_pkt_encode_data_blocked_frame(uint8_t *out, size_t outlen, } ngtcp2_ssize ngtcp2_pkt_encode_stream_data_blocked_frame( - uint8_t *out, size_t outlen, const ngtcp2_stream_data_blocked *fr) { + uint8_t *out, size_t outlen, const ngtcp2_stream_data_blocked *fr) { size_t len = 1 + ngtcp2_put_uvarintlen((uint64_t)fr->stream_id) + ngtcp2_put_uvarintlen(fr->offset); uint8_t *p; @@ -1986,7 +2022,7 @@ ngtcp2_ssize ngtcp2_pkt_encode_new_token_frame(uint8_t *out, size_t outlen, } ngtcp2_ssize ngtcp2_pkt_encode_retire_connection_id_frame( - uint8_t *out, size_t outlen, const ngtcp2_retire_connection_id *fr) { + uint8_t *out, size_t outlen, const ngtcp2_retire_connection_id *fr) { size_t len = 1 + ngtcp2_put_uvarintlen(fr->seq); uint8_t *p; @@ -2023,9 +2059,9 @@ ngtcp2_ssize ngtcp2_pkt_encode_datagram_frame(uint8_t *out, size_t outlen, const ngtcp2_datagram *fr) { uint64_t datalen = ngtcp2_vec_len(fr->data, fr->datacnt); uint64_t len = - 1 + - (fr->type == NGTCP2_FRAME_DATAGRAM ? 0 : ngtcp2_put_uvarintlen(datalen)) + - datalen; + 1 + + (fr->type == NGTCP2_FRAME_DATAGRAM ? 0 : ngtcp2_put_uvarintlen(datalen)) + + datalen; uint8_t *p; size_t i; @@ -2039,6 +2075,7 @@ ngtcp2_ssize ngtcp2_pkt_encode_datagram_frame(uint8_t *out, size_t outlen, p = out; *p++ = (uint8_t)fr->type; + if (fr->type == NGTCP2_FRAME_DATAGRAM_LEN) { p = ngtcp2_put_uvarint(p, datalen); } @@ -2055,9 +2092,9 @@ ngtcp2_ssize ngtcp2_pkt_encode_datagram_frame(uint8_t *out, size_t outlen, } ngtcp2_ssize ngtcp2_pkt_write_version_negotiation( - uint8_t *dest, size_t destlen, uint8_t unused_random, const uint8_t *dcid, - size_t dcidlen, const uint8_t *scid, size_t scidlen, const uint32_t *sv, - size_t nsv) { + uint8_t *dest, size_t destlen, uint8_t unused_random, const uint8_t *dcid, + size_t dcidlen, const uint8_t *scid, size_t scidlen, const uint32_t *sv, + size_t nsv) { size_t len = 1 + 4 + 1 + dcidlen + 1 + scidlen + nsv * 4; uint8_t *p; size_t i; @@ -2074,10 +2111,13 @@ ngtcp2_ssize ngtcp2_pkt_write_version_negotiation( *p++ = 0xc0 | unused_random; p = ngtcp2_put_uint32be(p, 0); *p++ = (uint8_t)dcidlen; + if (dcidlen) { p = ngtcp2_cpymem(p, dcid, dcidlen); } + *p++ = (uint8_t)scidlen; + if (scidlen) { p = ngtcp2_cpymem(p, scid, scidlen); } @@ -2099,7 +2139,7 @@ size_t ngtcp2_pkt_decode_version_negotiation(uint32_t *dest, assert((payloadlen % sizeof(uint32_t)) == 0); for (; payload != end;) { - payload = ngtcp2_get_uint32(dest++, payload); + payload = ngtcp2_get_uint32be(dest++, payload); } return payloadlen / sizeof(uint32_t); @@ -2150,13 +2190,15 @@ int64_t ngtcp2_pkt_adjust_pkt_num(int64_t max_pkt_num, int64_t pkt_num, assert(cand <= (int64_t)NGTCP2_MAX_VARINT - win); return cand + win; } + if (cand > expected + hwin && cand >= win) { return cand - win; } + return cand; } -int ngtcp2_pkt_validate_ack(ngtcp2_ack *fr, int64_t min_pkt_num) { +int ngtcp2_pkt_validate_ack(const ngtcp2_ack *fr, int64_t min_pkt_num) { int64_t largest_ack = fr->largest_ack; size_t i; @@ -2208,7 +2250,7 @@ ngtcp2_pkt_write_stateless_reset(uint8_t *dest, 
size_t destlen, p = dest; - randlen = ngtcp2_min(destlen - NGTCP2_STATELESS_RESET_TOKENLEN, randlen); + randlen = ngtcp2_min_size(destlen - NGTCP2_STATELESS_RESET_TOKENLEN, randlen); p = ngtcp2_cpymem(p, rand, randlen); p = ngtcp2_cpymem(p, stateless_reset_token, NGTCP2_STATELESS_RESET_TOKENLEN); @@ -2218,10 +2260,10 @@ ngtcp2_pkt_write_stateless_reset(uint8_t *dest, size_t destlen, } ngtcp2_ssize ngtcp2_pkt_write_retry( - uint8_t *dest, size_t destlen, uint32_t version, const ngtcp2_cid *dcid, - const ngtcp2_cid *scid, const ngtcp2_cid *odcid, const uint8_t *token, - size_t tokenlen, ngtcp2_encrypt encrypt, const ngtcp2_crypto_aead *aead, - const ngtcp2_crypto_aead_ctx *aead_ctx) { + uint8_t *dest, size_t destlen, uint32_t version, const ngtcp2_cid *dcid, + const ngtcp2_cid *scid, const ngtcp2_cid *odcid, const uint8_t *token, + size_t tokenlen, ngtcp2_encrypt encrypt, const ngtcp2_crypto_aead *aead, + const ngtcp2_crypto_aead_ctx *aead_ctx) { ngtcp2_pkt_hd hd; uint8_t pseudo_retry[1500]; ngtcp2_ssize pseudo_retrylen; @@ -2247,8 +2289,8 @@ ngtcp2_ssize ngtcp2_pkt_write_retry( /* len = */ 0); pseudo_retrylen = - ngtcp2_pkt_encode_pseudo_retry(pseudo_retry, sizeof(pseudo_retry), &hd, - /* unused = */ 0, odcid, token, tokenlen); + ngtcp2_pkt_encode_pseudo_retry(pseudo_retry, sizeof(pseudo_retry), &hd, + /* unused = */ 0, odcid, token, tokenlen); if (pseudo_retrylen < 0) { return pseudo_retrylen; } @@ -2285,8 +2327,8 @@ ngtcp2_ssize ngtcp2_pkt_write_retry( } ngtcp2_ssize ngtcp2_pkt_encode_pseudo_retry( - uint8_t *dest, size_t destlen, const ngtcp2_pkt_hd *hd, uint8_t unused, - const ngtcp2_cid *odcid, const uint8_t *token, size_t tokenlen) { + uint8_t *dest, size_t destlen, const ngtcp2_pkt_hd *hd, uint8_t unused, + const ngtcp2_cid *odcid, const uint8_t *token, size_t tokenlen) { uint8_t *p = dest; ngtcp2_ssize nwrite; @@ -2382,23 +2424,23 @@ size_t ngtcp2_pkt_stream_max_datalen(int64_t stream_id, uint64_t offset, if (left > 8 + 1073741823 && len > 1073741823) { #if SIZE_MAX > UINT32_MAX - len = ngtcp2_min(len, 4611686018427387903lu); + len = ngtcp2_min_uint64(len, 4611686018427387903lu); #endif /* SIZE_MAX > UINT32_MAX */ - return (size_t)ngtcp2_min(len, (uint64_t)(left - 8)); + return (size_t)ngtcp2_min_uint64(len, (uint64_t)(left - 8)); } if (left > 4 + 16383 && len > 16383) { - len = ngtcp2_min(len, 1073741823); - return (size_t)ngtcp2_min(len, (uint64_t)(left - 4)); + len = ngtcp2_min_uint64(len, 1073741823); + return (size_t)ngtcp2_min_uint64(len, (uint64_t)(left - 4)); } if (left > 2 + 63 && len > 63) { - len = ngtcp2_min(len, 16383); - return (size_t)ngtcp2_min(len, (uint64_t)(left - 2)); + len = ngtcp2_min_uint64(len, 16383); + return (size_t)ngtcp2_min_uint64(len, (uint64_t)(left - 2)); } - len = ngtcp2_min(len, 63); - return (size_t)ngtcp2_min(len, (uint64_t)(left - 1)); + len = ngtcp2_min_uint64(len, 63); + return (size_t)ngtcp2_min_uint64(len, (uint64_t)(left - 1)); } size_t ngtcp2_pkt_crypto_max_datalen(uint64_t offset, size_t len, size_t left) { @@ -2414,23 +2456,23 @@ size_t ngtcp2_pkt_crypto_max_datalen(uint64_t offset, size_t len, size_t left) { if (left > 8 + 1073741823 && len > 1073741823) { #if SIZE_MAX > UINT32_MAX - len = ngtcp2_min(len, 4611686018427387903lu); + len = ngtcp2_min_size(len, 4611686018427387903lu); #endif /* SIZE_MAX > UINT32_MAX */ - return ngtcp2_min(len, left - 8); + return ngtcp2_min_size(len, left - 8); } if (left > 4 + 16383 && len > 16383) { - len = ngtcp2_min(len, 1073741823); - return ngtcp2_min(len, left - 4); + len = ngtcp2_min_size(len, 
1073741823); + return ngtcp2_min_size(len, left - 4); } if (left > 2 + 63 && len > 63) { - len = ngtcp2_min(len, 16383); - return ngtcp2_min(len, left - 2); + len = ngtcp2_min_size(len, 16383); + return ngtcp2_min_size(len, left - 2); } - len = ngtcp2_min(len, 63); - return ngtcp2_min(len, left - 1); + len = ngtcp2_min_size(len, 63); + return ngtcp2_min_size(len, left - 1); } size_t ngtcp2_pkt_datagram_framelen(size_t len) { @@ -2528,5 +2570,6 @@ int ngtcp2_pkt_verify_reserved_bits(uint8_t c) { if (c & NGTCP2_HEADER_FORM_BIT) { return (c & NGTCP2_LONG_RESERVED_BIT_MASK) == 0 ? 0 : NGTCP2_ERR_PROTO; } + return (c & NGTCP2_SHORT_RESERVED_BIT_MASK) == 0 ? 0 : NGTCP2_ERR_PROTO; } diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_pkt.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_pkt.h index feec4d32c97bdd..86ebecef7bc870 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_pkt.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_pkt.h @@ -27,7 +27,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -41,7 +41,6 @@ #define NGTCP2_LONG_RESERVED_BIT_MASK 0x0c /* Short header specific macros */ -#define NGTCP2_SHORT_SPIN_BIT_MASK 0x20 #define NGTCP2_SHORT_RESERVED_BIT_MASK 0x18 #define NGTCP2_SHORT_KEY_PHASE_BIT 0x04 @@ -53,6 +52,7 @@ LENGTH<1> + PKN<1> */ #define NGTCP2_MIN_LONG_HEADERLEN (1 + 4 + 1 + 1 + 1 + 1) +/* STREAM frame specific macros */ #define NGTCP2_STREAM_FIN_BIT 0x01 #define NGTCP2_STREAM_LEN_BIT 0x02 #define NGTCP2_STREAM_OFF_BIT 0x04 @@ -72,9 +72,6 @@ the beginning of the payload. */ #define NGTCP2_DATAGRAM_OVERHEAD (1 + 8) -/* NGTCP2_MIN_FRAME_PAYLOADLEN is the minimum frame payload length. */ -#define NGTCP2_MIN_FRAME_PAYLOADLEN 16 - /* NGTCP2_MAX_SERVER_STREAM_ID_BIDI is the maximum bidirectional server stream ID. */ #define NGTCP2_MAX_SERVER_STREAM_ID_BIDI ((int64_t)0x3ffffffffffffffdll) @@ -95,16 +92,15 @@ /* NGTCP2_MAX_PKT_NUM is the maximum packet number. */ #define NGTCP2_MAX_PKT_NUM ((int64_t)((1ll << 62) - 1)) -/* NGTCP2_MIN_PKT_EXPANDLEN is the minimum packet size expansion in - addition to the minimum DCID length to hide/trigger Stateless - Reset. */ +/* NGTCP2_MIN_PKT_EXPANDLEN is the minimum packet size expansion to + hide/trigger Stateless Reset. */ #define NGTCP2_MIN_PKT_EXPANDLEN 22 /* NGTCP2_RETRY_TAGLEN is the length of Retry packet integrity tag. */ #define NGTCP2_RETRY_TAGLEN 16 -/* NGTCP2_HARD_MAX_UDP_PAYLOAD_SIZE is the maximum UDP payload size - that this library can write. */ +/* NGTCP2_HARD_MAX_UDP_PAYLOAD_SIZE is the maximum UDP datagram + payload size that this library can write. */ #define NGTCP2_HARD_MAX_UDP_PAYLOAD_SIZE ((1 << 24) - 1) /* NGTCP2_PKT_LENGTHLEN is the number of bytes that is occupied by @@ -357,6 +353,11 @@ typedef union ngtcp2_frame { ngtcp2_retire_connection_id retire_connection_id; ngtcp2_handshake_done handshake_done; ngtcp2_datagram datagram; + /* Extend ngtcp2_frame so that ngtcp2_stream has at least additional + 3 ngtcp2_vec, totaling 4 slots, which can store HEADERS header, + HEADERS payload, DATA header, and DATA payload in the standard + sized ngtcp2_frame_chain. */ + uint8_t pad[sizeof(ngtcp2_stream) + sizeof(ngtcp2_vec) * 3]; } ngtcp2_frame; typedef struct ngtcp2_pkt_chain ngtcp2_pkt_chain; @@ -371,7 +372,7 @@ struct ngtcp2_pkt_chain { uint8_t *pkt; /* pktlen is length of a QUIC packet. */ size_t pktlen; - /* dgramlen is length of UDP datagram that a QUIC packet is + /* dgramlen is length of UDP datagram payload that a QUIC packet is included. 
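
Note: the pad member added to ngtcp2_frame just above reserves room for three extra ngtcp2_vec elements behind ngtcp2_stream's trailing data array, so a standard-sized frame chain can carry four vectors without a separate allocation. A reduced sketch of the idiom; the demo_* types are illustrative, not the real ngtcp2 layouts:

#include <stddef.h>
#include <stdint.h>

typedef struct demo_vec {
  uint8_t *base;
  size_t len;
} demo_vec;

typedef struct demo_stream {
  uint64_t type;
  size_t datacnt;
  demo_vec data[1]; /* trailing array; more elements may follow */
} demo_stream;

typedef union demo_frame {
  uint64_t type;
  demo_stream stream;
  /* Reserve space for three additional demo_vec entries after
     data[0], four in total. */
  uint8_t pad[sizeof(demo_stream) + sizeof(demo_vec) * 3];
} demo_frame;

/* Returns how many demo_vec entries fit in one demo_frame: data[0]
   lives inside demo_stream itself, and the pad guarantees space for
   at least three more after it. */
static size_t demo_frame_max_vecs(void) {
  return 1 + (sizeof(demo_frame) - sizeof(demo_stream)) / sizeof(demo_vec);
}
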
*/ size_t dgramlen; ngtcp2_tstamp ts; @@ -403,11 +404,11 @@ void ngtcp2_pkt_chain_del(ngtcp2_pkt_chain *pc, const ngtcp2_mem *mem); /* * ngtcp2_pkt_hd_init initializes |hd| with the given values. If - * |dcid| and/or |scid| is NULL, DCID and SCID of |hd| is empty - * respectively. |pkt_numlen| is the number of bytes used to encode - * |pkt_num| and either 1, 2, or 4. |version| is QUIC version for - * long header. |len| is the length field of Initial, 0RTT, and - * Handshake packets. + * |dcid| and/or |scid| is NULL, Destination Connection ID and/or + * Source Connection ID of |hd| is empty respectively. |pkt_numlen| + * is the number of bytes used to encode |pkt_num| and either 1, 2, or + * 4. |version| is QUIC version for long header. |len| is the length + * field of Initial, 0RTT, and Handshake packets. */ void ngtcp2_pkt_hd_init(ngtcp2_pkt_hd *hd, uint8_t flags, uint8_t type, const ngtcp2_cid *dcid, const ngtcp2_cid *scid, @@ -417,8 +418,8 @@ void ngtcp2_pkt_hd_init(ngtcp2_pkt_hd *hd, uint8_t flags, uint8_t type, /* * ngtcp2_pkt_encode_hd_long encodes |hd| as QUIC long header into * |out| which has length |outlen|. It returns the number of bytes - * written into |outlen| if it succeeds, or one of the following - * negative error codes: + * written into |out| if it succeeds, or one of the following negative + * error codes: * * NGTCP2_ERR_NOBUF * Buffer is too short @@ -429,8 +430,8 @@ ngtcp2_ssize ngtcp2_pkt_encode_hd_long(uint8_t *out, size_t outlen, /* * ngtcp2_pkt_encode_hd_short encodes |hd| as QUIC short header into * |out| which has length |outlen|. It returns the number of bytes - * written into |outlen| if it succeeds, or one of the following - * negative error codes: + * written into |out| if it succeeds, or one of the following negative + * error codes: * * NGTCP2_ERR_NOBUF * Buffer is too short @@ -457,7 +458,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_frame(ngtcp2_frame *dest, const uint8_t *payload, /** * @function * - * `ngtcp2_pkt_encode_frame` encodes a frame |fm| into the buffer + * `ngtcp2_pkt_encode_frame` encodes a frame |fr| into the buffer * pointed by |out| of length |outlen|. * * This function returns the number of bytes written to the buffer, or @@ -543,7 +544,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_ack_frame(ngtcp2_ack *dest, /* * ngtcp2_pkt_decode_padding_frame decodes contiguous PADDING frames * from |payload| of length |payloadlen|. It continues to parse - * frames as long as the frame type is PADDING. This finishes when it + * frames as long as the frame type is PADDING. It finishes when it * encounters the frame type which is not PADDING, or all input data * is read. The first byte (payload[0]) must be NGTCP2_FRAME_PADDING. * This function returns the exact number of bytes read to decode @@ -573,7 +574,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_reset_stream_frame(ngtcp2_reset_stream *dest, * ngtcp2_pkt_decode_connection_close_frame decodes CONNECTION_CLOSE * frame from |payload| of length |payloadlen|. The result is stored * in the object pointed by |dest|. CONNECTION_CLOSE frame must start - * at payload[0]. This function finishes it decodes one + * at payload[0]. This function finishes when it decodes one * CONNECTION_CLOSE frame, and returns the exact number of bytes read * to decode a frame if it succeeds, or one of the following negative * error codes: @@ -582,7 +583,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_reset_stream_frame(ngtcp2_reset_stream *dest, * Payload is too short to include CONNECTION_CLOSE frame. 
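
Note: the CONNECTION_CLOSE decoder documented below follows the length-accounting discipline used throughout this file: |len| starts at the smallest possible frame (one byte per varint), grows by n - 1 as each varint's real size |n| becomes known, and payloadlen is re-checked after every step. Condensed into a toy validator with a simplified frame layout; uvarint_len repeats the illustrative helper sketched earlier:

#include <stddef.h>
#include <stdint.h>

#define DEMO_ERR_FRAME_ENCODING -1

static size_t uvarint_len(const uint8_t *p) { return (size_t)1 << (*p >> 6); }

/* Validates a toy frame consisting of a type byte followed by two
   variable-length integers. */
static int demo_validate_frame(const uint8_t *payload, size_t payloadlen) {
  size_t len = 1 + 1 + 1; /* type + two 1-byte varints at minimum */
  const uint8_t *p;
  size_t n;
  size_t i;

  if (payloadlen < len) {
    return DEMO_ERR_FRAME_ENCODING;
  }

  p = payload + 1;

  for (i = 0; i < 2; ++i) {
    n = uvarint_len(p);
    len += n - 1; /* widen by the varint's extra bytes */

    if (payloadlen < len) {
      return DEMO_ERR_FRAME_ENCODING;
    }

    p += n;
  }

  return 0;
}
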
*/ ngtcp2_ssize ngtcp2_pkt_decode_connection_close_frame( - ngtcp2_connection_close *dest, const uint8_t *payload, size_t payloadlen); + ngtcp2_connection_close *dest, const uint8_t *payload, size_t payloadlen); /* * ngtcp2_pkt_decode_max_data_frame decodes MAX_DATA frame from @@ -612,7 +613,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_max_data_frame(ngtcp2_max_data *dest, * Payload is too short to include MAX_STREAM_DATA frame. */ ngtcp2_ssize ngtcp2_pkt_decode_max_stream_data_frame( - ngtcp2_max_stream_data *dest, const uint8_t *payload, size_t payloadlen); + ngtcp2_max_stream_data *dest, const uint8_t *payload, size_t payloadlen); /* * ngtcp2_pkt_decode_max_streams_frame decodes MAX_STREAMS frame from @@ -668,10 +669,8 @@ ngtcp2_ssize ngtcp2_pkt_decode_data_blocked_frame(ngtcp2_data_blocked *dest, * NGTCP2_ERR_FRAME_ENCODING * Payload is too short to include STREAM_DATA_BLOCKED frame. */ -ngtcp2_ssize -ngtcp2_pkt_decode_stream_data_blocked_frame(ngtcp2_stream_data_blocked *dest, - const uint8_t *payload, - size_t payloadlen); +ngtcp2_ssize ngtcp2_pkt_decode_stream_data_blocked_frame( + ngtcp2_stream_data_blocked *dest, const uint8_t *payload, size_t payloadlen); /* * ngtcp2_pkt_decode_streams_blocked_frame decodes STREAMS_BLOCKED @@ -686,7 +685,7 @@ ngtcp2_pkt_decode_stream_data_blocked_frame(ngtcp2_stream_data_blocked *dest, * Payload is too short to include STREAMS_BLOCKED frame. */ ngtcp2_ssize ngtcp2_pkt_decode_streams_blocked_frame( - ngtcp2_streams_blocked *dest, const uint8_t *payload, size_t payloadlen); + ngtcp2_streams_blocked *dest, const uint8_t *payload, size_t payloadlen); /* * ngtcp2_pkt_decode_new_connection_id_frame decodes NEW_CONNECTION_ID @@ -699,11 +698,11 @@ ngtcp2_ssize ngtcp2_pkt_decode_streams_blocked_frame( * * NGTCP2_ERR_FRAME_ENCODING * Payload is too short to include NEW_CONNECTION_ID frame; or the - * length of CID is strictly less than NGTCP2_MIN_CIDLEN or - * greater than NGTCP2_MAX_CIDLEN. + * length of Connection ID is strictly less than NGTCP2_MIN_CIDLEN + * or greater than NGTCP2_MAX_CIDLEN. */ ngtcp2_ssize ngtcp2_pkt_decode_new_connection_id_frame( - ngtcp2_new_connection_id *dest, const uint8_t *payload, size_t payloadlen); + ngtcp2_new_connection_id *dest, const uint8_t *payload, size_t payloadlen); /* * ngtcp2_pkt_decode_stop_sending_frame decodes STOP_SENDING frame @@ -784,20 +783,19 @@ ngtcp2_ssize ngtcp2_pkt_decode_new_token_frame(ngtcp2_new_token *dest, size_t payloadlen); /* - * ngtcp2_pkt_decode_retire_connection_id_frame decodes RETIRE_CONNECTION_ID - * frame from |payload| of length |payloadlen|. The result is stored in the - * object pointed by |dest|. RETIRE_CONNECTION_ID frame must start at - * payload[0]. This function finishes when it decodes one RETIRE_CONNECTION_ID - * frame, and returns the exact number of bytes read to decode a frame - * if it succeeds, or one of the following negative error codes: + * ngtcp2_pkt_decode_retire_connection_id_frame decodes + * RETIRE_CONNECTION_ID frame from |payload| of length |payloadlen|. + * The result is stored in the object pointed by |dest|. + * RETIRE_CONNECTION_ID frame must start at payload[0]. This function + * finishes when it decodes one RETIRE_CONNECTION_ID frame, and + * returns the exact number of bytes read to decode a frame if it + * succeeds, or one of the following negative error codes: * * NGTCP2_ERR_FRAME_ENCODING * Payload is too short to include RETIRE_CONNECTION_ID frame. 
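
Note: the size computations in the encoders that follow (reason phrases, tokens, datagram lengths) are built on ngtcp2_put_uvarintlen() and ngtcp2_put_uvarint(). The write side of RFC 9000 §16, as an illustrative sketch rather than the ngtcp2 implementation:

#include <stddef.h>
#include <stdint.h>

/* Returns the number of bytes needed to encode |v| as a QUIC
   variable-length integer; |v| must be < 2^62. */
static size_t put_uvarint_len(uint64_t v) {
  if (v < 64) {
    return 1;
  }
  if (v < 16384) {
    return 2;
  }
  if (v < 1073741824) {
    return 4;
  }
  return 8;
}

/* Writes |v| big-endian with the 2-bit length prefix folded into the
   first byte, and returns a pointer just past the encoding. */
static uint8_t *put_uvarint(uint8_t *p, uint64_t v) {
  size_t n = put_uvarint_len(v);
  /* Length prefix per encoded size: 1 -> 00, 2 -> 01, 4 -> 10, 8 -> 11. */
  static const uint64_t prefix[9] = {0, 0, 1, 0, 2, 0, 0, 0, 3};
  uint64_t enc = v | (prefix[n] << (n * 8 - 2));
  size_t i;

  for (i = 0; i < n; ++i) {
    p[i] = (uint8_t)(enc >> ((n - 1 - i) * 8));
  }

  return p + n;
}
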
*/ -ngtcp2_ssize -ngtcp2_pkt_decode_retire_connection_id_frame(ngtcp2_retire_connection_id *dest, - const uint8_t *payload, - size_t payloadlen); +ngtcp2_ssize ngtcp2_pkt_decode_retire_connection_id_frame( + ngtcp2_retire_connection_id *dest, const uint8_t *payload, size_t payloadlen); /* * ngtcp2_pkt_decode_handshake_done_frame decodes HANDSHAKE_DONE frame @@ -846,9 +844,6 @@ ngtcp2_ssize ngtcp2_pkt_encode_stream_frame(uint8_t *out, size_t outlen, * ngtcp2_pkt_encode_ack_frame encodes ACK frame |fr| into the buffer * pointed by |out| of length |outlen|. * - * This function assigns & - * ~NGTCP2_FRAME_ACK to fr->flags. - * * This function returns the number of bytes written if it succeeds, * or one of the following negative error codes: * @@ -856,7 +851,7 @@ ngtcp2_ssize ngtcp2_pkt_encode_stream_frame(uint8_t *out, size_t outlen, * Buffer does not have enough capacity to write a frame. */ ngtcp2_ssize ngtcp2_pkt_encode_ack_frame(uint8_t *out, size_t outlen, - ngtcp2_ack *fr); + const ngtcp2_ack *fr); /* * ngtcp2_pkt_encode_padding_frame encodes PADDING frame |fr| into the @@ -980,7 +975,7 @@ ngtcp2_pkt_encode_data_blocked_frame(uint8_t *out, size_t outlen, * Buffer does not have enough capacity to write a frame. */ ngtcp2_ssize ngtcp2_pkt_encode_stream_data_blocked_frame( - uint8_t *out, size_t outlen, const ngtcp2_stream_data_blocked *fr); + uint8_t *out, size_t outlen, const ngtcp2_stream_data_blocked *fr); /* * ngtcp2_pkt_encode_streams_blocked_frame encodes STREAMS_BLOCKED @@ -1079,8 +1074,9 @@ ngtcp2_ssize ngtcp2_pkt_encode_new_token_frame(uint8_t *out, size_t outlen, const ngtcp2_new_token *fr); /* - * ngtcp2_pkt_encode_retire_connection_id_frame encodes RETIRE_CONNECTION_ID - * frame |fr| into the buffer pointed by |out| of length |outlen|. + * ngtcp2_pkt_encode_retire_connection_id_frame encodes + * RETIRE_CONNECTION_ID frame |fr| into the buffer pointed by |out| of + * length |outlen|. * * This function returns the number of bytes written if it succeeds, * or one of the following negative error codes: @@ -1089,7 +1085,7 @@ ngtcp2_ssize ngtcp2_pkt_encode_new_token_frame(uint8_t *out, size_t outlen, * Buffer does not have enough capacity to write a frame. */ ngtcp2_ssize ngtcp2_pkt_encode_retire_connection_id_frame( - uint8_t *out, size_t outlen, const ngtcp2_retire_connection_id *fr); + uint8_t *out, size_t outlen, const ngtcp2_retire_connection_id *fr); /* * ngtcp2_pkt_encode_handshake_done_frame encodes HANDSHAKE_DONE frame @@ -1119,7 +1115,7 @@ ngtcp2_ssize ngtcp2_pkt_encode_datagram_frame(uint8_t *out, size_t outlen, const ngtcp2_datagram *fr); /* - * ngtcp2_pkt_adjust_pkt_num find the full 64 bits packet number for + * ngtcp2_pkt_adjust_pkt_num finds the full 62 bits packet number for * |pkt_num|, which is encoded in |pkt_numlen| bytes. The * |max_pkt_num| is the highest successfully authenticated packet * number. @@ -1128,10 +1124,10 @@ int64_t ngtcp2_pkt_adjust_pkt_num(int64_t max_pkt_num, int64_t pkt_num, size_t pkt_numlen); /* - * ngtcp2_pkt_validate_ack checks that ack is malformed or not. - * |min_pkt_num| is the minimum packet number that an endpoint sends. - * It is an error to receive acknowledgements for a packet less than - * |min_pkt_num|. + * ngtcp2_pkt_validate_ack verifies whether |fr| is malformed or not. + * |min_pkt_num| is the minimum packet number that a local endpoint + * sends. It is an error to receive acknowledgements for a packet + * less than |min_pkt_num|. 
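
Note: ngtcp2_pkt_adjust_pkt_num, whose comment above now correctly says 62 bits rather than 64, implements packet number recovery from RFC 9000 Appendix A; the candidate-window branches are visible in the ngtcp2_pkt.c hunk earlier in this patch. A simplified sketch, with the overflow clamps against NGTCP2_MAX_PKT_NUM / NGTCP2_MAX_VARINT left out:

#include <stddef.h>
#include <stdint.h>

static int64_t adjust_pkt_num(int64_t max_pkt_num, int64_t pkt_num,
                              size_t pkt_numlen) {
  int64_t expected = max_pkt_num + 1;
  int64_t win = (int64_t)1 << (pkt_numlen * 8);
  int64_t hwin = win / 2;
  int64_t mask = win - 1;
  /* Replace the low bits of the expected packet number with the
     truncated value received on the wire. */
  int64_t cand = (expected & ~mask) | pkt_num;

  /* Pick the candidate closest to the expected packet number. */
  if (cand <= expected - hwin) {
    return cand + win;
  }

  if (cand > expected + hwin && cand >= win) {
    return cand - win;
  }

  return cand;
}
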
* * This function returns 0 if it succeeds, or one of the following * negative error codes: @@ -1141,13 +1137,13 @@ int64_t ngtcp2_pkt_adjust_pkt_num(int64_t max_pkt_num, int64_t pkt_num, * NGTCP2_ERR_PROTO * |fr| contains a packet number less than |min_pkt_num|. */ -int ngtcp2_pkt_validate_ack(ngtcp2_ack *fr, int64_t min_pkt_num); +int ngtcp2_pkt_validate_ack(const ngtcp2_ack *fr, int64_t min_pkt_num); /* * ngtcp2_pkt_stream_max_datalen returns the maximum number of bytes * which can be sent for stream denoted by |stream_id|. |offset| is - * an offset of within the stream. |len| is the estimated number of - * bytes to be sent. |left| is the size of buffer. If |left| is too + * an offset within the stream. |len| is the estimated number of + * bytes to send. |left| is the size of buffer. If |left| is too * small to write STREAM frame, this function returns (size_t)-1. */ size_t ngtcp2_pkt_stream_max_datalen(int64_t stream_id, uint64_t offset, @@ -1155,10 +1151,10 @@ size_t ngtcp2_pkt_stream_max_datalen(int64_t stream_id, uint64_t offset, /* * ngtcp2_pkt_crypto_max_datalen returns the maximum number of bytes - * which can be sent for crypto stream. |offset| is an offset of - * within the crypto stream. |len| is the estimated number of bytes - * to be sent. |left| is the size of buffer. If |left| is too small - * to write CRYPTO frame, this function returns (size_t)-1. + * which can be sent for crypto stream. |offset| is an offset within + * the crypto stream. |len| is the estimated number of bytes to send. + * |left| is the size of buffer. If |left| is too small to write + * CRYPTO frame, this function returns (size_t)-1. */ size_t ngtcp2_pkt_crypto_max_datalen(uint64_t offset, size_t len, size_t left); @@ -1191,8 +1187,8 @@ int ngtcp2_pkt_verify_reserved_bits(uint8_t c); * Buffer is too short. */ ngtcp2_ssize ngtcp2_pkt_encode_pseudo_retry( - uint8_t *dest, size_t destlen, const ngtcp2_pkt_hd *hd, uint8_t unused, - const ngtcp2_cid *odcid, const uint8_t *token, size_t tokenlen); + uint8_t *dest, size_t destlen, const ngtcp2_pkt_hd *hd, uint8_t unused, + const ngtcp2_cid *odcid, const uint8_t *token, size_t tokenlen); /* * ngtcp2_pkt_verify_retry_tag verifies Retry packet. The buffer @@ -1229,4 +1225,4 @@ uint8_t ngtcp2_pkt_versioned_type(uint32_t version, uint32_t pkt_type); */ uint8_t ngtcp2_pkt_get_type_long(uint32_t version, uint8_t c); -#endif /* NGTCP2_PKT_H */ +#endif /* !defined(NGTCP2_PKT_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_pktns_id.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_pktns_id.h index 66b0ee9e6c13cf..9cf8444d32d7eb 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_pktns_id.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_pktns_id.h @@ -27,7 +27,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -59,4 +59,4 @@ typedef enum ngtcp2_pktns_id { NGTCP2_PKTNS_ID_MAX } ngtcp2_pktns_id; -#endif /* NGTCP2_PKTNS_ID_H */ +#endif /* !defined(NGTCP2_PKTNS_ID_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_pmtud.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_pmtud.c index 771ef5e026d12d..ebd113f6746217 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_pmtud.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_pmtud.c @@ -33,18 +33,17 @@ for each probe. */ #define NGTCP2_PMTUD_PROBE_NUM_MAX 3 -static size_t mtu_probes[] = { - 1454 - 48, /* The well known MTU used by a domestic optic fiber - service in Japan. 
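
Note: the PMTUD change continuing below turns this fixed probe table into a caller-overridable one: ngtcp2_pmtud_new gains const uint16_t *probes / size_t probeslen parameters and falls back to the defaults when probeslen is zero. Each default entry is a link MTU minus 48 bytes (40-byte IPv6 header plus 8-byte UDP header). A usage sketch against the new signature; the probe values and wrapper are illustrative:

#include <stdint.h>
#include "ngtcp2_pmtud.h"

/* Candidate UDP payload sizes derived from common path MTUs. */
static const uint16_t my_probes[] = {
  1500 - 48, /* Ethernet */
  1280 - 48, /* IPv6 minimum MTU */
};

int start_pmtud(ngtcp2_pmtud **ppmtud, size_t max_udp_payload_size,
                size_t hard_max_udp_payload_size, int64_t tx_pkt_num,
                const ngtcp2_mem *mem) {
  /* Passing probeslen == 0 would select the built-in table instead. */
  return ngtcp2_pmtud_new(ppmtud, max_udp_payload_size,
                          hard_max_udp_payload_size, tx_pkt_num, my_probes,
                          sizeof(my_probes) / sizeof(my_probes[0]), mem);
}
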
*/ - 1390 - 48, /* Typical Tunneled MTU */ - 1280 - 48, /* IPv6 minimum MTU */ - 1492 - 48, /* PPPoE */ +static uint16_t pmtud_default_probes[] = { + 1454 - 48, /* The well known MTU used by a domestic optic fiber + service in Japan. */ + 1390 - 48, /* Typical Tunneled MTU */ + 1280 - 48, /* IPv6 minimum MTU */ + 1492 - 48, /* PPPoE */ }; -#define NGTCP2_MTU_PROBESLEN ngtcp2_arraylen(mtu_probes) - int ngtcp2_pmtud_new(ngtcp2_pmtud **ppmtud, size_t max_udp_payload_size, size_t hard_max_udp_payload_size, int64_t tx_pkt_num, + const uint16_t *probes, size_t probeslen, const ngtcp2_mem *mem) { ngtcp2_pmtud *pmtud = ngtcp2_mem_malloc(mem, sizeof(ngtcp2_pmtud)); @@ -61,11 +60,19 @@ int ngtcp2_pmtud_new(ngtcp2_pmtud **ppmtud, size_t max_udp_payload_size, pmtud->hard_max_udp_payload_size = hard_max_udp_payload_size; pmtud->min_fail_udp_payload_size = SIZE_MAX; - for (; pmtud->mtu_idx < NGTCP2_MTU_PROBESLEN; ++pmtud->mtu_idx) { - if (mtu_probes[pmtud->mtu_idx] > pmtud->hard_max_udp_payload_size) { + if (probeslen) { + pmtud->probes = probes; + pmtud->probeslen = probeslen; + } else { + pmtud->probes = pmtud_default_probes; + pmtud->probeslen = ngtcp2_arraylen(pmtud_default_probes); + } + + for (; pmtud->mtu_idx < pmtud->probeslen; ++pmtud->mtu_idx) { + if (pmtud->probes[pmtud->mtu_idx] > pmtud->hard_max_udp_payload_size) { continue; } - if (mtu_probes[pmtud->mtu_idx] > pmtud->max_udp_payload_size) { + if (pmtud->probes[pmtud->mtu_idx] > pmtud->max_udp_payload_size) { break; } } @@ -84,9 +91,9 @@ void ngtcp2_pmtud_del(ngtcp2_pmtud *pmtud) { } size_t ngtcp2_pmtud_probelen(ngtcp2_pmtud *pmtud) { - assert(pmtud->mtu_idx < NGTCP2_MTU_PROBESLEN); + assert(pmtud->mtu_idx < pmtud->probeslen); - return mtu_probes[pmtud->mtu_idx]; + return pmtud->probes[pmtud->mtu_idx]; } void ngtcp2_pmtud_probe_sent(ngtcp2_pmtud *pmtud, ngtcp2_duration pto, @@ -107,19 +114,19 @@ int ngtcp2_pmtud_require_probe(ngtcp2_pmtud *pmtud) { } static void pmtud_next_probe(ngtcp2_pmtud *pmtud) { - assert(pmtud->mtu_idx < NGTCP2_MTU_PROBESLEN); + assert(pmtud->mtu_idx < pmtud->probeslen); ++pmtud->mtu_idx; pmtud->num_pkts_sent = 0; pmtud->expiry = UINT64_MAX; - for (; pmtud->mtu_idx < NGTCP2_MTU_PROBESLEN; ++pmtud->mtu_idx) { - if (mtu_probes[pmtud->mtu_idx] <= pmtud->max_udp_payload_size || - mtu_probes[pmtud->mtu_idx] > pmtud->hard_max_udp_payload_size) { + for (; pmtud->mtu_idx < pmtud->probeslen; ++pmtud->mtu_idx) { + if (pmtud->probes[pmtud->mtu_idx] <= pmtud->max_udp_payload_size || + pmtud->probes[pmtud->mtu_idx] > pmtud->hard_max_udp_payload_size) { continue; } - if (mtu_probes[pmtud->mtu_idx] < pmtud->min_fail_udp_payload_size) { + if (pmtud->probes[pmtud->mtu_idx] < pmtud->min_fail_udp_payload_size) { break; } } @@ -127,11 +134,11 @@ static void pmtud_next_probe(ngtcp2_pmtud *pmtud) { void ngtcp2_pmtud_probe_success(ngtcp2_pmtud *pmtud, size_t payloadlen) { pmtud->max_udp_payload_size = - ngtcp2_max(pmtud->max_udp_payload_size, payloadlen); + ngtcp2_max_size(pmtud->max_udp_payload_size, payloadlen); - assert(pmtud->mtu_idx < NGTCP2_MTU_PROBESLEN); + assert(pmtud->mtu_idx < pmtud->probeslen); - if (mtu_probes[pmtud->mtu_idx] > pmtud->max_udp_payload_size) { + if (pmtud->probes[pmtud->mtu_idx] > pmtud->max_udp_payload_size) { return; } @@ -149,12 +156,12 @@ void ngtcp2_pmtud_handle_expiry(ngtcp2_pmtud *pmtud, ngtcp2_tstamp ts) { return; } - pmtud->min_fail_udp_payload_size = - ngtcp2_min(pmtud->min_fail_udp_payload_size, mtu_probes[pmtud->mtu_idx]); + pmtud->min_fail_udp_payload_size = ngtcp2_min_size( + 
pmtud->min_fail_udp_payload_size, pmtud->probes[pmtud->mtu_idx]); pmtud_next_probe(pmtud); } int ngtcp2_pmtud_finished(ngtcp2_pmtud *pmtud) { - return pmtud->mtu_idx >= NGTCP2_MTU_PROBESLEN; + return pmtud->mtu_idx >= pmtud->probeslen; } diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_pmtud.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_pmtud.h index 6b2e691cfc793a..53fc6a538292e0 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_pmtud.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_pmtud.h @@ -27,7 +27,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -58,6 +58,10 @@ typedef struct ngtcp2_pmtud { /* min_fail_udp_payload_size is the minimum UDP payload size that is known to fail. */ size_t min_fail_udp_payload_size; + /* probes is the array of UDP datagram payload size to probe. */ + const uint16_t *probes; + /* probeslen is the number of probes pointed by probes. */ + size_t probeslen; } ngtcp2_pmtud; /* @@ -70,6 +74,10 @@ typedef struct ngtcp2_pmtud { * larger than or equal to all UDP payload probe candidates. * Therefore, call ngtcp2_pmtud_finished to check this situation. * + * The array pointed by |pmtud_probes| of length |pmtud_probeslen| + * specifies UDP datagram payload size to probe. If |pmtud_probeslen| + * is zero, the default probes are used. + * * This function returns 0 if it succeeds, or one of the following * negative error codes: * @@ -78,6 +86,7 @@ typedef struct ngtcp2_pmtud { */ int ngtcp2_pmtud_new(ngtcp2_pmtud **ppmtud, size_t max_udp_payload_size, size_t hard_max_udp_payload_size, int64_t tx_pkt_num, + const uint16_t *pmtud_probes, size_t pmtud_probeslen, const ngtcp2_mem *mem); /* @@ -120,4 +129,4 @@ void ngtcp2_pmtud_handle_expiry(ngtcp2_pmtud *pmtud, ngtcp2_tstamp ts); */ int ngtcp2_pmtud_finished(ngtcp2_pmtud *pmtud); -#endif /* NGTCP2_PMTUD_H */ +#endif /* !defined(NGTCP2_PMTUD_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_ppe.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_ppe.c index f7c122b1ab406b..13ef2b24908905 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_ppe.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_ppe.c @@ -29,11 +29,13 @@ #include "ngtcp2_str.h" #include "ngtcp2_conv.h" +#include "ngtcp2_macro.h" void ngtcp2_ppe_init(ngtcp2_ppe *ppe, uint8_t *out, size_t outlen, - ngtcp2_crypto_cc *cc) { + size_t dgram_offset, ngtcp2_crypto_cc *cc) { ngtcp2_buf_init(&ppe->buf, out, outlen); + ppe->dgram_offset = dgram_offset; ppe->hdlen = 0; ppe->len_offset = 0; ppe->pkt_num_offset = 0; @@ -53,17 +55,22 @@ int ngtcp2_ppe_encode_hd(ngtcp2_ppe *ppe, const ngtcp2_pkt_hd *hd) { if (hd->flags & NGTCP2_PKT_FLAG_LONG_FORM) { ppe->len_offset = 1 + 4 + 1 + hd->dcid.datalen + 1 + hd->scid.datalen; + if (hd->type == NGTCP2_PKT_INITIAL) { ppe->len_offset += ngtcp2_put_uvarintlen(hd->tokenlen) + hd->tokenlen; } + ppe->pkt_num_offset = ppe->len_offset + NGTCP2_PKT_LENGTHLEN; + rv = ngtcp2_pkt_encode_hd_long( - buf->last, ngtcp2_buf_left(buf) - cc->aead.max_overhead, hd); + buf->last, ngtcp2_buf_left(buf) - cc->aead.max_overhead, hd); } else { ppe->pkt_num_offset = 1 + hd->dcid.datalen; + rv = ngtcp2_pkt_encode_hd_short( - buf->last, ngtcp2_buf_left(buf) - cc->aead.max_overhead, hd); + buf->last, ngtcp2_buf_left(buf) - cc->aead.max_overhead, hd); } + if (rv < 0) { return (int)rv; } @@ -72,7 +79,6 @@ int ngtcp2_ppe_encode_hd(ngtcp2_ppe *ppe, const ngtcp2_pkt_hd *hd) { ppe->pkt_numlen = hd->pkt_numlen; ppe->hdlen = (size_t)rv; - ppe->pkt_num = hd->pkt_num; return 0; @@ -88,7 +94,7 @@ int ngtcp2_ppe_encode_frame(ngtcp2_ppe *ppe, ngtcp2_frame *fr) { } rv = 
ngtcp2_pkt_encode_frame( - buf->last, ngtcp2_buf_left(buf) - cc->aead.max_overhead, fr); + buf->last, ngtcp2_buf_left(buf) - cc->aead.max_overhead, fr); if (rv < 0) { return (int)rv; } @@ -121,8 +127,8 @@ ngtcp2_ssize ngtcp2_ppe_final(ngtcp2_ppe *ppe, const uint8_t **ppkt) { if (ppe->len_offset) { ngtcp2_put_uvarint30( - buf->begin + ppe->len_offset, - (uint16_t)(payloadlen + ppe->pkt_numlen + cc->aead.max_overhead)); + buf->begin + ppe->len_offset, + (uint16_t)(payloadlen + ppe->pkt_numlen + cc->aead.max_overhead)); } ngtcp2_crypto_create_nonce(ppe->nonce, cc->ckm->iv.base, cc->ckm->iv.len, @@ -136,7 +142,7 @@ ngtcp2_ssize ngtcp2_ppe_final(ngtcp2_ppe *ppe, const uint8_t **ppkt) { buf->last = payload + payloadlen + cc->aead.max_overhead; - /* TODO Check that we have enough space to get sample */ + /* Make sure that we have enough space to get sample */ assert(ppe_sample_offset(ppe) + NGTCP2_HP_SAMPLELEN <= ngtcp2_buf_len(buf)); rv = cc->hp_mask(mask, &cc->hp, &cc->hp_ctx, @@ -164,7 +170,7 @@ ngtcp2_ssize ngtcp2_ppe_final(ngtcp2_ppe *ppe, const uint8_t **ppkt) { return (ngtcp2_ssize)ngtcp2_buf_len(buf); } -size_t ngtcp2_ppe_left(ngtcp2_ppe *ppe) { +size_t ngtcp2_ppe_left(const ngtcp2_ppe *ppe) { ngtcp2_crypto_cc *cc = ppe->cc; if (ngtcp2_buf_left(&ppe->buf) < cc->aead.max_overhead) { @@ -174,57 +180,57 @@ size_t ngtcp2_ppe_left(ngtcp2_ppe *ppe) { return ngtcp2_buf_left(&ppe->buf) - cc->aead.max_overhead; } -size_t ngtcp2_ppe_pktlen(ngtcp2_ppe *ppe) { +size_t ngtcp2_ppe_pktlen(const ngtcp2_ppe *ppe) { ngtcp2_crypto_cc *cc = ppe->cc; return ngtcp2_buf_len(&ppe->buf) + cc->aead.max_overhead; } -size_t ngtcp2_ppe_padding(ngtcp2_ppe *ppe) { - ngtcp2_crypto_cc *cc = ppe->cc; - ngtcp2_buf *buf = &ppe->buf; - size_t len; - - assert(ngtcp2_buf_left(buf) >= cc->aead.max_overhead); - - len = ngtcp2_buf_left(buf) - cc->aead.max_overhead; - memset(buf->last, 0, len); - buf->last += len; - - return len; -} - -size_t ngtcp2_ppe_padding_hp_sample(ngtcp2_ppe *ppe) { +size_t ngtcp2_ppe_padding_size(ngtcp2_ppe *ppe, size_t n) { ngtcp2_crypto_cc *cc = ppe->cc; ngtcp2_buf *buf = &ppe->buf; - size_t max_samplelen; + size_t pktlen = ngtcp2_buf_len(buf) + cc->aead.max_overhead; size_t len = 0; + size_t max_samplelen; - assert(cc->aead.max_overhead); + n = ngtcp2_min_size(n, ngtcp2_buf_cap(buf)); + if (pktlen < n) { + len = n - pktlen; + } + /* Ensure header protection sample */ max_samplelen = - ngtcp2_buf_len(buf) + cc->aead.max_overhead - ppe_sample_offset(ppe); + ngtcp2_buf_len(buf) + cc->aead.max_overhead - ppe_sample_offset(ppe); + if (max_samplelen < NGTCP2_HP_SAMPLELEN) { - len = NGTCP2_HP_SAMPLELEN - max_samplelen; - assert(ngtcp2_ppe_left(ppe) >= len); - memset(buf->last, 0, len); - buf->last += len; + len = ngtcp2_max_size(len, NGTCP2_HP_SAMPLELEN - max_samplelen); } + assert(ngtcp2_buf_left(buf) >= len + cc->aead.max_overhead); + + buf->last = ngtcp2_setmem(buf->last, 0, len); + return len; } -size_t ngtcp2_ppe_padding_size(ngtcp2_ppe *ppe, size_t n) { +size_t ngtcp2_ppe_dgram_padding(ngtcp2_ppe *ppe) { + return ngtcp2_ppe_dgram_padding_size(ppe, NGTCP2_MAX_UDP_PAYLOAD_SIZE); +} + +size_t ngtcp2_ppe_dgram_padding_size(ngtcp2_ppe *ppe, size_t n) { ngtcp2_crypto_cc *cc = ppe->cc; ngtcp2_buf *buf = &ppe->buf; - size_t pktlen = ngtcp2_buf_len(buf) + cc->aead.max_overhead; + size_t dgramlen = + ppe->dgram_offset + ngtcp2_buf_len(buf) + cc->aead.max_overhead; size_t len; - if (pktlen >= n) { + n = ngtcp2_min_size(n, ppe->dgram_offset + ngtcp2_buf_cap(buf)); + + if (dgramlen >= n) { return 0; } - len = n - 
pktlen; + len = n - dgramlen; buf->last = ngtcp2_setmem(buf->last, 0, len); return len; @@ -232,5 +238,6 @@ size_t ngtcp2_ppe_padding_size(ngtcp2_ppe *ppe, size_t n) { int ngtcp2_ppe_ensure_hp_sample(ngtcp2_ppe *ppe) { ngtcp2_buf *buf = &ppe->buf; + return ngtcp2_buf_left(buf) >= (4 - ppe->pkt_numlen) + NGTCP2_HP_SAMPLELEN; } diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_ppe.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_ppe.h index 2a069ef33451ab..660b1482b56671 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_ppe.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_ppe.h @@ -27,7 +27,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -36,11 +36,17 @@ #include "ngtcp2_crypto.h" /* - * ngtcp2_ppe is the Protected Packet Encoder. + * ngtcp2_ppe is the QUIC Packet Encoder. */ typedef struct ngtcp2_ppe { + /* buf is the buffer where a QUIC packet is written. */ ngtcp2_buf buf; + /* cc is the encryption context that includes callback functions to + encrypt a QUIC packet, and AEAD cipher, etc. */ ngtcp2_crypto_cc *cc; + /* dgram_offset is the offset in UDP datagram payload that this QUIC + packet is positioned at. */ + size_t dgram_offset; /* hdlen is the number of bytes for packet header written in buf. */ size_t hdlen; /* len_offset is the offset to Length field. */ @@ -53,7 +59,7 @@ typedef struct ngtcp2_ppe { /* pkt_num is the packet number written in buf. */ int64_t pkt_num; /* nonce is the buffer to store nonce. It should be equal or longer - than then length of IV. */ + than the length of IV. */ uint8_t nonce[32]; } ngtcp2_ppe; @@ -61,7 +67,7 @@ typedef struct ngtcp2_ppe { * ngtcp2_ppe_init initializes |ppe| with the given buffer. */ void ngtcp2_ppe_init(ngtcp2_ppe *ppe, uint8_t *out, size_t outlen, - ngtcp2_crypto_cc *cc); + size_t dgram_offset, ngtcp2_crypto_cc *cc); /* * ngtcp2_ppe_encode_hd encodes |hd|. @@ -86,7 +92,7 @@ int ngtcp2_ppe_encode_hd(ngtcp2_ppe *ppe, const ngtcp2_pkt_hd *hd); int ngtcp2_ppe_encode_frame(ngtcp2_ppe *ppe, ngtcp2_frame *fr); /* - * ngtcp2_ppe_final encrypts QUIC packet payload. If |**ppkt| is not + * ngtcp2_ppe_final encrypts QUIC packet payload. If |ppkt| is not * NULL, the pointer to the packet is assigned to it. * * This function returns the length of QUIC packet, including header, @@ -102,39 +108,46 @@ ngtcp2_ssize ngtcp2_ppe_final(ngtcp2_ppe *ppe, const uint8_t **ppkt); * ngtcp2_ppe_left returns the number of bytes left to write * additional frames. This does not count AEAD overhead. */ -size_t ngtcp2_ppe_left(ngtcp2_ppe *ppe); +size_t ngtcp2_ppe_left(const ngtcp2_ppe *ppe); /* * ngtcp2_ppe_pktlen returns the provisional packet length. It * includes AEAD overhead. */ -size_t ngtcp2_ppe_pktlen(ngtcp2_ppe *ppe); +size_t ngtcp2_ppe_pktlen(const ngtcp2_ppe *ppe); -/** - * @function +/* + * ngtcp2_ppe_dgram_padding is equivalent to call + * ngtcp2_ppe_dgram_padding_size(ppe, NGTCP2_MAX_UDP_PAYLOAD_SIZE). + * This function should be called just before calling + * ngtcp2_ppe_final(). * - * `ngtcp2_ppe_padding` encodes PADDING frames to the end of the - * buffer. This function returns the number of bytes padded. + * This function returns the number of bytes padded. */ -size_t ngtcp2_ppe_padding(ngtcp2_ppe *ppe); +size_t ngtcp2_ppe_dgram_padding(ngtcp2_ppe *ppe); /* - * ngtcp2_ppe_padding_hp_sample adds PADDING frame if the current - * payload does not have enough space for header protection sample. - * This function should be called just before calling - * ngtcp2_ppe_final(). 
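The dgram_offset plumbing introduced in this ppe rework lets padding be computed against the whole UDP datagram rather than a single QUIC packet, which matters once packets are coalesced. A minimal sketch of how a caller inside the library might drive it; ngtcp2_ppe_init, ngtcp2_ppe_encode_hd, ngtcp2_ppe_dgram_padding_size, and ngtcp2_ppe_final are the internal APIs from this diff, while the helper name, the 1200-byte floor, and first_pktlen are stand-ins for illustration:

#include "ngtcp2_ppe.h"

/* Sketch: encode the second, coalesced packet of a datagram and pad
   the datagram itself (not just this packet) to 1200 bytes.  |cc| and
   |hd| are assumed to be a fully initialized crypto context and packet
   header; |first_pktlen| bytes of the datagram were already written. */
static ngtcp2_ssize encode_padded(uint8_t *dest, size_t destlen,
                                  size_t first_pktlen, ngtcp2_crypto_cc *cc,
                                  const ngtcp2_pkt_hd *hd) {
  ngtcp2_ppe ppe;
  int rv;

  /* dgram_offset tells the encoder how much of the UDP datagram
     precedes this packet, so datagram-level padding comes out right. */
  ngtcp2_ppe_init(&ppe, dest + first_pktlen, destlen - first_pktlen,
                  first_pktlen, cc);

  rv = ngtcp2_ppe_encode_hd(&ppe, hd);
  if (rv != 0) {
    return rv;
  }

  /* ... ngtcp2_ppe_encode_frame() calls would go here ... */

  /* Pad the whole datagram to at least 1200 bytes, counting
     first_pktlen and this packet's AEAD overhead. */
  ngtcp2_ppe_dgram_padding_size(&ppe, 1200);

  return ngtcp2_ppe_final(&ppe, NULL);
}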
+ * ngtcp2_ppe_dgram_padding_size adds PADDING frame so that the size + * of a UDP datagram payload is at least |n| bytes long. If it is + * unable to add PADDING in that way, this function still adds PADDING + * frame as much as possible. This function should be called just + * before calling ngtcp2_ppe_final(). * * This function returns the number of bytes added as padding. */ -size_t ngtcp2_ppe_padding_hp_sample(ngtcp2_ppe *ppe); +size_t ngtcp2_ppe_dgram_padding_size(ngtcp2_ppe *ppe, size_t n); /* * ngtcp2_ppe_padding_size adds PADDING frame so that the size of QUIC - * packet is at least |n| bytes long. If it is unable to add PADDING - * in that way, this function still adds PADDING frame as much as - * possible. This function should be called just before calling - * ngtcp2_ppe_final(). For Short packet, this function should be - * called instead of ngtcp2_ppe_padding_hp_sample. + * packet is at least |n| bytes long and the current payload has + * enough space for header protection sample. If it is unable to add + * PADDING at least |n| bytes, this function still adds PADDING frames + * as much as possible. This function also adds PADDING frames so + * that the minimum padding requirement of header protection is met. + * Those padding may be larger than |n| bytes. It is recommended to + * make sure that ngtcp2_ppe_ensure_hp_sample succeeds after writing + * QUIC packet header. This function should be called just before + * calling ngtcp2_ppe_final(). * * This function returns the number of bytes added as padding. */ @@ -147,4 +160,4 @@ size_t ngtcp2_ppe_padding_size(ngtcp2_ppe *ppe, size_t n); */ int ngtcp2_ppe_ensure_hp_sample(ngtcp2_ppe *ppe); -#endif /* NGTCP2_PPE_H */ +#endif /* !defined(NGTCP2_PPE_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_pq.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_pq.c index 5e1003d7942e53..19e3e3e36aa5e6 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_pq.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_pq.c @@ -29,17 +29,20 @@ #include "ngtcp2_macro.h" -void ngtcp2_pq_init(ngtcp2_pq *pq, ngtcp2_less less, const ngtcp2_mem *mem) { - pq->mem = mem; - pq->capacity = 0; +void ngtcp2_pq_init(ngtcp2_pq *pq, ngtcp2_pq_less less, const ngtcp2_mem *mem) { pq->q = NULL; + pq->mem = mem; pq->length = 0; + pq->capacity = 0; pq->less = less; } void ngtcp2_pq_free(ngtcp2_pq *pq) { + if (!pq) { + return; + } + ngtcp2_mem_free(pq->mem, pq->q); - pq->q = NULL; } static void swap(ngtcp2_pq *pq, size_t i, size_t j) { @@ -54,11 +57,13 @@ static void swap(ngtcp2_pq *pq, size_t i, size_t j) { static void bubble_up(ngtcp2_pq *pq, size_t index) { size_t parent; - while (index != 0) { + + while (index) { parent = (index - 1) / 2; if (!pq->less(pq->q[index], pq->q[parent])) { return; } + swap(pq, parent, index); index = parent; } @@ -69,56 +74,64 @@ int ngtcp2_pq_push(ngtcp2_pq *pq, ngtcp2_pq_entry *item) { void *nq; size_t ncapacity; - ncapacity = ngtcp2_max(4, (pq->capacity * 2)); + ncapacity = ngtcp2_max_size(4, pq->capacity * 2); - nq = ngtcp2_mem_realloc(pq->mem, pq->q, - ncapacity * sizeof(ngtcp2_pq_entry *)); + nq = + ngtcp2_mem_realloc(pq->mem, pq->q, ncapacity * sizeof(ngtcp2_pq_entry *)); if (nq == NULL) { return NGTCP2_ERR_NOMEM; } + pq->capacity = ncapacity; pq->q = nq; } + pq->q[pq->length] = item; item->index = pq->length; ++pq->length; - bubble_up(pq, pq->length - 1); + bubble_up(pq, item->index); + return 0; } -ngtcp2_pq_entry *ngtcp2_pq_top(ngtcp2_pq *pq) { +ngtcp2_pq_entry *ngtcp2_pq_top(const ngtcp2_pq *pq) { assert(pq->length); return pq->q[0]; } static void bubble_down(ngtcp2_pq 
*pq, size_t index) { size_t i, j, minindex; + for (;;) { j = index * 2 + 1; minindex = index; + for (i = 0; i < 2; ++i, ++j) { if (j >= pq->length) { break; } + if (pq->less(pq->q[j], pq->q[minindex])) { minindex = j; } } + if (minindex == index) { return; } + swap(pq, index, minindex); index = minindex; } } void ngtcp2_pq_pop(ngtcp2_pq *pq) { - if (pq->length > 0) { - pq->q[0] = pq->q[pq->length - 1]; - pq->q[0]->index = 0; - --pq->length; - bubble_down(pq, 0); - } + assert(pq->length); + + pq->q[0] = pq->q[pq->length - 1]; + pq->q[0]->index = 0; + --pq->length; + bubble_down(pq, 0); } void ngtcp2_pq_remove(ngtcp2_pq *pq, ngtcp2_pq_entry *item) { @@ -145,20 +158,22 @@ void ngtcp2_pq_remove(ngtcp2_pq *pq, ngtcp2_pq_entry *item) { } } -int ngtcp2_pq_empty(ngtcp2_pq *pq) { return pq->length == 0; } +int ngtcp2_pq_empty(const ngtcp2_pq *pq) { return pq->length == 0; } -size_t ngtcp2_pq_size(ngtcp2_pq *pq) { return pq->length; } +size_t ngtcp2_pq_size(const ngtcp2_pq *pq) { return pq->length; } -int ngtcp2_pq_each(ngtcp2_pq *pq, ngtcp2_pq_item_cb fun, void *arg) { +int ngtcp2_pq_each(const ngtcp2_pq *pq, ngtcp2_pq_item_cb fun, void *arg) { size_t i; if (pq->length == 0) { return 0; } + for (i = 0; i < pq->length; ++i) { if ((*fun)(pq->q[i], arg)) { return 1; } } + return 0; } diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_pq.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_pq.h index 484c8f21f75de2..84961c9143978c 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_pq.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_pq.h @@ -28,7 +28,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -45,37 +45,39 @@ typedef struct ngtcp2_pq_entry { size_t index; } ngtcp2_pq_entry; -/* "less" function, return nonzero if |lhs| is less than |rhs|. */ -typedef int (*ngtcp2_less)(const ngtcp2_pq_entry *lhs, - const ngtcp2_pq_entry *rhs); +/* ngtcp2_pq_less is a "less" function, that returns nonzero if |lhs| + is considered to be less than |rhs|. */ +typedef int (*ngtcp2_pq_less)(const ngtcp2_pq_entry *lhs, + const ngtcp2_pq_entry *rhs); typedef struct ngtcp2_pq { - /* The pointer to the pointer to the item stored */ + /* q is a pointer to an array that stores the items. */ ngtcp2_pq_entry **q; - /* Memory allocator */ + /* mem is a memory allocator. */ const ngtcp2_mem *mem; - /* The number of items stored */ + /* length is the number of items stored. */ size_t length; - /* The maximum number of items this pq can store. This is - automatically extended when length is reached to this value. */ + /* capacity is the maximum number of items this queue can store. + This is automatically extended when length is reached to this + limit. */ size_t capacity; - /* The less function between items */ - ngtcp2_less less; + /* less is the less function to compare items. */ + ngtcp2_pq_less less; } ngtcp2_pq; /* - * Initializes priority queue |pq| with compare function |cmp|. + * ngtcp2_pq_init initializes |pq| with compare function |cmp|. */ -void ngtcp2_pq_init(ngtcp2_pq *pq, ngtcp2_less less, const ngtcp2_mem *mem); +void ngtcp2_pq_init(ngtcp2_pq *pq, ngtcp2_pq_less less, const ngtcp2_mem *mem); /* - * Deallocates any resources allocated for |pq|. The stored items are - * not freed by this function. + * ngtcp2_pq_free deallocates any resources allocated for |pq|. The + * stored items are not freed by this function. */ void ngtcp2_pq_free(ngtcp2_pq *pq); /* - * Adds |item| to the priority queue |pq|. + * ngtcp2_pq_push adds |item| to |pq|. 
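To make the renamed intrusive-heap API concrete, a minimal usage sketch; the timer struct, its deadline field, and pq_demo are invented for illustration, while the ngtcp2_pq calls match the declarations in this header. Error checks are elided for brevity:

#include <stdint.h>
#include <stdio.h>

#include <ngtcp2/ngtcp2.h>

#include "ngtcp2_pq.h"

/* Hypothetical item: the heap entry is embedded, so ngtcp2_pq never
   allocates per item.  Because pe is the first member, a plain cast
   recovers the outer struct; upstream code does the same job with its
   ngtcp2_struct_of macro. */
typedef struct timer {
  ngtcp2_pq_entry pe;
  uint64_t deadline;
} timer;

static int timer_less(const ngtcp2_pq_entry *lhs, const ngtcp2_pq_entry *rhs) {
  return ((const timer *)lhs)->deadline < ((const timer *)rhs)->deadline;
}

static void pq_demo(void) {
  ngtcp2_pq pq;
  timer t1 = {{0}, 300}, t2 = {{0}, 100}, t3 = {{0}, 200};

  ngtcp2_pq_init(&pq, timer_less, ngtcp2_mem_default());

  ngtcp2_pq_push(&pq, &t1.pe);
  ngtcp2_pq_push(&pq, &t2.pe);
  ngtcp2_pq_push(&pq, &t3.pe);

  while (!ngtcp2_pq_empty(&pq)) {
    timer *t = (timer *)ngtcp2_pq_top(&pq);
    printf("deadline=%llu\n", (unsigned long long)t->deadline); /* 100, 200, 300 */
    ngtcp2_pq_pop(&pq);
  }

  ngtcp2_pq_free(&pq);
}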
* * This function returns 0 if it succeeds, or one of the following * negative error codes: @@ -86,41 +88,42 @@ void ngtcp2_pq_free(ngtcp2_pq *pq); int ngtcp2_pq_push(ngtcp2_pq *pq, ngtcp2_pq_entry *item); /* - * Returns item at the top of the queue |pq|. It is undefined if the - * queue is empty. + * ngtcp2_pq_top returns item at the top of |pq|. It is undefined if + * |pq| is empty. */ -ngtcp2_pq_entry *ngtcp2_pq_top(ngtcp2_pq *pq); +ngtcp2_pq_entry *ngtcp2_pq_top(const ngtcp2_pq *pq); /* - * Pops item at the top of the queue |pq|. The popped item is not - * freed by this function. + * ngtcp2_pq_pop pops item at the top of |pq|. The popped item is not + * freed by this function. It is undefined if |pq| is empty. */ void ngtcp2_pq_pop(ngtcp2_pq *pq); /* - * Returns nonzero if the queue |pq| is empty. + * ngtcp2_pq_empty returns nonzero if |pq| is empty. */ -int ngtcp2_pq_empty(ngtcp2_pq *pq); +int ngtcp2_pq_empty(const ngtcp2_pq *pq); /* - * Returns the number of items in the queue |pq|. + * ngtcp2_pq_size returns the number of items |pq| contains. */ -size_t ngtcp2_pq_size(ngtcp2_pq *pq); +size_t ngtcp2_pq_size(const ngtcp2_pq *pq); typedef int (*ngtcp2_pq_item_cb)(ngtcp2_pq_entry *item, void *arg); /* - * Applies |fun| to each item in |pq|. The |arg| is passed as arg - * parameter to callback function. This function must not change the - * ordering key. If the return value from callback is nonzero, this - * function returns 1 immediately without iterating remaining items. - * Otherwise this function returns 0. + * ngtcp2_pq_each applies |fun| to each item in |pq|. The |arg| is + * passed as arg parameter to callback function. This function must + * not change the ordering key. If the return value from callback is + * nonzero, this function returns 1 immediately without iterating + * remaining items. Otherwise this function returns 0. */ -int ngtcp2_pq_each(ngtcp2_pq *pq, ngtcp2_pq_item_cb fun, void *arg); +int ngtcp2_pq_each(const ngtcp2_pq *pq, ngtcp2_pq_item_cb fun, void *arg); /* - * Removes |item| from priority queue. + * ngtcp2_pq_remove removes |item| from |pq|. |pq| must contain + * |item| otherwise the behavior is undefined. */ void ngtcp2_pq_remove(ngtcp2_pq *pq, ngtcp2_pq_entry *item); -#endif /* NGTCP2_PQ_H */ +#endif /* !defined(NGTCP2_PQ_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_pv.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_pv.c index 314e005293c279..e4fee94eb558d3 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_pv.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_pv.c @@ -143,7 +143,7 @@ int ngtcp2_pv_validation_timed_out(ngtcp2_pv *pv, ngtcp2_tstamp ts) { ent = ngtcp2_ringbuf_get(&pv->ents.rb, ngtcp2_ringbuf_len(&pv->ents.rb) - 1); t = pv->started_ts + pv->timeout; - t = ngtcp2_max(t, ent->expiry); + t = ngtcp2_max_uint64(t, ent->expiry); return t <= ts; } diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_pv.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_pv.h index c9da15248a3e2b..e9573da497bee4 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_pv.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_pv.h @@ -27,7 +27,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -83,7 +83,7 @@ void ngtcp2_pv_entry_init(ngtcp2_pv_entry *pvent, const uint8_t *data, typedef struct ngtcp2_pv ngtcp2_pv; ngtcp2_static_ringbuf_def(pv_ents, NGTCP2_PV_MAX_ENTRIES, - sizeof(ngtcp2_pv_entry)); + sizeof(ngtcp2_pv_entry)) /* * ngtcp2_pv is the context of a single path validation. 
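Throughout this update the untyped ngtcp2_min/ngtcp2_max macros give way to per-type helpers such as ngtcp2_min_size and ngtcp2_max_uint64. The upstream definitions are not shown in this diff; presumably they are inline functions in the following spirit, which pin the operand type at the call site and avoid the double evaluation of a function-like macro. A sketch, not the upstream code:

#include <stdint.h>
#include <stddef.h>

/* A classic macro evaluates an argument twice and silently mixes
   signedness: */
#define MY_MAX(A, B) ((A) > (B) ? (A) : (B))

/* Typed inline helpers evaluate each argument exactly once and force
   both operands to one explicit type: */
static inline uint64_t my_max_uint64(uint64_t a, uint64_t b) {
  return a < b ? b : a;
}

static inline size_t my_min_size(size_t a, size_t b) {
  return a < b ? a : b;
}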
*/ @@ -191,4 +191,4 @@ ngtcp2_tstamp ngtcp2_pv_next_expiry(ngtcp2_pv *pv); */ void ngtcp2_pv_cancel_expired_timer(ngtcp2_pv *pv, ngtcp2_tstamp ts); -#endif /* NGTCP2_PV_H */ +#endif /* !defined(NGTCP2_PV_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_qlog.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_qlog.c index 27675347794b2a..c0f920746a4dff 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_qlog.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_qlog.c @@ -184,13 +184,12 @@ static uint8_t *write_pair_cid_impl(uint8_t *p, const uint8_t *name, write_pair_cid_impl((DEST), (const uint8_t *)(NAME), sizeof(NAME) - 1, \ (VALUE)) -#define ngtcp2_make_vec_lit(S) \ - { (uint8_t *)(S), sizeof((S)) - 1 } +#define ngtcp2_make_vec_lit(S) {(uint8_t *)(S), sizeof((S)) - 1} static uint8_t *write_common_fields(uint8_t *p, const ngtcp2_cid *odcid) { p = write_verbatim( - p, "\"common_fields\":{\"protocol_type\":[\"QUIC\"],\"time_format\":" - "\"relative\",\"reference_time\":0,\"group_id\":"); + p, "\"common_fields\":{\"protocol_type\":[\"QUIC\"],\"time_format\":" + "\"relative\",\"reference_time\":0,\"group_id\":"); p = write_cid(p, odcid); *p++ = '}'; return p; @@ -198,7 +197,7 @@ static uint8_t *write_common_fields(uint8_t *p, const ngtcp2_cid *odcid) { static uint8_t *write_trace(uint8_t *p, int server, const ngtcp2_cid *odcid) { p = write_verbatim( - p, "\"trace\":{\"vantage_point\":{\"name\":\"ngtcp2\",\"type\":"); + p, "\"trace\":{\"vantage_point\":{\"name\":\"ngtcp2\",\"type\":"); if (server) { p = write_string(p, "server"); } else { @@ -219,7 +218,7 @@ void ngtcp2_qlog_start(ngtcp2_qlog *qlog, const ngtcp2_cid *odcid, int server) { } p = write_verbatim( - p, "\x1e{\"qlog_format\":\"JSON-SEQ\",\"qlog_version\":\"0.3\","); + p, "\x1e{\"qlog_format\":\"JSON-SEQ\",\"qlog_version\":\"0.3\","); p = write_trace(p, server, odcid); p = write_verbatim(p, "}\n"); @@ -243,9 +242,9 @@ static ngtcp2_vec vec_pkt_type_0rtt = ngtcp2_make_vec_lit("0RTT"); static ngtcp2_vec vec_pkt_type_1rtt = ngtcp2_make_vec_lit("1RTT"); static ngtcp2_vec vec_pkt_type_retry = ngtcp2_make_vec_lit("retry"); static ngtcp2_vec vec_pkt_type_version_negotiation = - ngtcp2_make_vec_lit("version_negotiation"); + ngtcp2_make_vec_lit("version_negotiation"); static ngtcp2_vec vec_pkt_type_stateless_reset = - ngtcp2_make_vec_lit("stateless_reset"); + ngtcp2_make_vec_lit("stateless_reset"); static ngtcp2_vec vec_pkt_type_unknown = ngtcp2_make_vec_lit("unknown"); static const ngtcp2_vec *qlog_pkt_type(const ngtcp2_pkt_hd *hd) { @@ -638,8 +637,8 @@ write_connection_close_frame(uint8_t *p, const ngtcp2_connection_close *fr) { */ #define NGTCP2_QLOG_CONNECTION_CLOSE_FRAME_OVERHEAD 131 - p = write_verbatim(p, - "{\"frame_type\":\"connection_close\",\"error_space\":"); + p = + write_verbatim(p, "{\"frame_type\":\"connection_close\",\"error_space\":"); if (fr->type == NGTCP2_FRAME_CONNECTION_CLOSE) { p = write_string(p, "transport"); } else { @@ -772,10 +771,10 @@ void ngtcp2_qlog_write_frame(ngtcp2_qlog *qlog, const ngtcp2_frame *fr) { case NGTCP2_FRAME_ACK_ECN: if (ngtcp2_buf_left(&qlog->buf) < NGTCP2_QLOG_ACK_FRAME_BASE_OVERHEAD + - (size_t)(fr->type == NGTCP2_FRAME_ACK_ECN - ? NGTCP2_QLOG_ACK_FRAME_ECN_OVERHEAD - : 0) + - NGTCP2_QLOG_ACK_FRAME_RANGE_OVERHEAD * (1 + fr->ack.rangecnt) + 1) { + (size_t)(fr->type == NGTCP2_FRAME_ACK_ECN + ? 
NGTCP2_QLOG_ACK_FRAME_ECN_OVERHEAD + : 0) + + NGTCP2_QLOG_ACK_FRAME_RANGE_OVERHEAD * (1 + fr->ack.rangecnt) + 1) { return; } p = write_ack_frame(p, &fr->ack); @@ -934,8 +933,8 @@ void ngtcp2_qlog_pkt_sent_end(ngtcp2_qlog *qlog, const ngtcp2_pkt_hd *hd, } void ngtcp2_qlog_parameters_set_transport_params( - ngtcp2_qlog *qlog, const ngtcp2_transport_params *params, int server, - ngtcp2_qlog_side side) { + ngtcp2_qlog *qlog, const ngtcp2_transport_params *params, int server, + ngtcp2_qlog_side side) { uint8_t buf[1024]; uint8_t *p = buf; const ngtcp2_preferred_addr *paddr; @@ -950,7 +949,7 @@ void ngtcp2_qlog_parameters_set_transport_params( *p++ = '{'; p = qlog_write_time(qlog, p); p = write_verbatim( - p, ",\"name\":\"transport:parameters_set\",\"data\":{\"owner\":"); + p, ",\"name\":\"transport:parameters_set\",\"data\":{\"owner\":"); if (side == NGTCP2_QLOG_SIDE_LOCAL) { p = write_string(p, "local"); @@ -982,8 +981,8 @@ void ngtcp2_qlog_parameters_set_transport_params( *p++ = ','; p = write_pair_duration(p, "max_idle_timeout", params->max_idle_timeout); *p++ = ','; - p = write_pair_number(p, "max_udp_payload_size", - params->max_udp_payload_size); + p = + write_pair_number(p, "max_udp_payload_size", params->max_udp_payload_size); *p++ = ','; p = write_pair_number(p, "ack_delay_exponent", params->ack_delay_exponent); *p++ = ','; @@ -1106,7 +1105,7 @@ void ngtcp2_qlog_pkt_lost(ngtcp2_qlog *qlog, ngtcp2_rtb_entry *ent) { *p++ = '{'; p = qlog_write_time(qlog, p); p = write_verbatim( - p, ",\"name\":\"recovery:packet_lost\",\"data\":{\"header\":"); + p, ",\"name\":\"recovery:packet_lost\",\"data\":{\"header\":"); hd.type = ent->hd.type; hd.flags = ent->hd.flags; @@ -1134,13 +1133,11 @@ void ngtcp2_qlog_retry_pkt_received(ngtcp2_qlog *qlog, const ngtcp2_pkt_hd *hd, *buf.last++ = '{'; buf.last = qlog_write_time(qlog, buf.last); buf.last = write_verbatim( - buf.last, - ",\"name\":\"transport:packet_received\",\"data\":{\"header\":"); + buf.last, ",\"name\":\"transport:packet_received\",\"data\":{\"header\":"); - if (ngtcp2_buf_left(&buf) < - NGTCP2_QLOG_PKT_HD_OVERHEAD + hd->tokenlen * 2 + - sizeof(",\"retry_token\":{\"data\":\"\"}}}\n") - 1 + - retry->tokenlen * 2) { + if (ngtcp2_buf_left(&buf) < NGTCP2_QLOG_PKT_HD_OVERHEAD + hd->tokenlen * 2 + + sizeof(",\"retry_token\":{\"data\":\"\"}}}\n") - + 1 + retry->tokenlen * 2) { return; } @@ -1154,7 +1151,7 @@ void ngtcp2_qlog_retry_pkt_received(ngtcp2_qlog *qlog, const ngtcp2_pkt_hd *hd, } void ngtcp2_qlog_stateless_reset_pkt_received( - ngtcp2_qlog *qlog, const ngtcp2_pkt_stateless_reset *sr) { + ngtcp2_qlog *qlog, const ngtcp2_pkt_stateless_reset *sr) { uint8_t buf[256]; uint8_t *p = buf; ngtcp2_pkt_hd hd = {0}; @@ -1169,7 +1166,7 @@ void ngtcp2_qlog_stateless_reset_pkt_received( *p++ = '{'; p = qlog_write_time(qlog, p); p = write_verbatim( - p, ",\"name\":\"transport:packet_received\",\"data\":{\"header\":"); + p, ",\"name\":\"transport:packet_received\",\"data\":{\"header\":"); p = write_pkt_hd(p, &hd); *p++ = ','; p = write_pair_hex(p, "stateless_reset_token", sr->stateless_reset_token, @@ -1199,8 +1196,7 @@ void ngtcp2_qlog_version_negotiation_pkt_received(ngtcp2_qlog *qlog, *buf.last++ = '{'; buf.last = qlog_write_time(qlog, buf.last); buf.last = write_verbatim( - buf.last, - ",\"name\":\"transport:packet_received\",\"data\":{\"header\":"); + buf.last, ",\"name\":\"transport:packet_received\",\"data\":{\"header\":"); buf.last = write_pkt_hd(buf.last, hd); buf.last = write_verbatim(buf.last, ",\"supported_versions\":["); diff --git 
a/deps/ngtcp2/ngtcp2/lib/ngtcp2_qlog.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_qlog.h index b9107c0e5c031a..d2a5f1038c0f42 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_qlog.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_qlog.h @@ -27,7 +27,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -120,8 +120,8 @@ void ngtcp2_qlog_pkt_sent_end(ngtcp2_qlog *qlog, const ngtcp2_pkt_hd *hd, * "local", otherwise "remote". */ void ngtcp2_qlog_parameters_set_transport_params( - ngtcp2_qlog *qlog, const ngtcp2_transport_params *params, int server, - ngtcp2_qlog_side side); + ngtcp2_qlog *qlog, const ngtcp2_transport_params *params, int server, + ngtcp2_qlog_side side); /* * ngtcp2_qlog_metrics_updated writes metrics_updated event of @@ -147,7 +147,7 @@ void ngtcp2_qlog_retry_pkt_received(ngtcp2_qlog *qlog, const ngtcp2_pkt_hd *hd, * event for a received Stateless Reset packet. */ void ngtcp2_qlog_stateless_reset_pkt_received( - ngtcp2_qlog *qlog, const ngtcp2_pkt_stateless_reset *sr); + ngtcp2_qlog *qlog, const ngtcp2_pkt_stateless_reset *sr); /* * ngtcp2_qlog_version_negotiation_pkt_received writes packet_received @@ -158,4 +158,4 @@ void ngtcp2_qlog_version_negotiation_pkt_received(ngtcp2_qlog *qlog, const uint32_t *sv, size_t nsv); -#endif /* NGTCP2_QLOG_H */ +#endif /* !defined(NGTCP2_QLOG_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_range.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_range.c index 9379496b7d4b53..7bbefc0175c595 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_range.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_range.c @@ -33,11 +33,13 @@ void ngtcp2_range_init(ngtcp2_range *r, uint64_t begin, uint64_t end) { ngtcp2_range ngtcp2_range_intersect(const ngtcp2_range *a, const ngtcp2_range *b) { ngtcp2_range r = {0, 0}; - uint64_t begin = ngtcp2_max(a->begin, b->begin); - uint64_t end = ngtcp2_min(a->end, b->end); + uint64_t begin = ngtcp2_max_uint64(a->begin, b->begin); + uint64_t end = ngtcp2_min_uint64(a->end, b->end); + if (begin < end) { ngtcp2_range_init(&r, begin, end); } + return r; } diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_range.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_range.h index a776c4ec4768ce..22cd2951859ef1 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_range.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_range.h @@ -27,7 +27,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -58,7 +58,7 @@ uint64_t ngtcp2_range_len(const ngtcp2_range *r); /* * ngtcp2_range_eq returns nonzero if |a| equals |b|, such that - * a->begin == b->begin, and a->end == b->end hold. + * a->begin == b->begin and a->end == b->end hold. */ int ngtcp2_range_eq(const ngtcp2_range *a, const ngtcp2_range *b); @@ -77,4 +77,4 @@ void ngtcp2_range_cut(ngtcp2_range *left, ngtcp2_range *right, */ int ngtcp2_range_not_after(const ngtcp2_range *a, const ngtcp2_range *b); -#endif /* NGTCP2_RANGE_H */ +#endif /* !defined(NGTCP2_RANGE_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_rcvry.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_rcvry.h index 4cb40882192a77..e6321061b59cd1 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_rcvry.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_rcvry.h @@ -27,7 +27,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -37,4 +37,4 @@ /* NGTCP2_GRANULARITY is kGranularity described in RFC 9002. 
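The ngtcp2_range_intersect change above only swaps in the typed min/max helpers; the semantics are unchanged. For reference, a small sketch of those semantics, with arbitrary values:

#include <assert.h>

#include "ngtcp2_range.h"

static void range_demo(void) {
  ngtcp2_range a, b, r;

  ngtcp2_range_init(&a, 5, 10); /* [5, 10) */
  ngtcp2_range_init(&b, 8, 20); /* [8, 20) */

  /* begin = max(5, 8), end = min(10, 20), so the result is [8, 10). */
  r = ngtcp2_range_intersect(&a, &b);
  assert(r.begin == 8 && r.end == 10);

  /* Disjoint ranges produce the empty range {0, 0}. */
  ngtcp2_range_init(&b, 12, 20);
  r = ngtcp2_range_intersect(&a, &b);
  assert(ngtcp2_range_len(&r) == 0);
}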
*/ #define NGTCP2_GRANULARITY NGTCP2_MILLISECONDS -#endif /* NGTCP2_RCVRY_H */ +#endif /* !defined(NGTCP2_RCVRY_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_ringbuf.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_ringbuf.c index ecfdeb63b3485b..41446739bf699d 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_ringbuf.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_ringbuf.c @@ -27,24 +27,29 @@ #include #ifdef WIN32 # include -#endif +#endif /* defined(WIN32) */ #include "ngtcp2_macro.h" -#if defined(_MSC_VER) && _MSC_VER < 1941 && !defined(__clang__) && \ - (defined(_M_ARM) || defined(_M_ARM64)) -static unsigned int __popcnt(unsigned int x) { - unsigned int c = 0; - for (; x; ++c) { - x &= x - 1; - } - return c; +#ifndef NDEBUG +static int ispow2(size_t n) { +# if defined(_MSC_VER) && !defined(__clang__) && \ + (defined(_M_ARM) || (defined(_M_ARM64) && _MSC_VER < 1941)) + return n && !(n & (n - 1)); +# elif defined(WIN32) + return 1 == __popcnt((unsigned int)n); +# else /* !((defined(_MSC_VER) && !defined(__clang__) && (defined(_M_ARM) || \ + (defined(_M_ARM64) && _MSC_VER < 1941))) || defined(WIN32)) */ + return 1 == __builtin_popcount((unsigned int)n); +# endif /* !((defined(_MSC_VER) && !defined(__clang__) && (defined(_M_ARM) || \ + (defined(_M_ARM64) && _MSC_VER < 1941))) || defined(WIN32)) */ } -#endif +#endif /* !defined(NDEBUG) */ int ngtcp2_ringbuf_init(ngtcp2_ringbuf *rb, size_t nmemb, size_t size, const ngtcp2_mem *mem) { uint8_t *buf = ngtcp2_mem_malloc(mem, nmemb * size); + if (buf == NULL) { return NGTCP2_ERR_NOMEM; } @@ -56,11 +61,7 @@ int ngtcp2_ringbuf_init(ngtcp2_ringbuf *rb, size_t nmemb, size_t size, void ngtcp2_ringbuf_buf_init(ngtcp2_ringbuf *rb, size_t nmemb, size_t size, uint8_t *buf, const ngtcp2_mem *mem) { -#ifdef WIN32 - assert(1 == __popcnt((unsigned int)nmemb)); -#else - assert(1 == __builtin_popcount((unsigned int)nmemb)); -#endif + assert(ispow2(nmemb)); rb->buf = buf; rb->mem = mem; @@ -114,9 +115,10 @@ void ngtcp2_ringbuf_resize(ngtcp2_ringbuf *rb, size_t len) { rb->len = len; } -void *ngtcp2_ringbuf_get(ngtcp2_ringbuf *rb, size_t offset) { +void *ngtcp2_ringbuf_get(const ngtcp2_ringbuf *rb, size_t offset) { assert(offset < rb->len); offset = (rb->first + offset) & rb->mask; + return &rb->buf[offset * rb->size]; } diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_ringbuf.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_ringbuf.h index b28a882c4bae84..6953ea6278f88d 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_ringbuf.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_ringbuf.h @@ -27,7 +27,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -51,7 +51,7 @@ typedef struct ngtcp2_ringbuf { /* * ngtcp2_ringbuf_init initializes |rb|. |nmemb| is the number of * elements that can be stored in this buffer. |size| is the size of - * each element. |size| must be power of 2. + * each element. |nmemb| must be power of 2. * * This function returns 0 if it succeeds, or one of the following * negative error codes: @@ -64,7 +64,7 @@ int ngtcp2_ringbuf_init(ngtcp2_ringbuf *rb, size_t nmemb, size_t size, /* * ngtcp2_ringbuf_buf_init initializes |rb| with given buffer and - * size. + * size. Same restrictions are applied as ngtcp2_ringbuf_init. */ void ngtcp2_ringbuf_buf_init(ngtcp2_ringbuf *rb, size_t nmemb, size_t size, uint8_t *buf, const ngtcp2_mem *mem); @@ -79,15 +79,16 @@ void ngtcp2_ringbuf_free(ngtcp2_ringbuf *rb); the buffer backward, and returns the pointer to the element. Caller can store data to the buffer pointed by the returned pointer. 
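The power-of-two requirement documented in ngtcp2_ringbuf_init exists because the ring buffer wraps indices with a bitmask instead of a modulo. A self-contained sketch of the invariant; ispow2 mirrors the debug-only check added in this hunk, and rb_index mirrors the (first + offset) & mask computation in ngtcp2_ringbuf_get:

#include <assert.h>
#include <stddef.h>

/* nmemb must be a power of 2 so that (nmemb - 1) is an all-ones mask
   and (i & mask) == (i % nmemb) for any index i. */
static int ispow2(size_t n) { return n && !(n & (n - 1)); }

static size_t rb_index(size_t first, size_t offset, size_t nmemb) {
  assert(ispow2(nmemb));

  return (first + offset) & (nmemb - 1); /* cheap wrap-around, no division */
}

/* rb_index(6, 3, 8) == 1: logical offset 9 wraps to slot 1 of an
   8-slot ring. */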
If this action exceeds the capacity of the ring buffer, - the last element is silently overwritten, and rb->len remains - unchanged. */ + this function returns the pointer to the last element, and rb->len + remains unchanged. */ void *ngtcp2_ringbuf_push_front(ngtcp2_ringbuf *rb); /* ngtcp2_ringbuf_push_back moves the offset to the last element in the buffer forward, and returns the pointer to the element. Caller can store data to the buffer pointed by the returned pointer. If - this action exceeds the capacity of the ring buffer, the first - element is silently overwritten, and rb->len remains unchanged. */ + this action exceeds the capacity of the ring buffer, this function + returns the pointer to the first element, and rb->len remains + unchanged. */ void *ngtcp2_ringbuf_push_back(ngtcp2_ringbuf *rb); /* @@ -106,7 +107,7 @@ void ngtcp2_ringbuf_resize(ngtcp2_ringbuf *rb, size_t len); /* ngtcp2_ringbuf_get returns the pointer to the element at |offset|. */ -void *ngtcp2_ringbuf_get(ngtcp2_ringbuf *rb, size_t offset); +void *ngtcp2_ringbuf_get(const ngtcp2_ringbuf *rb, size_t offset); /* ngtcp2_ringbuf_len returns the number of elements stored. */ #define ngtcp2_ringbuf_len(RB) ((RB)->len) @@ -115,9 +116,8 @@ void *ngtcp2_ringbuf_get(ngtcp2_ringbuf *rb, size_t offset); int ngtcp2_ringbuf_full(ngtcp2_ringbuf *rb); /* ngtcp2_static_ringbuf_def defines ngtcp2_ringbuf struct wrapper - which uses a statically allocated buffer that is suitable for a - usage that does not change buffer size with ngtcp2_ringbuf_resize. - ngtcp2_ringbuf_free should never be called for rb field. */ + which uses a statically allocated buffer. ngtcp2_ringbuf_free + should never be called for rb field. */ #define ngtcp2_static_ringbuf_def(NAME, NMEMB, SIZE) \ typedef struct ngtcp2_static_ringbuf_##NAME { \ ngtcp2_ringbuf rb; \ @@ -125,8 +125,8 @@ int ngtcp2_ringbuf_full(ngtcp2_ringbuf *rb); } ngtcp2_static_ringbuf_##NAME; \ \ static inline void ngtcp2_static_ringbuf_##NAME##_init( \ - ngtcp2_static_ringbuf_##NAME *srb) { \ + ngtcp2_static_ringbuf_##NAME *srb) { \ ngtcp2_ringbuf_buf_init(&srb->rb, (NMEMB), (SIZE), srb->buf, NULL); \ } -#endif /* NGTCP2_RINGBUF_H */ +#endif /* !defined(NGTCP2_RINGBUF_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_rob.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_rob.c index 5cac383f7bb166..ce6c2113ddf1ce 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_rob.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_rob.c @@ -56,7 +56,6 @@ int ngtcp2_rob_data_new(ngtcp2_rob_data **pd, uint64_t offset, size_t chunk, (*pd)->range.begin = offset; (*pd)->range.end = offset + chunk; (*pd)->begin = (uint8_t *)(*pd) + sizeof(ngtcp2_rob_data); - (*pd)->end = (*pd)->begin + chunk; return 0; } @@ -69,8 +68,8 @@ int ngtcp2_rob_init(ngtcp2_rob *rob, size_t chunk, const ngtcp2_mem *mem) { int rv; ngtcp2_rob_gap *g; - ngtcp2_ksl_init(&rob->gapksl, ngtcp2_ksl_range_compar, sizeof(ngtcp2_range), - mem); + ngtcp2_ksl_init(&rob->gapksl, ngtcp2_ksl_range_compar, + ngtcp2_ksl_range_search, sizeof(ngtcp2_range), mem); rv = ngtcp2_rob_gap_new(&g, 0, UINT64_MAX, mem); if (rv != 0) { @@ -82,8 +81,8 @@ int ngtcp2_rob_init(ngtcp2_rob *rob, size_t chunk, const ngtcp2_mem *mem) { goto fail_gapksl_ksl_insert; } - ngtcp2_ksl_init(&rob->dataksl, ngtcp2_ksl_range_compar, sizeof(ngtcp2_range), - mem); + ngtcp2_ksl_init(&rob->dataksl, ngtcp2_ksl_range_compar, + ngtcp2_ksl_range_search, sizeof(ngtcp2_range), mem); rob->chunk = chunk; rob->mem = mem; @@ -126,8 +125,8 @@ static int rob_write_data(ngtcp2_rob *rob, uint64_t offset, const uint8_t *data, 
ngtcp2_range range = {offset, offset + len}; ngtcp2_ksl_it it; - for (it = ngtcp2_ksl_lower_bound_compar(&rob->dataksl, &range, - ngtcp2_ksl_range_exclusive_compar); + for (it = ngtcp2_ksl_lower_bound_search(&rob->dataksl, &range, + ngtcp2_ksl_range_exclusive_search); len; ngtcp2_ksl_it_next(&it)) { if (ngtcp2_ksl_it_end(&it)) { d = NULL; @@ -149,7 +148,8 @@ static int rob_write_data(ngtcp2_rob *rob, uint64_t offset, const uint8_t *data, } } - n = (size_t)ngtcp2_min((uint64_t)len, d->range.begin + rob->chunk - offset); + n = (size_t)ngtcp2_min_uint64((uint64_t)len, + d->range.begin + rob->chunk - offset); memcpy(d->begin + (offset - d->range.begin), data, n); offset += n; data += n; @@ -166,8 +166,8 @@ int ngtcp2_rob_push(ngtcp2_rob *rob, uint64_t offset, const uint8_t *data, ngtcp2_range m, l, r, q = {offset, offset + datalen}; ngtcp2_ksl_it it; - it = ngtcp2_ksl_lower_bound_compar(&rob->gapksl, &q, - ngtcp2_ksl_range_exclusive_compar); + it = ngtcp2_ksl_lower_bound_search(&rob->gapksl, &q, + ngtcp2_ksl_range_exclusive_search); for (; !ngtcp2_ksl_it_end(&it);) { g = ngtcp2_ksl_it_get(&it); @@ -176,9 +176,11 @@ int ngtcp2_rob_push(ngtcp2_rob *rob, uint64_t offset, const uint8_t *data, if (!ngtcp2_range_len(&m)) { break; } + if (ngtcp2_range_eq(&g->range, &m)) { ngtcp2_ksl_remove_hint(&rob->gapksl, &it, &it, &g->range); ngtcp2_rob_gap_del(g, rob->mem); + rv = rob_write_data(rob, m.begin, data + (m.begin - offset), (size_t)ngtcp2_range_len(&m)); if (rv != 0) { @@ -187,17 +189,21 @@ int ngtcp2_rob_push(ngtcp2_rob *rob, uint64_t offset, const uint8_t *data, continue; } + ngtcp2_range_cut(&l, &r, &g->range, &m); + if (ngtcp2_range_len(&l)) { ngtcp2_ksl_update_key(&rob->gapksl, &g->range, &l); g->range = l; if (ngtcp2_range_len(&r)) { ngtcp2_rob_gap *ng; + rv = ngtcp2_rob_gap_new(&ng, r.begin, r.end, rob->mem); if (rv != 0) { return rv; } + rv = ngtcp2_ksl_insert(&rob->gapksl, &it, &ng->range, ng); if (rv != 0) { ngtcp2_rob_gap_del(ng, rob->mem); @@ -208,13 +214,16 @@ int ngtcp2_rob_push(ngtcp2_rob *rob, uint64_t offset, const uint8_t *data, ngtcp2_ksl_update_key(&rob->gapksl, &g->range, &r); g->range = r; } + rv = rob_write_data(rob, m.begin, data + (m.begin - offset), (size_t)ngtcp2_range_len(&m)); if (rv != 0) { return rv; } + ngtcp2_ksl_it_next(&it); } + return 0; } @@ -230,12 +239,16 @@ void ngtcp2_rob_remove_prefix(ngtcp2_rob *rob, uint64_t offset) { if (offset <= g->range.begin) { break; } + if (offset < g->range.end) { ngtcp2_range r = {offset, g->range.end}; + ngtcp2_ksl_update_key(&rob->gapksl, &g->range, &r); g->range.begin = offset; + break; } + ngtcp2_ksl_remove_hint(&rob->gapksl, &it, &it, &g->range); ngtcp2_rob_gap_del(g, rob->mem); } @@ -247,12 +260,13 @@ void ngtcp2_rob_remove_prefix(ngtcp2_rob *rob, uint64_t offset) { if (offset < d->range.begin + rob->chunk) { return; } + ngtcp2_ksl_remove_hint(&rob->dataksl, &it, &it, &d->range); ngtcp2_rob_data_del(d, rob->mem); } } -size_t ngtcp2_rob_data_at(ngtcp2_rob *rob, const uint8_t **pdest, +size_t ngtcp2_rob_data_at(const ngtcp2_rob *rob, const uint8_t **pdest, uint64_t offset) { ngtcp2_rob_gap *g; ngtcp2_rob_data *d; @@ -278,8 +292,9 @@ size_t ngtcp2_rob_data_at(ngtcp2_rob *rob, const uint8_t **pdest, *pdest = d->begin + (offset - d->range.begin); - return (size_t)(ngtcp2_min(g->range.begin, d->range.begin + rob->chunk) - - offset); + return ( + size_t)(ngtcp2_min_uint64(g->range.begin, d->range.begin + rob->chunk) - + offset); } void ngtcp2_rob_pop(ngtcp2_rob *rob, uint64_t offset, size_t len) { @@ -299,7 +314,7 @@ void 
ngtcp2_rob_pop(ngtcp2_rob *rob, uint64_t offset, size_t len) { ngtcp2_rob_data_del(d, rob->mem); } -uint64_t ngtcp2_rob_first_gap_offset(ngtcp2_rob *rob) { +uint64_t ngtcp2_rob_first_gap_offset(const ngtcp2_rob *rob) { ngtcp2_ksl_it it = ngtcp2_ksl_begin(&rob->gapksl); ngtcp2_rob_gap *g; @@ -312,6 +327,6 @@ uint64_t ngtcp2_rob_first_gap_offset(ngtcp2_rob *rob) { return g->range.begin; } -int ngtcp2_rob_data_buffered(ngtcp2_rob *rob) { +int ngtcp2_rob_data_buffered(const ngtcp2_rob *rob) { return ngtcp2_ksl_len(&rob->dataksl) != 0; } diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_rob.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_rob.h index 6518d56c539185..d53b5160b10230 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_rob.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_rob.h @@ -27,7 +27,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -70,12 +70,10 @@ void ngtcp2_rob_gap_del(ngtcp2_rob_gap *g, const ngtcp2_mem *mem); * ngtcp2_rob_data holds the buffered stream data. */ typedef struct ngtcp2_rob_data { - /* range is the range of this gap. */ + /* range is the range of this data. */ ngtcp2_range range; /* begin points to the buffer. */ uint8_t *begin; - /* end points to the one beyond of the last byte of the buffer */ - uint8_t *end; } ngtcp2_rob_data; /* @@ -110,8 +108,8 @@ typedef struct ngtcp2_rob { /* gapksl maintains the range of offset which is not received yet. Initially, its range is [0, UINT64_MAX). */ ngtcp2_ksl gapksl; - /* dataksl maintains the list of buffers which store received data - ordered by stream offset. */ + /* dataksl maintains the buffers which store received out-of-order + data ordered by stream offset. */ ngtcp2_ksl dataksl; /* mem is custom memory allocator */ const ngtcp2_mem *mem; @@ -137,8 +135,8 @@ int ngtcp2_rob_init(ngtcp2_rob *rob, size_t chunk, const ngtcp2_mem *mem); void ngtcp2_rob_free(ngtcp2_rob *rob); /* - * ngtcp2_rob_push adds new data of length |datalen| at the stream - * offset |offset|. + * ngtcp2_rob_push adds new data pointed by |data| of length |datalen| + * at the stream offset |offset|. * * This function returns 0 if it succeeds, or one of the following * negative error codes: @@ -151,7 +149,8 @@ int ngtcp2_rob_push(ngtcp2_rob *rob, uint64_t offset, const uint8_t *data, /* * ngtcp2_rob_remove_prefix removes gap up to |offset|, exclusive. It - * also removes data buffer if it is completely included in |offset|. + * also removes buffered data if it is completely included in + * |offset|. */ void ngtcp2_rob_remove_prefix(ngtcp2_rob *rob, uint64_t offset); @@ -159,9 +158,10 @@ void ngtcp2_rob_remove_prefix(ngtcp2_rob *rob, uint64_t offset); * ngtcp2_rob_data_at stores the pointer to the buffer of stream * offset |offset| to |*pdest| if it is available, and returns the * valid length of available data. If no data is available, it - * returns 0. + * returns 0. This function only returns the data before the first + * gap. It returns 0 even if data is available after the first gap. */ -size_t ngtcp2_rob_data_at(ngtcp2_rob *rob, const uint8_t **pdest, +size_t ngtcp2_rob_data_at(const ngtcp2_rob *rob, const uint8_t **pdest, uint64_t offset); /* @@ -181,11 +181,11 @@ void ngtcp2_rob_pop(ngtcp2_rob *rob, uint64_t offset, size_t len); * ngtcp2_rob_first_gap_offset returns the offset to the first gap. * If there is no gap, it returns UINT64_MAX. 
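A hedged sketch of how the consumer side of the reorder buffer fits together, using the APIs as declared in this header; the 1024-byte chunk size and the payloads are arbitrary, and error checks are elided. It also shows the behavior clarified above: ngtcp2_rob_data_at never returns data past the first gap:

#include <assert.h>
#include <string.h>

#include <ngtcp2/ngtcp2.h>

#include "ngtcp2_rob.h"

static void rob_demo(void) {
  ngtcp2_rob rob;
  const uint8_t *p = NULL;
  uint8_t buf[16] = {0};
  size_t n;

  ngtcp2_rob_init(&rob, 1024, ngtcp2_mem_default());

  /* Data at offset 5 arrives first: it sits behind the gap [0, 5),
     so nothing is readable yet. */
  ngtcp2_rob_push(&rob, 5, (const uint8_t *)"world", 5);
  n = ngtcp2_rob_data_at(&rob, &p, 0);
  assert(n == 0);

  /* The hole is filled; [0, 10) becomes contiguous and the first gap
     moves to offset 10. */
  ngtcp2_rob_push(&rob, 0, (const uint8_t *)"hello", 5);
  assert(ngtcp2_rob_first_gap_offset(&rob) == 10);

  n = ngtcp2_rob_data_at(&rob, &p, 0); /* n == 10, all in one chunk */
  memcpy(buf, p, n);
  ngtcp2_rob_pop(&rob, 0, n); /* hand the consumed bytes back */

  ngtcp2_rob_free(&rob);
}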
*/ -uint64_t ngtcp2_rob_first_gap_offset(ngtcp2_rob *rob); +uint64_t ngtcp2_rob_first_gap_offset(const ngtcp2_rob *rob); /* * ngtcp2_rob_data_buffered returns nonzero if any data is buffered. */ -int ngtcp2_rob_data_buffered(ngtcp2_rob *rob); +int ngtcp2_rob_data_buffered(const ngtcp2_rob *rob); -#endif /* NGTCP2_ROB_H */ +#endif /* !defined(NGTCP2_ROB_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_rst.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_rst.c index b8587e3e9dbac8..89c89acdc265a2 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_rst.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_rst.c @@ -35,26 +35,25 @@ void ngtcp2_rs_init(ngtcp2_rs *rs) { rs->interval = UINT64_MAX; rs->delivered = 0; rs->prior_delivered = 0; - rs->prior_ts = 0; + rs->prior_ts = UINT64_MAX; rs->tx_in_flight = 0; rs->lost = 0; rs->prior_lost = 0; rs->send_elapsed = 0; rs->ack_elapsed = 0; + rs->last_end_seq = -1; rs->is_app_limited = 0; } void ngtcp2_rst_init(ngtcp2_rst *rst) { ngtcp2_rs_init(&rst->rs); - ngtcp2_window_filter_init(&rst->wf, 12); rst->delivered = 0; rst->delivered_ts = 0; rst->first_sent_ts = 0; rst->app_limited = 0; - rst->next_round_delivered = 0; - rst->round_count = 0; rst->is_cwnd_limited = 0; rst->lost = 0; + rst->last_seq = -1; } void ngtcp2_rst_on_pkt_sent(ngtcp2_rst *rst, ngtcp2_rtb_entry *ent, @@ -68,27 +67,21 @@ void ngtcp2_rst_on_pkt_sent(ngtcp2_rst *rst, ngtcp2_rtb_entry *ent, ent->rst.is_app_limited = rst->app_limited != 0; ent->rst.tx_in_flight = cstat->bytes_in_flight + ent->pktlen; ent->rst.lost = rst->lost; + ent->rst.end_seq = ++rst->last_seq; } -void ngtcp2_rst_on_ack_recv(ngtcp2_rst *rst, ngtcp2_conn_stat *cstat, - uint64_t pkt_delivered) { +void ngtcp2_rst_on_ack_recv(ngtcp2_rst *rst, ngtcp2_conn_stat *cstat) { ngtcp2_rs *rs = &rst->rs; - uint64_t rate; if (rst->app_limited && rst->delivered > rst->app_limited) { rst->app_limited = 0; } - if (pkt_delivered >= rst->next_round_delivered) { - rst->next_round_delivered = pkt_delivered; - ++rst->round_count; - } - - if (rs->prior_ts == 0) { + if (rs->prior_ts == UINT64_MAX) { return; } - rs->interval = ngtcp2_max(rs->send_elapsed, rs->ack_elapsed); + rs->interval = ngtcp2_max_uint64(rs->send_elapsed, rs->ack_elapsed); rs->delivered = rst->delivered - rs->prior_delivered; rs->lost = rst->lost - rs->prior_lost; @@ -102,12 +95,13 @@ void ngtcp2_rst_on_ack_recv(ngtcp2_rst *rst, ngtcp2_conn_stat *cstat, return; } - rate = rs->delivered * NGTCP2_SECONDS / rs->interval; + cstat->delivery_rate_sec = rs->delivered * NGTCP2_SECONDS / rs->interval; +} - if (rate > ngtcp2_window_filter_get_best(&rst->wf) || !rst->app_limited) { - ngtcp2_window_filter_update(&rst->wf, rate, rst->round_count); - cstat->delivery_rate_sec = ngtcp2_window_filter_get_best(&rst->wf); - } +static int rst_is_newest_pkt(const ngtcp2_rst *rst, const ngtcp2_rtb_entry *ent, + const ngtcp2_rs *rs) { + return ent->ts > rst->first_sent_ts || + (ent->ts == rst->first_sent_ts && ent->rst.end_seq > rs->last_end_seq); } void ngtcp2_rst_update_rate_sample(ngtcp2_rst *rst, const ngtcp2_rtb_entry *ent, @@ -117,7 +111,7 @@ void ngtcp2_rst_update_rate_sample(ngtcp2_rst *rst, const ngtcp2_rtb_entry *ent, rst->delivered += ent->pktlen; rst->delivered_ts = ts; - if (ent->rst.delivered > rs->prior_delivered) { + if (rs->prior_ts == UINT64_MAX || rst_is_newest_pkt(rst, ent, rs)) { rs->prior_delivered = ent->rst.delivered; rs->prior_ts = ent->rst.delivered_ts; rs->is_app_limited = ent->rst.is_app_limited; @@ -125,6 +119,7 @@ void ngtcp2_rst_update_rate_sample(ngtcp2_rst *rst, const ngtcp2_rtb_entry *ent, 
rs->ack_elapsed = rst->delivered_ts - ent->rst.delivered_ts; rs->tx_in_flight = ent->rst.tx_in_flight; rs->prior_lost = ent->rst.lost; + rs->last_end_seq = ent->rst.end_seq; rst->first_sent_ts = ent->ts; } } diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_rst.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_rst.h index c9e1e161b7766f..95616eee97d99f 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_rst.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_rst.h @@ -27,7 +27,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -51,6 +51,7 @@ typedef struct ngtcp2_rs { uint64_t prior_lost; ngtcp2_duration send_elapsed; ngtcp2_duration ack_elapsed; + int64_t last_end_seq; int is_app_limited; } ngtcp2_rs; @@ -58,18 +59,20 @@ void ngtcp2_rs_init(ngtcp2_rs *rs); /* * ngtcp2_rst implements delivery rate estimation described in - * https://tools.ietf.org/html/draft-cheng-iccrg-delivery-rate-estimation-00 + * https://ietf-wg-ccwg.github.io/draft-cardwell-ccwg-bbr/draft-cardwell-ccwg-bbr.html */ typedef struct ngtcp2_rst { ngtcp2_rs rs; - ngtcp2_window_filter wf; uint64_t delivered; ngtcp2_tstamp delivered_ts; ngtcp2_tstamp first_sent_ts; uint64_t app_limited; - uint64_t next_round_delivered; - uint64_t round_count; uint64_t lost; + /* last_seq is the sequence number of packets across all packet + number spaces. If we would adopt single packet number sequence + across all packet number spaces, we can replace this with a + packet number. */ + int64_t last_seq; int is_cwnd_limited; } ngtcp2_rst; @@ -77,10 +80,9 @@ void ngtcp2_rst_init(ngtcp2_rst *rst); void ngtcp2_rst_on_pkt_sent(ngtcp2_rst *rst, ngtcp2_rtb_entry *ent, const ngtcp2_conn_stat *cstat); -void ngtcp2_rst_on_ack_recv(ngtcp2_rst *rst, ngtcp2_conn_stat *cstat, - uint64_t pkt_delivered); +void ngtcp2_rst_on_ack_recv(ngtcp2_rst *rst, ngtcp2_conn_stat *cstat); void ngtcp2_rst_update_rate_sample(ngtcp2_rst *rst, const ngtcp2_rtb_entry *ent, ngtcp2_tstamp ts); void ngtcp2_rst_update_app_limited(ngtcp2_rst *rst, ngtcp2_conn_stat *cstat); -#endif /* NGTCP2_RST_H */ +#endif /* !defined(NGTCP2_RST_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_rtb.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_rtb.c index 5ebdce7d0e2715..4d417186e15854 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_rtb.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_rtb.c @@ -38,7 +38,7 @@ #include "ngtcp2_tstamp.h" #include "ngtcp2_frame_chain.h" -ngtcp2_objalloc_def(rtb_entry, ngtcp2_rtb_entry, oplent); +ngtcp2_objalloc_def(rtb_entry, ngtcp2_rtb_entry, oplent) static void rtb_entry_init(ngtcp2_rtb_entry *ent, const ngtcp2_pkt_hd *hd, ngtcp2_frame_chain *frc, ngtcp2_tstamp ts, @@ -53,7 +53,6 @@ static void rtb_entry_init(ngtcp2_rtb_entry *ent, const ngtcp2_pkt_hd *hd, ent->lost_ts = UINT64_MAX; ent->pktlen = pktlen; ent->flags = flags; - ent->next = NULL; } int ngtcp2_rtb_entry_objalloc_new(ngtcp2_rtb_entry **pent, @@ -82,19 +81,14 @@ void ngtcp2_rtb_entry_objalloc_del(ngtcp2_rtb_entry *ent, ngtcp2_objalloc_rtb_entry_release(objalloc, ent); } -static int greater(const ngtcp2_ksl_key *lhs, const ngtcp2_ksl_key *rhs) { - return *(int64_t *)lhs > *(int64_t *)rhs; -} - -void ngtcp2_rtb_init(ngtcp2_rtb *rtb, ngtcp2_pktns_id pktns_id, - ngtcp2_strm *crypto, ngtcp2_rst *rst, ngtcp2_cc *cc, +void ngtcp2_rtb_init(ngtcp2_rtb *rtb, ngtcp2_rst *rst, ngtcp2_cc *cc, int64_t cc_pkt_num, ngtcp2_log *log, ngtcp2_qlog *qlog, ngtcp2_objalloc *rtb_entry_objalloc, ngtcp2_objalloc *frc_objalloc, const ngtcp2_mem *mem) { rtb->rtb_entry_objalloc = rtb_entry_objalloc; rtb->frc_objalloc = frc_objalloc; - 
ngtcp2_ksl_init(&rtb->ents, greater, sizeof(int64_t), mem); - rtb->crypto = crypto; + ngtcp2_ksl_init(&rtb->ents, ngtcp2_ksl_int64_greater, + ngtcp2_ksl_int64_greater_search, sizeof(int64_t), mem); rtb->rst = rst; rtb->cc = cc; rtb->log = log; @@ -105,7 +99,6 @@ void ngtcp2_rtb_init(ngtcp2_rtb *rtb, ngtcp2_pktns_id pktns_id, rtb->num_retransmittable = 0; rtb->num_pto_eliciting = 0; rtb->probe_pkt_left = 0; - rtb->pktns_id = pktns_id; rtb->cc_pkt_num = cc_pkt_num; rtb->cc_bytes_in_flight = 0; rtb->persistent_congestion_start_ts = UINT64_MAX; @@ -145,9 +138,11 @@ static void rtb_on_add(ngtcp2_rtb *rtb, ngtcp2_rtb_entry *ent, if (ent->flags & NGTCP2_RTB_ENTRY_FLAG_ACK_ELICITING) { ++rtb->num_ack_eliciting; } + if (ent->flags & NGTCP2_RTB_ENTRY_FLAG_RETRANSMITTABLE) { ++rtb->num_retransmittable; } + if (ent->flags & NGTCP2_RTB_ENTRY_FLAG_PTO_ELICITING) { ++rtb->num_pto_eliciting; } @@ -209,10 +204,10 @@ static size_t rtb_on_remove(ngtcp2_rtb *rtb, ngtcp2_rtb_entry *ent, #define NGTCP2_RECLAIM_FLAG_ON_LOSS 0x01u /* - * rtb_reclaim_frame queues unacknowledged frames included in |ent| - * for retransmission. The re-queued frames are not deleted from - * |ent|. It returns the number of frames queued. |flags| is bitwise - * OR of 0 or more of NGTCP2_RECLAIM_FLAG_*. + * rtb_reclaim_frame copies and queues frames included in |ent| for + * retransmission. The frames are not deleted from |ent|. It returns + * the number of frames queued. |flags| is bitwise OR of 0 or more of + * NGTCP2_RECLAIM_FLAG_*. */ static ngtcp2_ssize rtb_reclaim_frame(ngtcp2_rtb *rtb, uint8_t flags, ngtcp2_conn *conn, ngtcp2_pktns *pktns, @@ -229,11 +224,13 @@ static ngtcp2_ssize rtb_reclaim_frame(ngtcp2_rtb *rtb, uint8_t flags, /* TODO Reconsider the order of pfrc */ for (frc = ent->frc; frc; frc = frc->next) { fr = &frc->fr; + /* Check that a late ACK acknowledged this frame. */ if (frc->binder && (frc->binder->flags & NGTCP2_FRAME_CHAIN_BINDER_FLAG_ACK)) { continue; } + switch (frc->fr.type) { case NGTCP2_FRAME_STREAM: strm = ngtcp2_conn_find_stream(conn, fr->stream.stream_id); @@ -244,14 +241,15 @@ static ngtcp2_ssize rtb_reclaim_frame(ngtcp2_rtb *rtb, uint8_t flags, gap = ngtcp2_strm_get_unacked_range_after(strm, fr->stream.offset); range.begin = fr->stream.offset; - range.end = fr->stream.offset + - ngtcp2_vec_len(fr->stream.data, fr->stream.datacnt); + range.end = + fr->stream.offset + ngtcp2_vec_len(fr->stream.data, fr->stream.datacnt); range = ngtcp2_range_intersect(&range, &gap); + if (ngtcp2_range_len(&range) == 0) { if (!fr->stream.fin) { /* 0 length STREAM frame with offset == 0 must be - retransmitted if no non-empty data is sent to this stream - and no data in this stream is acknowledged. */ + retransmitted if no non-empty data are sent to this + stream, and no data in this stream are acknowledged. 
*/ if (fr->stream.offset != 0 || fr->stream.datacnt != 0 || strm->tx.offset || (strm->flags & NGTCP2_STRM_FLAG_ANY_ACKED)) { continue; @@ -268,7 +266,7 @@ static ngtcp2_ssize rtb_reclaim_frame(ngtcp2_rtb *rtb, uint8_t flags, } rv = ngtcp2_frame_chain_stream_datacnt_objalloc_new( - &nfrc, fr->stream.datacnt, rtb->frc_objalloc, rtb->mem); + &nfrc, fr->stream.datacnt, rtb->frc_objalloc, rtb->mem); if (rv != 0) { return rv; } @@ -282,8 +280,10 @@ static ngtcp2_ssize rtb_reclaim_frame(ngtcp2_rtb *rtb, uint8_t flags, ngtcp2_frame_chain_objalloc_del(nfrc, rtb->frc_objalloc, rtb->mem); return rv; } + if (!ngtcp2_strm_is_tx_queued(strm)) { strm->cycle = ngtcp2_conn_tx_strmq_first_cycle(conn); + rv = ngtcp2_conn_tx_strmq_push(conn, strm); if (rv != 0) { return rv; @@ -294,20 +294,22 @@ static ngtcp2_ssize rtb_reclaim_frame(ngtcp2_rtb *rtb, uint8_t flags, continue; case NGTCP2_FRAME_CRYPTO: - /* Don't resend CRYPTO frame if the whole region it contains has - been acknowledged */ - gap = ngtcp2_strm_get_unacked_range_after(rtb->crypto, fr->stream.offset); + /* Do not resend CRYPTO frame if the whole region it contains + has been acknowledged */ + gap = ngtcp2_strm_get_unacked_range_after(&pktns->crypto.strm, + fr->stream.offset); range.begin = fr->stream.offset; - range.end = fr->stream.offset + - ngtcp2_vec_len(fr->stream.data, fr->stream.datacnt); + range.end = + fr->stream.offset + ngtcp2_vec_len(fr->stream.data, fr->stream.datacnt); range = ngtcp2_range_intersect(&range, &gap); + if (ngtcp2_range_len(&range) == 0) { continue; } rv = ngtcp2_frame_chain_stream_datacnt_objalloc_new( - &nfrc, fr->stream.datacnt, rtb->frc_objalloc, rtb->mem); + &nfrc, fr->stream.datacnt, rtb->frc_objalloc, rtb->mem); if (rv != 0) { return rv; } @@ -328,8 +330,8 @@ static ngtcp2_ssize rtb_reclaim_frame(ngtcp2_rtb *rtb, uint8_t flags, continue; case NGTCP2_FRAME_NEW_TOKEN: rv = ngtcp2_frame_chain_new_token_objalloc_new( - &nfrc, fr->new_token.token, fr->new_token.tokenlen, rtb->frc_objalloc, - rtb->mem); + &nfrc, fr->new_token.token, fr->new_token.tokenlen, rtb->frc_objalloc, + rtb->mem); if (rv != 0) { return rv; } @@ -366,7 +368,7 @@ static ngtcp2_ssize rtb_reclaim_frame(ngtcp2_rtb *rtb, uint8_t flags, case NGTCP2_FRAME_MAX_STREAM_DATA: strm = ngtcp2_conn_find_stream(conn, fr->max_stream_data.stream_id); if (strm == NULL || !ngtcp2_strm_require_retransmit_max_stream_data( - strm, &fr->max_stream_data)) { + strm, &fr->max_stream_data)) { continue; } @@ -374,7 +376,7 @@ static ngtcp2_ssize rtb_reclaim_frame(ngtcp2_rtb *rtb, uint8_t flags, case NGTCP2_FRAME_STREAM_DATA_BLOCKED: strm = ngtcp2_conn_find_stream(conn, fr->stream_data_blocked.stream_id); if (strm == NULL || !ngtcp2_strm_require_retransmit_stream_data_blocked( - strm, &fr->stream_data_blocked)) { + strm, &fr->stream_data_blocked)) { continue; } @@ -423,6 +425,7 @@ static int conn_process_lost_datagram(ngtcp2_conn *conn, if (rv != 0) { return NGTCP2_ERR_CALLBACK_FAILURE; } + break; } } @@ -430,10 +433,9 @@ static int conn_process_lost_datagram(ngtcp2_conn *conn, return 0; } -static int rtb_on_pkt_lost(ngtcp2_rtb *rtb, ngtcp2_ksl_it *it, - ngtcp2_rtb_entry *ent, ngtcp2_conn_stat *cstat, - ngtcp2_conn *conn, ngtcp2_pktns *pktns, - ngtcp2_tstamp ts) { +static int rtb_on_pkt_lost(ngtcp2_rtb *rtb, ngtcp2_rtb_entry *ent, + ngtcp2_conn_stat *cstat, ngtcp2_conn *conn, + ngtcp2_pktns *pktns, ngtcp2_tstamp ts) { int rv; ngtcp2_ssize reclaimed; ngtcp2_cc *cc = rtb->cc; @@ -451,7 +453,7 @@ static int rtb_on_pkt_lost(ngtcp2_rtb *rtb, ngtcp2_ksl_it *it, } else if 
(rtb->cc->on_pkt_lost) { cc->on_pkt_lost(cc, cstat, ngtcp2_cc_pkt_init(&pkt, ent->hd.pkt_num, ent->pktlen, - rtb->pktns_id, ent->ts, ent->rst.lost, + pktns->id, ent->ts, ent->rst.lost, ent->rst.tx_in_flight, ent->rst.is_app_limited), ts); @@ -463,34 +465,25 @@ static int rtb_on_pkt_lost(ngtcp2_rtb *rtb, ngtcp2_ksl_it *it, ent->hd.pkt_num); assert(!(ent->flags & NGTCP2_RTB_ENTRY_FLAG_LOST_RETRANSMITTED)); assert(UINT64_MAX == ent->lost_ts); - - ent->flags |= NGTCP2_RTB_ENTRY_FLAG_LOST_RETRANSMITTED; - ent->lost_ts = ts; - - ++rtb->num_lost_pkts; - - ngtcp2_ksl_it_next(it); - - return 0; - } - - if (conn->callbacks.lost_datagram && - (ent->flags & NGTCP2_RTB_ENTRY_FLAG_DATAGRAM)) { - rv = conn_process_lost_datagram(conn, ent); - if (rv != 0) { - return rv; + } else { + if (conn->callbacks.lost_datagram && + (ent->flags & NGTCP2_RTB_ENTRY_FLAG_DATAGRAM)) { + rv = conn_process_lost_datagram(conn, ent); + if (rv != 0) { + return rv; + } } - } - if (ent->flags & NGTCP2_RTB_ENTRY_FLAG_RETRANSMITTABLE) { - assert(ent->frc); - assert(!(ent->flags & NGTCP2_RTB_ENTRY_FLAG_LOST_RETRANSMITTED)); - assert(UINT64_MAX == ent->lost_ts); + if (ent->flags & NGTCP2_RTB_ENTRY_FLAG_RETRANSMITTABLE) { + assert(ent->frc); + assert(!(ent->flags & NGTCP2_RTB_ENTRY_FLAG_LOST_RETRANSMITTED)); + assert(UINT64_MAX == ent->lost_ts); - reclaimed = + reclaimed = rtb_reclaim_frame(rtb, NGTCP2_RECLAIM_FLAG_ON_LOSS, conn, pktns, ent); - if (reclaimed < 0) { - return (int)reclaimed; + if (reclaimed < 0) { + return (int)reclaimed; + } } } @@ -499,8 +492,6 @@ static int rtb_on_pkt_lost(ngtcp2_rtb *rtb, ngtcp2_ksl_it *it, ++rtb->num_lost_pkts; - ngtcp2_ksl_it_next(it); - return 0; } @@ -518,7 +509,7 @@ int ngtcp2_rtb_add(ngtcp2_rtb *rtb, ngtcp2_rtb_entry *ent, return 0; } -ngtcp2_ksl_it ngtcp2_rtb_head(ngtcp2_rtb *rtb) { +ngtcp2_ksl_it ngtcp2_rtb_head(const ngtcp2_rtb *rtb) { return ngtcp2_ksl_begin(&rtb->ents); } @@ -566,22 +557,24 @@ static void conn_ack_crypto_data(ngtcp2_conn *conn, ngtcp2_pktns *pktns, return; } -static int rtb_process_acked_pkt(ngtcp2_rtb *rtb, ngtcp2_rtb_entry *ent, - ngtcp2_conn *conn) { +static int process_acked_pkt(ngtcp2_rtb_entry *ent, ngtcp2_conn *conn, + ngtcp2_pktns *pktns) { ngtcp2_frame_chain *frc; uint64_t prev_stream_offset, stream_offset; ngtcp2_strm *strm; int rv; uint64_t datalen; - ngtcp2_strm *crypto = rtb->crypto; - ngtcp2_pktns *pktns = NULL; + ngtcp2_strm *crypto = &pktns->crypto.strm; if ((ent->flags & NGTCP2_RTB_ENTRY_FLAG_PMTUD_PROBE) && conn->pmtud && conn->pmtud->tx_pkt_num <= ent->hd.pkt_num) { ngtcp2_pmtud_probe_success(conn->pmtud, ent->pktlen); - conn->dcid.current.max_udp_payload_size = - ngtcp2_max(conn->dcid.current.max_udp_payload_size, ent->pktlen); + if (conn->dcid.current.max_udp_payload_size < ent->pktlen) { + conn->dcid.current.max_udp_payload_size = ent->pktlen; + conn->cstat.max_tx_udp_payload_size = + ngtcp2_conn_get_path_max_tx_udp_payload_size(conn); + } if (ngtcp2_pmtud_finished(conn->pmtud)) { ngtcp2_conn_stop_pmtud(conn); @@ -611,23 +604,25 @@ static int rtb_process_acked_pkt(ngtcp2_rtb *rtb, ngtcp2_rtb_entry *ent, } prev_stream_offset = ngtcp2_strm_get_acked_offset(strm); + rv = ngtcp2_strm_ack_data( - strm, frc->fr.stream.offset, - ngtcp2_vec_len(frc->fr.stream.data, frc->fr.stream.datacnt)); + strm, frc->fr.stream.offset, + ngtcp2_vec_len(frc->fr.stream.data, frc->fr.stream.datacnt)); if (rv != 0) { return rv; } if (conn->callbacks.acked_stream_data_offset) { stream_offset = ngtcp2_strm_get_acked_offset(strm); + datalen = stream_offset - prev_stream_offset; 
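/* Illustration, not part of the upstream change: if the contiguous
   acked prefix of the stream was [0, 100) before this ACK and
   ngtcp2_strm_ack_data() extends it to [0, 250), then
   prev_stream_offset == 100, stream_offset == 250, and the
   acked_stream_data_offset callback reports datalen == 150 bytes
   acknowledged starting at offset 100. */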
if (datalen == 0 && !frc->fr.stream.fin) { break; } rv = conn->callbacks.acked_stream_data_offset( - conn, strm->stream_id, prev_stream_offset, datalen, conn->user_data, - strm->stream_user_data); + conn, strm->stream_id, prev_stream_offset, datalen, conn->user_data, + strm->stream_user_data); if (rv != 0) { return NGTCP2_ERR_CALLBACK_FAILURE; } @@ -637,36 +632,25 @@ static int rtb_process_acked_pkt(ngtcp2_rtb *rtb, ngtcp2_rtb_entry *ent, if (rv != 0) { return rv; } + break; case NGTCP2_FRAME_CRYPTO: prev_stream_offset = ngtcp2_strm_get_acked_offset(crypto); + rv = ngtcp2_strm_ack_data( - crypto, frc->fr.stream.offset, - ngtcp2_vec_len(frc->fr.stream.data, frc->fr.stream.datacnt)); + crypto, frc->fr.stream.offset, + ngtcp2_vec_len(frc->fr.stream.data, frc->fr.stream.datacnt)); if (rv != 0) { return rv; } stream_offset = ngtcp2_strm_get_acked_offset(crypto); + datalen = stream_offset - prev_stream_offset; if (datalen == 0) { break; } - switch (rtb->pktns_id) { - case NGTCP2_PKTNS_ID_INITIAL: - pktns = conn->in_pktns; - break; - case NGTCP2_PKTNS_ID_HANDSHAKE: - pktns = conn->hs_pktns; - break; - case NGTCP2_PKTNS_ID_APPLICATION: - pktns = &conn->pktns; - break; - default: - ngtcp2_unreachable(); - } - conn_ack_crypto_data(conn, pktns, datalen); break; @@ -675,11 +659,14 @@ static int rtb_process_acked_pkt(ngtcp2_rtb *rtb, ngtcp2_rtb_entry *ent, if (strm == NULL) { break; } + strm->flags |= NGTCP2_STRM_FLAG_RESET_STREAM_ACKED; + rv = ngtcp2_conn_close_stream_if_shut_rdwr(conn, strm); if (rv != 0) { return rv; } + break; case NGTCP2_FRAME_RETIRE_CONNECTION_ID: ngtcp2_conn_untrack_retired_dcid_seq(conn, @@ -702,14 +689,17 @@ static int rtb_process_acked_pkt(ngtcp2_rtb *rtb, ngtcp2_rtb_entry *ent, if (rv != 0) { return NGTCP2_ERR_CALLBACK_FAILURE; } + break; } } + return 0; } static void rtb_on_pkt_acked(ngtcp2_rtb *rtb, ngtcp2_rtb_entry *ent, - ngtcp2_conn_stat *cstat, ngtcp2_tstamp ts) { + ngtcp2_conn_stat *cstat, const ngtcp2_pktns *pktns, + ngtcp2_tstamp ts) { ngtcp2_cc *cc = rtb->cc; ngtcp2_cc_pkt pkt; @@ -718,7 +708,7 @@ static void rtb_on_pkt_acked(ngtcp2_rtb *rtb, ngtcp2_rtb_entry *ent, if (cc->on_pkt_acked) { cc->on_pkt_acked(cc, cstat, ngtcp2_cc_pkt_init(&pkt, ent->hd.pkt_num, ent->pktlen, - rtb->pktns_id, ent->ts, ent->rst.lost, + pktns->id, ent->ts, ent->rst.lost, ent->rst.tx_in_flight, ent->rst.is_app_limited), ts); @@ -745,12 +735,13 @@ static void conn_verify_ecn(ngtcp2_conn *conn, ngtcp2_pktns *pktns, pktns->rx.ecn.ack.ect1 > fr->ecn.ect1 || pktns->rx.ecn.ack.ce > fr->ecn.ce || (fr->ecn.ect0 - pktns->rx.ecn.ack.ect0) + - (fr->ecn.ce - pktns->rx.ecn.ack.ce) < - ecn_acked || + (fr->ecn.ce - pktns->rx.ecn.ack.ce) < + ecn_acked || fr->ecn.ect0 > pktns->tx.ecn.ect0 || fr->ecn.ect1))) { ngtcp2_log_info(&conn->log, NGTCP2_LOG_EVENT_CON, "path is not ECN capable"); conn->tx.ecn.state = NGTCP2_ECN_STATE_FAILED; + return; } @@ -762,7 +753,7 @@ static void conn_verify_ecn(ngtcp2_conn *conn, ngtcp2_pktns *pktns, if (fr->type == NGTCP2_FRAME_ACK_ECN) { if (cc->congestion_event && largest_pkt_sent_ts != UINT64_MAX && fr->ecn.ce > pktns->rx.ecn.ack.ce) { - cc->congestion_event(cc, cstat, largest_pkt_sent_ts, ts); + cc->congestion_event(cc, cstat, largest_pkt_sent_ts, 0, ts); } pktns->rx.ecn.ack.ect0 = fr->ecn.ect0; @@ -784,7 +775,7 @@ ngtcp2_ssize ngtcp2_rtb_recv_ack(ngtcp2_rtb *rtb, const ngtcp2_ack *fr, size_t i; int rv; ngtcp2_ksl_it it; - ngtcp2_ssize num_acked = 0; + size_t num_acked = 0; ngtcp2_tstamp largest_pkt_sent_ts = UINT64_MAX; int64_t pkt_num; ngtcp2_cc *cc = rtb->cc; @@ -820,6 
+811,7 @@ ngtcp2_ssize ngtcp2_rtb_recv_ack(ngtcp2_rtb *rtb, const ngtcp2_ack *fr, conn_verify_ecn(conn, pktns, rtb->cc, cstat, fr, ecn_acked, largest_pkt_sent_ts, ts); } + return 0; } @@ -848,7 +840,7 @@ ngtcp2_ssize ngtcp2_rtb_recv_ack(ngtcp2_rtb *rtb, const ngtcp2_ack *fr, ++num_acked; } - for (i = 0; i < fr->rangecnt;) { + for (i = 0; i < fr->rangecnt; ++i) { largest_ack = min_ack - (int64_t)fr->ranges[i].gap - 2; min_ack = largest_ack - (int64_t)fr->ranges[i].len; @@ -862,6 +854,7 @@ ngtcp2_ssize ngtcp2_rtb_recv_ack(ngtcp2_rtb *rtb, const ngtcp2_ack *fr, if (pkt_num < min_ack) { break; } + ent = ngtcp2_ksl_it_get(&it); if (ent->flags & NGTCP2_RTB_ENTRY_FLAG_ACK_ELICITING) { @@ -871,12 +864,11 @@ ngtcp2_ssize ngtcp2_rtb_recv_ack(ngtcp2_rtb *rtb, const ngtcp2_ack *fr, rtb_remove(rtb, &it, &acked_ent, ent, cstat); ++num_acked; } - - ++i; } if (largest_pkt_sent_ts != UINT64_MAX && ack_eliciting_pkt_acked) { - cc_ack.rtt = pkt_ts - largest_pkt_sent_ts; + cc_ack.rtt = + ngtcp2_max_uint64(pkt_ts - largest_pkt_sent_ts, NGTCP2_NANOSECONDS); rv = ngtcp2_conn_update_rtt(conn, cc_ack.rtt, fr->ack_delay_unscaled, ts); if (rv == 0 && cc->new_rtt_sample) { @@ -891,7 +883,7 @@ ngtcp2_ssize ngtcp2_rtb_recv_ack(ngtcp2_rtb *rtb, const ngtcp2_ack *fr, ++ecn_acked; } - rv = rtb_process_acked_pkt(rtb, ent, conn); + rv = process_acked_pkt(ent, conn, pktns); if (rv != 0) { goto fail; } @@ -903,7 +895,7 @@ ngtcp2_ssize ngtcp2_rtb_recv_ack(ngtcp2_rtb *rtb, const ngtcp2_ack *fr, cc_ack.pkt_delivered = ent->rst.delivered; } - rtb_on_pkt_acked(rtb, ent, cstat, ts); + rtb_on_pkt_acked(rtb, ent, cstat, pktns, ts); acked_ent = ent->next; ngtcp2_rtb_entry_objalloc_del(ent, rtb->rtb_entry_objalloc, rtb->frc_objalloc, rtb->mem); @@ -916,7 +908,7 @@ ngtcp2_ssize ngtcp2_rtb_recv_ack(ngtcp2_rtb *rtb, const ngtcp2_ack *fr, } else { /* For unit tests */ for (ent = acked_ent; ent; ent = acked_ent) { - rtb_on_pkt_acked(rtb, ent, cstat, ts); + rtb_on_pkt_acked(rtb, ent, cstat, pktns, ts); acked_ent = ent->next; ngtcp2_rtb_entry_objalloc_del(ent, rtb->rtb_entry_objalloc, rtb->frc_objalloc, rtb->mem); @@ -924,27 +916,29 @@ ngtcp2_ssize ngtcp2_rtb_recv_ack(ngtcp2_rtb *rtb, const ngtcp2_ack *fr, } if (rtb->cc->on_spurious_congestion && num_lost_pkts && - rtb->num_lost_pkts - rtb->num_lost_pmtud_pkts == 0) { + rtb->num_lost_pkts == rtb->num_lost_pmtud_pkts) { rtb->cc->on_spurious_congestion(cc, cstat, ts); } - ngtcp2_rst_on_ack_recv(rtb->rst, cstat, cc_ack.pkt_delivered); + if (num_acked) { + ngtcp2_rst_on_ack_recv(rtb->rst, cstat); - if (conn && num_acked > 0) { - rv = rtb_detect_lost_pkt(rtb, &cc_ack.bytes_lost, conn, pktns, cstat, ts); - if (rv != 0) { - return rv; + if (conn) { + rv = rtb_detect_lost_pkt(rtb, &cc_ack.bytes_lost, conn, pktns, cstat, ts); + if (rv != 0) { + return rv; + } } } rtb->rst->lost += cc_ack.bytes_lost; cc_ack.largest_pkt_sent_ts = largest_pkt_sent_ts; - if (cc->on_ack_recv) { + if (num_acked && cc->on_ack_recv) { cc->on_ack_recv(cc, cstat, &cc_ack, ts); } - return num_acked; + return (ngtcp2_ssize)num_acked; fail: for (ent = acked_ent; ent; ent = acked_ent) { @@ -958,7 +952,8 @@ ngtcp2_ssize ngtcp2_rtb_recv_ack(ngtcp2_rtb *rtb, const ngtcp2_ack *fr, static int rtb_pkt_lost(ngtcp2_rtb *rtb, ngtcp2_conn_stat *cstat, const ngtcp2_rtb_entry *ent, ngtcp2_duration loss_delay, - size_t pkt_thres, ngtcp2_tstamp ts) { + size_t pkt_thres, const ngtcp2_pktns *pktns, + ngtcp2_tstamp ts) { ngtcp2_tstamp loss_time; if (ngtcp2_tstamp_elapsed(ent->ts, loss_delay, ts) || @@ -966,27 +961,27 @@ static int 
rtb_pkt_lost(ngtcp2_rtb *rtb, ngtcp2_conn_stat *cstat, return 1; } - loss_time = cstat->loss_time[rtb->pktns_id]; + loss_time = cstat->loss_time[pktns->id]; if (loss_time == UINT64_MAX) { loss_time = ent->ts + loss_delay; } else { - loss_time = ngtcp2_min(loss_time, ent->ts + loss_delay); + loss_time = ngtcp2_min_uint64(loss_time, ent->ts + loss_delay); } - cstat->loss_time[rtb->pktns_id] = loss_time; + cstat->loss_time[pktns->id] = loss_time; return 0; } /* - * rtb_compute_pkt_loss_delay computes loss delay. + * compute_pkt_loss_delay computes loss delay. */ static ngtcp2_duration compute_pkt_loss_delay(const ngtcp2_conn_stat *cstat) { /* 9/8 is kTimeThreshold */ ngtcp2_duration loss_delay = - ngtcp2_max(cstat->latest_rtt, cstat->smoothed_rtt) * 9 / 8; - return ngtcp2_max(loss_delay, NGTCP2_GRANULARITY); + ngtcp2_max_uint64(cstat->latest_rtt, cstat->smoothed_rtt) * 9 / 8; + return ngtcp2_max_uint64(loss_delay, NGTCP2_GRANULARITY); } /* @@ -999,9 +994,9 @@ static int conn_all_ecn_pkt_lost(ngtcp2_conn *conn) { ngtcp2_pktns *pktns = &conn->pktns; return (!in_pktns || in_pktns->tx.ecn.validation_pkt_sent == - in_pktns->tx.ecn.validation_pkt_lost) && + in_pktns->tx.ecn.validation_pkt_lost) && (!hs_pktns || hs_pktns->tx.ecn.validation_pkt_sent == - hs_pktns->tx.ecn.validation_pkt_lost) && + hs_pktns->tx.ecn.validation_pkt_lost) && pktns->tx.ecn.validation_pkt_sent == pktns->tx.ecn.validation_pkt_lost; } @@ -1017,16 +1012,16 @@ static int rtb_detect_lost_pkt(ngtcp2_rtb *rtb, uint64_t *ppkt_lost, ngtcp2_cc *cc = rtb->cc; int rv; uint64_t pkt_thres = - rtb->cc_bytes_in_flight / cstat->max_tx_udp_payload_size / 2; + rtb->cc_bytes_in_flight / cstat->max_tx_udp_payload_size / 2; size_t ecn_pkt_lost = 0; ngtcp2_tstamp start_ts; ngtcp2_duration pto = ngtcp2_conn_compute_pto(conn, pktns); uint64_t bytes_lost = 0; ngtcp2_duration max_ack_delay; - pkt_thres = ngtcp2_max(pkt_thres, NGTCP2_PKT_THRESHOLD); - pkt_thres = ngtcp2_min(pkt_thres, 256); - cstat->loss_time[rtb->pktns_id] = UINT64_MAX; + pkt_thres = ngtcp2_max_uint64(pkt_thres, NGTCP2_PKT_THRESHOLD); + pkt_thres = ngtcp2_min_uint64(pkt_thres, 256); + cstat->loss_time[pktns->id] = UINT64_MAX; loss_delay = compute_pkt_loss_delay(cstat); it = ngtcp2_ksl_lower_bound(&rtb->ents, &rtb->largest_acked_tx_pkt_num); @@ -1037,25 +1032,27 @@ static int rtb_detect_lost_pkt(ngtcp2_rtb *rtb, uint64_t *ppkt_lost, break; } - if (rtb_pkt_lost(rtb, cstat, ent, loss_delay, (size_t)pkt_thres, ts)) { + if (rtb_pkt_lost(rtb, cstat, ent, loss_delay, (size_t)pkt_thres, pktns, + ts)) { /* All entries from ent are considered to be lost. */ latest_ts = oldest_ts = ent->ts; /* +1 to pick this packet for persistent congestion in the following loop. */ last_lost_pkt_num = ent->hd.pkt_num + 1; max_ack_delay = conn->remote.transport_params - ? conn->remote.transport_params->max_ack_delay - : 0; + ? 
conn->remote.transport_params->max_ack_delay + : 0; congestion_period = - (cstat->smoothed_rtt + - ngtcp2_max(4 * cstat->rttvar, NGTCP2_GRANULARITY) + max_ack_delay) * - NGTCP2_PERSISTENT_CONGESTION_THRESHOLD; + (cstat->smoothed_rtt + + ngtcp2_max_uint64(4 * cstat->rttvar, NGTCP2_GRANULARITY) + + max_ack_delay) * + NGTCP2_PERSISTENT_CONGESTION_THRESHOLD; - start_ts = ngtcp2_max(rtb->persistent_congestion_start_ts, - cstat->first_rtt_sample_ts); + start_ts = ngtcp2_max_uint64(rtb->persistent_congestion_start_ts, + cstat->first_rtt_sample_ts); - for (; !ngtcp2_ksl_it_end(&it);) { + for (; !ngtcp2_ksl_it_end(&it); ngtcp2_ksl_it_next(&it)) { ent = ngtcp2_ksl_it_get(&it); if (last_lost_pkt_num == ent->hd.pkt_num + 1 && ent->ts >= start_ts) { @@ -1066,12 +1063,12 @@ static int rtb_detect_lost_pkt(ngtcp2_rtb *rtb, uint64_t *ppkt_lost, } if ((ent->flags & NGTCP2_RTB_ENTRY_FLAG_LOST_RETRANSMITTED)) { - if (rtb->pktns_id != NGTCP2_PKTNS_ID_APPLICATION || + if (pktns->id != NGTCP2_PKTNS_ID_APPLICATION || last_lost_pkt_num == -1 || latest_ts - oldest_ts >= congestion_period) { break; } - ngtcp2_ksl_it_next(&it); + continue; } @@ -1081,7 +1078,7 @@ static int rtb_detect_lost_pkt(ngtcp2_rtb *rtb, uint64_t *ppkt_lost, } bytes_lost += rtb_on_remove(rtb, ent, cstat); - rv = rtb_on_pkt_lost(rtb, &it, ent, cstat, conn, pktns, ts); + rv = rtb_on_pkt_lost(rtb, ent, cstat, conn, pktns, ts); if (rv != 0) { return rv; } @@ -1098,13 +1095,16 @@ static int rtb_detect_lost_pkt(ngtcp2_rtb *rtb, uint64_t *ppkt_lost, if (conn->tx.ecn.validation_start_ts == UINT64_MAX) { break; } + if (ts - conn->tx.ecn.validation_start_ts < 3 * pto) { pktns->tx.ecn.validation_pkt_lost += ecn_pkt_lost; assert(pktns->tx.ecn.validation_pkt_sent >= pktns->tx.ecn.validation_pkt_lost); break; } + conn->tx.ecn.state = NGTCP2_ECN_STATE_UNKNOWN; + /* fall through */ case NGTCP2_ECN_STATE_UNKNOWN: pktns->tx.ecn.validation_pkt_lost += ecn_pkt_lost; @@ -1119,7 +1119,7 @@ static int rtb_detect_lost_pkt(ngtcp2_rtb *rtb, uint64_t *ppkt_lost, } if (cc->congestion_event) { - cc->congestion_event(cc, cstat, latest_ts, ts); + cc->congestion_event(cc, cstat, latest_ts, bytes_lost, ts); } loss_window = latest_ts - oldest_ts; @@ -1130,23 +1130,22 @@ static int rtb_detect_lost_pkt(ngtcp2_rtb *rtb, uint64_t *ppkt_lost, * persistent congestion there, then it is a lot easier to just * not enable it during handshake. */ - if (rtb->pktns_id == NGTCP2_PKTNS_ID_APPLICATION && loss_window > 0) { - if (loss_window >= congestion_period) { - ngtcp2_log_info(rtb->log, NGTCP2_LOG_EVENT_LDC, - "persistent congestion loss_window=%" PRIu64 - " congestion_period=%" PRIu64, - loss_window, congestion_period); - - /* Reset min_rtt, srtt, and rttvar here. Next new RTT - sample will be used to recalculate these values. */ - cstat->min_rtt = UINT64_MAX; - cstat->smoothed_rtt = conn->local.settings.initial_rtt; - cstat->rttvar = conn->local.settings.initial_rtt / 2; - cstat->first_rtt_sample_ts = UINT64_MAX; - - if (cc->on_persistent_congestion) { - cc->on_persistent_congestion(cc, cstat, ts); - } + if (pktns->id == NGTCP2_PKTNS_ID_APPLICATION && loss_window && + loss_window >= congestion_period) { + ngtcp2_log_info(rtb->log, NGTCP2_LOG_EVENT_LDC, + "persistent congestion loss_window=%" PRIu64 + " congestion_period=%" PRIu64, + loss_window, congestion_period); + + /* Reset min_rtt, srtt, and rttvar here. Next new RTT + sample will be used to recalculate these values. 
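+
+         As a rough numeric illustration (assuming the RFC 9002
+         persistent congestion threshold of 3, which
+         NGTCP2_PERSISTENT_CONGESTION_THRESHOLD corresponds to):
+         with smoothed_rtt = 100ms, rttvar = 10ms, and
+         max_ack_delay = 25ms, congestion_period =
+         (100 + max(4 * 10, granularity) + 25) * 3 = 495ms, so this
+         branch is taken only when the loss window spans at least
+         495ms of consecutive losses.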
*/ + cstat->min_rtt = UINT64_MAX; + cstat->smoothed_rtt = conn->local.settings.initial_rtt; + cstat->rttvar = conn->local.settings.initial_rtt / 2; + cstat->first_rtt_sample_ts = UINT64_MAX; + + if (cc->on_persistent_congestion) { + cc->on_persistent_congestion(cc, cstat, ts); } } @@ -1243,7 +1242,7 @@ void ngtcp2_rtb_remove_expired_lost_pkt(ngtcp2_rtb *rtb, ngtcp2_duration pto, } } -ngtcp2_tstamp ngtcp2_rtb_lost_pkt_ts(ngtcp2_rtb *rtb) { +ngtcp2_tstamp ngtcp2_rtb_lost_pkt_ts(const ngtcp2_rtb *rtb) { ngtcp2_ksl_it it; ngtcp2_rtb_entry *ent; @@ -1338,11 +1337,13 @@ static int rtb_on_pkt_lost_resched_move(ngtcp2_rtb *rtb, ngtcp2_conn *conn, ngtcp2_frame_chain_objalloc_del(frc, rtb->frc_objalloc, rtb->mem); break; } + rv = ngtcp2_strm_streamfrq_push(strm, frc); if (rv != 0) { ngtcp2_frame_chain_objalloc_del(frc, rtb->frc_objalloc, rtb->mem); return rv; } + if (!ngtcp2_strm_is_tx_queued(strm)) { strm->cycle = ngtcp2_conn_tx_strmq_first_cycle(conn); rv = ngtcp2_conn_tx_strmq_push(conn, strm); @@ -1350,6 +1351,7 @@ static int rtb_on_pkt_lost_resched_move(ngtcp2_rtb *rtb, ngtcp2_conn *conn, return rv; } } + break; case NGTCP2_FRAME_CRYPTO: frc = *pfrc; @@ -1363,6 +1365,7 @@ static int rtb_on_pkt_lost_resched_move(ngtcp2_rtb *rtb, ngtcp2_conn *conn, ngtcp2_frame_chain_objalloc_del(frc, rtb->frc_objalloc, rtb->mem); return rv; } + break; case NGTCP2_FRAME_DATAGRAM: case NGTCP2_FRAME_DATAGRAM_LEN: @@ -1379,6 +1382,7 @@ static int rtb_on_pkt_lost_resched_move(ngtcp2_rtb *rtb, ngtcp2_conn *conn, *pfrc = (*pfrc)->next; ngtcp2_frame_chain_objalloc_del(frc, rtb->frc_objalloc, rtb->mem); + break; default: pfrc = &(*pfrc)->next; @@ -1408,8 +1412,10 @@ int ngtcp2_rtb_remove_all(ngtcp2_rtb *rtb, ngtcp2_conn *conn, assert(0 == rv); rv = rtb_on_pkt_lost_resched_move(rtb, conn, pktns, ent); + ngtcp2_rtb_entry_objalloc_del(ent, rtb->rtb_entry_objalloc, rtb->frc_objalloc, rtb->mem); + if (rv != 0) { return rv; } @@ -1443,7 +1449,7 @@ void ngtcp2_rtb_remove_early_data(ngtcp2_rtb *rtb, ngtcp2_conn_stat *cstat) { } } -int ngtcp2_rtb_empty(ngtcp2_rtb *rtb) { +int ngtcp2_rtb_empty(const ngtcp2_rtb *rtb) { return ngtcp2_ksl_len(&rtb->ents) == 0; } @@ -1460,7 +1466,7 @@ ngtcp2_ssize ngtcp2_rtb_reclaim_on_pto(ngtcp2_rtb *rtb, ngtcp2_conn *conn, size_t atmost = num_pkts; it = ngtcp2_ksl_end(&rtb->ents); - for (; !ngtcp2_ksl_it_begin(&it) && num_pkts >= 1;) { + for (; !ngtcp2_ksl_it_begin(&it) && num_pkts;) { ngtcp2_ksl_it_prev(&it); ent = ngtcp2_ksl_it_get(&it); @@ -1473,13 +1479,13 @@ ngtcp2_ssize ngtcp2_rtb_reclaim_on_pto(ngtcp2_rtb *rtb, ngtcp2_conn *conn, assert(ent->frc); reclaimed = - rtb_reclaim_frame(rtb, NGTCP2_RECLAIM_FLAG_NONE, conn, pktns, ent); + rtb_reclaim_frame(rtb, NGTCP2_RECLAIM_FLAG_NONE, conn, pktns, ent); if (reclaimed < 0) { return reclaimed; } - /* Mark reclaimed even if reclaimed == 0 so that we can skip it in - the next run. */ + /* Mark ent reclaimed even if reclaimed == 0 so that we can skip + it in the next run. */ ent->flags |= NGTCP2_RTB_ENTRY_FLAG_PTO_RECLAIMED; assert(rtb->num_retransmittable); diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_rtb.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_rtb.h index a1ff208b19eac7..2ef772b2e14f4b 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_rtb.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_rtb.h @@ -27,7 +27,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -85,7 +85,7 @@ typedef struct ngtcp2_rtb_entry ngtcp2_rtb_entry; /* * ngtcp2_rtb_entry is an object stored in ngtcp2_rtb. 
It corresponds - * to the one packet which is waiting for its ACK. + * to a single packet that is waiting for its acknowledgement. */ struct ngtcp2_rtb_entry { union { @@ -98,10 +98,11 @@ struct ngtcp2_rtb_entry { uint8_t flags; } hd; ngtcp2_frame_chain *frc; - /* ts is the time point when a packet included in this entry is sent - to a peer. */ + /* ts is the time point when a packet included in this entry is + sent to a remote endpoint. */ ngtcp2_tstamp ts; - /* lost_ts is the time when this entry is marked lost. */ + /* lost_ts is the time when this entry is declared to be + lost. */ ngtcp2_tstamp lost_ts; /* pktlen is the length of QUIC packet */ size_t pktlen; @@ -111,6 +112,7 @@ struct ngtcp2_rtb_entry { ngtcp2_tstamp first_sent_ts; uint64_t tx_in_flight; uint64_t lost; + int64_t end_seq; int is_app_limited; } rst; /* flags is bitwise-OR of zero or more of @@ -122,11 +124,11 @@ struct ngtcp2_rtb_entry { }; }; -ngtcp2_objalloc_decl(rtb_entry, ngtcp2_rtb_entry, oplent); +ngtcp2_objalloc_decl(rtb_entry, ngtcp2_rtb_entry, oplent) /* - * ngtcp2_rtb_entry_new allocates ngtcp2_rtb_entry object, and assigns - * its pointer to |*pent|. + * ngtcp2_rtb_entry_objalloc_new allocates ngtcp2_rtb_entry object via + * |objalloc|, and assigns its pointer to |*pent|. */ int ngtcp2_rtb_entry_objalloc_new(ngtcp2_rtb_entry **pent, const ngtcp2_pkt_hd *hd, @@ -145,7 +147,7 @@ void ngtcp2_rtb_entry_objalloc_del(ngtcp2_rtb_entry *ent, const ngtcp2_mem *mem); /* - * ngtcp2_rtb tracks sent packets, and its ACK timeout for + * ngtcp2_rtb tracks sent packets, and their acknowledgement timeout for * retransmission. */ typedef struct ngtcp2_rtb { @@ -154,34 +156,33 @@ typedef struct ngtcp2_rtb { /* ents includes ngtcp2_rtb_entry sorted by decreasing order of packet number. */ ngtcp2_ksl ents; - /* crypto is CRYPTO stream. */ - ngtcp2_strm *crypto; ngtcp2_rst *rst; ngtcp2_cc *cc; ngtcp2_log *log; ngtcp2_qlog *qlog; const ngtcp2_mem *mem; /* largest_acked_tx_pkt_num is the largest packet number - acknowledged by the peer. */ + acknowledged by a remote endpoint. */ int64_t largest_acked_tx_pkt_num; - /* num_ack_eliciting is the number of ACK eliciting entries. */ + /* num_ack_eliciting is the number of ACK eliciting entries in + ents. */ size_t num_ack_eliciting; /* num_retransmittable is the number of packets which contain frames - that must be retransmitted on loss. */ + that must be retransmitted on loss in ents. */ size_t num_retransmittable; /* num_pto_eliciting is the number of packets that elicit PTO probe - packets. */ + packets in ents. */ size_t num_pto_eliciting; /* probe_pkt_left is the number of probe packet to send */ size_t probe_pkt_left; - /* pktns_id is the identifier of packet number space. */ - ngtcp2_pktns_id pktns_id; /* cc_pkt_num is the smallest packet number that is contributed to ngtcp2_conn_stat.bytes_in_flight. */ int64_t cc_pkt_num; /* cc_bytes_in_flight is the number of in-flight bytes that is contributed to ngtcp2_conn_stat.bytes_in_flight. It only - includes the bytes after congestion state is reset. */ + includes the bytes after congestion state is reset, that is, it + only counts packets whose packet number is greater than or equal + to cc_pkt_num. */ uint64_t cc_bytes_in_flight; /* persistent_congestion_start_ts is the time when persistent congestion evaluation is started. It happens roughly after @@ -199,8 +200,7 @@ typedef struct ngtcp2_rtb { /* * ngtcp2_rtb_init initializes |rtb|.
*/ -void ngtcp2_rtb_init(ngtcp2_rtb *rtb, ngtcp2_pktns_id pktns_id, - ngtcp2_strm *crypto, ngtcp2_rst *rst, ngtcp2_cc *cc, +void ngtcp2_rtb_init(ngtcp2_rtb *rtb, ngtcp2_rst *rst, ngtcp2_cc *cc, int64_t cc_pkt_num, ngtcp2_log *log, ngtcp2_qlog *qlog, ngtcp2_objalloc *rtb_entry_objalloc, ngtcp2_objalloc *frc_objalloc, const ngtcp2_mem *mem); @@ -227,13 +227,13 @@ int ngtcp2_rtb_add(ngtcp2_rtb *rtb, ngtcp2_rtb_entry *ent, * which has the largest packet number. If there is no entry, * returned value satisfies ngtcp2_ksl_it_end(&it) != 0. */ -ngtcp2_ksl_it ngtcp2_rtb_head(ngtcp2_rtb *rtb); +ngtcp2_ksl_it ngtcp2_rtb_head(const ngtcp2_rtb *rtb); /* - * ngtcp2_rtb_recv_ack removes acked ngtcp2_rtb_entry from |rtb|. - * |pkt_num| is a packet number which includes |fr|. |pkt_ts| is the - * timestamp when packet is received. |ts| should be the current - * time. Usually they are the same, but for buffered packets, + * ngtcp2_rtb_recv_ack removes an acknowledged ngtcp2_rtb_entry from + * |rtb|. |pkt_num| is a packet number which includes |fr|. |pkt_ts| + * is the timestamp when packet is received. |ts| should be the + * current time. Usually they are the same, but for buffered packets, * |pkt_ts| would be earlier than |ts|. * * This function returns the number of newly acknowledged packets if @@ -252,7 +252,7 @@ ngtcp2_ssize ngtcp2_rtb_recv_ack(ngtcp2_rtb *rtb, const ngtcp2_ack *fr, /* * ngtcp2_rtb_detect_lost_pkt detects lost packets and prepends the * frames contained them to |*pfrc|. Even when this function fails, - * some frames might be prepended to |*pfrc| and the caller should + * some frames might be prepended to |*pfrc|, and the caller should * handle them. */ int ngtcp2_rtb_detect_lost_pkt(ngtcp2_rtb *rtb, ngtcp2_conn *conn, @@ -266,16 +266,16 @@ void ngtcp2_rtb_remove_expired_lost_pkt(ngtcp2_rtb *rtb, ngtcp2_duration pto, ngtcp2_tstamp ts); /* - * ngtcp2_rtb_lost_pkt_ts returns the earliest time when the still - * retained packet was lost. It returns UINT64_MAX if no such packet - * exists. + * ngtcp2_rtb_lost_pkt_ts returns the timestamp when an oldest lost + * packet tracked by |rtb| was declared lost. It returns UINT64_MAX + * if no such packet exists. */ -ngtcp2_tstamp ngtcp2_rtb_lost_pkt_ts(ngtcp2_rtb *rtb); +ngtcp2_tstamp ngtcp2_rtb_lost_pkt_ts(const ngtcp2_rtb *rtb); /* - * ngtcp2_rtb_remove_all removes all packets from |rtb| and prepends + * ngtcp2_rtb_remove_all removes all packets from |rtb|, and prepends * all frames to |*pfrc|. Even when this function fails, some frames - * might be prepended to |*pfrc| and the caller should handle them. + * might be prepended to |*pfrc|, and the caller should handle them. */ int ngtcp2_rtb_remove_all(ngtcp2_rtb *rtb, ngtcp2_conn *conn, ngtcp2_pktns *pktns, ngtcp2_conn_stat *cstat); @@ -286,9 +286,9 @@ int ngtcp2_rtb_remove_all(ngtcp2_rtb *rtb, ngtcp2_conn *conn, void ngtcp2_rtb_remove_early_data(ngtcp2_rtb *rtb, ngtcp2_conn_stat *cstat); /* - * ngtcp2_rtb_empty returns nonzero if |rtb| have no entry. + * ngtcp2_rtb_empty returns nonzero if |rtb| has no entry. */ -int ngtcp2_rtb_empty(ngtcp2_rtb *rtb); +int ngtcp2_rtb_empty(const ngtcp2_rtb *rtb); /* * ngtcp2_rtb_reset_cc_state resets congestion state in |rtb|. @@ -298,15 +298,15 @@ int ngtcp2_rtb_empty(ngtcp2_rtb *rtb); void ngtcp2_rtb_reset_cc_state(ngtcp2_rtb *rtb, int64_t cc_pkt_num); /* - * ngtcp2_rtb_remove_expired_lost_pkt ensures that the number of lost - * packets at most |n|. + * ngtcp2_rtb_remove_excessive_lost_pkt ensures that the number of + * lost packets is at most |n|. 
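 * For example (hypothetical numbers), if |rtb| still retains 7
 * entries that were declared lost and n == 5, the 2 oldest such
 * entries are removed.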
*/ void ngtcp2_rtb_remove_excessive_lost_pkt(ngtcp2_rtb *rtb, size_t n); /* * ngtcp2_rtb_reclaim_on_pto reclaims up to |num_pkts| packets which - * are in-flight and not marked lost to send them in PTO probe. The - * reclaimed frames are chained to |*pfrc|. + * are in-flight and not marked lost. The reclaimed frames may be + * sent in a PTO probe packet. * * This function returns the number of packets reclaimed if it * succeeds, or one of the following negative error codes: @@ -317,4 +317,4 @@ void ngtcp2_rtb_remove_excessive_lost_pkt(ngtcp2_rtb *rtb, size_t n); ngtcp2_ssize ngtcp2_rtb_reclaim_on_pto(ngtcp2_rtb *rtb, ngtcp2_conn *conn, ngtcp2_pktns *pktns, size_t num_pkts); -#endif /* NGTCP2_RTB_H */ +#endif /* !defined(NGTCP2_RTB_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_settings.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_settings.c new file mode 100644 index 00000000000000..77a68bd112e3b2 --- /dev/null +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_settings.c @@ -0,0 +1,91 @@ +/* + * ngtcp2 + * + * Copyright (c) 2024 ngtcp2 contributors + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#include "ngtcp2_settings.h" + +#include +#include + +#include "ngtcp2_unreachable.h" + +void ngtcp2_settings_default_versioned(int settings_version, + ngtcp2_settings *settings) { + size_t len = ngtcp2_settingslen_version(settings_version); + + memset(settings, 0, len); + + switch (settings_version) { + case NGTCP2_SETTINGS_VERSION: + case NGTCP2_SETTINGS_V1: + settings->cc_algo = NGTCP2_CC_ALGO_CUBIC; + settings->initial_rtt = NGTCP2_DEFAULT_INITIAL_RTT; + settings->ack_thresh = 2; + settings->max_tx_udp_payload_size = 1500 - 48; + settings->handshake_timeout = UINT64_MAX; + + break; + } +} + +static void settings_copy(ngtcp2_settings *dest, const ngtcp2_settings *src, + int settings_version) { + assert(settings_version != NGTCP2_SETTINGS_VERSION); + + memcpy(dest, src, ngtcp2_settingslen_version(settings_version)); +} + +const ngtcp2_settings * +ngtcp2_settings_convert_to_latest(ngtcp2_settings *dest, int settings_version, + const ngtcp2_settings *src) { + if (settings_version == NGTCP2_SETTINGS_VERSION) { + return src; + } + + ngtcp2_settings_default(dest); + + settings_copy(dest, src, settings_version); + + return dest; +} + +void ngtcp2_settings_convert_to_old(int settings_version, ngtcp2_settings *dest, + const ngtcp2_settings *src) { + assert(settings_version != NGTCP2_SETTINGS_VERSION); + + settings_copy(dest, src, settings_version); +} + +size_t ngtcp2_settingslen_version(int settings_version) { + ngtcp2_settings settings; + + switch (settings_version) { + case NGTCP2_SETTINGS_VERSION: + return sizeof(settings); + case NGTCP2_SETTINGS_V1: + return offsetof(ngtcp2_settings, initial_pkt_num) + + sizeof(settings.initial_pkt_num); + default: + ngtcp2_unreachable(); + } +} diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_settings.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_settings.h new file mode 100644 index 00000000000000..80466d43e47a7a --- /dev/null +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_settings.h @@ -0,0 +1,73 @@ +/* + * ngtcp2 + * + * Copyright (c) 2024 ngtcp2 contributors + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +#ifndef NGTCP2_SETTINGS_H +#define NGTCP2_SETTINGS_H + +#ifdef HAVE_CONFIG_H +# include +#endif /* defined(HAVE_CONFIG_H) */ + +#include + +/* + * ngtcp2_settings_convert_to_latest converts |src| of version + * |settings_version| to the latest version NGTCP2_SETTINGS_VERSION. + * + * |dest| must point to the latest version. |src| may be the older + * version, and if so, it may have fewer fields. 
Accessing those + * fields causes undefined behavior. + * + * If |settings_version| == NGTCP2_SETTINGS_VERSION, no conversion is + * made, and |src| is returned. Otherwise, first |dest| is + * initialized via ngtcp2_settings_default, and then all valid fields + * in |src| are copied into |dest|. Finally, |dest| is returned. + */ +const ngtcp2_settings * +ngtcp2_settings_convert_to_latest(ngtcp2_settings *dest, int settings_version, + const ngtcp2_settings *src); + +/* + * ngtcp2_settings_convert_to_old converts |src| of the latest version + * to |dest| of version |settings_version|. + * + * |settings_version| must not be the latest version + * NGTCP2_SETTINGS_VERSION. + * + * |dest| points to the older version, and it may have fewer fields. + * Accessing those fields causes undefined behavior. + * + * This function copies all valid fields in version |settings_version| + * from |src| to |dest|. + */ +void ngtcp2_settings_convert_to_old(int settings_version, ngtcp2_settings *dest, + const ngtcp2_settings *src); + +/* + * ngtcp2_settingslen_version returns the effective length of + * ngtcp2_settings at the version |settings_version|. + */ +size_t ngtcp2_settingslen_version(int settings_version); + +#endif /* !defined(NGTCP2_SETTINGS_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_str.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_str.h index deb75e356d70d4..f970c153e805a8 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_str.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_str.h @@ -27,7 +27,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -91,4 +91,4 @@ char *ngtcp2_encode_printable_ascii(char *dest, const uint8_t *data, */ int ngtcp2_cmemeq(const uint8_t *a, const uint8_t *b, size_t n); -#endif /* NGTCP2_STR_H */ +#endif /* !defined(NGTCP2_STR_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_strm.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_strm.c index c00e86fa8c1afa..8ea969c4addbdc 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_strm.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_strm.c @@ -32,16 +32,13 @@ #include "ngtcp2_vec.h" #include "ngtcp2_frame_chain.h" -static int offset_less(const ngtcp2_ksl_key *lhs, const ngtcp2_ksl_key *rhs) { - return *(int64_t *)lhs < *(int64_t *)rhs; -} - void ngtcp2_strm_init(ngtcp2_strm *strm, int64_t stream_id, uint32_t flags, uint64_t max_rx_offset, uint64_t max_tx_offset, void *stream_user_data, ngtcp2_objalloc *frc_objalloc, const ngtcp2_mem *mem) { - strm->frc_objalloc = frc_objalloc; + strm->pe.index = NGTCP2_PQ_BAD_INDEX; strm->cycle = 0; + strm->frc_objalloc = frc_objalloc; strm->tx.acked_offset = NULL; strm->tx.cont_acked_offset = 0; strm->tx.streamfrq = NULL; @@ -56,13 +53,12 @@ void ngtcp2_strm_init(ngtcp2_strm *strm, int64_t stream_id, uint32_t flags, strm->rx.rob = NULL; strm->rx.cont_offset = 0; strm->rx.last_offset = 0; + strm->rx.max_offset = strm->rx.unsent_max_offset = strm->rx.window = + max_rx_offset; + strm->mem = mem; strm->stream_id = stream_id; - strm->flags = flags; strm->stream_user_data = stream_user_data; - strm->rx.window = strm->rx.max_offset = strm->rx.unsent_max_offset = - max_rx_offset; - strm->pe.index = NGTCP2_PQ_BAD_INDEX; - strm->mem = mem; + strm->flags = flags; strm->app_error_code = 0; } @@ -114,7 +110,7 @@ static int strm_rob_init(ngtcp2_strm *strm) { return 0; } -uint64_t ngtcp2_strm_rx_offset(ngtcp2_strm *strm) { +uint64_t ngtcp2_strm_rx_offset(const ngtcp2_strm *strm) { if (strm->rx.rob == NULL) { return strm->rx.cont_offset; } @@ -123,7 +119,7 @@ uint64_t ngtcp2_strm_rx_offset(ngtcp2_strm *strm) { /* 
strm_rob_heavily_fragmented returns nonzero if the number of gaps in |rob| exceeds the limit. */ -static int strm_rob_heavily_fragmented(ngtcp2_rob *rob) { +static int strm_rob_heavily_fragmented(const ngtcp2_rob *rob) { return ngtcp2_ksl_len(&rob->gapksl) >= 5000; } @@ -180,7 +176,8 @@ static int strm_streamfrq_init(ngtcp2_strm *strm) { return NGTCP2_ERR_NOMEM; } - ngtcp2_ksl_init(streamfrq, offset_less, sizeof(uint64_t), strm->mem); + ngtcp2_ksl_init(streamfrq, ngtcp2_ksl_uint64_less, + ngtcp2_ksl_uint64_less_search, sizeof(uint64_t), strm->mem); strm->tx.streamfrq = streamfrq; @@ -271,6 +268,7 @@ static int strm_streamfrq_unacked_pop(ngtcp2_strm *strm, } ngtcp2_frame_chain_objalloc_del(frc, strm->frc_objalloc, strm->mem); + continue; } @@ -306,11 +304,12 @@ static int strm_streamfrq_unacked_pop(ngtcp2_strm *strm, fr->data[0].len -= (size_t)base_offset; *pfrc = frc; + return 0; } rv = ngtcp2_frame_chain_stream_datacnt_objalloc_new( - &nfrc, fr->datacnt - end_idx, strm->frc_objalloc, strm->mem); + &nfrc, fr->datacnt - end_idx, strm->frc_objalloc, strm->mem); if (rv != 0) { ngtcp2_frame_chain_objalloc_del(frc, strm->frc_objalloc, strm->mem); return rv; @@ -336,6 +335,7 @@ static int strm_streamfrq_unacked_pop(ngtcp2_strm *strm, assert(ngtcp2_err_is_fatal(rv)); ngtcp2_frame_chain_objalloc_del(nfrc, strm->frc_objalloc, strm->mem); ngtcp2_frame_chain_objalloc_del(frc, strm->frc_objalloc, strm->mem); + return rv; } @@ -350,14 +350,17 @@ static int strm_streamfrq_unacked_pop(ngtcp2_strm *strm, fr->fin = 0; fr->offset = offset + base_offset; fr->datacnt = end_idx - idx; + if (end_base_offset) { assert(fr->data[fr->datacnt - 1].len > end_base_offset); fr->data[fr->datacnt - 1].len = (size_t)end_base_offset; } + fr->data[0].base += base_offset; fr->data[0].len -= (size_t)base_offset; *pfrc = frc; + return 0; } @@ -367,7 +370,7 @@ static int strm_streamfrq_unacked_pop(ngtcp2_strm *strm, int ngtcp2_strm_streamfrq_pop(ngtcp2_strm *strm, ngtcp2_frame_chain **pfrc, size_t left) { ngtcp2_stream *fr, *nfr; - ngtcp2_frame_chain *frc, *nfrc; + ngtcp2_frame_chain *frc, *nfrc, *sfrc; int rv; size_t nmerged; uint64_t datalen; @@ -385,6 +388,7 @@ int ngtcp2_strm_streamfrq_pop(ngtcp2_strm *strm, ngtcp2_frame_chain **pfrc, if (rv != 0) { return rv; } + if (frc == NULL) { *pfrc = NULL; return 0; @@ -401,7 +405,9 @@ int ngtcp2_strm_streamfrq_pop(ngtcp2_strm *strm, ngtcp2_frame_chain **pfrc, ngtcp2_frame_chain_objalloc_del(frc, strm->frc_objalloc, strm->mem); return rv; } + *pfrc = NULL; + return 0; } @@ -410,13 +416,13 @@ int ngtcp2_strm_streamfrq_pop(ngtcp2_strm *strm, ngtcp2_frame_chain **pfrc, acnt = fr->datacnt; bcnt = 0; - ngtcp2_vec_split(a, &acnt, b, &bcnt, left, NGTCP2_MAX_STREAM_DATACNT); + ngtcp2_vec_split(b, &bcnt, a, &acnt, left, NGTCP2_MAX_STREAM_DATACNT); assert(acnt > 0); assert(bcnt > 0); rv = ngtcp2_frame_chain_stream_datacnt_objalloc_new( - &nfrc, bcnt, strm->frc_objalloc, strm->mem); + &nfrc, bcnt, strm->frc_objalloc, strm->mem); if (rv != 0) { assert(ngtcp2_err_is_fatal(rv)); ngtcp2_frame_chain_objalloc_del(frc, strm->frc_objalloc, strm->mem); @@ -437,11 +443,12 @@ int ngtcp2_strm_streamfrq_pop(ngtcp2_strm *strm, ngtcp2_frame_chain **pfrc, assert(ngtcp2_err_is_fatal(rv)); ngtcp2_frame_chain_objalloc_del(nfrc, strm->frc_objalloc, strm->mem); ngtcp2_frame_chain_objalloc_del(frc, strm->frc_objalloc, strm->mem); + return rv; } rv = ngtcp2_frame_chain_stream_datacnt_objalloc_new( - &nfrc, acnt, strm->frc_objalloc, strm->mem); + &nfrc, acnt, strm->frc_objalloc, strm->mem); if (rv != 0) { 
assert(ngtcp2_err_is_fatal(rv)); ngtcp2_frame_chain_objalloc_del(frc, strm->frc_objalloc, strm->mem); @@ -491,7 +498,9 @@ int ngtcp2_strm_streamfrq_pop(ngtcp2_strm *strm, ngtcp2_frame_chain **pfrc, break; } - nmerged = ngtcp2_vec_merge(a, &acnt, nfr->data, &nfr->datacnt, left, + bcnt = nfr->datacnt; + + nmerged = ngtcp2_vec_merge(a, &acnt, nfr->data, &bcnt, left, NGTCP2_MAX_STREAM_DATACNT); if (nmerged == 0) { rv = ngtcp2_ksl_insert(strm->tx.streamfrq, NULL, &nfr->offset, nfrc); @@ -499,27 +508,56 @@ int ngtcp2_strm_streamfrq_pop(ngtcp2_strm *strm, ngtcp2_frame_chain **pfrc, assert(ngtcp2_err_is_fatal(rv)); ngtcp2_frame_chain_objalloc_del(nfrc, strm->frc_objalloc, strm->mem); ngtcp2_frame_chain_objalloc_del(frc, strm->frc_objalloc, strm->mem); + return rv; } + break; } datalen += nmerged; left -= nmerged; - if (nfr->datacnt == 0) { + if (bcnt == 0) { fr->fin = nfr->fin; ngtcp2_frame_chain_objalloc_del(nfrc, strm->frc_objalloc, strm->mem); continue; } - nfr->offset += nmerged; + if (nfr->datacnt <= NGTCP2_FRAME_CHAIN_STREAM_DATACNT_THRES || + bcnt > NGTCP2_FRAME_CHAIN_STREAM_DATACNT_THRES) { + nfr->offset += nmerged; + nfr->datacnt = bcnt; + + rv = ngtcp2_ksl_insert(strm->tx.streamfrq, NULL, &nfr->offset, nfrc); + if (rv != 0) { + ngtcp2_frame_chain_objalloc_del(nfrc, strm->frc_objalloc, strm->mem); + ngtcp2_frame_chain_objalloc_del(frc, strm->frc_objalloc, strm->mem); + return rv; + } + } else { + rv = ngtcp2_frame_chain_stream_datacnt_objalloc_new( + &sfrc, bcnt, strm->frc_objalloc, strm->mem); + if (rv != 0) { + ngtcp2_frame_chain_objalloc_del(nfrc, strm->frc_objalloc, strm->mem); + ngtcp2_frame_chain_objalloc_del(frc, strm->frc_objalloc, strm->mem); + return rv; + } + + sfrc->fr.stream = nfrc->fr.stream; + sfrc->fr.stream.offset += nmerged; + sfrc->fr.stream.datacnt = bcnt; + ngtcp2_vec_copy(sfrc->fr.stream.data, nfrc->fr.stream.data, bcnt); - rv = ngtcp2_ksl_insert(strm->tx.streamfrq, NULL, &nfr->offset, nfrc); - if (rv != 0) { ngtcp2_frame_chain_objalloc_del(nfrc, strm->frc_objalloc, strm->mem); - ngtcp2_frame_chain_objalloc_del(frc, strm->frc_objalloc, strm->mem); - return rv; + + rv = ngtcp2_ksl_insert(strm->tx.streamfrq, NULL, &sfrc->fr.stream.offset, + sfrc); + if (rv != 0) { + ngtcp2_frame_chain_objalloc_del(sfrc, strm->frc_objalloc, strm->mem); + ngtcp2_frame_chain_objalloc_del(frc, strm->frc_objalloc, strm->mem); + return rv; + } } break; @@ -531,13 +569,14 @@ int ngtcp2_strm_streamfrq_pop(ngtcp2_strm *strm, ngtcp2_frame_chain **pfrc, } *pfrc = frc; + return 0; } assert(acnt > fr->datacnt); rv = ngtcp2_frame_chain_stream_datacnt_objalloc_new( - &nfrc, acnt, strm->frc_objalloc, strm->mem); + &nfrc, acnt, strm->frc_objalloc, strm->mem); if (rv != 0) { ngtcp2_frame_chain_objalloc_del(frc, strm->frc_objalloc, strm->mem); return rv; @@ -555,7 +594,7 @@ int ngtcp2_strm_streamfrq_pop(ngtcp2_strm *strm, ngtcp2_frame_chain **pfrc, return 0; } -uint64_t ngtcp2_strm_streamfrq_unacked_offset(ngtcp2_strm *strm) { +uint64_t ngtcp2_strm_streamfrq_unacked_offset(const ngtcp2_strm *strm) { ngtcp2_frame_chain *frc; ngtcp2_stream *fr; ngtcp2_range gap; @@ -577,9 +616,11 @@ uint64_t ngtcp2_strm_streamfrq_unacked_offset(ngtcp2_strm *strm) { if (gap.begin <= fr->offset) { return fr->offset; } + if (gap.begin < fr->offset + datalen) { return gap.begin; } + if (fr->offset + datalen == gap.begin && fr->fin && !(strm->flags & NGTCP2_STRM_FLAG_FIN_ACKED)) { return fr->offset + datalen; @@ -589,17 +630,18 @@ uint64_t ngtcp2_strm_streamfrq_unacked_offset(ngtcp2_strm *strm) { return (uint64_t)-1; } 
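/* An illustration of the cases above (hypothetical values): for a
   queued frame covering [100, 400), if the first unacknowledged gap
   begins at or before offset 100, the whole frame is unacked and 100
   is returned; if the gap begins at 250, the frame is partially
   acknowledged and 250 is returned; and if the gap begins exactly at
   400 while the frame carries a fin that has not been acknowledged,
   400 is returned so that the fin bit itself gets retransmitted. */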
-ngtcp2_frame_chain *ngtcp2_strm_streamfrq_top(ngtcp2_strm *strm) { +ngtcp2_frame_chain *ngtcp2_strm_streamfrq_top(const ngtcp2_strm *strm) { ngtcp2_ksl_it it; assert(strm->tx.streamfrq); assert(ngtcp2_ksl_len(strm->tx.streamfrq)); it = ngtcp2_ksl_begin(strm->tx.streamfrq); + return ngtcp2_ksl_it_get(&it); } -int ngtcp2_strm_streamfrq_empty(ngtcp2_strm *strm) { +int ngtcp2_strm_streamfrq_empty(const ngtcp2_strm *strm) { return strm->tx.streamfrq == NULL || ngtcp2_ksl_len(strm->tx.streamfrq) == 0; } @@ -616,14 +658,15 @@ void ngtcp2_strm_streamfrq_clear(ngtcp2_strm *strm) { frc = ngtcp2_ksl_it_get(&it); ngtcp2_frame_chain_objalloc_del(frc, strm->frc_objalloc, strm->mem); } + ngtcp2_ksl_clear(strm->tx.streamfrq); } -int ngtcp2_strm_is_tx_queued(ngtcp2_strm *strm) { +int ngtcp2_strm_is_tx_queued(const ngtcp2_strm *strm) { return strm->pe.index != NGTCP2_PQ_BAD_INDEX; } -int ngtcp2_strm_is_all_tx_data_acked(ngtcp2_strm *strm) { +int ngtcp2_strm_is_all_tx_data_acked(const ngtcp2_strm *strm) { if (strm->tx.acked_offset == NULL) { return strm->tx.cont_acked_offset == strm->tx.offset; } @@ -632,12 +675,12 @@ int ngtcp2_strm_is_all_tx_data_acked(ngtcp2_strm *strm) { strm->tx.offset; } -int ngtcp2_strm_is_all_tx_data_fin_acked(ngtcp2_strm *strm) { +int ngtcp2_strm_is_all_tx_data_fin_acked(const ngtcp2_strm *strm) { return (strm->flags & NGTCP2_STRM_FLAG_FIN_ACKED) && ngtcp2_strm_is_all_tx_data_acked(strm); } -ngtcp2_range ngtcp2_strm_get_unacked_range_after(ngtcp2_strm *strm, +ngtcp2_range ngtcp2_strm_get_unacked_range_after(const ngtcp2_strm *strm, uint64_t offset) { ngtcp2_range gap; @@ -650,7 +693,7 @@ ngtcp2_range ngtcp2_strm_get_unacked_range_after(ngtcp2_strm *strm, return ngtcp2_gaptr_get_first_gap_after(strm->tx.acked_offset, offset); } -uint64_t ngtcp2_strm_get_acked_offset(ngtcp2_strm *strm) { +uint64_t ngtcp2_strm_get_acked_offset(const ngtcp2_strm *strm) { if (strm->tx.acked_offset == NULL) { return strm->tx.cont_acked_offset; } @@ -660,7 +703,7 @@ uint64_t ngtcp2_strm_get_acked_offset(ngtcp2_strm *strm) { static int strm_acked_offset_init(ngtcp2_strm *strm) { ngtcp2_gaptr *acked_offset = - ngtcp2_mem_malloc(strm->mem, sizeof(*acked_offset)); + ngtcp2_mem_malloc(strm->mem, sizeof(*acked_offset)); if (acked_offset == NULL) { return NGTCP2_ERR_NOMEM; @@ -688,7 +731,7 @@ int ngtcp2_strm_ack_data(ngtcp2_strm *strm, uint64_t offset, uint64_t len) { } rv = - ngtcp2_gaptr_push(strm->tx.acked_offset, 0, strm->tx.cont_acked_offset); + ngtcp2_gaptr_push(strm->tx.acked_offset, 0, strm->tx.cont_acked_offset); if (rv != 0) { return rv; } @@ -709,24 +752,24 @@ void ngtcp2_strm_set_app_error_code(ngtcp2_strm *strm, strm->app_error_code = app_error_code; } -int ngtcp2_strm_require_retransmit_reset_stream(ngtcp2_strm *strm) { +int ngtcp2_strm_require_retransmit_reset_stream(const ngtcp2_strm *strm) { return !ngtcp2_strm_is_all_tx_data_fin_acked(strm); } -int ngtcp2_strm_require_retransmit_stop_sending(ngtcp2_strm *strm) { +int ngtcp2_strm_require_retransmit_stop_sending(const ngtcp2_strm *strm) { return !(strm->flags & NGTCP2_STRM_FLAG_SHUT_RD) || ngtcp2_strm_rx_offset(strm) != strm->rx.last_offset; } -int ngtcp2_strm_require_retransmit_max_stream_data(ngtcp2_strm *strm, - ngtcp2_max_stream_data *fr) { +int ngtcp2_strm_require_retransmit_max_stream_data( + const ngtcp2_strm *strm, const ngtcp2_max_stream_data *fr) { return fr->max_stream_data == strm->rx.max_offset && !(strm->flags & (NGTCP2_STRM_FLAG_SHUT_RD | NGTCP2_STRM_FLAG_STOP_SENDING)); } int ngtcp2_strm_require_retransmit_stream_data_blocked( - 
ngtcp2_strm *strm, ngtcp2_stream_data_blocked *fr) { + const ngtcp2_strm *strm, const ngtcp2_stream_data_blocked *fr) { return fr->offset == strm->tx.max_offset && !(strm->flags & NGTCP2_STRM_FLAG_SHUT_WR); } diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_strm.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_strm.h index 385302a5eafa9f..c72f8b9dc89aca 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_strm.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_strm.h @@ -27,7 +27,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -86,6 +86,9 @@ typedef struct ngtcp2_frame_chain ngtcp2_frame_chain; received from the remote endpoint. In this case, NGTCP2_STRM_FLAG_SHUT_WR is also set. */ #define NGTCP2_STRM_FLAG_STOP_SENDING_RECVED 0x800u +/* NGTCP2_STRM_FLAG_ANY_SENT indicates that any STREAM frame, + including empty one, has been sent. */ +#define NGTCP2_STRM_FLAG_ANY_SENT 0x1000u typedef struct ngtcp2_strm ngtcp2_strm; @@ -105,13 +108,13 @@ struct ngtcp2_strm { acked_offset is used instead. */ uint64_t cont_acked_offset; /* streamfrq contains STREAM or CRYPTO frame for - retransmission. The flow control credits have been paid - when they are transmitted first time. There are no + retransmission. The flow control credits have already been + paid when they are transmitted first time. There are no restriction regarding flow control for retransmission. */ ngtcp2_ksl *streamfrq; - /* offset is the next offset of outgoing data. In other words, it - is the number of bytes sent in this stream without - duplication. */ + /* offset is the next offset of new outgoing data. In other + words, it is the number of bytes sent in this stream + without duplication. */ uint64_t offset; /* max_tx_offset is the maximum offset that local endpoint can send for this stream. */ @@ -200,7 +203,7 @@ void ngtcp2_strm_free(ngtcp2_strm *strm); * ngtcp2_strm_rx_offset returns the minimum offset of stream data * which is not received yet. */ -uint64_t ngtcp2_strm_rx_offset(ngtcp2_strm *strm); +uint64_t ngtcp2_strm_rx_offset(const ngtcp2_strm *strm); /* * ngtcp2_strm_recv_reordering handles reordered data. @@ -215,8 +218,8 @@ int ngtcp2_strm_recv_reordering(ngtcp2_strm *strm, const uint8_t *data, size_t datalen, uint64_t offset); /* - * ngtcp2_strm_update_rx_offset tells that data up to offset bytes are - * received in order. + * ngtcp2_strm_update_rx_offset tells that data up to |offset| bytes + * are received in order. */ void ngtcp2_strm_update_rx_offset(ngtcp2_strm *strm, uint64_t offset); @@ -227,13 +230,14 @@ void ngtcp2_strm_update_rx_offset(ngtcp2_strm *strm, uint64_t offset); void ngtcp2_strm_discard_reordered_data(ngtcp2_strm *strm); /* - * ngtcp2_strm_shutdown shutdowns |strm|. |flags| should be - * NGTCP2_STRM_FLAG_SHUT_RD, and/or NGTCP2_STRM_FLAG_SHUT_WR. + * ngtcp2_strm_shutdown shutdowns |strm|. |flags| should be one of + * NGTCP2_STRM_FLAG_SHUT_RD, NGTCP2_STRM_FLAG_SHUT_WR, and + * NGTCP2_STRM_FLAG_SHUT_RDWR. */ void ngtcp2_strm_shutdown(ngtcp2_strm *strm, uint32_t flags); /* - * ngtcp2_strm_streamfrq_push pushes |frc| to streamfrq for + * ngtcp2_strm_streamfrq_push pushes |frc| to strm->tx.streamfrq for * retransmission. * * This function returns 0 if it succeeds, or one of the following @@ -245,11 +249,12 @@ void ngtcp2_strm_shutdown(ngtcp2_strm *strm, uint32_t flags); int ngtcp2_strm_streamfrq_push(ngtcp2_strm *strm, ngtcp2_frame_chain *frc); /* - * ngtcp2_strm_streamfrq_pop pops the first ngtcp2_frame_chain and - * assigns it to |*pfrc|. 
This function splits into or merges several - * ngtcp2_frame_chain objects so that the returned ngtcp2_frame_chain - * has at most |left| data length. If there is no frames to send, - * this function returns 0 and |*pfrc| is NULL. + * ngtcp2_strm_streamfrq_pop assigns a ngtcp2_frame_chain that only + * contains unacknowledged stream data with smallest offset to |*pfrc| + * for retransmission. The assigned ngtcp2_frame_chain has stream + * data at most |left| bytes. strm->tx.streamfrq is adjusted to + * exclude the portion of data included in it. If there is no stream + * data to send, this function returns 0 and |*pfrc| is NULL. * * This function returns 0 if it succeeds, or one of the following * negative error codes: @@ -264,18 +269,18 @@ int ngtcp2_strm_streamfrq_pop(ngtcp2_strm *strm, ngtcp2_frame_chain **pfrc, * ngtcp2_strm_streamfrq_unacked_offset returns the smallest offset of * unacknowledged stream data held in strm->tx.streamfrq. */ -uint64_t ngtcp2_strm_streamfrq_unacked_offset(ngtcp2_strm *strm); +uint64_t ngtcp2_strm_streamfrq_unacked_offset(const ngtcp2_strm *strm); /* * ngtcp2_strm_streamfrq_top returns the first ngtcp2_frame_chain. * The queue must not be empty. */ -ngtcp2_frame_chain *ngtcp2_strm_streamfrq_top(ngtcp2_strm *strm); +ngtcp2_frame_chain *ngtcp2_strm_streamfrq_top(const ngtcp2_strm *strm); /* * ngtcp2_strm_streamfrq_empty returns nonzero if streamfrq is empty. */ -int ngtcp2_strm_streamfrq_empty(ngtcp2_strm *strm); +int ngtcp2_strm_streamfrq_empty(const ngtcp2_strm *strm); /* * ngtcp2_strm_streamfrq_clear removes all frames from streamfrq. @@ -285,26 +290,26 @@ void ngtcp2_strm_streamfrq_clear(ngtcp2_strm *strm); /* * ngtcp2_strm_is_tx_queued returns nonzero if |strm| is queued. */ -int ngtcp2_strm_is_tx_queued(ngtcp2_strm *strm); +int ngtcp2_strm_is_tx_queued(const ngtcp2_strm *strm); /* * ngtcp2_strm_is_all_tx_data_acked returns nonzero if all outgoing * data for |strm| which have sent so far have been acknowledged. */ -int ngtcp2_strm_is_all_tx_data_acked(ngtcp2_strm *strm); +int ngtcp2_strm_is_all_tx_data_acked(const ngtcp2_strm *strm); /* * ngtcp2_strm_is_all_tx_data_fin_acked behaves like * ngtcp2_strm_is_all_tx_data_acked, but it also requires that STREAM * frame with fin bit set is acknowledged. */ -int ngtcp2_strm_is_all_tx_data_fin_acked(ngtcp2_strm *strm); +int ngtcp2_strm_is_all_tx_data_fin_acked(const ngtcp2_strm *strm); /* * ngtcp2_strm_get_unacked_range_after returns the range that is not - * acknowledged yet and intersects or comes after |offset|. + * acknowledged yet and includes or comes after |offset|. */ -ngtcp2_range ngtcp2_strm_get_unacked_range_after(ngtcp2_strm *strm, +ngtcp2_range ngtcp2_strm_get_unacked_range_after(const ngtcp2_strm *strm, uint64_t offset); /* @@ -312,11 +317,11 @@ ngtcp2_range ngtcp2_strm_get_unacked_range_after(ngtcp2_strm *strm, * this offset have been acknowledged by a remote endpoint. It * returns 0 if no data is acknowledged. */ -uint64_t ngtcp2_strm_get_acked_offset(ngtcp2_strm *strm); +uint64_t ngtcp2_strm_get_acked_offset(const ngtcp2_strm *strm); /* - * ngtcp2_strm_ack_data tells |strm| that the data [offset, - * offset+len) is acknowledged by a remote endpoint. + * ngtcp2_strm_ack_data tells |strm| that the data [|offset|, |offset| + * + |len|) is acknowledged by a remote endpoint. 
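 *
 * For example (hypothetical offsets), ngtcp2_strm_ack_data(strm, 0,
 * 100) followed by ngtcp2_strm_ack_data(strm, 200, 50) leaves
 * ngtcp2_strm_get_acked_offset(strm) at 100, because [100, 200) is
 * still unacknowledged; a later ngtcp2_strm_ack_data(strm, 100, 100)
 * closes the gap and advances the acked offset to 250.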
*/ int ngtcp2_strm_ack_data(ngtcp2_strm *strm, uint64_t offset, uint64_t len); @@ -331,26 +336,26 @@ void ngtcp2_strm_set_app_error_code(ngtcp2_strm *strm, uint64_t app_error_code); * ngtcp2_strm_require_retransmit_reset_stream returns nonzero if * RESET_STREAM frame should be retransmitted. */ -int ngtcp2_strm_require_retransmit_reset_stream(ngtcp2_strm *strm); +int ngtcp2_strm_require_retransmit_reset_stream(const ngtcp2_strm *strm); /* * ngtcp2_strm_require_retransmit_stop_sending returns nonzero if * STOP_SENDING frame should be retransmitted. */ -int ngtcp2_strm_require_retransmit_stop_sending(ngtcp2_strm *strm); +int ngtcp2_strm_require_retransmit_stop_sending(const ngtcp2_strm *strm); /* * ngtcp2_strm_require_retransmit_max_stream_data returns nonzero if * MAX_STREAM_DATA frame should be retransmitted. */ -int ngtcp2_strm_require_retransmit_max_stream_data(ngtcp2_strm *strm, - ngtcp2_max_stream_data *fr); +int ngtcp2_strm_require_retransmit_max_stream_data( + const ngtcp2_strm *strm, const ngtcp2_max_stream_data *fr); /* * ngtcp2_strm_require_retransmit_stream_data_blocked returns nonzero * if STREAM_DATA_BLOCKED frame frame should be retransmitted. */ int ngtcp2_strm_require_retransmit_stream_data_blocked( - ngtcp2_strm *strm, ngtcp2_stream_data_blocked *fr); + const ngtcp2_strm *strm, const ngtcp2_stream_data_blocked *fr); -#endif /* NGTCP2_STRM_H */ +#endif /* !defined(NGTCP2_STRM_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_transport_params.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_transport_params.c new file mode 100644 index 00000000000000..dda59c48858185 --- /dev/null +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_transport_params.c @@ -0,0 +1,886 @@ +/* + * ngtcp2 + * + * Copyright (c) 2023 ngtcp2 contributors + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#include "ngtcp2_transport_params.h" + +#include +#include + +#include "ngtcp2_conv.h" +#include "ngtcp2_str.h" +#include "ngtcp2_mem.h" +#include "ngtcp2_unreachable.h" + +void ngtcp2_transport_params_default_versioned( + int transport_params_version, ngtcp2_transport_params *params) { + size_t len; + + switch (transport_params_version) { + case NGTCP2_TRANSPORT_PARAMS_VERSION: + len = sizeof(*params); + + break; + default: + ngtcp2_unreachable(); + } + + memset(params, 0, len); + + switch (transport_params_version) { + case NGTCP2_TRANSPORT_PARAMS_VERSION: + params->max_udp_payload_size = NGTCP2_DEFAULT_MAX_RECV_UDP_PAYLOAD_SIZE; + params->active_connection_id_limit = + NGTCP2_DEFAULT_ACTIVE_CONNECTION_ID_LIMIT; + params->ack_delay_exponent = NGTCP2_DEFAULT_ACK_DELAY_EXPONENT; + params->max_ack_delay = NGTCP2_DEFAULT_MAX_ACK_DELAY; + + break; + } +} + +/* + * varint_paramlen returns the length of a single transport parameter + * which has variable integer in its parameter. + */ +static size_t varint_paramlen(ngtcp2_transport_param_id id, uint64_t param) { + size_t valuelen = ngtcp2_put_uvarintlen(param); + return ngtcp2_put_uvarintlen(id) + ngtcp2_put_uvarintlen(valuelen) + valuelen; +} + +/* + * write_varint_param writes parameter |id| of the given |value| in + * varint encoding. It returns p + the number of bytes written. + */ +static uint8_t *write_varint_param(uint8_t *p, ngtcp2_transport_param_id id, + uint64_t value) { + p = ngtcp2_put_uvarint(p, id); + p = ngtcp2_put_uvarint(p, ngtcp2_put_uvarintlen(value)); + return ngtcp2_put_uvarint(p, value); +} + +/* + * zero_paramlen returns the length of a single transport parameter + * which has zero length value in its parameter. + */ +static size_t zero_paramlen(ngtcp2_transport_param_id id) { + return ngtcp2_put_uvarintlen(id) + 1; +} + +/* + * write_zero_param writes parameter |id| that has zero length value. + * It returns p + the number of bytes written. + */ +static uint8_t *write_zero_param(uint8_t *p, ngtcp2_transport_param_id id) { + p = ngtcp2_put_uvarint(p, id); + *p++ = 0; + + return p; +} + +/* + * cid_paramlen returns the length of a single transport parameter + * which has |cid| as value. + */ +static size_t cid_paramlen(ngtcp2_transport_param_id id, + const ngtcp2_cid *cid) { + return ngtcp2_put_uvarintlen(id) + ngtcp2_put_uvarintlen(cid->datalen) + + cid->datalen; +} + +/* + * write_cid_param writes parameter |id| of the given |cid|. It + * returns p + the number of bytes written. + */ +static uint8_t *write_cid_param(uint8_t *p, ngtcp2_transport_param_id id, + const ngtcp2_cid *cid) { + assert(cid->datalen == 0 || cid->datalen >= NGTCP2_MIN_CIDLEN); + assert(cid->datalen <= NGTCP2_MAX_CIDLEN); + + p = ngtcp2_put_uvarint(p, id); + p = ngtcp2_put_uvarint(p, cid->datalen); + if (cid->datalen) { + p = ngtcp2_cpymem(p, cid->data, cid->datalen); + } + return p; +} + +static const uint8_t empty_address[16]; + +ngtcp2_ssize ngtcp2_transport_params_encode_versioned( + uint8_t *dest, size_t destlen, int transport_params_version, + const ngtcp2_transport_params *params) { + uint8_t *p; + size_t len = 0; + /* For some reason, gcc 7.3.0 requires this initialization. 
*/ + size_t preferred_addrlen = 0; + size_t version_infolen = 0; + const ngtcp2_sockaddr_in *sa_in; + const ngtcp2_sockaddr_in6 *sa_in6; + ngtcp2_transport_params paramsbuf; + + params = ngtcp2_transport_params_convert_to_latest( + ¶msbuf, transport_params_version, params); + + if (params->original_dcid_present) { + len += + cid_paramlen(NGTCP2_TRANSPORT_PARAM_ORIGINAL_DESTINATION_CONNECTION_ID, + ¶ms->original_dcid); + } + + if (params->stateless_reset_token_present) { + len += ngtcp2_put_uvarintlen(NGTCP2_TRANSPORT_PARAM_STATELESS_RESET_TOKEN) + + ngtcp2_put_uvarintlen(NGTCP2_STATELESS_RESET_TOKENLEN) + + NGTCP2_STATELESS_RESET_TOKENLEN; + } + + if (params->preferred_addr_present) { + assert(params->preferred_addr.cid.datalen >= NGTCP2_MIN_CIDLEN); + assert(params->preferred_addr.cid.datalen <= NGTCP2_MAX_CIDLEN); + preferred_addrlen = 4 /* ipv4Address */ + 2 /* ipv4Port */ + + 16 /* ipv6Address */ + 2 /* ipv6Port */ + + 1 + params->preferred_addr.cid.datalen /* CID */ + + NGTCP2_STATELESS_RESET_TOKENLEN; + len += ngtcp2_put_uvarintlen(NGTCP2_TRANSPORT_PARAM_PREFERRED_ADDRESS) + + ngtcp2_put_uvarintlen(preferred_addrlen) + preferred_addrlen; + } + if (params->retry_scid_present) { + len += cid_paramlen(NGTCP2_TRANSPORT_PARAM_RETRY_SOURCE_CONNECTION_ID, + ¶ms->retry_scid); + } + + if (params->initial_scid_present) { + len += cid_paramlen(NGTCP2_TRANSPORT_PARAM_INITIAL_SOURCE_CONNECTION_ID, + ¶ms->initial_scid); + } + + if (params->initial_max_stream_data_bidi_local) { + len += + varint_paramlen(NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAM_DATA_BIDI_LOCAL, + params->initial_max_stream_data_bidi_local); + } + if (params->initial_max_stream_data_bidi_remote) { + len += varint_paramlen( + NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAM_DATA_BIDI_REMOTE, + params->initial_max_stream_data_bidi_remote); + } + if (params->initial_max_stream_data_uni) { + len += varint_paramlen(NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAM_DATA_UNI, + params->initial_max_stream_data_uni); + } + if (params->initial_max_data) { + len += varint_paramlen(NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_DATA, + params->initial_max_data); + } + if (params->initial_max_streams_bidi) { + len += varint_paramlen(NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAMS_BIDI, + params->initial_max_streams_bidi); + } + if (params->initial_max_streams_uni) { + len += varint_paramlen(NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAMS_UNI, + params->initial_max_streams_uni); + } + if (params->max_udp_payload_size != + NGTCP2_DEFAULT_MAX_RECV_UDP_PAYLOAD_SIZE) { + len += varint_paramlen(NGTCP2_TRANSPORT_PARAM_MAX_UDP_PAYLOAD_SIZE, + params->max_udp_payload_size); + } + if (params->ack_delay_exponent != NGTCP2_DEFAULT_ACK_DELAY_EXPONENT) { + len += varint_paramlen(NGTCP2_TRANSPORT_PARAM_ACK_DELAY_EXPONENT, + params->ack_delay_exponent); + } + if (params->disable_active_migration) { + len += zero_paramlen(NGTCP2_TRANSPORT_PARAM_DISABLE_ACTIVE_MIGRATION); + } + if (params->max_ack_delay != NGTCP2_DEFAULT_MAX_ACK_DELAY) { + len += varint_paramlen(NGTCP2_TRANSPORT_PARAM_MAX_ACK_DELAY, + params->max_ack_delay / NGTCP2_MILLISECONDS); + } + if (params->max_idle_timeout) { + len += varint_paramlen(NGTCP2_TRANSPORT_PARAM_MAX_IDLE_TIMEOUT, + params->max_idle_timeout / NGTCP2_MILLISECONDS); + } + if (params->active_connection_id_limit && + params->active_connection_id_limit != + NGTCP2_DEFAULT_ACTIVE_CONNECTION_ID_LIMIT) { + len += varint_paramlen(NGTCP2_TRANSPORT_PARAM_ACTIVE_CONNECTION_ID_LIMIT, + params->active_connection_id_limit); + } + if (params->max_datagram_frame_size) { + len += 
varint_paramlen(NGTCP2_TRANSPORT_PARAM_MAX_DATAGRAM_FRAME_SIZE, + params->max_datagram_frame_size); + } + if (params->grease_quic_bit) { + len += zero_paramlen(NGTCP2_TRANSPORT_PARAM_GREASE_QUIC_BIT); + } + if (params->version_info_present) { + version_infolen = + sizeof(uint32_t) + params->version_info.available_versionslen; + len += ngtcp2_put_uvarintlen(NGTCP2_TRANSPORT_PARAM_VERSION_INFORMATION) + + ngtcp2_put_uvarintlen(version_infolen) + version_infolen; + } + + if (dest == NULL && destlen == 0) { + return (ngtcp2_ssize)len; + } + + if (destlen < len) { + return NGTCP2_ERR_NOBUF; + } + + p = dest; + + if (params->original_dcid_present) { + p = write_cid_param( + p, NGTCP2_TRANSPORT_PARAM_ORIGINAL_DESTINATION_CONNECTION_ID, + ¶ms->original_dcid); + } + + if (params->stateless_reset_token_present) { + p = ngtcp2_put_uvarint(p, NGTCP2_TRANSPORT_PARAM_STATELESS_RESET_TOKEN); + p = ngtcp2_put_uvarint(p, sizeof(params->stateless_reset_token)); + p = ngtcp2_cpymem(p, params->stateless_reset_token, + sizeof(params->stateless_reset_token)); + } + + if (params->preferred_addr_present) { + p = ngtcp2_put_uvarint(p, NGTCP2_TRANSPORT_PARAM_PREFERRED_ADDRESS); + p = ngtcp2_put_uvarint(p, preferred_addrlen); + + if (params->preferred_addr.ipv4_present) { + sa_in = ¶ms->preferred_addr.ipv4; + p = ngtcp2_cpymem(p, &sa_in->sin_addr, sizeof(sa_in->sin_addr)); + p = ngtcp2_put_uint16(p, sa_in->sin_port); + } else { + p = ngtcp2_cpymem(p, empty_address, sizeof(sa_in->sin_addr)); + p = ngtcp2_put_uint16(p, 0); + } + + if (params->preferred_addr.ipv6_present) { + sa_in6 = ¶ms->preferred_addr.ipv6; + p = ngtcp2_cpymem(p, &sa_in6->sin6_addr, sizeof(sa_in6->sin6_addr)); + p = ngtcp2_put_uint16(p, sa_in6->sin6_port); + } else { + p = ngtcp2_cpymem(p, empty_address, sizeof(sa_in6->sin6_addr)); + p = ngtcp2_put_uint16(p, 0); + } + + *p++ = (uint8_t)params->preferred_addr.cid.datalen; + if (params->preferred_addr.cid.datalen) { + p = ngtcp2_cpymem(p, params->preferred_addr.cid.data, + params->preferred_addr.cid.datalen); + } + p = ngtcp2_cpymem(p, params->preferred_addr.stateless_reset_token, + sizeof(params->preferred_addr.stateless_reset_token)); + } + + if (params->retry_scid_present) { + p = write_cid_param(p, NGTCP2_TRANSPORT_PARAM_RETRY_SOURCE_CONNECTION_ID, + ¶ms->retry_scid); + } + + if (params->initial_scid_present) { + p = write_cid_param(p, NGTCP2_TRANSPORT_PARAM_INITIAL_SOURCE_CONNECTION_ID, + ¶ms->initial_scid); + } + + if (params->initial_max_stream_data_bidi_local) { + p = write_varint_param( + p, NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAM_DATA_BIDI_LOCAL, + params->initial_max_stream_data_bidi_local); + } + + if (params->initial_max_stream_data_bidi_remote) { + p = write_varint_param( + p, NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAM_DATA_BIDI_REMOTE, + params->initial_max_stream_data_bidi_remote); + } + + if (params->initial_max_stream_data_uni) { + p = + write_varint_param(p, NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAM_DATA_UNI, + params->initial_max_stream_data_uni); + } + + if (params->initial_max_data) { + p = write_varint_param(p, NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_DATA, + params->initial_max_data); + } + + if (params->initial_max_streams_bidi) { + p = write_varint_param(p, NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAMS_BIDI, + params->initial_max_streams_bidi); + } + + if (params->initial_max_streams_uni) { + p = write_varint_param(p, NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAMS_UNI, + params->initial_max_streams_uni); + } + + if (params->max_udp_payload_size != + 
+
+/*
+ * decode_varint decodes a single varint from the buffer pointed to
+ * by |*pp| of length |end - *pp|. If it decodes an integer
+ * successfully, it stores the integer in |*pdest|, increments |*pp|
+ * by the number of bytes read from |*pp|, and returns 0. Otherwise
+ * it returns -1.
+ */
+static int decode_varint(uint64_t *pdest, const uint8_t **pp,
+                         const uint8_t *end) {
+  const uint8_t *p = *pp;
+  size_t len;
+
+  if (p == end) {
+    return -1;
+  }
+
+  len = ngtcp2_get_uvarintlen(p);
+  if ((uint64_t)(end - p) < len) {
+    return -1;
+  }
+
+  *pp = ngtcp2_get_uvarint(pdest, p);
+
+  return 0;
+}
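[Editorial note: decode_varint relies on the QUIC variable-length integer encoding (RFC 9000, Section 16): the two most significant bits of the first byte select a 1-, 2-, 4-, or 8-byte integer, and the remaining bits hold the value in network byte order. That is what ngtcp2_get_uvarintlen and ngtcp2_get_uvarint implement; a self-contained sketch of the rule itself, for illustration only.]

#include <stddef.h>
#include <stdint.h>

/* Length of a QUIC varint: prefix 00 -> 1, 01 -> 2, 10 -> 4, 11 -> 8. */
static size_t quic_uvarintlen(const uint8_t *p) {
  return (size_t)1 << (*p >> 6);
}

/* Decode a QUIC varint; the caller must have verified that the buffer
   holds quic_uvarintlen(p) bytes, exactly as decode_varint does. */
static uint64_t quic_get_uvarint(const uint8_t *p) {
  size_t i, n = quic_uvarintlen(p);
  uint64_t v = *p & 0x3f; /* strip the two length bits */

  for (i = 1; i < n; ++i) {
    v = (v << 8) | p[i];
  }

  return v;
}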
+
+/*
+ * decode_varint_param decodes a length prefixed value from the
+ * buffer pointed to by |*pp| of length |end - *pp|. The length and
+ * value are encoded in varint form. If it decodes a value
+ * successfully, it stores the value in |*pdest|, increments |*pp| by
+ * the number of bytes read from |*pp|, and returns 0. Otherwise it
+ * returns -1.
+ */
+static int decode_varint_param(uint64_t *pdest, const uint8_t **pp,
+                               const uint8_t *end) {
+  const uint8_t *p = *pp;
+  uint64_t valuelen;
+
+  if (decode_varint(&valuelen, &p, end) != 0) {
+    return -1;
+  }
+
+  if (p == end) {
+    return -1;
+  }
+
+  if ((uint64_t)(end - p) < valuelen) {
+    return -1;
+  }
+
+  if (ngtcp2_get_uvarintlen(p) != valuelen) {
+    return -1;
+  }
+
+  *pp = ngtcp2_get_uvarint(pdest, p);
+
+  return 0;
+}
+
+/*
+ * decode_zero_param decodes a zero length value from the buffer
+ * pointed to by |*pp| of length |end - *pp|. The length is encoded
+ * in varint form. If it decodes a zero length value successfully, it
+ * increments |*pp| by 1 and returns 0. Otherwise it returns -1.
+ */
+static int decode_zero_param(const uint8_t **pp, const uint8_t *end) {
+  if (*pp == end || **pp != 0) {
+    return -1;
+  }
+
+  ++*pp;
+
+  return 0;
+}
+
+/*
+ * decode_cid_param decodes a length prefixed ngtcp2_cid from the
+ * buffer pointed to by |*pp| of length |end - *pp|. The length is
+ * encoded in varint form. If it decodes a value successfully, it
+ * stores the value in |*pdest|, increments |*pp| by the number of
+ * bytes read from |*pp|, and returns 0. Otherwise it returns one of
+ * the following negative error codes:
+ *
+ * NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM
+ *     Could not decode Connection ID.
+ */
+static int decode_cid_param(ngtcp2_cid *pdest, const uint8_t **pp,
+                            const uint8_t *end) {
+  const uint8_t *p = *pp;
+  uint64_t valuelen;
+
+  if (decode_varint(&valuelen, &p, end) != 0) {
+    return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM;
+  }
+
+  if ((valuelen != 0 && valuelen < NGTCP2_MIN_CIDLEN) ||
+      valuelen > NGTCP2_MAX_CIDLEN || (size_t)(end - p) < valuelen) {
+    return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM;
+  }
+
+  ngtcp2_cid_init(pdest, p, (size_t)valuelen);
+
+  p += valuelen;
+
+  *pp = p;
+
+  return 0;
+}
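[Editorial note: every transport parameter is a (type, length, value) triple of varints, and for integer-valued parameters decode_varint_param additionally requires the declared length to equal the length of the single varint it contains. A hand-encoded example of a well-formed parameter with an arbitrarily chosen value; illustration only, not part of the patch.]

#include <stdint.h>

/* initial_max_data (type 0x04) carrying the value 1000:
 *   0x04      - parameter type, 1-byte varint
 *   0x02      - value length, 1-byte varint (two bytes follow)
 *   0x43 0xe8 - 1000 as a 2-byte varint (prefix 01, then 0x03e8)
 * decode_varint_param accepts this; declaring length 3 around the
 * same 2-byte varint would be rejected as malformed. */
static const uint8_t example_initial_max_data[] = {0x04, 0x02, 0x43, 0xe8};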
+
+int ngtcp2_transport_params_decode_versioned(int transport_params_version,
+                                             ngtcp2_transport_params *dest,
+                                             const uint8_t *data,
+                                             size_t datalen) {
+  const uint8_t *p, *end, *lend;
+  size_t len;
+  uint64_t param_type;
+  uint64_t valuelen;
+  int rv;
+  ngtcp2_sockaddr_in *sa_in;
+  ngtcp2_sockaddr_in6 *sa_in6;
+  uint32_t version;
+  ngtcp2_transport_params *params, paramsbuf;
+
+  if (transport_params_version == NGTCP2_TRANSPORT_PARAMS_VERSION) {
+    params = dest;
+  } else {
+    params = &paramsbuf;
+  }
+
+  /* Set default values */
+  memset(params, 0, sizeof(*params));
+  params->max_udp_payload_size = NGTCP2_DEFAULT_MAX_RECV_UDP_PAYLOAD_SIZE;
+  params->ack_delay_exponent = NGTCP2_DEFAULT_ACK_DELAY_EXPONENT;
+  params->max_ack_delay = NGTCP2_DEFAULT_MAX_ACK_DELAY;
+  params->active_connection_id_limit =
+    NGTCP2_DEFAULT_ACTIVE_CONNECTION_ID_LIMIT;
+
+  p = data;
+  end = data + datalen;
+
+  for (; (size_t)(end - p) >= 2;) {
+    if (decode_varint(&param_type, &p, end) != 0) {
+      return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM;
+    }
+
+    switch (param_type) {
+    case NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAM_DATA_BIDI_LOCAL:
+      if (decode_varint_param(&params->initial_max_stream_data_bidi_local, &p,
+                              end) != 0) {
+        return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM;
+      }
+      break;
+    case NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAM_DATA_BIDI_REMOTE:
+      if (decode_varint_param(&params->initial_max_stream_data_bidi_remote, &p,
+                              end) != 0) {
+        return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM;
+      }
+      break;
+    case NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAM_DATA_UNI:
+      if (decode_varint_param(&params->initial_max_stream_data_uni, &p, end) !=
+          0) {
+        return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM;
+      }
+      break;
+    case NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_DATA:
+      if (decode_varint_param(&params->initial_max_data, &p, end) != 0) {
+        return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM;
+      }
+      break;
+    case NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAMS_BIDI:
+      if (decode_varint_param(&params->initial_max_streams_bidi, &p, end) !=
+          0) {
+        return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM;
+      }
+      if (params->initial_max_streams_bidi > NGTCP2_MAX_STREAMS) {
+        return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM;
+      }
+      break;
+    case NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAMS_UNI:
+      if (decode_varint_param(&params->initial_max_streams_uni, &p, end) != 0) {
+        return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM;
+      }
+      if (params->initial_max_streams_uni > NGTCP2_MAX_STREAMS) {
+        return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM;
+      }
+      break;
+    case NGTCP2_TRANSPORT_PARAM_MAX_IDLE_TIMEOUT:
+      if (decode_varint_param(&params->max_idle_timeout, &p, end) != 0) {
+        return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM;
+      }
+      params->max_idle_timeout *= NGTCP2_MILLISECONDS;
+      break;
+    case NGTCP2_TRANSPORT_PARAM_MAX_UDP_PAYLOAD_SIZE:
+      if (decode_varint_param(&params->max_udp_payload_size, &p, end) != 0) {
+        return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM;
+      }
+      break;
+    case NGTCP2_TRANSPORT_PARAM_STATELESS_RESET_TOKEN:
+      if (decode_varint(&valuelen, &p, end) != 0) {
+        return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM;
+      }
+      if ((size_t)valuelen != sizeof(params->stateless_reset_token)) {
+        return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM;
+      }
+      if ((size_t)(end - p) < sizeof(params->stateless_reset_token)) {
+        return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM;
+      }
+
+      p = ngtcp2_get_bytes(params->stateless_reset_token, p,
+                           sizeof(params->stateless_reset_token));
+      params->stateless_reset_token_present = 1;
+
+      break;
+    case NGTCP2_TRANSPORT_PARAM_ACK_DELAY_EXPONENT:
+      if (decode_varint_param(&params->ack_delay_exponent, &p, end) != 0) {
+        return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM;
+      }
+      if (params->ack_delay_exponent > 20) {
+        return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM;
+      }
+      break;
+    case NGTCP2_TRANSPORT_PARAM_PREFERRED_ADDRESS:
+      if (decode_varint(&valuelen, &p, end) != 0) {
+        return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM;
+      }
+      if ((size_t)(end - p) < valuelen) {
+        return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM;
+      }
+      len = 4 /* ipv4Address */ + 2 /* ipv4Port */ + 16 /* ipv6Address */ +
+            2 /* ipv6Port */ + 1 /* cid length */ +
+            NGTCP2_STATELESS_RESET_TOKENLEN;
+      if (valuelen < len) {
+        return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM;
+      }
+
+      sa_in = &params->preferred_addr.ipv4;
+
+      p = ngtcp2_get_bytes(&sa_in->sin_addr, p, sizeof(sa_in->sin_addr));
+      p = ngtcp2_get_uint16(&sa_in->sin_port, p);
+
+      if (sa_in->sin_port || memcmp(empty_address, &sa_in->sin_addr,
+                                    sizeof(sa_in->sin_addr)) != 0) {
+        sa_in->sin_family = NGTCP2_AF_INET;
+        params->preferred_addr.ipv4_present = 1;
+      }
+
+      sa_in6 = &params->preferred_addr.ipv6;
+
+      p = ngtcp2_get_bytes(&sa_in6->sin6_addr, p, sizeof(sa_in6->sin6_addr));
+      p = ngtcp2_get_uint16(&sa_in6->sin6_port, p);
+
+      if (sa_in6->sin6_port || memcmp(empty_address, &sa_in6->sin6_addr,
+                                      sizeof(sa_in6->sin6_addr)) != 0) {
+        sa_in6->sin6_family = NGTCP2_AF_INET6;
+        params->preferred_addr.ipv6_present = 1;
+      }
+
+      /* cid */
+      params->preferred_addr.cid.datalen = *p++;
+      len += params->preferred_addr.cid.datalen;
+      if (valuelen != len ||
+          params->preferred_addr.cid.datalen > NGTCP2_MAX_CIDLEN ||
+          params->preferred_addr.cid.datalen < NGTCP2_MIN_CIDLEN) {
+        return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM;
+      }
+      if (params->preferred_addr.cid.datalen) {
+        p = ngtcp2_get_bytes(params->preferred_addr.cid.data, p,
+                             params->preferred_addr.cid.datalen);
+      }
+
+      /* stateless reset token */
+      p =
+        ngtcp2_get_bytes(params->preferred_addr.stateless_reset_token, p,
+                         sizeof(params->preferred_addr.stateless_reset_token));
+      params->preferred_addr_present = 1;
+      break;
+    case NGTCP2_TRANSPORT_PARAM_DISABLE_ACTIVE_MIGRATION:
+      if (decode_zero_param(&p, end) != 0) {
+        return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM;
+      }
+      params->disable_active_migration = 1;
+      break;
+    case NGTCP2_TRANSPORT_PARAM_ORIGINAL_DESTINATION_CONNECTION_ID:
+      rv = decode_cid_param(&params->original_dcid, &p, end);
+      if (rv != 0) {
+        return rv;
+      }
+      params->original_dcid_present = 1;
+      break;
+    case NGTCP2_TRANSPORT_PARAM_RETRY_SOURCE_CONNECTION_ID:
+      rv = decode_cid_param(&params->retry_scid, &p, end);
+      if (rv != 0) {
+        return rv;
+      }
+      params->retry_scid_present = 1;
+      break;
+    case NGTCP2_TRANSPORT_PARAM_INITIAL_SOURCE_CONNECTION_ID:
+      rv = decode_cid_param(&params->initial_scid, &p, end);
+      if (rv != 0) {
+        return rv;
+      }
+      params->initial_scid_present = 1;
+      break;
+    case NGTCP2_TRANSPORT_PARAM_MAX_ACK_DELAY:
+      if (decode_varint_param(&params->max_ack_delay, &p, end) != 0) {
+        return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM;
+      }
+      if (params->max_ack_delay >= 16384) {
+        return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM;
+      }
+      params->max_ack_delay *= NGTCP2_MILLISECONDS;
+      break;
+    case NGTCP2_TRANSPORT_PARAM_ACTIVE_CONNECTION_ID_LIMIT:
+      if (decode_varint_param(&params->active_connection_id_limit, &p, end) !=
+          0) {
+        return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM;
+      }
+      break;
+    case NGTCP2_TRANSPORT_PARAM_MAX_DATAGRAM_FRAME_SIZE:
+      if (decode_varint_param(&params->max_datagram_frame_size, &p, end) != 0) {
+        return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM;
+      }
+      break;
+    case NGTCP2_TRANSPORT_PARAM_GREASE_QUIC_BIT:
+      if (decode_zero_param(&p, end) != 0) {
+        return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM;
+      }
+      params->grease_quic_bit = 1;
+      break;
+    case NGTCP2_TRANSPORT_PARAM_VERSION_INFORMATION:
+      if (decode_varint(&valuelen, &p, end) != 0) {
+        return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM;
+      }
+      if ((size_t)(end - p) < valuelen) {
+        return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM;
+      }
+      if (valuelen < sizeof(uint32_t) || (valuelen & 0x3)) {
+        return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM;
+      }
+      p = ngtcp2_get_uint32be(&params->version_info.chosen_version, p);
+      if (params->version_info.chosen_version == 0) {
+        return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM;
+      }
+      if (valuelen > sizeof(uint32_t)) {
+        params->version_info.available_versions = (uint8_t *)p;
+        params->version_info.available_versionslen =
+          (size_t)valuelen - sizeof(uint32_t);
+
+        for (lend = p + (valuelen - sizeof(uint32_t)); p != lend;) {
+          p = ngtcp2_get_uint32be(&version, p);
+          if (version == 0) {
+            return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM;
+          }
+        }
+      }
+      params->version_info_present = 1;
+      break;
+    default:
+      /* Ignore unknown parameter */
+      if (decode_varint(&valuelen, &p, end) != 0) {
+        return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM;
+      }
+      if ((size_t)(end - p) < valuelen) {
+        return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM;
+      }
+      p += valuelen;
+      break;
+    }
+  }
+
+  if (end - p != 0) {
+    return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM;
+  }
+
+  if (transport_params_version != NGTCP2_TRANSPORT_PARAMS_VERSION) {
+    ngtcp2_transport_params_convert_to_old(transport_params_version, dest,
+                                           params);
+  }
+
+  return 0;
+}
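[Editorial note: on the receive side the whole serialized extension is parsed in one call through the public wrapper, which ngtcp2_transport_params_decode_new below also uses internally. A hypothetical caller sketch, not part of the patch.]

#include <ngtcp2/ngtcp2.h>

/* Illustrative sketch, not part of the patch. */
static int parse_peer_params(const uint8_t *data, size_t datalen,
                             ngtcp2_transport_params *out) {
  int rv = ngtcp2_transport_params_decode(out, data, datalen);

  if (rv != 0) {
    return rv; /* e.g. NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM */
  }

  /* Parameters the peer omitted already carry their defaults
     (max_udp_payload_size, ack_delay_exponent, max_ack_delay,
     active_connection_id_limit). */
  return 0;
}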
+
+static int transport_params_copy_new(ngtcp2_transport_params **pdest,
+                                     const ngtcp2_transport_params *src,
+                                     const ngtcp2_mem *mem) {
+  size_t len = sizeof(**pdest);
+  ngtcp2_transport_params *dest;
+  uint8_t *p;
+
+  if (src->version_info_present) {
+    len += src->version_info.available_versionslen;
+  }
+
+  dest = ngtcp2_mem_malloc(mem, len);
+  if (dest == NULL) {
+    return NGTCP2_ERR_NOMEM;
+  }
+
+  *dest = *src;
+
+  if (src->version_info_present && src->version_info.available_versionslen) {
+    p = (uint8_t *)dest + sizeof(*dest);
+    memcpy(p, src->version_info.available_versions,
+           src->version_info.available_versionslen);
+    dest->version_info.available_versions = p;
+  }
+
+  *pdest = dest;
+
+  return 0;
+}
+
+int ngtcp2_transport_params_decode_new(ngtcp2_transport_params **pparams,
+                                       const uint8_t *data, size_t datalen,
+                                       const ngtcp2_mem *mem) {
+  int rv;
+  ngtcp2_transport_params params;
+
+  rv = ngtcp2_transport_params_decode(&params, data, datalen);
+  if (rv < 0) {
+    return rv;
+  }
+
+  if (mem == NULL) {
+    mem = ngtcp2_mem_default();
+  }
+
+  return transport_params_copy_new(pparams, &params, mem);
+}
+
+void ngtcp2_transport_params_del(ngtcp2_transport_params *params,
+                                 const ngtcp2_mem *mem) {
+  if (params == NULL) {
+    return;
+  }
+
+  if (mem == NULL) {
+    mem = ngtcp2_mem_default();
+  }
+
+  ngtcp2_mem_free(mem, params);
+}
+
+int ngtcp2_transport_params_copy_new(ngtcp2_transport_params **pdest,
+                                     const ngtcp2_transport_params *src,
+                                     const ngtcp2_mem *mem) {
+  if (src == NULL) {
+    *pdest = NULL;
+    return 0;
+  }
+
+  return transport_params_copy_new(pdest, src, mem);
+}
+
+static void transport_params_copy(ngtcp2_transport_params *dest,
+                                  const ngtcp2_transport_params *src,
+                                  int transport_params_version) {
+  assert(transport_params_version != NGTCP2_TRANSPORT_PARAMS_VERSION);
+
+  switch (transport_params_version) {
+  case NGTCP2_TRANSPORT_PARAMS_V1:
+    memcpy(dest, src,
+           offsetof(ngtcp2_transport_params, version_info_present) +
+             sizeof(src->version_info_present));
+
+    break;
+  }
+}
+
+const ngtcp2_transport_params *
+ngtcp2_transport_params_convert_to_latest(ngtcp2_transport_params *dest,
+                                          int transport_params_version,
+                                          const ngtcp2_transport_params *src) {
+  if (transport_params_version == NGTCP2_TRANSPORT_PARAMS_VERSION) {
+    return src;
+  }
+
+  ngtcp2_transport_params_default(dest);
+
+  transport_params_copy(dest, src, transport_params_version);
+
+  return dest;
+}
+
+void ngtcp2_transport_params_convert_to_old(
+  int transport_params_version, ngtcp2_transport_params *dest,
+  const ngtcp2_transport_params *src) {
+  assert(transport_params_version != NGTCP2_TRANSPORT_PARAMS_VERSION);
+
+  transport_params_copy(dest, src, transport_params_version);
+}
diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_conversion.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_transport_params.h
similarity index 55%
rename from deps/ngtcp2/ngtcp2/lib/ngtcp2_conversion.h
rename to deps/ngtcp2/ngtcp2/lib/ngtcp2_transport_params.h
index 3457a8f2053aba..c077f06a9dd717 100644
--- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_conversion.h
+++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_transport_params.h
@@ -22,15 +22,62 @@
  * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  */
-#ifndef NGTCP2_CONVERSION_H
-#define NGTCP2_CONVERSION_H
+#ifndef NGTCP2_TRANSPORT_PARAMS_H
+#define NGTCP2_TRANSPORT_PARAMS_H
 
 #ifdef HAVE_CONFIG_H
 #  include <config.h>
-#endif /* HAVE_CONFIG_H */
+#endif /* defined(HAVE_CONFIG_H) */
 
 #include <ngtcp2/ngtcp2.h>
 
+/* ngtcp2_transport_param_id is the registry of QUIC transport
+   parameter ID.
*/ +typedef uint64_t ngtcp2_transport_param_id; + +#define NGTCP2_TRANSPORT_PARAM_ORIGINAL_DESTINATION_CONNECTION_ID 0x00 +#define NGTCP2_TRANSPORT_PARAM_MAX_IDLE_TIMEOUT 0x01 +#define NGTCP2_TRANSPORT_PARAM_STATELESS_RESET_TOKEN 0x02 +#define NGTCP2_TRANSPORT_PARAM_MAX_UDP_PAYLOAD_SIZE 0x03 +#define NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_DATA 0x04 +#define NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAM_DATA_BIDI_LOCAL 0x05 +#define NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAM_DATA_BIDI_REMOTE 0x06 +#define NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAM_DATA_UNI 0x07 +#define NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAMS_BIDI 0x08 +#define NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAMS_UNI 0x09 +#define NGTCP2_TRANSPORT_PARAM_ACK_DELAY_EXPONENT 0x0a +#define NGTCP2_TRANSPORT_PARAM_MAX_ACK_DELAY 0x0b +#define NGTCP2_TRANSPORT_PARAM_DISABLE_ACTIVE_MIGRATION 0x0c +#define NGTCP2_TRANSPORT_PARAM_PREFERRED_ADDRESS 0x0d +#define NGTCP2_TRANSPORT_PARAM_ACTIVE_CONNECTION_ID_LIMIT 0x0e +#define NGTCP2_TRANSPORT_PARAM_INITIAL_SOURCE_CONNECTION_ID 0x0f +#define NGTCP2_TRANSPORT_PARAM_RETRY_SOURCE_CONNECTION_ID 0x10 +/* https://datatracker.ietf.org/doc/html/rfc9221 */ +#define NGTCP2_TRANSPORT_PARAM_MAX_DATAGRAM_FRAME_SIZE 0x20 +#define NGTCP2_TRANSPORT_PARAM_GREASE_QUIC_BIT 0x2ab2 +/* https://datatracker.ietf.org/doc/html/rfc9368 */ +#define NGTCP2_TRANSPORT_PARAM_VERSION_INFORMATION 0x11 + +/* NGTCP2_MAX_STREAMS is the maximum number of streams. */ +#define NGTCP2_MAX_STREAMS (1LL << 60) + +/* + * ngtcp2_transport_params_copy_new makes a copy of |src|, and assigns + * it to |*pdest|. If |src| is NULL, NULL is assigned to |*pdest|. + * + * Caller is responsible to call ngtcp2_transport_params_del to free + * the memory assigned to |*pdest|. + * + * This function returns 0 if it succeeds, or one of the following + * negative error codes: + * + * NGTCP2_ERR_NOMEM + * Out of memory. 
+ */ +int ngtcp2_transport_params_copy_new(ngtcp2_transport_params **pdest, + const ngtcp2_transport_params *src, + const ngtcp2_mem *mem); + /* * ngtcp2_transport_params_convert_to_latest converts |src| of version * |transport_params_version| to the latest version @@ -68,4 +115,4 @@ void ngtcp2_transport_params_convert_to_old(int transport_params_version, ngtcp2_transport_params *dest, const ngtcp2_transport_params *src); -#endif /* NGTCP2_CONVERSION_H */ +#endif /* !defined(NGTCP2_TRANSPORT_PARAMS_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_tstamp.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_tstamp.h index 9a210a320dc1ca..2b1bb51d87edc6 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_tstamp.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_tstamp.h @@ -27,7 +27,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -65,4 +65,4 @@ static inline int ngtcp2_tstamp_not_elapsed(ngtcp2_tstamp base, return base != UINT64_MAX && (base >= UINT64_MAX - d || base + d > ts); } -#endif /* NGTCP2_TSTAMP_H */ +#endif /* !defined(NGTCP2_TSTAMP_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_unreachable.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_unreachable.c index 7c7d9ae78e914d..5ab1db72cc5384 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_unreachable.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_unreachable.c @@ -26,20 +26,22 @@ #include #include +#include #ifdef HAVE_UNISTD_H +# define NGTCP2_UNREACHABLE_LOG # include -#endif /* HAVE_UNISTD_H */ -#include -#ifdef WIN32 +#elif defined(WIN32) +# define NGTCP2_UNREACHABLE_LOG # include -#endif /* WIN32 */ +#endif /* defined(WIN32) */ void ngtcp2_unreachable_fail(const char *file, int line, const char *func) { +#ifdef NGTCP2_UNREACHABLE_LOG char *buf; size_t buflen; int rv; -#define NGTCP2_UNREACHABLE_TEMPLATE "%s:%d %s: Unreachable.\n" +# define NGTCP2_UNREACHABLE_TEMPLATE "%s:%d %s: Unreachable.\n" rv = snprintf(NULL, 0, NGTCP2_UNREACHABLE_TEMPLATE, file, line, func); if (rv < 0) { @@ -58,14 +60,15 @@ void ngtcp2_unreachable_fail(const char *file, int line, const char *func) { abort(); } -#ifndef WIN32 +# ifndef WIN32 while (write(STDERR_FILENO, buf, (size_t)rv) == -1 && errno == EINTR) ; -#else /* WIN32 */ +# else /* defined(WIN32) */ _write(_fileno(stderr), buf, (unsigned int)rv); -#endif /* WIN32 */ +# endif /* defined(WIN32) */ free(buf); +#endif /* defined(NGTCP2_UNREACHABLE_LOG) */ abort(); } diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_unreachable.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_unreachable.h index a5276fd505463f..ca7123865279f9 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_unreachable.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_unreachable.h @@ -27,26 +27,26 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include #ifdef __FILE_NAME__ # define NGTCP2_FILE_NAME __FILE_NAME__ -#else /* !__FILE_NAME__ */ +#else /* !defined(__FILE_NAME__) */ # define NGTCP2_FILE_NAME "(file)" -#endif /* !__FILE_NAME__ */ +#endif /* !defined(__FILE_NAME__) */ #define ngtcp2_unreachable() \ ngtcp2_unreachable_fail(NGTCP2_FILE_NAME, __LINE__, __func__) #ifdef _MSC_VER __declspec(noreturn) -#endif /* _MSC_VER */ +#endif /* defined(_MSC_VER) */ void ngtcp2_unreachable_fail(const char *file, int line, const char *func) #ifndef _MSC_VER __attribute__((noreturn)) -#endif /* !_MSC_VER */ +#endif /* !defined(_MSC_VER) */ ; -#endif /* NGTCP2_UNREACHABLE_H */ +#endif /* !defined(NGTCP2_UNREACHABLE_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_vec.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_vec.c index 
dbc7b668042695..0b9c92d47d7d88 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_vec.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_vec.c @@ -50,6 +50,7 @@ int ngtcp2_vec_new(ngtcp2_vec **pvec, const uint8_t *data, size_t datalen, p = (uint8_t *)(*pvec) + sizeof(ngtcp2_vec); (*pvec)->base = p; (*pvec)->len = datalen; + if (datalen) { /* p = */ ngtcp2_cpymem(p, data, datalen); } @@ -89,8 +90,8 @@ int64_t ngtcp2_vec_len_varint(const ngtcp2_vec *vec, size_t n) { return (int64_t)res; } -ngtcp2_ssize ngtcp2_vec_split(ngtcp2_vec *src, size_t *psrccnt, ngtcp2_vec *dst, - size_t *pdstcnt, size_t left, size_t maxcnt) { +ngtcp2_ssize ngtcp2_vec_split(ngtcp2_vec *dst, size_t *pdstcnt, ngtcp2_vec *src, + size_t *psrccnt, size_t left, size_t maxcnt) { size_t i; size_t srccnt = *psrccnt; size_t nmove; @@ -203,6 +204,7 @@ size_t ngtcp2_vec_merge(ngtcp2_vec *dst, size_t *pdstcnt, ngtcp2_vec *src, } else { dst[(*pdstcnt)++] = *b; } + left -= b->len; } @@ -222,11 +224,14 @@ size_t ngtcp2_vec_copy_at_most(ngtcp2_vec *dst, size_t dstcnt, ++i; continue; } + dst[j] = src[i]; + if (dst[j].len > left) { dst[j].len = left; return j + 1; } + left -= dst[j].len; ++i; ++j; diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_vec.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_vec.h index a39c4392fd2627..f7611efcb7d6ce 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_vec.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_vec.h @@ -27,7 +27,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -74,19 +74,21 @@ uint64_t ngtcp2_vec_len(const ngtcp2_vec *vec, size_t n); int64_t ngtcp2_vec_len_varint(const ngtcp2_vec *vec, size_t n); /* - * ngtcp2_vec_split splits |src| to |dst| so that the sum of the - * length in |src| does not exceed |left| bytes. The |maxcnt| is the - * maximum number of elements which |dst| array can contain. The - * caller must set |*psrccnt| to the number of elements of |src|. - * Similarly, the caller must set |*pdstcnt| to the number of elements - * of |dst|. The split does not necessarily occur at the boundary of - * ngtcp2_vec object. After split has done, this function updates - * |*psrccnt| and |*pdstcnt|. This function returns the number of - * bytes moved from |src| to |dst|. If split cannot be made because - * doing so exceeds |maxcnt|, this function returns -1. + * ngtcp2_vec_split splits |src| at the data position where + * ngtcp2_vec_len(|src|) does not exceed |left| bytes. The removed + * vectors are moved to |dst|. The existing elements in |dst| are + * moved forward to make up a space. The |maxcnt| is the maximum + * number of elements which |dst| array can contain. The caller must + * set |*psrccnt| to the number of elements of |src|. Similarly, the + * caller must set |*pdstcnt| to the number of elements of |dst|. The + * split does not necessarily occur at the boundary of ngtcp2_vec + * object. After split has done, this function updates |*psrccnt| and + * |*pdstcnt|. This function returns the number of bytes moved from + * |src| to |dst|. If split cannot be made because doing so exceeds + * |maxcnt|, this function returns -1. 
*/ -ngtcp2_ssize ngtcp2_vec_split(ngtcp2_vec *src, size_t *psrccnt, ngtcp2_vec *dst, - size_t *pdstcnt, size_t left, size_t maxcnt); +ngtcp2_ssize ngtcp2_vec_split(ngtcp2_vec *dst, size_t *pdstcnt, ngtcp2_vec *src, + size_t *psrccnt, size_t left, size_t maxcnt); /* * ngtcp2_vec_merge merges |src| into |dst| by moving at most |left| @@ -117,4 +119,4 @@ size_t ngtcp2_vec_copy_at_most(ngtcp2_vec *dst, size_t dstcnt, */ void ngtcp2_vec_copy(ngtcp2_vec *dst, const ngtcp2_vec *src, size_t cnt); -#endif /* NGTCP2_VEC_H */ +#endif /* !defined(NGTCP2_VEC_H) */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_version.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_version.c index b31162c3ebe0d7..d2557e9ef8a580 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_version.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_version.c @@ -24,7 +24,7 @@ */ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_window_filter.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_window_filter.h index 50415f10b8c37b..c90a9fdb9078da 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_window_filter.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_window_filter.h @@ -37,7 +37,7 @@ #ifdef HAVE_CONFIG_H # include -#endif /* HAVE_CONFIG_H */ +#endif /* defined(HAVE_CONFIG_H) */ #include @@ -62,4 +62,4 @@ void ngtcp2_window_filter_reset(ngtcp2_window_filter *wf, uint64_t new_sample, uint64_t ngtcp2_window_filter_get_best(ngtcp2_window_filter *wf); -#endif /* NGTCP2_WINDOW_FILTER_H */ +#endif /* !defined(NGTCP2_WINDOW_FILTER_H) */ diff --git a/deps/npm/docs/content/commands/npm-ls.md b/deps/npm/docs/content/commands/npm-ls.md index 3abedf9cd8e4e6..69bc86f85f3508 100644 --- a/deps/npm/docs/content/commands/npm-ls.md +++ b/deps/npm/docs/content/commands/npm-ls.md @@ -27,7 +27,7 @@ packages will *also* show the paths to the specified packages. For example, running `npm ls promzard` in npm's source tree will show: ```bash -npm@10.9.0 /path/to/npm +npm@10.9.2 /path/to/npm └─┬ init-package-json@0.0.4 └── promzard@0.1.5 ``` diff --git a/deps/npm/docs/content/commands/npm.md b/deps/npm/docs/content/commands/npm.md index d92892ec544eb9..029b9fa7631544 100644 --- a/deps/npm/docs/content/commands/npm.md +++ b/deps/npm/docs/content/commands/npm.md @@ -14,7 +14,7 @@ Note: This command is unaware of workspaces. ### Version -10.9.0 +10.9.2 ### Description diff --git a/deps/npm/docs/output/commands/npm-access.html b/deps/npm/docs/output/commands/npm-access.html index 0a839663fcb016..9ce3c00e3525d8 100644 --- a/deps/npm/docs/output/commands/npm-access.html +++ b/deps/npm/docs/output/commands/npm-access.html @@ -141,9 +141,9 @@
-npm-access - @10.9.0
+npm-access - @10.9.2

Set access level on published packages
diff --git a/deps/npm/docs/output/commands/npm-adduser.html b/deps/npm/docs/output/commands/npm-adduser.html index 2ac615ad13978d..5f6627229d2c8b 100644 --- a/deps/npm/docs/output/commands/npm-adduser.html +++ b/deps/npm/docs/output/commands/npm-adduser.html @@ -141,9 +141,9 @@
-npm-adduser - @10.9.0
+npm-adduser - @10.9.2

Add a registry user account
diff --git a/deps/npm/docs/output/commands/npm-audit.html b/deps/npm/docs/output/commands/npm-audit.html index d7374ccfa66089..a8934d172e3602 100644 --- a/deps/npm/docs/output/commands/npm-audit.html +++ b/deps/npm/docs/output/commands/npm-audit.html @@ -141,9 +141,9 @@
-npm-audit - @10.9.0
+npm-audit - @10.9.2

Run a security audit
diff --git a/deps/npm/docs/output/commands/npm-bugs.html b/deps/npm/docs/output/commands/npm-bugs.html index 9186bc100ae8f6..3ec74a05673259 100644 --- a/deps/npm/docs/output/commands/npm-bugs.html +++ b/deps/npm/docs/output/commands/npm-bugs.html @@ -141,9 +141,9 @@
-npm-bugs - @10.9.0
+npm-bugs - @10.9.2

Report bugs for a package in a web browser
diff --git a/deps/npm/docs/output/commands/npm-cache.html b/deps/npm/docs/output/commands/npm-cache.html index 55cec217dbdfb7..0a4fd00a276a47 100644 --- a/deps/npm/docs/output/commands/npm-cache.html +++ b/deps/npm/docs/output/commands/npm-cache.html @@ -141,9 +141,9 @@
-npm-cache - @10.9.0
+npm-cache - @10.9.2

Manipulates packages cache
diff --git a/deps/npm/docs/output/commands/npm-ci.html b/deps/npm/docs/output/commands/npm-ci.html index efe858777565a9..3af67dc55c81db 100644 --- a/deps/npm/docs/output/commands/npm-ci.html +++ b/deps/npm/docs/output/commands/npm-ci.html @@ -141,9 +141,9 @@
-npm-ci - @10.9.0
+npm-ci - @10.9.2

Clean install a project
diff --git a/deps/npm/docs/output/commands/npm-completion.html b/deps/npm/docs/output/commands/npm-completion.html index 4a91a94498f33b..3f8b509e4ec8d6 100644 --- a/deps/npm/docs/output/commands/npm-completion.html +++ b/deps/npm/docs/output/commands/npm-completion.html @@ -141,9 +141,9 @@
-npm-completion - @10.9.0
+npm-completion - @10.9.2

Tab Completion for npm
diff --git a/deps/npm/docs/output/commands/npm-config.html b/deps/npm/docs/output/commands/npm-config.html index d18998fea8471d..c9b9a2c7550bcc 100644 --- a/deps/npm/docs/output/commands/npm-config.html +++ b/deps/npm/docs/output/commands/npm-config.html @@ -141,9 +141,9 @@
-npm-config - @10.9.0
+npm-config - @10.9.2

Manage the npm configuration files
diff --git a/deps/npm/docs/output/commands/npm-dedupe.html b/deps/npm/docs/output/commands/npm-dedupe.html index 194ea085383df3..0aa8bf5a5bde77 100644 --- a/deps/npm/docs/output/commands/npm-dedupe.html +++ b/deps/npm/docs/output/commands/npm-dedupe.html @@ -141,9 +141,9 @@
-npm-dedupe - @10.9.0
+npm-dedupe - @10.9.2

Reduce duplication in the package tree
diff --git a/deps/npm/docs/output/commands/npm-deprecate.html b/deps/npm/docs/output/commands/npm-deprecate.html index ae40adfbbab051..0019583ee2135a 100644 --- a/deps/npm/docs/output/commands/npm-deprecate.html +++ b/deps/npm/docs/output/commands/npm-deprecate.html @@ -141,9 +141,9 @@
-npm-deprecate - @10.9.0
+npm-deprecate - @10.9.2

Deprecate a version of a package
diff --git a/deps/npm/docs/output/commands/npm-diff.html b/deps/npm/docs/output/commands/npm-diff.html index 257b1c21572083..fe2123ee60fdc8 100644 --- a/deps/npm/docs/output/commands/npm-diff.html +++ b/deps/npm/docs/output/commands/npm-diff.html @@ -141,9 +141,9 @@
-npm-diff - @10.9.0
+npm-diff - @10.9.2

The registry diff command
diff --git a/deps/npm/docs/output/commands/npm-dist-tag.html b/deps/npm/docs/output/commands/npm-dist-tag.html index 08e7770a0c1745..dce3f752ba4eaa 100644 --- a/deps/npm/docs/output/commands/npm-dist-tag.html +++ b/deps/npm/docs/output/commands/npm-dist-tag.html @@ -141,9 +141,9 @@
-npm-dist-tag - @10.9.0
+npm-dist-tag - @10.9.2

Modify package distribution tags
diff --git a/deps/npm/docs/output/commands/npm-docs.html b/deps/npm/docs/output/commands/npm-docs.html index 8b647251f36740..caef5afe3b1bd4 100644 --- a/deps/npm/docs/output/commands/npm-docs.html +++ b/deps/npm/docs/output/commands/npm-docs.html @@ -141,9 +141,9 @@
-npm-docs - @10.9.0
+npm-docs - @10.9.2

Open documentation for a package in a web browser
diff --git a/deps/npm/docs/output/commands/npm-doctor.html b/deps/npm/docs/output/commands/npm-doctor.html index 110ec2f9b20bf0..d9f7a71450ab75 100644 --- a/deps/npm/docs/output/commands/npm-doctor.html +++ b/deps/npm/docs/output/commands/npm-doctor.html @@ -141,9 +141,9 @@
-npm-doctor - @10.9.0
+npm-doctor - @10.9.2

Check the health of your npm environment
diff --git a/deps/npm/docs/output/commands/npm-edit.html b/deps/npm/docs/output/commands/npm-edit.html index 9e4f7b361a5cb6..ab835ddcd352f9 100644 --- a/deps/npm/docs/output/commands/npm-edit.html +++ b/deps/npm/docs/output/commands/npm-edit.html @@ -141,9 +141,9 @@
-npm-edit - @10.9.0
+npm-edit - @10.9.2

Edit an installed package
diff --git a/deps/npm/docs/output/commands/npm-exec.html b/deps/npm/docs/output/commands/npm-exec.html index 695fa35ab825c2..b5b8f66bdbb54f 100644 --- a/deps/npm/docs/output/commands/npm-exec.html +++ b/deps/npm/docs/output/commands/npm-exec.html @@ -141,9 +141,9 @@
-npm-exec - @10.9.0
+npm-exec - @10.9.2

Run a command from a local or remote npm package
diff --git a/deps/npm/docs/output/commands/npm-explain.html b/deps/npm/docs/output/commands/npm-explain.html index e79255ff5fa6c6..812ce85f73a130 100644 --- a/deps/npm/docs/output/commands/npm-explain.html +++ b/deps/npm/docs/output/commands/npm-explain.html @@ -141,9 +141,9 @@
-npm-explain - @10.9.0
+npm-explain - @10.9.2

Explain installed packages
diff --git a/deps/npm/docs/output/commands/npm-explore.html b/deps/npm/docs/output/commands/npm-explore.html index e296f4146aff98..20d761c0db149c 100644 --- a/deps/npm/docs/output/commands/npm-explore.html +++ b/deps/npm/docs/output/commands/npm-explore.html @@ -141,9 +141,9 @@
-npm-explore - @10.9.0
+npm-explore - @10.9.2

Browse an installed package
diff --git a/deps/npm/docs/output/commands/npm-find-dupes.html b/deps/npm/docs/output/commands/npm-find-dupes.html index a8c914a0dd3d44..9b2810cf1b17ac 100644 --- a/deps/npm/docs/output/commands/npm-find-dupes.html +++ b/deps/npm/docs/output/commands/npm-find-dupes.html @@ -141,9 +141,9 @@
-npm-find-dupes - @10.9.0
+npm-find-dupes - @10.9.2

Find duplication in the package tree
diff --git a/deps/npm/docs/output/commands/npm-fund.html b/deps/npm/docs/output/commands/npm-fund.html index 36a63253439b60..91e0a71c3b32af 100644 --- a/deps/npm/docs/output/commands/npm-fund.html +++ b/deps/npm/docs/output/commands/npm-fund.html @@ -141,9 +141,9 @@
-npm-fund - @10.9.0
+npm-fund - @10.9.2

Retrieve funding information
diff --git a/deps/npm/docs/output/commands/npm-help-search.html b/deps/npm/docs/output/commands/npm-help-search.html index 76dea45d852e75..35e2ec587c48e8 100644 --- a/deps/npm/docs/output/commands/npm-help-search.html +++ b/deps/npm/docs/output/commands/npm-help-search.html @@ -141,9 +141,9 @@
-npm-help-search - @10.9.0
+npm-help-search - @10.9.2

Search npm help documentation
diff --git a/deps/npm/docs/output/commands/npm-help.html b/deps/npm/docs/output/commands/npm-help.html index e6b14af2f9ec6c..17403f14c89427 100644 --- a/deps/npm/docs/output/commands/npm-help.html +++ b/deps/npm/docs/output/commands/npm-help.html @@ -141,9 +141,9 @@
-npm-help - @10.9.0
+npm-help - @10.9.2

Get help on npm
diff --git a/deps/npm/docs/output/commands/npm-hook.html b/deps/npm/docs/output/commands/npm-hook.html index 393700a9a7165a..d06784faf03624 100644 --- a/deps/npm/docs/output/commands/npm-hook.html +++ b/deps/npm/docs/output/commands/npm-hook.html @@ -141,9 +141,9 @@
-npm-hook - @10.9.0
+npm-hook - @10.9.2

Manage registry hooks
diff --git a/deps/npm/docs/output/commands/npm-init.html b/deps/npm/docs/output/commands/npm-init.html index 8ff01b2f7a76d0..430763db6ba6af 100644 --- a/deps/npm/docs/output/commands/npm-init.html +++ b/deps/npm/docs/output/commands/npm-init.html @@ -141,9 +141,9 @@
-npm-init - @10.9.0
+npm-init - @10.9.2

Create a package.json file
diff --git a/deps/npm/docs/output/commands/npm-install-ci-test.html b/deps/npm/docs/output/commands/npm-install-ci-test.html index b0f9d237ed8e98..6a29d2d54f679e 100644 --- a/deps/npm/docs/output/commands/npm-install-ci-test.html +++ b/deps/npm/docs/output/commands/npm-install-ci-test.html @@ -141,9 +141,9 @@
-npm-install-ci-test - @10.9.0
+npm-install-ci-test - @10.9.2

Install a project with a clean slate and run tests
diff --git a/deps/npm/docs/output/commands/npm-install-test.html b/deps/npm/docs/output/commands/npm-install-test.html index b0fbd63887fff5..32bd2271074fb6 100644 --- a/deps/npm/docs/output/commands/npm-install-test.html +++ b/deps/npm/docs/output/commands/npm-install-test.html @@ -141,9 +141,9 @@
-npm-install-test - @10.9.0
+npm-install-test - @10.9.2

Install package(s) and run tests
diff --git a/deps/npm/docs/output/commands/npm-install.html b/deps/npm/docs/output/commands/npm-install.html index fa57e02eaf9ad5..db7d717d18160b 100644 --- a/deps/npm/docs/output/commands/npm-install.html +++ b/deps/npm/docs/output/commands/npm-install.html @@ -141,9 +141,9 @@
-npm-install - @10.9.0
+npm-install - @10.9.2

Install a package
diff --git a/deps/npm/docs/output/commands/npm-link.html b/deps/npm/docs/output/commands/npm-link.html index 4e461ebefafd42..5778cc2a6268d2 100644 --- a/deps/npm/docs/output/commands/npm-link.html +++ b/deps/npm/docs/output/commands/npm-link.html @@ -141,9 +141,9 @@
-npm-link - @10.9.0
+npm-link - @10.9.2

Symlink a package folder
diff --git a/deps/npm/docs/output/commands/npm-login.html b/deps/npm/docs/output/commands/npm-login.html index 9c1584ca36bc41..81555fcecefd3e 100644 --- a/deps/npm/docs/output/commands/npm-login.html +++ b/deps/npm/docs/output/commands/npm-login.html @@ -141,9 +141,9 @@
-npm-login - @10.9.0
+npm-login - @10.9.2

Login to a registry user account
diff --git a/deps/npm/docs/output/commands/npm-logout.html b/deps/npm/docs/output/commands/npm-logout.html index 8908b329395254..1b6cdf8b923034 100644 --- a/deps/npm/docs/output/commands/npm-logout.html +++ b/deps/npm/docs/output/commands/npm-logout.html @@ -141,9 +141,9 @@
-npm-logout - @10.9.0
+npm-logout - @10.9.2

Log out of the registry
diff --git a/deps/npm/docs/output/commands/npm-ls.html b/deps/npm/docs/output/commands/npm-ls.html index 2615a492a75e38..c9aa847abf1dd4 100644 --- a/deps/npm/docs/output/commands/npm-ls.html +++ b/deps/npm/docs/output/commands/npm-ls.html @@ -141,9 +141,9 @@
-npm-ls - @10.9.0
+npm-ls - @10.9.2

List installed packages
@@ -168,7 +168,7 @@

Description

the results to only the paths to the packages named. Note that nested packages will also show the paths to the specified packages. For example, running npm ls promzard in npm's source tree will show:

-npm@10.9.0 /path/to/npm
+npm@10.9.2 /path/to/npm
 └─┬ init-package-json@0.0.4
   └── promzard@0.1.5
 
diff --git a/deps/npm/docs/output/commands/npm-org.html b/deps/npm/docs/output/commands/npm-org.html index 66d823f7f3c2de..6c45111f034994 100644 --- a/deps/npm/docs/output/commands/npm-org.html +++ b/deps/npm/docs/output/commands/npm-org.html @@ -141,9 +141,9 @@
-npm-org - @10.9.0
+npm-org - @10.9.2

Manage orgs
diff --git a/deps/npm/docs/output/commands/npm-outdated.html b/deps/npm/docs/output/commands/npm-outdated.html index 6002ff1818da2c..9be28fdfd239e1 100644 --- a/deps/npm/docs/output/commands/npm-outdated.html +++ b/deps/npm/docs/output/commands/npm-outdated.html @@ -141,9 +141,9 @@
-npm-outdated - @10.9.0
+npm-outdated - @10.9.2

Check for outdated packages
diff --git a/deps/npm/docs/output/commands/npm-owner.html b/deps/npm/docs/output/commands/npm-owner.html index e8b7a05274aa91..fc0899e5e5d66b 100644 --- a/deps/npm/docs/output/commands/npm-owner.html +++ b/deps/npm/docs/output/commands/npm-owner.html @@ -141,9 +141,9 @@
-npm-owner - @10.9.0
+npm-owner - @10.9.2

Manage package owners
diff --git a/deps/npm/docs/output/commands/npm-pack.html b/deps/npm/docs/output/commands/npm-pack.html index 596ba9a35e3ac5..359d463780e510 100644 --- a/deps/npm/docs/output/commands/npm-pack.html +++ b/deps/npm/docs/output/commands/npm-pack.html @@ -141,9 +141,9 @@
-npm-pack - @10.9.0
+npm-pack - @10.9.2

Create a tarball from a package
diff --git a/deps/npm/docs/output/commands/npm-ping.html b/deps/npm/docs/output/commands/npm-ping.html index de393dbd0b5d71..4b3fbbc698cbb7 100644 --- a/deps/npm/docs/output/commands/npm-ping.html +++ b/deps/npm/docs/output/commands/npm-ping.html @@ -141,9 +141,9 @@
-npm-ping - @10.9.0
+npm-ping - @10.9.2

Ping npm registry
diff --git a/deps/npm/docs/output/commands/npm-pkg.html b/deps/npm/docs/output/commands/npm-pkg.html index ffb153cc440ce4..f1fe76e8073150 100644 --- a/deps/npm/docs/output/commands/npm-pkg.html +++ b/deps/npm/docs/output/commands/npm-pkg.html @@ -141,9 +141,9 @@
-npm-pkg - @10.9.0
+npm-pkg - @10.9.2

Manages your package.json
diff --git a/deps/npm/docs/output/commands/npm-prefix.html b/deps/npm/docs/output/commands/npm-prefix.html index da63644f2df42a..9708e55234ab4c 100644 --- a/deps/npm/docs/output/commands/npm-prefix.html +++ b/deps/npm/docs/output/commands/npm-prefix.html @@ -141,9 +141,9 @@
-npm-prefix - @10.9.0
+npm-prefix - @10.9.2

Display prefix
diff --git a/deps/npm/docs/output/commands/npm-profile.html b/deps/npm/docs/output/commands/npm-profile.html index c3679196a85bc4..6bb08b1fda63a6 100644 --- a/deps/npm/docs/output/commands/npm-profile.html +++ b/deps/npm/docs/output/commands/npm-profile.html @@ -141,9 +141,9 @@
-npm-profile - @10.9.0
+npm-profile - @10.9.2

Change settings on your registry profile
diff --git a/deps/npm/docs/output/commands/npm-prune.html b/deps/npm/docs/output/commands/npm-prune.html index 4e844ab9f02cbb..818522d63754f9 100644 --- a/deps/npm/docs/output/commands/npm-prune.html +++ b/deps/npm/docs/output/commands/npm-prune.html @@ -141,9 +141,9 @@
-npm-prune - @10.9.0
+npm-prune - @10.9.2

Remove extraneous packages
diff --git a/deps/npm/docs/output/commands/npm-publish.html b/deps/npm/docs/output/commands/npm-publish.html index b808cc29a15744..37c19e0b8f357c 100644 --- a/deps/npm/docs/output/commands/npm-publish.html +++ b/deps/npm/docs/output/commands/npm-publish.html @@ -141,9 +141,9 @@
-npm-publish - @10.9.0
+npm-publish - @10.9.2

Publish a package
diff --git a/deps/npm/docs/output/commands/npm-query.html b/deps/npm/docs/output/commands/npm-query.html index d85a2f6c27ff79..4a0210e62f58ea 100644 --- a/deps/npm/docs/output/commands/npm-query.html +++ b/deps/npm/docs/output/commands/npm-query.html @@ -141,9 +141,9 @@
-npm-query - @10.9.0
+npm-query - @10.9.2

Dependency selector query
diff --git a/deps/npm/docs/output/commands/npm-rebuild.html b/deps/npm/docs/output/commands/npm-rebuild.html index ff28b35b86bced..0a356397c4a95b 100644 --- a/deps/npm/docs/output/commands/npm-rebuild.html +++ b/deps/npm/docs/output/commands/npm-rebuild.html @@ -141,9 +141,9 @@
-npm-rebuild - @10.9.0
+npm-rebuild - @10.9.2

Rebuild a package
diff --git a/deps/npm/docs/output/commands/npm-repo.html b/deps/npm/docs/output/commands/npm-repo.html index 8ee7e9d6d2d074..ad86f81682375d 100644 --- a/deps/npm/docs/output/commands/npm-repo.html +++ b/deps/npm/docs/output/commands/npm-repo.html @@ -141,9 +141,9 @@
-npm-repo - @10.9.0
+npm-repo - @10.9.2

Open package repository page in the browser
diff --git a/deps/npm/docs/output/commands/npm-restart.html b/deps/npm/docs/output/commands/npm-restart.html index e3171bf280910b..4a2244b048ecbc 100644 --- a/deps/npm/docs/output/commands/npm-restart.html +++ b/deps/npm/docs/output/commands/npm-restart.html @@ -141,9 +141,9 @@
-npm-restart - @10.9.0
+npm-restart - @10.9.2

Restart a package
diff --git a/deps/npm/docs/output/commands/npm-root.html b/deps/npm/docs/output/commands/npm-root.html index 4e2e82e5bb259c..499bc84358b3e5 100644 --- a/deps/npm/docs/output/commands/npm-root.html +++ b/deps/npm/docs/output/commands/npm-root.html @@ -141,9 +141,9 @@
-npm-root - @10.9.0
+npm-root - @10.9.2

Display npm root
diff --git a/deps/npm/docs/output/commands/npm-run-script.html b/deps/npm/docs/output/commands/npm-run-script.html index 4673e733f49fcd..9c0ef4fedbc16e 100644 --- a/deps/npm/docs/output/commands/npm-run-script.html +++ b/deps/npm/docs/output/commands/npm-run-script.html @@ -141,9 +141,9 @@
-npm-run-script - @10.9.0
+npm-run-script - @10.9.2

Run arbitrary package scripts
diff --git a/deps/npm/docs/output/commands/npm-sbom.html b/deps/npm/docs/output/commands/npm-sbom.html index 00508eca6cd91a..b648df1654e8a6 100644 --- a/deps/npm/docs/output/commands/npm-sbom.html +++ b/deps/npm/docs/output/commands/npm-sbom.html @@ -141,9 +141,9 @@
-npm-sbom - @10.9.0
+npm-sbom - @10.9.2

Generate a Software Bill of Materials (SBOM)
diff --git a/deps/npm/docs/output/commands/npm-search.html b/deps/npm/docs/output/commands/npm-search.html index edf4e437e65400..bfd70c2a8abe92 100644 --- a/deps/npm/docs/output/commands/npm-search.html +++ b/deps/npm/docs/output/commands/npm-search.html @@ -141,9 +141,9 @@
-npm-search - @10.9.0
+npm-search - @10.9.2

Search for packages
diff --git a/deps/npm/docs/output/commands/npm-shrinkwrap.html b/deps/npm/docs/output/commands/npm-shrinkwrap.html index f225abaed9218f..60d198f85ce67e 100644 --- a/deps/npm/docs/output/commands/npm-shrinkwrap.html +++ b/deps/npm/docs/output/commands/npm-shrinkwrap.html @@ -141,9 +141,9 @@
-npm-shrinkwrap - @10.9.0
+npm-shrinkwrap - @10.9.2

Lock down dependency versions for publication
diff --git a/deps/npm/docs/output/commands/npm-star.html b/deps/npm/docs/output/commands/npm-star.html index 04b36628ad2176..ccda8bb3297d70 100644 --- a/deps/npm/docs/output/commands/npm-star.html +++ b/deps/npm/docs/output/commands/npm-star.html @@ -141,9 +141,9 @@
-npm-star - @10.9.0
+npm-star - @10.9.2

Mark your favorite packages
diff --git a/deps/npm/docs/output/commands/npm-stars.html b/deps/npm/docs/output/commands/npm-stars.html index 3183aa047f1f89..2f9619190f0122 100644 --- a/deps/npm/docs/output/commands/npm-stars.html +++ b/deps/npm/docs/output/commands/npm-stars.html @@ -141,9 +141,9 @@
-npm-stars - @10.9.0
+npm-stars - @10.9.2

View packages marked as favorites
diff --git a/deps/npm/docs/output/commands/npm-start.html b/deps/npm/docs/output/commands/npm-start.html index b81caefe0a4431..fad3ce05c3a2c9 100644 --- a/deps/npm/docs/output/commands/npm-start.html +++ b/deps/npm/docs/output/commands/npm-start.html @@ -141,9 +141,9 @@
-npm-start - @10.9.0
+npm-start - @10.9.2

Start a package
diff --git a/deps/npm/docs/output/commands/npm-stop.html b/deps/npm/docs/output/commands/npm-stop.html index 85d4c782a736e7..bc70086d9991d5 100644 --- a/deps/npm/docs/output/commands/npm-stop.html +++ b/deps/npm/docs/output/commands/npm-stop.html @@ -141,9 +141,9 @@
-npm-stop - @10.9.0
+npm-stop - @10.9.2

Stop a package
diff --git a/deps/npm/docs/output/commands/npm-team.html b/deps/npm/docs/output/commands/npm-team.html index 6ad869fa3bed9e..e03442dc68bf9c 100644 --- a/deps/npm/docs/output/commands/npm-team.html +++ b/deps/npm/docs/output/commands/npm-team.html @@ -141,9 +141,9 @@
-npm-team - @10.9.0
+npm-team - @10.9.2

Manage organization teams and team memberships
diff --git a/deps/npm/docs/output/commands/npm-test.html b/deps/npm/docs/output/commands/npm-test.html index bce16c92e4f087..b30af4ed9b3b30 100644 --- a/deps/npm/docs/output/commands/npm-test.html +++ b/deps/npm/docs/output/commands/npm-test.html @@ -141,9 +141,9 @@
-npm-test - @10.9.0
+npm-test - @10.9.2

Test a package
diff --git a/deps/npm/docs/output/commands/npm-token.html b/deps/npm/docs/output/commands/npm-token.html index 99fcafd9b8f53b..353709607b4bd4 100644 --- a/deps/npm/docs/output/commands/npm-token.html +++ b/deps/npm/docs/output/commands/npm-token.html @@ -141,9 +141,9 @@
-npm-token - @10.9.0
+npm-token - @10.9.2

Manage your authentication tokens
diff --git a/deps/npm/docs/output/commands/npm-uninstall.html b/deps/npm/docs/output/commands/npm-uninstall.html index dd8e7a2234604e..633dbbe58f933e 100644 --- a/deps/npm/docs/output/commands/npm-uninstall.html +++ b/deps/npm/docs/output/commands/npm-uninstall.html @@ -141,9 +141,9 @@
-npm-uninstall - @10.9.0
+npm-uninstall - @10.9.2

Remove a package
diff --git a/deps/npm/docs/output/commands/npm-unpublish.html b/deps/npm/docs/output/commands/npm-unpublish.html index 56e2c59f62addb..e4f4090936d4ff 100644 --- a/deps/npm/docs/output/commands/npm-unpublish.html +++ b/deps/npm/docs/output/commands/npm-unpublish.html @@ -141,9 +141,9 @@
-npm-unpublish - @10.9.0
+npm-unpublish - @10.9.2

Remove a package from the registry
diff --git a/deps/npm/docs/output/commands/npm-unstar.html b/deps/npm/docs/output/commands/npm-unstar.html index 710d8a947d665f..741ed8e707a4f9 100644 --- a/deps/npm/docs/output/commands/npm-unstar.html +++ b/deps/npm/docs/output/commands/npm-unstar.html @@ -141,9 +141,9 @@
-npm-unstar - @10.9.0
+npm-unstar - @10.9.2

Remove an item from your favorite packages
diff --git a/deps/npm/docs/output/commands/npm-update.html b/deps/npm/docs/output/commands/npm-update.html index e587ec92f8614a..287ed19fe9f5f6 100644 --- a/deps/npm/docs/output/commands/npm-update.html +++ b/deps/npm/docs/output/commands/npm-update.html @@ -141,9 +141,9 @@
-npm-update - @10.9.0
+npm-update - @10.9.2

Update packages
diff --git a/deps/npm/docs/output/commands/npm-version.html b/deps/npm/docs/output/commands/npm-version.html index 196a0236093673..43b978ffa94c80 100644 --- a/deps/npm/docs/output/commands/npm-version.html +++ b/deps/npm/docs/output/commands/npm-version.html @@ -141,9 +141,9 @@
-npm-version - @10.9.0
+npm-version - @10.9.2

Bump a package version
diff --git a/deps/npm/docs/output/commands/npm-view.html b/deps/npm/docs/output/commands/npm-view.html index 0eebee3037748d..1e388cfb922462 100644 --- a/deps/npm/docs/output/commands/npm-view.html +++ b/deps/npm/docs/output/commands/npm-view.html @@ -141,9 +141,9 @@
-npm-view - @10.9.0
+npm-view - @10.9.2

View registry info
diff --git a/deps/npm/docs/output/commands/npm-whoami.html b/deps/npm/docs/output/commands/npm-whoami.html index 0d1fa5ea9b87d7..944a762ad4aea1 100644 --- a/deps/npm/docs/output/commands/npm-whoami.html +++ b/deps/npm/docs/output/commands/npm-whoami.html @@ -141,9 +141,9 @@
-npm-whoami - @10.9.0
+npm-whoami - @10.9.2

Display npm username
diff --git a/deps/npm/docs/output/commands/npm.html b/deps/npm/docs/output/commands/npm.html index 005009c1dba277..eae0fdc7b20659 100644 --- a/deps/npm/docs/output/commands/npm.html +++ b/deps/npm/docs/output/commands/npm.html @@ -141,9 +141,9 @@
-npm - @10.9.0
+npm - @10.9.2

javascript package manager
@@ -158,7 +158,7 @@

Table of contents

Note: This command is unaware of workspaces.

Version

-10.9.0
+10.9.2

Description

npm is the package manager for the Node JavaScript platform. It puts modules in place so that node can find them, and manages dependency
diff --git a/deps/npm/docs/output/commands/npx.html b/deps/npm/docs/output/commands/npx.html index d3239ed320bb70..ec5547306359f7 100644 --- a/deps/npm/docs/output/commands/npx.html +++ b/deps/npm/docs/output/commands/npx.html @@ -141,9 +141,9 @@

-npx - @10.9.0
+npx - @10.9.2

Run a command from a local or remote npm package
diff --git a/deps/npm/docs/output/configuring-npm/folders.html b/deps/npm/docs/output/configuring-npm/folders.html index f23aef6354f73d..daca0c019a942b 100644 --- a/deps/npm/docs/output/configuring-npm/folders.html +++ b/deps/npm/docs/output/configuring-npm/folders.html @@ -141,9 +141,9 @@
-folders - @10.9.0
+folders - @10.9.2

Folder Structures Used by npm
diff --git a/deps/npm/docs/output/configuring-npm/install.html b/deps/npm/docs/output/configuring-npm/install.html index cddbcaae2de8be..abda3bcc06ab62 100644 --- a/deps/npm/docs/output/configuring-npm/install.html +++ b/deps/npm/docs/output/configuring-npm/install.html @@ -141,9 +141,9 @@
-install - @10.9.0
+install - @10.9.2

Download and install node and npm
diff --git a/deps/npm/docs/output/configuring-npm/npm-global.html b/deps/npm/docs/output/configuring-npm/npm-global.html index f23aef6354f73d..daca0c019a942b 100644 --- a/deps/npm/docs/output/configuring-npm/npm-global.html +++ b/deps/npm/docs/output/configuring-npm/npm-global.html @@ -141,9 +141,9 @@
-folders - @10.9.0
+folders - @10.9.2

Folder Structures Used by npm
diff --git a/deps/npm/docs/output/configuring-npm/npm-json.html b/deps/npm/docs/output/configuring-npm/npm-json.html index 0cd27f8d82fdd8..1645e91a762b8f 100644 --- a/deps/npm/docs/output/configuring-npm/npm-json.html +++ b/deps/npm/docs/output/configuring-npm/npm-json.html @@ -141,9 +141,9 @@
-package.json - @10.9.0
+package.json - @10.9.2

Specifics of npm's package.json handling
diff --git a/deps/npm/docs/output/configuring-npm/npm-shrinkwrap-json.html b/deps/npm/docs/output/configuring-npm/npm-shrinkwrap-json.html index 5f20e99541a7a7..65f0b0c184bef9 100644 --- a/deps/npm/docs/output/configuring-npm/npm-shrinkwrap-json.html +++ b/deps/npm/docs/output/configuring-npm/npm-shrinkwrap-json.html @@ -141,9 +141,9 @@
-npm-shrinkwrap.json - @10.9.0
+npm-shrinkwrap.json - @10.9.2

A publishable lockfile
diff --git a/deps/npm/docs/output/configuring-npm/npmrc.html b/deps/npm/docs/output/configuring-npm/npmrc.html index a20c3d807d3d83..e5e94afe63a94c 100644 --- a/deps/npm/docs/output/configuring-npm/npmrc.html +++ b/deps/npm/docs/output/configuring-npm/npmrc.html @@ -141,9 +141,9 @@
-npmrc - @10.9.0
+npmrc - @10.9.2

The npm config files
diff --git a/deps/npm/docs/output/configuring-npm/package-json.html b/deps/npm/docs/output/configuring-npm/package-json.html index 0cd27f8d82fdd8..1645e91a762b8f 100644 --- a/deps/npm/docs/output/configuring-npm/package-json.html +++ b/deps/npm/docs/output/configuring-npm/package-json.html @@ -141,9 +141,9 @@
-package.json - @10.9.0
+package.json - @10.9.2

Specifics of npm's package.json handling
diff --git a/deps/npm/docs/output/configuring-npm/package-lock-json.html b/deps/npm/docs/output/configuring-npm/package-lock-json.html index 58598df7ae4fbc..80fa8bf8bd4ac8 100644 --- a/deps/npm/docs/output/configuring-npm/package-lock-json.html +++ b/deps/npm/docs/output/configuring-npm/package-lock-json.html @@ -141,9 +141,9 @@
-package-lock.json - @10.9.0
+package-lock.json - @10.9.2

A manifestation of the manifest
diff --git a/deps/npm/docs/output/using-npm/config.html b/deps/npm/docs/output/using-npm/config.html index 5d70bb7e8e803e..487cc56439e9ed 100644 --- a/deps/npm/docs/output/using-npm/config.html +++ b/deps/npm/docs/output/using-npm/config.html @@ -141,9 +141,9 @@
-config - @10.9.0
+config - @10.9.2

More than you probably want to know about npm configuration
diff --git a/deps/npm/docs/output/using-npm/dependency-selectors.html b/deps/npm/docs/output/using-npm/dependency-selectors.html index 008377b3ab55ff..24e34408de9c52 100644 --- a/deps/npm/docs/output/using-npm/dependency-selectors.html +++ b/deps/npm/docs/output/using-npm/dependency-selectors.html @@ -141,9 +141,9 @@
-Dependency Selector Syntax & Querying - @10.9.0
+Dependency Selector Syntax & Querying - @10.9.2

Dependency Selector Syntax & Querying
diff --git a/deps/npm/docs/output/using-npm/developers.html b/deps/npm/docs/output/using-npm/developers.html index e75223cd623a54..d7848c6e6647f3 100644 --- a/deps/npm/docs/output/using-npm/developers.html +++ b/deps/npm/docs/output/using-npm/developers.html @@ -141,9 +141,9 @@
-developers - @10.9.0
+developers - @10.9.2

Developer Guide
diff --git a/deps/npm/docs/output/using-npm/logging.html b/deps/npm/docs/output/using-npm/logging.html index 5499bd53e285f2..d1e6b0a7532253 100644 --- a/deps/npm/docs/output/using-npm/logging.html +++ b/deps/npm/docs/output/using-npm/logging.html @@ -141,9 +141,9 @@
-Logging - @10.9.0
+Logging - @10.9.2

Why, What & How We Log
diff --git a/deps/npm/docs/output/using-npm/orgs.html b/deps/npm/docs/output/using-npm/orgs.html index c9ed80f6421c5e..5b7007b3618406 100644 --- a/deps/npm/docs/output/using-npm/orgs.html +++ b/deps/npm/docs/output/using-npm/orgs.html @@ -141,9 +141,9 @@
-orgs - @10.9.0
+orgs - @10.9.2

Working with Teams & Orgs
diff --git a/deps/npm/docs/output/using-npm/package-spec.html b/deps/npm/docs/output/using-npm/package-spec.html index a370699bb555bd..eee81153269653 100644 --- a/deps/npm/docs/output/using-npm/package-spec.html +++ b/deps/npm/docs/output/using-npm/package-spec.html @@ -141,9 +141,9 @@
-package-spec - @10.9.0
+package-spec - @10.9.2

Package name specifier
diff --git a/deps/npm/docs/output/using-npm/registry.html b/deps/npm/docs/output/using-npm/registry.html index 9d9f8a539333c9..bd464be3fd67c9 100644 --- a/deps/npm/docs/output/using-npm/registry.html +++ b/deps/npm/docs/output/using-npm/registry.html @@ -141,9 +141,9 @@
-registry - @10.9.0
+registry - @10.9.2

The JavaScript Package Registry
diff --git a/deps/npm/docs/output/using-npm/removal.html b/deps/npm/docs/output/using-npm/removal.html index 2668dd9dd8a440..f0ccb74eb50811 100644 --- a/deps/npm/docs/output/using-npm/removal.html +++ b/deps/npm/docs/output/using-npm/removal.html @@ -141,9 +141,9 @@
-removal - @10.9.0
+removal - @10.9.2

Cleaning the Slate
diff --git a/deps/npm/docs/output/using-npm/scope.html b/deps/npm/docs/output/using-npm/scope.html index 0441f8ef703e95..35ed91f147f107 100644 --- a/deps/npm/docs/output/using-npm/scope.html +++ b/deps/npm/docs/output/using-npm/scope.html @@ -141,9 +141,9 @@
-scope - @10.9.0
+scope - @10.9.2

Scoped packages
diff --git a/deps/npm/docs/output/using-npm/scripts.html b/deps/npm/docs/output/using-npm/scripts.html index ff37c2ede18062..9982862814e73a 100644 --- a/deps/npm/docs/output/using-npm/scripts.html +++ b/deps/npm/docs/output/using-npm/scripts.html @@ -141,9 +141,9 @@
-scripts - @10.9.0
+scripts - @10.9.2
 How npm handles the "scripts" field
diff --git a/deps/npm/docs/output/using-npm/workspaces.html b/deps/npm/docs/output/using-npm/workspaces.html index 19195cc868db4b..5e2756479c7636 100644 --- a/deps/npm/docs/output/using-npm/workspaces.html +++ b/deps/npm/docs/output/using-npm/workspaces.html @@ -141,9 +141,9 @@
-workspaces - @10.9.0
+workspaces - @10.9.2
 Working with workspaces
diff --git a/deps/npm/lib/cli.js b/deps/npm/lib/cli.js index e11729fe3205b9..00b4fc0bd7fb72 100644 --- a/deps/npm/lib/cli.js +++ b/deps/npm/lib/cli.js @@ -1,3 +1,11 @@ +try { + const { enableCompileCache } = require('node:module') + /* istanbul ignore next */ + if (enableCompileCache) { + enableCompileCache() + } +} catch (e) { /* istanbul ignore next */ } + const validateEngines = require('./cli/validate-engines.js') const cliEntry = require('node:path').resolve(__dirname, 'cli/entry.js') diff --git a/deps/npm/man/man1/npm-access.1 b/deps/npm/man/man1/npm-access.1 index 602193f3b8e3e8..96beab28fc365d 100644 --- a/deps/npm/man/man1/npm-access.1 +++ b/deps/npm/man/man1/npm-access.1 @@ -1,4 +1,4 @@ -.TH "NPM-ACCESS" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-ACCESS" "1" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBnpm-access\fR - Set access level on published packages .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-adduser.1 b/deps/npm/man/man1/npm-adduser.1 index 467384ec0ec40e..0096f28229122d 100644 --- a/deps/npm/man/man1/npm-adduser.1 +++ b/deps/npm/man/man1/npm-adduser.1 @@ -1,4 +1,4 @@ -.TH "NPM-ADDUSER" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-ADDUSER" "1" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBnpm-adduser\fR - Add a registry user account .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-audit.1 b/deps/npm/man/man1/npm-audit.1 index 64bf5d5461a157..38b977d3871d59 100644 --- a/deps/npm/man/man1/npm-audit.1 +++ b/deps/npm/man/man1/npm-audit.1 @@ -1,4 +1,4 @@ -.TH "NPM-AUDIT" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-AUDIT" "1" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBnpm-audit\fR - Run a security audit .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-bugs.1 b/deps/npm/man/man1/npm-bugs.1 index c25b04b6814137..92c57c2c3c4146 100644 --- a/deps/npm/man/man1/npm-bugs.1 +++ b/deps/npm/man/man1/npm-bugs.1 @@ -1,4 +1,4 @@ -.TH "NPM-BUGS" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-BUGS" "1" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBnpm-bugs\fR - Report bugs for a package in a web browser .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-cache.1 b/deps/npm/man/man1/npm-cache.1 index e9da044a5b4479..da188a0914e016 100644 --- a/deps/npm/man/man1/npm-cache.1 +++ b/deps/npm/man/man1/npm-cache.1 @@ -1,4 +1,4 @@ -.TH "NPM-CACHE" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-CACHE" "1" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBnpm-cache\fR - Manipulates packages cache .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-ci.1 b/deps/npm/man/man1/npm-ci.1 index 1ad1419cb1f98f..1ae5e934e2d2b7 100644 --- a/deps/npm/man/man1/npm-ci.1 +++ b/deps/npm/man/man1/npm-ci.1 @@ -1,4 +1,4 @@ -.TH "NPM-CI" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-CI" "1" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBnpm-ci\fR - Clean install a project .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-completion.1 b/deps/npm/man/man1/npm-completion.1 index 6f246c436cead3..ea44a0c9c56a51 100644 --- a/deps/npm/man/man1/npm-completion.1 +++ b/deps/npm/man/man1/npm-completion.1 @@ -1,4 +1,4 @@ -.TH "NPM-COMPLETION" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-COMPLETION" "1" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBnpm-completion\fR - Tab Completion for npm .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-config.1 b/deps/npm/man/man1/npm-config.1 index 1bef58e188b68c..dbe609ba3b727f 100644 --- a/deps/npm/man/man1/npm-config.1 +++ b/deps/npm/man/man1/npm-config.1 @@ -1,4 +1,4 @@ -.TH "NPM-CONFIG" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-CONFIG" "1" "December 2024" "NPM@10.9.2" "" .SH "NAME" 
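The `deps/npm/lib/cli.js` hunk above is the one behavioral change in this batch: npm now opportunistically turns on Node's on-disk V8 compile cache before loading the rest of the CLI. A minimal standalone sketch of the same pattern, assuming a Node.js release new enough to export `enableCompileCache` from `node:module` (older releases simply fall through):

```js
'use strict'

// Best-effort startup optimization: persist V8 compile caches across runs.
// On Node.js releases without this export, destructuring yields `undefined`
// and the guard (plus the try/catch) makes the whole block a no-op.
try {
  const { enableCompileCache } = require('node:module')
  if (enableCompileCache) {
    enableCompileCache()
  }
} catch (e) {
  // A failed cache setup must never prevent the CLI from starting.
}

// ...load the rest of the application as usual.
```

The guard matters because npm has to boot on every Node.js line it supports, including ones where `node:module` lacks this function.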
\fBnpm-config\fR - Manage the npm configuration files .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-dedupe.1 b/deps/npm/man/man1/npm-dedupe.1 index b1e21f0478ed4f..530de6344d5f4e 100644 --- a/deps/npm/man/man1/npm-dedupe.1 +++ b/deps/npm/man/man1/npm-dedupe.1 @@ -1,4 +1,4 @@ -.TH "NPM-DEDUPE" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-DEDUPE" "1" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBnpm-dedupe\fR - Reduce duplication in the package tree .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-deprecate.1 b/deps/npm/man/man1/npm-deprecate.1 index 31e9e56cd95382..0333772b97c690 100644 --- a/deps/npm/man/man1/npm-deprecate.1 +++ b/deps/npm/man/man1/npm-deprecate.1 @@ -1,4 +1,4 @@ -.TH "NPM-DEPRECATE" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-DEPRECATE" "1" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBnpm-deprecate\fR - Deprecate a version of a package .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-diff.1 b/deps/npm/man/man1/npm-diff.1 index c29ae3266d1543..168f4397430854 100644 --- a/deps/npm/man/man1/npm-diff.1 +++ b/deps/npm/man/man1/npm-diff.1 @@ -1,4 +1,4 @@ -.TH "NPM-DIFF" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-DIFF" "1" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBnpm-diff\fR - The registry diff command .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-dist-tag.1 b/deps/npm/man/man1/npm-dist-tag.1 index 46e5cf8ac3492a..97e2927ac1e2fa 100644 --- a/deps/npm/man/man1/npm-dist-tag.1 +++ b/deps/npm/man/man1/npm-dist-tag.1 @@ -1,4 +1,4 @@ -.TH "NPM-DIST-TAG" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-DIST-TAG" "1" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBnpm-dist-tag\fR - Modify package distribution tags .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-docs.1 b/deps/npm/man/man1/npm-docs.1 index 6cda2d87abe163..6b09a835bbf318 100644 --- a/deps/npm/man/man1/npm-docs.1 +++ b/deps/npm/man/man1/npm-docs.1 @@ -1,4 +1,4 @@ -.TH "NPM-DOCS" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-DOCS" "1" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBnpm-docs\fR - Open documentation for a package in a web browser .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-doctor.1 b/deps/npm/man/man1/npm-doctor.1 index 7a67ed1e6b32b1..9a0374b0896b52 100644 --- a/deps/npm/man/man1/npm-doctor.1 +++ b/deps/npm/man/man1/npm-doctor.1 @@ -1,4 +1,4 @@ -.TH "NPM-DOCTOR" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-DOCTOR" "1" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBnpm-doctor\fR - Check the health of your npm environment .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-edit.1 b/deps/npm/man/man1/npm-edit.1 index 5ae7e24b67d60e..779feeede13656 100644 --- a/deps/npm/man/man1/npm-edit.1 +++ b/deps/npm/man/man1/npm-edit.1 @@ -1,4 +1,4 @@ -.TH "NPM-EDIT" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-EDIT" "1" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBnpm-edit\fR - Edit an installed package .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-exec.1 b/deps/npm/man/man1/npm-exec.1 index 20c21e67919d65..3b5fe4c425ee3e 100644 --- a/deps/npm/man/man1/npm-exec.1 +++ b/deps/npm/man/man1/npm-exec.1 @@ -1,4 +1,4 @@ -.TH "NPM-EXEC" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-EXEC" "1" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBnpm-exec\fR - Run a command from a local or remote npm package .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-explain.1 b/deps/npm/man/man1/npm-explain.1 index 65ef0e086e2ce0..5f86d19236fdb0 100644 --- a/deps/npm/man/man1/npm-explain.1 +++ b/deps/npm/man/man1/npm-explain.1 @@ -1,4 +1,4 @@ -.TH "NPM-EXPLAIN" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-EXPLAIN" "1" "December 
2024" "NPM@10.9.2" "" .SH "NAME" \fBnpm-explain\fR - Explain installed packages .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-explore.1 b/deps/npm/man/man1/npm-explore.1 index 6b92a81c2abc0d..eb71089b62446f 100644 --- a/deps/npm/man/man1/npm-explore.1 +++ b/deps/npm/man/man1/npm-explore.1 @@ -1,4 +1,4 @@ -.TH "NPM-EXPLORE" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-EXPLORE" "1" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBnpm-explore\fR - Browse an installed package .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-find-dupes.1 b/deps/npm/man/man1/npm-find-dupes.1 index 261313e1b23ad1..88a42adcec1f20 100644 --- a/deps/npm/man/man1/npm-find-dupes.1 +++ b/deps/npm/man/man1/npm-find-dupes.1 @@ -1,4 +1,4 @@ -.TH "NPM-FIND-DUPES" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-FIND-DUPES" "1" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBnpm-find-dupes\fR - Find duplication in the package tree .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-fund.1 b/deps/npm/man/man1/npm-fund.1 index b87d57d20c0f50..31c5ceb7ff8c1f 100644 --- a/deps/npm/man/man1/npm-fund.1 +++ b/deps/npm/man/man1/npm-fund.1 @@ -1,4 +1,4 @@ -.TH "NPM-FUND" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-FUND" "1" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBnpm-fund\fR - Retrieve funding information .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-help-search.1 b/deps/npm/man/man1/npm-help-search.1 index 9660ca29697e9f..c954795c2778c7 100644 --- a/deps/npm/man/man1/npm-help-search.1 +++ b/deps/npm/man/man1/npm-help-search.1 @@ -1,4 +1,4 @@ -.TH "NPM-HELP-SEARCH" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-HELP-SEARCH" "1" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBnpm-help-search\fR - Search npm help documentation .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-help.1 b/deps/npm/man/man1/npm-help.1 index a8c70fdc88a9ae..2d0fc6e3a2c0e7 100644 --- a/deps/npm/man/man1/npm-help.1 +++ b/deps/npm/man/man1/npm-help.1 @@ -1,4 +1,4 @@ -.TH "NPM-HELP" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-HELP" "1" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBnpm-help\fR - Get help on npm .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-hook.1 b/deps/npm/man/man1/npm-hook.1 index 2c07a5afadbb64..8d5d9334692b9e 100644 --- a/deps/npm/man/man1/npm-hook.1 +++ b/deps/npm/man/man1/npm-hook.1 @@ -1,4 +1,4 @@ -.TH "NPM-HOOK" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-HOOK" "1" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBnpm-hook\fR - Manage registry hooks .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-init.1 b/deps/npm/man/man1/npm-init.1 index cb7339dc0f4ff0..1c1b008baa1deb 100644 --- a/deps/npm/man/man1/npm-init.1 +++ b/deps/npm/man/man1/npm-init.1 @@ -1,4 +1,4 @@ -.TH "NPM-INIT" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-INIT" "1" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBnpm-init\fR - Create a package.json file .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-install-ci-test.1 b/deps/npm/man/man1/npm-install-ci-test.1 index 52defde693a506..4bbbb51140521b 100644 --- a/deps/npm/man/man1/npm-install-ci-test.1 +++ b/deps/npm/man/man1/npm-install-ci-test.1 @@ -1,4 +1,4 @@ -.TH "NPM-INSTALL-CI-TEST" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-INSTALL-CI-TEST" "1" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBnpm-install-ci-test\fR - Install a project with a clean slate and run tests .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-install-test.1 b/deps/npm/man/man1/npm-install-test.1 index e2023e1a985474..eef940c066c709 100644 --- a/deps/npm/man/man1/npm-install-test.1 +++ b/deps/npm/man/man1/npm-install-test.1 @@ -1,4 +1,4 @@ 
-.TH "NPM-INSTALL-TEST" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-INSTALL-TEST" "1" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBnpm-install-test\fR - Install package(s) and run tests .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-install.1 b/deps/npm/man/man1/npm-install.1 index 59b3a8c81ac73f..bbe4a43c10863e 100644 --- a/deps/npm/man/man1/npm-install.1 +++ b/deps/npm/man/man1/npm-install.1 @@ -1,4 +1,4 @@ -.TH "NPM-INSTALL" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-INSTALL" "1" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBnpm-install\fR - Install a package .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-link.1 b/deps/npm/man/man1/npm-link.1 index e6106495400fdc..9aca1f4a7fb80d 100644 --- a/deps/npm/man/man1/npm-link.1 +++ b/deps/npm/man/man1/npm-link.1 @@ -1,4 +1,4 @@ -.TH "NPM-LINK" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-LINK" "1" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBnpm-link\fR - Symlink a package folder .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-login.1 b/deps/npm/man/man1/npm-login.1 index 36f2bf2d0f35dc..a5e0cb71bb0d98 100644 --- a/deps/npm/man/man1/npm-login.1 +++ b/deps/npm/man/man1/npm-login.1 @@ -1,4 +1,4 @@ -.TH "NPM-LOGIN" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-LOGIN" "1" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBnpm-login\fR - Login to a registry user account .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-logout.1 b/deps/npm/man/man1/npm-logout.1 index 78cba39ccd85c9..8ac69429ea320d 100644 --- a/deps/npm/man/man1/npm-logout.1 +++ b/deps/npm/man/man1/npm-logout.1 @@ -1,4 +1,4 @@ -.TH "NPM-LOGOUT" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-LOGOUT" "1" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBnpm-logout\fR - Log out of the registry .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-ls.1 b/deps/npm/man/man1/npm-ls.1 index b0364fe1531753..47d20120898b59 100644 --- a/deps/npm/man/man1/npm-ls.1 +++ b/deps/npm/man/man1/npm-ls.1 @@ -1,4 +1,4 @@ -.TH "NPM-LS" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-LS" "1" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBnpm-ls\fR - List installed packages .SS "Synopsis" @@ -20,7 +20,7 @@ Positional arguments are \fBname@version-range\fR identifiers, which will limit .P .RS 2 .nf -npm@10.9.0 /path/to/npm +npm@10.9.2 /path/to/npm └─┬ init-package-json@0.0.4 └── promzard@0.1.5 .fi diff --git a/deps/npm/man/man1/npm-org.1 b/deps/npm/man/man1/npm-org.1 index 0465a6b5bddb54..88df3325a344fd 100644 --- a/deps/npm/man/man1/npm-org.1 +++ b/deps/npm/man/man1/npm-org.1 @@ -1,4 +1,4 @@ -.TH "NPM-ORG" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-ORG" "1" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBnpm-org\fR - Manage orgs .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-outdated.1 b/deps/npm/man/man1/npm-outdated.1 index ad1cf07823630f..17aabbd0857183 100644 --- a/deps/npm/man/man1/npm-outdated.1 +++ b/deps/npm/man/man1/npm-outdated.1 @@ -1,4 +1,4 @@ -.TH "NPM-OUTDATED" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-OUTDATED" "1" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBnpm-outdated\fR - Check for outdated packages .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-owner.1 b/deps/npm/man/man1/npm-owner.1 index 5842e5a51d125f..f01e67e6464e7f 100644 --- a/deps/npm/man/man1/npm-owner.1 +++ b/deps/npm/man/man1/npm-owner.1 @@ -1,4 +1,4 @@ -.TH "NPM-OWNER" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-OWNER" "1" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBnpm-owner\fR - Manage package owners .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-pack.1 b/deps/npm/man/man1/npm-pack.1 index 
6cc2acd4b580f7..b9b56a31e30eef 100644 --- a/deps/npm/man/man1/npm-pack.1 +++ b/deps/npm/man/man1/npm-pack.1 @@ -1,4 +1,4 @@ -.TH "NPM-PACK" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-PACK" "1" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBnpm-pack\fR - Create a tarball from a package .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-ping.1 b/deps/npm/man/man1/npm-ping.1 index ad03cbbb6a1398..2d16431b030ea8 100644 --- a/deps/npm/man/man1/npm-ping.1 +++ b/deps/npm/man/man1/npm-ping.1 @@ -1,4 +1,4 @@ -.TH "NPM-PING" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-PING" "1" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBnpm-ping\fR - Ping npm registry .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-pkg.1 b/deps/npm/man/man1/npm-pkg.1 index 2f45876234ec3d..e55a0280d1211b 100644 --- a/deps/npm/man/man1/npm-pkg.1 +++ b/deps/npm/man/man1/npm-pkg.1 @@ -1,4 +1,4 @@ -.TH "NPM-PKG" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-PKG" "1" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBnpm-pkg\fR - Manages your package.json .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-prefix.1 b/deps/npm/man/man1/npm-prefix.1 index cac44344c71d79..dca8d0c47497b4 100644 --- a/deps/npm/man/man1/npm-prefix.1 +++ b/deps/npm/man/man1/npm-prefix.1 @@ -1,4 +1,4 @@ -.TH "NPM-PREFIX" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-PREFIX" "1" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBnpm-prefix\fR - Display prefix .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-profile.1 b/deps/npm/man/man1/npm-profile.1 index 28e3f41244ace1..89837b408f5a67 100644 --- a/deps/npm/man/man1/npm-profile.1 +++ b/deps/npm/man/man1/npm-profile.1 @@ -1,4 +1,4 @@ -.TH "NPM-PROFILE" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-PROFILE" "1" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBnpm-profile\fR - Change settings on your registry profile .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-prune.1 b/deps/npm/man/man1/npm-prune.1 index 7634c75f0901fa..243426dffaf20f 100644 --- a/deps/npm/man/man1/npm-prune.1 +++ b/deps/npm/man/man1/npm-prune.1 @@ -1,4 +1,4 @@ -.TH "NPM-PRUNE" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-PRUNE" "1" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBnpm-prune\fR - Remove extraneous packages .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-publish.1 b/deps/npm/man/man1/npm-publish.1 index c6724b6fe2ce51..1110b097182988 100644 --- a/deps/npm/man/man1/npm-publish.1 +++ b/deps/npm/man/man1/npm-publish.1 @@ -1,4 +1,4 @@ -.TH "NPM-PUBLISH" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-PUBLISH" "1" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBnpm-publish\fR - Publish a package .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-query.1 b/deps/npm/man/man1/npm-query.1 index 0a3c6e665e7ecf..983f2cad388940 100644 --- a/deps/npm/man/man1/npm-query.1 +++ b/deps/npm/man/man1/npm-query.1 @@ -1,4 +1,4 @@ -.TH "NPM-QUERY" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-QUERY" "1" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBnpm-query\fR - Dependency selector query .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-rebuild.1 b/deps/npm/man/man1/npm-rebuild.1 index e537cec373e3c6..396d9adf779cb1 100644 --- a/deps/npm/man/man1/npm-rebuild.1 +++ b/deps/npm/man/man1/npm-rebuild.1 @@ -1,4 +1,4 @@ -.TH "NPM-REBUILD" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-REBUILD" "1" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBnpm-rebuild\fR - Rebuild a package .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-repo.1 b/deps/npm/man/man1/npm-repo.1 index 869541f6faaa51..b9c7bbdfbc233c 100644 --- a/deps/npm/man/man1/npm-repo.1 +++ b/deps/npm/man/man1/npm-repo.1 
@@ -1,4 +1,4 @@ -.TH "NPM-REPO" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-REPO" "1" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBnpm-repo\fR - Open package repository page in the browser .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-restart.1 b/deps/npm/man/man1/npm-restart.1 index b0e3b0deead7ef..1112bf9180cece 100644 --- a/deps/npm/man/man1/npm-restart.1 +++ b/deps/npm/man/man1/npm-restart.1 @@ -1,4 +1,4 @@ -.TH "NPM-RESTART" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-RESTART" "1" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBnpm-restart\fR - Restart a package .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-root.1 b/deps/npm/man/man1/npm-root.1 index 94a132557e3979..2456bdba6d1816 100644 --- a/deps/npm/man/man1/npm-root.1 +++ b/deps/npm/man/man1/npm-root.1 @@ -1,4 +1,4 @@ -.TH "NPM-ROOT" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-ROOT" "1" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBnpm-root\fR - Display npm root .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-run-script.1 b/deps/npm/man/man1/npm-run-script.1 index 424c733d6a0523..a05362361848d2 100644 --- a/deps/npm/man/man1/npm-run-script.1 +++ b/deps/npm/man/man1/npm-run-script.1 @@ -1,4 +1,4 @@ -.TH "NPM-RUN-SCRIPT" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-RUN-SCRIPT" "1" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBnpm-run-script\fR - Run arbitrary package scripts .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-sbom.1 b/deps/npm/man/man1/npm-sbom.1 index 7bb8199c8b11c8..f784a53edd4c08 100644 --- a/deps/npm/man/man1/npm-sbom.1 +++ b/deps/npm/man/man1/npm-sbom.1 @@ -1,4 +1,4 @@ -.TH "NPM-SBOM" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-SBOM" "1" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBnpm-sbom\fR - Generate a Software Bill of Materials (SBOM) .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-search.1 b/deps/npm/man/man1/npm-search.1 index 4fe67890bc949a..3ee7bcb7cb4e92 100644 --- a/deps/npm/man/man1/npm-search.1 +++ b/deps/npm/man/man1/npm-search.1 @@ -1,4 +1,4 @@ -.TH "NPM-SEARCH" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-SEARCH" "1" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBnpm-search\fR - Search for packages .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-shrinkwrap.1 b/deps/npm/man/man1/npm-shrinkwrap.1 index ed3c0cb03ccca7..d7e64493a90754 100644 --- a/deps/npm/man/man1/npm-shrinkwrap.1 +++ b/deps/npm/man/man1/npm-shrinkwrap.1 @@ -1,4 +1,4 @@ -.TH "NPM-SHRINKWRAP" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-SHRINKWRAP" "1" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBnpm-shrinkwrap\fR - Lock down dependency versions for publication .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-star.1 b/deps/npm/man/man1/npm-star.1 index 83d0ff230ec1bb..7c3a528ec3a04f 100644 --- a/deps/npm/man/man1/npm-star.1 +++ b/deps/npm/man/man1/npm-star.1 @@ -1,4 +1,4 @@ -.TH "NPM-STAR" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-STAR" "1" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBnpm-star\fR - Mark your favorite packages .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-stars.1 b/deps/npm/man/man1/npm-stars.1 index bdeffec7a3b1b5..61d60d9534da40 100644 --- a/deps/npm/man/man1/npm-stars.1 +++ b/deps/npm/man/man1/npm-stars.1 @@ -1,4 +1,4 @@ -.TH "NPM-STARS" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-STARS" "1" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBnpm-stars\fR - View packages marked as favorites .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-start.1 b/deps/npm/man/man1/npm-start.1 index c3cb48c6f72576..4c687a1ecba49e 100644 --- a/deps/npm/man/man1/npm-start.1 +++ b/deps/npm/man/man1/npm-start.1 
@@ -1,4 +1,4 @@ -.TH "NPM-START" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-START" "1" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBnpm-start\fR - Start a package .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-stop.1 b/deps/npm/man/man1/npm-stop.1 index 36ef105209b7f0..dadebe98c52834 100644 --- a/deps/npm/man/man1/npm-stop.1 +++ b/deps/npm/man/man1/npm-stop.1 @@ -1,4 +1,4 @@ -.TH "NPM-STOP" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-STOP" "1" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBnpm-stop\fR - Stop a package .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-team.1 b/deps/npm/man/man1/npm-team.1 index 6712c8f92b23c8..42ee63f4743b6e 100644 --- a/deps/npm/man/man1/npm-team.1 +++ b/deps/npm/man/man1/npm-team.1 @@ -1,4 +1,4 @@ -.TH "NPM-TEAM" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-TEAM" "1" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBnpm-team\fR - Manage organization teams and team memberships .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-test.1 b/deps/npm/man/man1/npm-test.1 index 5e63e9c6d76ab3..80eb6a1d7bb9e7 100644 --- a/deps/npm/man/man1/npm-test.1 +++ b/deps/npm/man/man1/npm-test.1 @@ -1,4 +1,4 @@ -.TH "NPM-TEST" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-TEST" "1" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBnpm-test\fR - Test a package .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-token.1 b/deps/npm/man/man1/npm-token.1 index 29a2c361574121..1d14f81553b5c9 100644 --- a/deps/npm/man/man1/npm-token.1 +++ b/deps/npm/man/man1/npm-token.1 @@ -1,4 +1,4 @@ -.TH "NPM-TOKEN" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-TOKEN" "1" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBnpm-token\fR - Manage your authentication tokens .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-uninstall.1 b/deps/npm/man/man1/npm-uninstall.1 index 0c10a60c1f07e7..f5e25641fc57c5 100644 --- a/deps/npm/man/man1/npm-uninstall.1 +++ b/deps/npm/man/man1/npm-uninstall.1 @@ -1,4 +1,4 @@ -.TH "NPM-UNINSTALL" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-UNINSTALL" "1" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBnpm-uninstall\fR - Remove a package .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-unpublish.1 b/deps/npm/man/man1/npm-unpublish.1 index 098af2e59310bc..fcf62bbd7da263 100644 --- a/deps/npm/man/man1/npm-unpublish.1 +++ b/deps/npm/man/man1/npm-unpublish.1 @@ -1,4 +1,4 @@ -.TH "NPM-UNPUBLISH" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-UNPUBLISH" "1" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBnpm-unpublish\fR - Remove a package from the registry .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-unstar.1 b/deps/npm/man/man1/npm-unstar.1 index 7326984aa8b4d2..e9edf3ab6634dd 100644 --- a/deps/npm/man/man1/npm-unstar.1 +++ b/deps/npm/man/man1/npm-unstar.1 @@ -1,4 +1,4 @@ -.TH "NPM-UNSTAR" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-UNSTAR" "1" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBnpm-unstar\fR - Remove an item from your favorite packages .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-update.1 b/deps/npm/man/man1/npm-update.1 index 797a9e601cf3d2..052008d73485c9 100644 --- a/deps/npm/man/man1/npm-update.1 +++ b/deps/npm/man/man1/npm-update.1 @@ -1,4 +1,4 @@ -.TH "NPM-UPDATE" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-UPDATE" "1" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBnpm-update\fR - Update packages .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-version.1 b/deps/npm/man/man1/npm-version.1 index d0a7dcee6fea65..f79a0c84c3f8fd 100644 --- a/deps/npm/man/man1/npm-version.1 +++ b/deps/npm/man/man1/npm-version.1 @@ -1,4 +1,4 @@ -.TH "NPM-VERSION" "1" "October 2024" 
"NPM@10.9.0" "" +.TH "NPM-VERSION" "1" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBnpm-version\fR - Bump a package version .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-view.1 b/deps/npm/man/man1/npm-view.1 index 2925f8d7d2cffe..e862ddc4b8780d 100644 --- a/deps/npm/man/man1/npm-view.1 +++ b/deps/npm/man/man1/npm-view.1 @@ -1,4 +1,4 @@ -.TH "NPM-VIEW" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-VIEW" "1" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBnpm-view\fR - View registry info .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-whoami.1 b/deps/npm/man/man1/npm-whoami.1 index f283db5c9d247a..a003f634a2b293 100644 --- a/deps/npm/man/man1/npm-whoami.1 +++ b/deps/npm/man/man1/npm-whoami.1 @@ -1,4 +1,4 @@ -.TH "NPM-WHOAMI" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM-WHOAMI" "1" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBnpm-whoami\fR - Display npm username .SS "Synopsis" diff --git a/deps/npm/man/man1/npm.1 b/deps/npm/man/man1/npm.1 index b2034bc50fa5e3..28e8774ad1b4d0 100644 --- a/deps/npm/man/man1/npm.1 +++ b/deps/npm/man/man1/npm.1 @@ -1,4 +1,4 @@ -.TH "NPM" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPM" "1" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBnpm\fR - javascript package manager .SS "Synopsis" @@ -12,7 +12,7 @@ npm Note: This command is unaware of workspaces. .SS "Version" .P -10.9.0 +10.9.2 .SS "Description" .P npm is the package manager for the Node JavaScript platform. It puts modules in place so that node can find them, and manages dependency conflicts intelligently. diff --git a/deps/npm/man/man1/npx.1 b/deps/npm/man/man1/npx.1 index a1000443cc665e..9b4ba1af1dc365 100644 --- a/deps/npm/man/man1/npx.1 +++ b/deps/npm/man/man1/npx.1 @@ -1,4 +1,4 @@ -.TH "NPX" "1" "October 2024" "NPM@10.9.0" "" +.TH "NPX" "1" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBnpx\fR - Run a command from a local or remote npm package .SS "Synopsis" diff --git a/deps/npm/man/man5/folders.5 b/deps/npm/man/man5/folders.5 index 6e3045729c46ca..8eb8a8230333f9 100644 --- a/deps/npm/man/man5/folders.5 +++ b/deps/npm/man/man5/folders.5 @@ -1,4 +1,4 @@ -.TH "FOLDERS" "5" "October 2024" "NPM@10.9.0" "" +.TH "FOLDERS" "5" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBfolders\fR - Folder Structures Used by npm .SS "Description" diff --git a/deps/npm/man/man5/install.5 b/deps/npm/man/man5/install.5 index f5fabf02d7431b..0b75adf308685a 100644 --- a/deps/npm/man/man5/install.5 +++ b/deps/npm/man/man5/install.5 @@ -1,4 +1,4 @@ -.TH "INSTALL" "5" "October 2024" "NPM@10.9.0" "" +.TH "INSTALL" "5" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBinstall\fR - Download and install node and npm .SS "Description" diff --git a/deps/npm/man/man5/npm-global.5 b/deps/npm/man/man5/npm-global.5 index 6e3045729c46ca..8eb8a8230333f9 100644 --- a/deps/npm/man/man5/npm-global.5 +++ b/deps/npm/man/man5/npm-global.5 @@ -1,4 +1,4 @@ -.TH "FOLDERS" "5" "October 2024" "NPM@10.9.0" "" +.TH "FOLDERS" "5" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBfolders\fR - Folder Structures Used by npm .SS "Description" diff --git a/deps/npm/man/man5/npm-json.5 b/deps/npm/man/man5/npm-json.5 index 49773dda01d706..81fa9a8c95b8dc 100644 --- a/deps/npm/man/man5/npm-json.5 +++ b/deps/npm/man/man5/npm-json.5 @@ -1,4 +1,4 @@ -.TH "PACKAGE.JSON" "5" "October 2024" "NPM@10.9.0" "" +.TH "PACKAGE.JSON" "5" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBpackage.json\fR - Specifics of npm's package.json handling .SS "Description" diff --git a/deps/npm/man/man5/npm-shrinkwrap-json.5 b/deps/npm/man/man5/npm-shrinkwrap-json.5 index 
d1f394d23aa1ce..7e8efb285e959e 100644 --- a/deps/npm/man/man5/npm-shrinkwrap-json.5 +++ b/deps/npm/man/man5/npm-shrinkwrap-json.5 @@ -1,4 +1,4 @@ -.TH "NPM-SHRINKWRAP.JSON" "5" "October 2024" "NPM@10.9.0" "" +.TH "NPM-SHRINKWRAP.JSON" "5" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBnpm-shrinkwrap.json\fR - A publishable lockfile .SS "Description" diff --git a/deps/npm/man/man5/npmrc.5 b/deps/npm/man/man5/npmrc.5 index 3b78f989c23545..947a0fe433faa3 100644 --- a/deps/npm/man/man5/npmrc.5 +++ b/deps/npm/man/man5/npmrc.5 @@ -1,4 +1,4 @@ -.TH "NPMRC" "5" "October 2024" "NPM@10.9.0" "" +.TH "NPMRC" "5" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBnpmrc\fR - The npm config files .SS "Description" diff --git a/deps/npm/man/man5/package-json.5 b/deps/npm/man/man5/package-json.5 index 49773dda01d706..81fa9a8c95b8dc 100644 --- a/deps/npm/man/man5/package-json.5 +++ b/deps/npm/man/man5/package-json.5 @@ -1,4 +1,4 @@ -.TH "PACKAGE.JSON" "5" "October 2024" "NPM@10.9.0" "" +.TH "PACKAGE.JSON" "5" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBpackage.json\fR - Specifics of npm's package.json handling .SS "Description" diff --git a/deps/npm/man/man5/package-lock-json.5 b/deps/npm/man/man5/package-lock-json.5 index df3dcca1c4fa75..78deecab3757c1 100644 --- a/deps/npm/man/man5/package-lock-json.5 +++ b/deps/npm/man/man5/package-lock-json.5 @@ -1,4 +1,4 @@ -.TH "PACKAGE-LOCK.JSON" "5" "October 2024" "NPM@10.9.0" "" +.TH "PACKAGE-LOCK.JSON" "5" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBpackage-lock.json\fR - A manifestation of the manifest .SS "Description" diff --git a/deps/npm/man/man7/config.7 b/deps/npm/man/man7/config.7 index ee166c9d4ccee9..0ce1f25f38d5d3 100644 --- a/deps/npm/man/man7/config.7 +++ b/deps/npm/man/man7/config.7 @@ -1,4 +1,4 @@ -.TH "CONFIG" "7" "October 2024" "NPM@10.9.0" "" +.TH "CONFIG" "7" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBconfig\fR - More than you probably want to know about npm configuration .SS "Description" diff --git a/deps/npm/man/man7/dependency-selectors.7 b/deps/npm/man/man7/dependency-selectors.7 index 54d80fee4dc58d..e15648cb400af4 100644 --- a/deps/npm/man/man7/dependency-selectors.7 +++ b/deps/npm/man/man7/dependency-selectors.7 @@ -1,4 +1,4 @@ -.TH "QUERYING" "7" "October 2024" "NPM@10.9.0" "" +.TH "QUERYING" "7" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBQuerying\fR - Dependency Selector Syntax & Querying .SS "Description" diff --git a/deps/npm/man/man7/developers.7 b/deps/npm/man/man7/developers.7 index aa907f3f51deda..a5d429c60a5cef 100644 --- a/deps/npm/man/man7/developers.7 +++ b/deps/npm/man/man7/developers.7 @@ -1,4 +1,4 @@ -.TH "DEVELOPERS" "7" "October 2024" "NPM@10.9.0" "" +.TH "DEVELOPERS" "7" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBdevelopers\fR - Developer Guide .SS "Description" diff --git a/deps/npm/man/man7/logging.7 b/deps/npm/man/man7/logging.7 index f36ae6e4703048..200ad578584521 100644 --- a/deps/npm/man/man7/logging.7 +++ b/deps/npm/man/man7/logging.7 @@ -1,4 +1,4 @@ -.TH "LOGGING" "7" "October 2024" "NPM@10.9.0" "" +.TH "LOGGING" "7" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBLogging\fR - Why, What & How We Log .SS "Description" diff --git a/deps/npm/man/man7/orgs.7 b/deps/npm/man/man7/orgs.7 index 2866d7dd9800a5..8f12f6e0434a19 100644 --- a/deps/npm/man/man7/orgs.7 +++ b/deps/npm/man/man7/orgs.7 @@ -1,4 +1,4 @@ -.TH "ORGS" "7" "October 2024" "NPM@10.9.0" "" +.TH "ORGS" "7" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBorgs\fR - Working with Teams & Orgs .SS "Description" diff --git a/deps/npm/man/man7/package-spec.7 
b/deps/npm/man/man7/package-spec.7 index 0d76a5f3017253..f8a0ac73903272 100644 --- a/deps/npm/man/man7/package-spec.7 +++ b/deps/npm/man/man7/package-spec.7 @@ -1,4 +1,4 @@ -.TH "PACKAGE-SPEC" "7" "October 2024" "NPM@10.9.0" "" +.TH "PACKAGE-SPEC" "7" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBpackage-spec\fR - Package name specifier .SS "Description" diff --git a/deps/npm/man/man7/registry.7 b/deps/npm/man/man7/registry.7 index 61ad2702f7b235..1e8134daaf751d 100644 --- a/deps/npm/man/man7/registry.7 +++ b/deps/npm/man/man7/registry.7 @@ -1,4 +1,4 @@ -.TH "REGISTRY" "7" "October 2024" "NPM@10.9.0" "" +.TH "REGISTRY" "7" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBregistry\fR - The JavaScript Package Registry .SS "Description" diff --git a/deps/npm/man/man7/removal.7 b/deps/npm/man/man7/removal.7 index a76b743553c304..74a7703f177e47 100644 --- a/deps/npm/man/man7/removal.7 +++ b/deps/npm/man/man7/removal.7 @@ -1,4 +1,4 @@ -.TH "REMOVAL" "7" "October 2024" "NPM@10.9.0" "" +.TH "REMOVAL" "7" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBremoval\fR - Cleaning the Slate .SS "Synopsis" diff --git a/deps/npm/man/man7/scope.7 b/deps/npm/man/man7/scope.7 index 94c22ccbd9f8fa..ccf0d9e7ee99c4 100644 --- a/deps/npm/man/man7/scope.7 +++ b/deps/npm/man/man7/scope.7 @@ -1,4 +1,4 @@ -.TH "SCOPE" "7" "October 2024" "NPM@10.9.0" "" +.TH "SCOPE" "7" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBscope\fR - Scoped packages .SS "Description" diff --git a/deps/npm/man/man7/scripts.7 b/deps/npm/man/man7/scripts.7 index 925d3181dc9174..1758473fc89dbb 100644 --- a/deps/npm/man/man7/scripts.7 +++ b/deps/npm/man/man7/scripts.7 @@ -1,4 +1,4 @@ -.TH "SCRIPTS" "7" "October 2024" "NPM@10.9.0" "" +.TH "SCRIPTS" "7" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBscripts\fR - How npm handles the "scripts" field .SS "Description" diff --git a/deps/npm/man/man7/workspaces.7 b/deps/npm/man/man7/workspaces.7 index 1812eb73eb7ece..cd7df95b81cf3e 100644 --- a/deps/npm/man/man7/workspaces.7 +++ b/deps/npm/man/man7/workspaces.7 @@ -1,4 +1,4 @@ -.TH "WORKSPACES" "7" "October 2024" "NPM@10.9.0" "" +.TH "WORKSPACES" "7" "December 2024" "NPM@10.9.2" "" .SH "NAME" \fBworkspaces\fR - Working with workspaces .SS "Description" diff --git a/deps/npm/node_modules/@isaacs/cliui/node_modules/ansi-regex/index.js b/deps/npm/node_modules/@isaacs/cliui/node_modules/ansi-regex/index.js index 130a0929b8ce8c..ddfdba39a783a4 100644 --- a/deps/npm/node_modules/@isaacs/cliui/node_modules/ansi-regex/index.js +++ b/deps/npm/node_modules/@isaacs/cliui/node_modules/ansi-regex/index.js @@ -1,7 +1,9 @@ export default function ansiRegex({onlyFirst = false} = {}) { + // Valid string terminator sequences are BEL, ESC\, and 0x9c + const ST = '(?:\\u0007|\\u001B\\u005C|\\u009C)'; const pattern = [ - '[\\u001B\\u009B][[\\]()#;?]*(?:(?:(?:(?:;[-a-zA-Z\\d\\/#&.:=?%@~_]+)*|[a-zA-Z\\d]+(?:;[-a-zA-Z\\d\\/#&.:=?%@~_]*)*)?\\u0007)', - '(?:(?:\\d{1,4}(?:;\\d{0,4})*)?[\\dA-PR-TZcf-ntqry=><~]))' + `[\\u001B\\u009B][[\\]()#;?]*(?:(?:(?:(?:;[-a-zA-Z\\d\\/#&.:=?%@~_]+)*|[a-zA-Z\\d]+(?:;[-a-zA-Z\\d\\/#&.:=?%@~_]*)*)?${ST})`, + '(?:(?:\\d{1,4}(?:;\\d{0,4})*)?[\\dA-PR-TZcf-nq-uy=><~]))', ].join('|'); return new RegExp(pattern, onlyFirst ? 
undefined : 'g'); diff --git a/deps/npm/node_modules/@isaacs/cliui/node_modules/ansi-regex/package.json b/deps/npm/node_modules/@isaacs/cliui/node_modules/ansi-regex/package.json index 7bbb563bf2a70a..49f3f61021512b 100644 --- a/deps/npm/node_modules/@isaacs/cliui/node_modules/ansi-regex/package.json +++ b/deps/npm/node_modules/@isaacs/cliui/node_modules/ansi-regex/package.json @@ -1,6 +1,6 @@ { "name": "ansi-regex", - "version": "6.0.1", + "version": "6.1.0", "description": "Regular expression for matching ANSI escape codes", "license": "MIT", "repository": "chalk/ansi-regex", @@ -12,6 +12,8 @@ }, "type": "module", "exports": "./index.js", + "types": "./index.d.ts", + "sideEffects": false, "engines": { "node": ">=12" }, @@ -51,8 +53,9 @@ "pattern" ], "devDependencies": { + "ansi-escapes": "^5.0.0", "ava": "^3.15.0", - "tsd": "^0.14.0", - "xo": "^0.38.2" + "tsd": "^0.21.0", + "xo": "^0.54.2" } } diff --git a/deps/npm/node_modules/@npmcli/map-workspaces/lib/index.js b/deps/npm/node_modules/@npmcli/map-workspaces/lib/index.js index 1b39d2e3f67e08..f38b8cd33b74f7 100644 --- a/deps/npm/node_modules/@npmcli/map-workspaces/lib/index.js +++ b/deps/npm/node_modules/@npmcli/map-workspaces/lib/index.js @@ -134,7 +134,7 @@ async function mapWorkspaces (opts = {}) { try { pkg = await pkgJson.normalize(path.join(opts.cwd, match)) } catch (err) { - if (err.code === 'ENOENT') { + if (err.code === 'ENOENT' || err.code === 'ENOTDIR') { continue } else { throw err diff --git a/deps/npm/node_modules/@npmcli/map-workspaces/package.json b/deps/npm/node_modules/@npmcli/map-workspaces/package.json index f2667f25e9d5d2..78a515e027b011 100644 --- a/deps/npm/node_modules/@npmcli/map-workspaces/package.json +++ b/deps/npm/node_modules/@npmcli/map-workspaces/package.json @@ -1,6 +1,6 @@ { "name": "@npmcli/map-workspaces", - "version": "4.0.1", + "version": "4.0.2", "main": "lib/index.js", "files": [ "bin/", @@ -44,7 +44,7 @@ }, "devDependencies": { "@npmcli/eslint-config": "^5.0.0", - "@npmcli/template-oss": "4.23.3", + "@npmcli/template-oss": "4.23.4", "tap": "^16.0.1" }, "dependencies": { @@ -55,7 +55,7 @@ }, "templateOSS": { "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. Edits may be overwritten.", - "version": "4.23.3", + "version": "4.23.4", "publish": "true" } } diff --git a/deps/npm/node_modules/node-gyp/node_modules/isexe/LICENSE b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/LICENSE similarity index 90% rename from deps/npm/node_modules/node-gyp/node_modules/isexe/LICENSE rename to deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/LICENSE index c925dbe826b670..a03cd0ed0b338b 100644 --- a/deps/npm/node_modules/node-gyp/node_modules/isexe/LICENSE +++ b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/LICENSE @@ -1,6 +1,6 @@ The ISC License -Copyright (c) 2016-2022 Isaac Z. Schlueter and Contributors +Copyright (c) Isaac Z. 
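Two dependency patches ride along here. The `ansi-regex` 6.0.1 → 6.1.0 diff above teaches the pattern that an escape sequence may end with any of the three valid string terminators (BEL, `ESC\`, or `0x9C`), not just BEL. A usage sketch of my own, not taken from the diff, showing why that matters when stripping OSC 8 hyperlinks:

```js
import ansiRegex from 'ansi-regex' // v6 is ESM-only

// The same terminal hyperlink, terminated two different ways.
const bel = '\u001B]8;;https://example.com\u0007link\u001B]8;;\u0007'
const st = '\u001B]8;;https://example.com\u001B\u005Clink\u001B]8;;\u001B\u005C'

console.log(bel.replace(ansiRegex(), '')) // 'link' on both 6.0.x and 6.1.0
console.log(st.replace(ansiRegex(), ''))  // clean 'link' only on 6.1.0;
                                          // 6.0.x leaves residue behind
```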
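The `@npmcli/map-workspaces` patch in the same region makes a workspace glob that matches a plain file behave like a missing workspace rather than a hard failure. A sketch of the call this protects; the directory layout is hypothetical:

```js
import mapWorkspaces from '@npmcli/map-workspaces'

// Suppose packages/ holds real workspace directories plus a stray file such
// as packages/NOTES.txt. The glob 'packages/*' matches the file too, and
// normalizing packages/NOTES.txt/package.json fails with ENOTDIR rather than
// ENOENT. As of 4.0.2 that match is skipped; 4.0.1 rejected the whole call.
const workspaces = await mapWorkspaces({
  cwd: process.cwd(),
  pkg: { workspaces: ['packages/*'] },
})

console.log(workspaces) // Map { 'pkg-name' => '/abs/path/packages/pkg-name', ... }
```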
Schlueter, Kat Marchán, npm, Inc., and Contributors Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above diff --git a/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/README.md b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/README.md new file mode 100644 index 00000000000000..dbb0051de23a4d --- /dev/null +++ b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/README.md @@ -0,0 +1,283 @@ +# pacote + +Fetches package manifests and tarballs from the npm registry. + +## USAGE + +```js +const pacote = require('pacote') + +// get a package manifest +pacote.manifest('foo@1.x').then(manifest => console.log('got it', manifest)) + +// extract a package into a folder +pacote.extract('github:npm/cli', 'some/path', options) + .then(({from, resolved, integrity}) => { + console.log('extracted!', from, resolved, integrity) + }) + +pacote.tarball('https://server.com/package.tgz').then(data => { + console.log('got ' + data.length + ' bytes of tarball data') +}) +``` + +`pacote` works with any kind of package specifier that npm can install. If +you can pass it to the npm CLI, you can pass it to pacote. (In fact, that's +exactly what the npm CLI does.) + +Anything that you can do with one kind of package, you can do with another. + +Data that isn't relevant (like a packument for a tarball) will be +simulated. + +`prepare` scripts will be run when generating tarballs from `git` and +`directory` locations, to simulate what _would_ be published to the +registry, so that you get a working package instead of just raw source +code that might need to be transpiled. + +## CLI + +This module exports a command line interface that can do most of what is +described below. Run `pacote -h` to learn more. + +``` +Pacote - The JavaScript Package Handler, v10.1.1 + +Usage: + + pacote resolve + Resolve a specifier and output the fully resolved target + Returns integrity and from if '--long' flag is set. + + pacote manifest + Fetch a manifest and print to stdout + + pacote packument + Fetch a full packument and print to stdout + + pacote tarball [] + Fetch a package tarball and save to + If is missing or '-', the tarball will be streamed to stdout. + + pacote extract + Extract a package to the destination folder. + +Configuration values all match the names of configs passed to npm, or +options passed to Pacote. Additional flags for this executable: + + --long Print an object from 'resolve', including integrity and spec. + --json Print result objects as JSON rather than node's default. + (This is the default if stdout is not a TTY.) + --help -h Print this helpful text. + +For example '--cache=/path/to/folder' will use that folder as the cache. +``` + +## API + +The `spec` refers to any kind of package specifier that npm can install. +If you can pass it to the npm CLI, you can pass it to pacote. (In fact, +that's exactly what the npm CLI does.) + +See below for valid `opts` values. + +* `pacote.resolve(spec, opts)` Resolve a specifier like `foo@latest` or + `github:user/project` all the way to a tarball url, tarball file, or git + repo with commit hash. + +* `pacote.extract(spec, dest, opts)` Extract a package's tarball into a + destination folder. Returns a promise that resolves to the + `{from,resolved,integrity}` of the extracted package. 
+ +* `pacote.manifest(spec, opts)` Fetch (or simulate) a package's manifest + (basically, the `package.json` file, plus a bit of metadata). + See below for more on manifests and packuments. Returns a Promise that + resolves to the manifest object. + +* `pacote.packument(spec, opts)` Fetch (or simulate) a package's packument + (basically, the top-level package document listing all the manifests that + the registry returns). See below for more on manifests and packuments. + Returns a Promise that resolves to the packument object. + +* `pacote.tarball(spec, opts)` Get a package tarball data as a buffer in + memory. Returns a Promise that resolves to the tarball data Buffer, with + `from`, `resolved`, and `integrity` fields attached. + +* `pacote.tarball.file(spec, dest, opts)` Save a package tarball data to + a file on disk. Returns a Promise that resolves to + `{from,integrity,resolved}` of the fetched tarball. + +* `pacote.tarball.stream(spec, streamHandler, opts)` Fetch a tarball and + make the stream available to the `streamHandler` function. + + This is mostly an internal function, but it is exposed because it does + provide some functionality that may be difficult to achieve otherwise. + + The `streamHandler` function MUST return a Promise that resolves when + the stream (and all associated work) is ended, or rejects if the stream + has an error. + + The `streamHandler` function MAY be called multiple times, as Pacote + retries requests in some scenarios, such as cache corruption or + retriable network failures. + +### Options + +Options are passed to +[`npm-registry-fetch`](http://npm.im/npm-registry-fetch) and +[`cacache`](http://npm.im/cacache), so in addition to these, anything for +those modules can be given to pacote as well. + +Options object is cloned, and mutated along the way to add integrity, +resolved, and other properties, as they are determined. + +* `cache` Where to store cache entries and temp files. Passed to + [`cacache`](http://npm.im/cacache). Defaults to the same cache directory + that npm will use by default, based on platform and environment. +* `where` Base folder for resolving relative `file:` dependencies. +* `resolved` Shortcut for looking up resolved values. Should be specified + if known. +* `integrity` Expected integrity of fetched package tarball. If specified, + tarballs with mismatched integrity values will raise an `EINTEGRITY` + error. +* `umask` Permission mode mask for extracted files and directories. + Defaults to `0o22`. See "Extracted File Modes" below. +* `fmode` Minimum permission mode for extracted files. Defaults to + `0o666`. See "Extracted File Modes" below. +* `dmode` Minimum permission mode for extracted directories. Defaults to + `0o777`. See "Extracted File Modes" below. +* `preferOnline` Prefer to revalidate cache entries, even when it would not + be strictly necessary. Default `false`. +* `before` When picking a manifest from a packument, only consider + packages published before the specified date. Default `null`. +* `defaultTag` The default `dist-tag` to use when choosing a manifest from a + packument. Defaults to `latest`. +* `registry` The npm registry to use by default. Defaults to + `https://registry.npmjs.org/`. +* `fullMetadata` Fetch the full metadata from the registry for packuments, + including information not strictly required for installation (author, + description, etc.) Defaults to `true` when `before` is set, since the + version publish time is part of the extended packument metadata. 
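Taken together, the methods and options this README describes compose along these lines; the spec, cache path, and cutoff date below are illustrative, not prescriptive:

```js
const pacote = require('pacote')

// One shared Map lets repeated calls in a single command reuse packument
// responses instead of re-fetching them (the `packumentCache` option).
const packumentCache = new Map()

const opts = {
  cache: '/tmp/pacote-demo-cache',  // cacache directory for tarballs/metadata
  before: new Date('2024-12-01'),   // only consider versions published earlier
  packumentCache,
}

pacote.resolve('abbrev@1.x', opts)
  .then(resolved => console.log(resolved)) // fully pinned tarball URL
```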
+* `fullReadJson` Use the slower `read-package-json` package insted of + `read-package-json-fast` in order to include extra fields like "readme" in + the manifest. Defaults to `false`. +* `packumentCache` For registry packuments only, you may provide a `Map` + object which will be used to cache packument requests between pacote + calls. This allows you to easily avoid hitting the registry multiple + times (even just to validate the cache) for a given packument, since it + is unlikely to change in the span of a single command. +* `verifySignatures` A boolean that will make pacote verify the + integrity signature of a manifest, if present. There must be a + configured `_keys` entry in the config that is scoped to the + registry the manifest is being fetched from. +* `verifyAttestations` A boolean that will make pacote verify Sigstore + attestations, if present. There must be a configured `_keys` entry in the + config that is scoped to the registry the manifest is being fetched from. +* `tufCache` Where to store metadata/target files when retrieving the package + attestation key material via TUF. Defaults to the same cache directory that + npm will use by default, based on platform and environment. + +### Advanced API + +Each different type of fetcher is exposed for more advanced usage such as +using helper methods from this classes: + +* `DirFetcher` +* `FileFetcher` +* `GitFetcher` +* `RegistryFetcher` +* `RemoteFetcher` + +## Extracted File Modes + +Files are extracted with a mode matching the following formula: + +``` +( (tarball entry mode value) | (minimum mode option) ) ~ (umask) +``` + +This is in order to prevent unreadable files or unlistable directories from +cluttering a project's `node_modules` folder, even if the package tarball +specifies that the file should be inaccessible. + +It also prevents files from being group- or world-writable without explicit +opt-in by the user, because all file and directory modes are masked against +the `umask` value. + +So, a file which is `0o771` in the tarball, using the default `fmode` of +`0o666` and `umask` of `0o22`, will result in a file mode of `0o755`: + +``` +(0o771 | 0o666) => 0o777 +(0o777 ~ 0o22) => 0o755 +``` + +In almost every case, the defaults are appropriate. To respect exactly +what is in the package tarball (even if this makes an unusable system), set +both `dmode` and `fmode` options to `0`. Otherwise, the `umask` config +should be used in most cases where file mode modifications are required, +and this functions more or less the same as the `umask` value in most Unix +systems. + +## Extracted File Ownership + +When running as `root` on Unix systems, all extracted files and folders +will have their owning `uid` and `gid` values set to match the ownership +of the containing folder. + +This prevents `root`-owned files showing up in a project's `node_modules` +folder when a user runs `sudo npm install`. + +## Manifests + +A `manifest` is similar to a `package.json` file. However, it has a few +pieces of extra metadata, and sometimes lacks metadata that is inessential +to package installation. + +In addition to the common `package.json` fields, manifests include: + +* `manifest._resolved` The tarball url or file path where the package + artifact can be found. +* `manifest._from` A normalized form of the spec passed in as an argument. +* `manifest._integrity` The integrity value for the package artifact. +* `manifest._id` The canonical spec of this package version: name@version. 
+* `manifest.dist` Registry manifests (those included in a packument) have a + `dist` object. Only `tarball` is required, though at least one of + `shasum` or `integrity` is almost always present. + + * `tarball` The url to the associated package artifact. (Copied by + Pacote to `manifest._resolved`.) + * `integrity` The integrity SRI string for the artifact. This may not + be present for older packages on the npm registry. (Copied by Pacote + to `manifest._integrity`.) + * `shasum` Legacy integrity value. Hexadecimal-encoded sha1 hash. + (Converted to an SRI string and copied by Pacote to + `manifest._integrity` when `dist.integrity` is not present.) + * `fileCount` Number of files in the tarball. + * `unpackedSize` Size on disk of the package when unpacked. + * `signatures` Signatures of the shasum. Includes the keyid that + correlates to a [`key from the npm + registry`](https://registry.npmjs.org/-/npm/v1/keys) + +## Packuments + +A packument is the top-level package document that lists the set of +manifests for available versions for a package. + +When a packument is fetched with `accept: +application/vnd.npm.install-v1+json` in the HTTP headers, only the most +minimum necessary metadata is returned. Additional metadata is returned +when fetched with only `accept: application/json`. + +For Pacote's purposes, the following fields are relevant: + +* `versions` An object where each key is a version, and each value is the + manifest for that version. +* `dist-tags` An object mapping dist-tags to version numbers. This is how + `foo@latest` gets turned into `foo@1.2.3`. +* `time` In the full packument, an object mapping version numbers to + publication times, for the `opts.before` functionality. + +Pacote adds the following field, regardless of the accept header: + +* `_contentLength` The size of the packument. diff --git a/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/bin/index.js b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/bin/index.js new file mode 100755 index 00000000000000..f35b62ca71a537 --- /dev/null +++ b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/bin/index.js @@ -0,0 +1,158 @@ +#!/usr/bin/env node + +const run = conf => { + const pacote = require('../') + switch (conf._[0]) { + case 'resolve': + case 'manifest': + case 'packument': + if (conf._[0] === 'resolve' && conf.long) { + return pacote.manifest(conf._[1], conf).then(mani => ({ + resolved: mani._resolved, + integrity: mani._integrity, + from: mani._from, + })) + } + return pacote[conf._[0]](conf._[1], conf) + + case 'tarball': + if (!conf._[2] || conf._[2] === '-') { + return pacote.tarball.stream(conf._[1], stream => { + stream.pipe( + conf.testStdout || + /* istanbul ignore next */ + process.stdout + ) + // make sure it resolves something falsey + return stream.promise().then(() => { + return false + }) + }, conf) + } else { + return pacote.tarball.file(conf._[1], conf._[2], conf) + } + + case 'extract': + return pacote.extract(conf._[1], conf._[2], conf) + + default: /* istanbul ignore next */ { + throw new Error(`bad command: ${conf._[0]}`) + } + } +} + +const version = require('../package.json').version +const usage = () => +`Pacote - The JavaScript Package Handler, v${version} + +Usage: + + pacote resolve + Resolve a specifier and output the fully resolved target + Returns integrity and from if '--long' flag is set. 
+ + pacote manifest + Fetch a manifest and print to stdout + + pacote packument + Fetch a full packument and print to stdout + + pacote tarball [] + Fetch a package tarball and save to + If is missing or '-', the tarball will be streamed to stdout. + + pacote extract + Extract a package to the destination folder. + +Configuration values all match the names of configs passed to npm, or +options passed to Pacote. Additional flags for this executable: + + --long Print an object from 'resolve', including integrity and spec. + --json Print result objects as JSON rather than node's default. + (This is the default if stdout is not a TTY.) + --help -h Print this helpful text. + +For example '--cache=/path/to/folder' will use that folder as the cache. +` + +const shouldJSON = (conf, result) => + conf.json || + !process.stdout.isTTY && + conf.json === undefined && + result && + typeof result === 'object' + +const pretty = (conf, result) => + shouldJSON(conf, result) ? JSON.stringify(result, 0, 2) : result + +let addedLogListener = false +const main = args => { + const conf = parse(args) + if (conf.help || conf.h) { + return console.log(usage()) + } + + if (!addedLogListener) { + process.on('log', console.error) + addedLogListener = true + } + + try { + return run(conf) + .then(result => result && console.log(pretty(conf, result))) + .catch(er => { + console.error(er) + process.exit(1) + }) + } catch (er) { + console.error(er.message) + console.error(usage()) + } +} + +const parseArg = arg => { + const split = arg.slice(2).split('=') + const k = split.shift() + const v = split.join('=') + const no = /^no-/.test(k) && !v + const key = (no ? k.slice(3) : k) + .replace(/^tag$/, 'defaultTag') + .replace(/-([a-z])/g, (_, c) => c.toUpperCase()) + const value = v ? v.replace(/^~/, process.env.HOME) : !no + return { key, value } +} + +const parse = args => { + const conf = { + _: [], + cache: process.env.HOME + '/.npm/_cacache', + } + let dashdash = false + args.forEach(arg => { + if (dashdash) { + conf._.push(arg) + } else if (arg === '--') { + dashdash = true + } else if (arg === '-h') { + conf.help = true + } else if (/^--/.test(arg)) { + const { key, value } = parseArg(arg) + conf[key] = value + } else { + conf._.push(arg) + } + }) + return conf +} + +if (module === require.main) { + main(process.argv.slice(2)) +} else { + module.exports = { + main, + run, + usage, + parseArg, + parse, + } +} diff --git a/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/dir.js b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/dir.js new file mode 100644 index 00000000000000..04846eb8a6e221 --- /dev/null +++ b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/dir.js @@ -0,0 +1,105 @@ +const { resolve } = require('node:path') +const packlist = require('npm-packlist') +const runScript = require('@npmcli/run-script') +const tar = require('tar') +const { Minipass } = require('minipass') +const Fetcher = require('./fetcher.js') +const FileFetcher = require('./file.js') +const _ = require('./util/protected.js') +const tarCreateOptions = require('./util/tar-create-options.js') + +class DirFetcher extends Fetcher { + constructor (spec, opts) { + super(spec, opts) + // just the fully resolved filename + this.resolved = this.spec.fetchSpec + + this.tree = opts.tree || null + this.Arborist = opts.Arborist || null + } + + // exposes tarCreateOptions as public API + static tarCreateOptions (manifest) { + return tarCreateOptions(manifest) + } + + get types () { 
+ return ['directory'] + } + + #prepareDir () { + return this.manifest().then(mani => { + if (!mani.scripts || !mani.scripts.prepare) { + return + } + if (this.opts.ignoreScripts) { + return + } + + // we *only* run prepare. + // pre/post-pack is run by the npm CLI for publish and pack, + // but this function is *also* run when installing git deps + const stdio = this.opts.foregroundScripts ? 'inherit' : 'pipe' + + return runScript({ + // this || undefined is because runScript will be unhappy with the default null value + scriptShell: this.opts.scriptShell || undefined, + pkg: mani, + event: 'prepare', + path: this.resolved, + stdio, + env: { + npm_package_resolved: this.resolved, + npm_package_integrity: this.integrity, + npm_package_json: resolve(this.resolved, 'package.json'), + }, + }) + }) + } + + [_.tarballFromResolved] () { + if (!this.tree && !this.Arborist) { + throw new Error('DirFetcher requires either a tree or an Arborist constructor to pack') + } + + const stream = new Minipass() + stream.resolved = this.resolved + stream.integrity = this.integrity + + const { prefix, workspaces } = this.opts + + // run the prepare script, get the list of files, and tar it up + // pipe to the stream, and proxy errors the chain. + this.#prepareDir() + .then(async () => { + if (!this.tree) { + const arb = new this.Arborist({ path: this.resolved }) + this.tree = await arb.loadActual() + } + return packlist(this.tree, { path: this.resolved, prefix, workspaces }) + }) + .then(files => tar.c(tarCreateOptions(this.package), files) + .on('error', er => stream.emit('error', er)).pipe(stream)) + .catch(er => stream.emit('error', er)) + return stream + } + + manifest () { + if (this.package) { + return Promise.resolve(this.package) + } + + return this[_.readPackageJson](this.resolved) + .then(mani => this.package = { + ...mani, + _integrity: this.integrity && String(this.integrity), + _resolved: this.resolved, + _from: this.from, + }) + } + + packument () { + return FileFetcher.prototype.packument.apply(this) + } +} +module.exports = DirFetcher diff --git a/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/fetcher.js b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/fetcher.js new file mode 100644 index 00000000000000..f2ac97619d3af1 --- /dev/null +++ b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/fetcher.js @@ -0,0 +1,497 @@ +// This is the base class that the other fetcher types in lib +// all descend from. +// It handles the unpacking and retry logic that is shared among +// all of the other Fetcher types. 
+ +const { basename, dirname } = require('node:path') +const { rm, mkdir } = require('node:fs/promises') +const PackageJson = require('@npmcli/package-json') +const cacache = require('cacache') +const fsm = require('fs-minipass') +const getContents = require('@npmcli/installed-package-contents') +const npa = require('npm-package-arg') +const retry = require('promise-retry') +const ssri = require('ssri') +const tar = require('tar') +const { Minipass } = require('minipass') +const { log } = require('proc-log') +const _ = require('./util/protected.js') +const cacheDir = require('./util/cache-dir.js') +const isPackageBin = require('./util/is-package-bin.js') +const removeTrailingSlashes = require('./util/trailing-slashes.js') + +// Pacote is only concerned with the package.json contents +const packageJsonPrepare = (p) => PackageJson.prepare(p).then(pkg => pkg.content) +const packageJsonNormalize = (p) => PackageJson.normalize(p).then(pkg => pkg.content) + +class FetcherBase { + constructor (spec, opts) { + if (!opts || typeof opts !== 'object') { + throw new TypeError('options object is required') + } + this.spec = npa(spec, opts.where) + + this.allowGitIgnore = !!opts.allowGitIgnore + + // a bit redundant because presumably the caller already knows this, + // but it makes it easier to not have to keep track of the requested + // spec when we're dispatching thousands of these at once, and normalizing + // is nice. saveSpec is preferred if set, because it turns stuff like + // x/y#committish into github:x/y#committish. use name@rawSpec for + // registry deps so that we turn xyz and xyz@ -> xyz@ + this.from = this.spec.registry + ? `${this.spec.name}@${this.spec.rawSpec}` : this.spec.saveSpec + + this.#assertType() + // clone the opts object so that others aren't upset when we mutate it + // by adding/modifying the integrity value. + this.opts = { ...opts } + + this.cache = opts.cache || cacheDir().cacache + this.tufCache = opts.tufCache || cacheDir().tufcache + this.resolved = opts.resolved || null + + // default to caching/verifying with sha512, that's what we usually have + // need to change this default, or start overriding it, when sha512 + // is no longer strong enough. + this.defaultIntegrityAlgorithm = opts.defaultIntegrityAlgorithm || 'sha512' + + if (typeof opts.integrity === 'string') { + this.opts.integrity = ssri.parse(opts.integrity) + } + + this.package = null + this.type = this.constructor.name + this.fmode = opts.fmode || 0o666 + this.dmode = opts.dmode || 0o777 + // we don't need a default umask, because we don't chmod files coming + // out of package tarballs. they're forced to have a mode that is + // valid, regardless of what's in the tarball entry, and then we let + // the process's umask setting do its job. but if configured, we do + // respect it. + this.umask = opts.umask || 0 + + this.preferOnline = !!opts.preferOnline + this.preferOffline = !!opts.preferOffline + this.offline = !!opts.offline + + this.before = opts.before + this.fullMetadata = this.before ? true : !!opts.fullMetadata + this.fullReadJson = !!opts.fullReadJson + this[_.readPackageJson] = this.fullReadJson + ? packageJsonPrepare + : packageJsonNormalize + + // rrh is a registry hostname or 'never' or 'always' + // defaults to registry.npmjs.org + this.replaceRegistryHost = (!opts.replaceRegistryHost || opts.replaceRegistryHost === 'npmjs') ? 
+ 'registry.npmjs.org' : opts.replaceRegistryHost + + this.defaultTag = opts.defaultTag || 'latest' + this.registry = removeTrailingSlashes(opts.registry || 'https://registry.npmjs.org') + + // command to run 'prepare' scripts on directories and git dirs + // To use pacote with yarn, for example, set npmBin to 'yarn' + // and npmCliConfig with yarn's equivalents. + this.npmBin = opts.npmBin || 'npm' + + // command to install deps for preparing + this.npmInstallCmd = opts.npmInstallCmd || ['install', '--force'] + + // XXX fill more of this in based on what we know from this.opts + // we explicitly DO NOT fill in --tag, though, since we are often + // going to be packing in the context of a publish, which may set + // a dist-tag, but certainly wants to keep defaulting to latest. + this.npmCliConfig = opts.npmCliConfig || [ + `--cache=${dirname(this.cache)}`, + `--prefer-offline=${!!this.preferOffline}`, + `--prefer-online=${!!this.preferOnline}`, + `--offline=${!!this.offline}`, + ...(this.before ? [`--before=${this.before.toISOString()}`] : []), + '--no-progress', + '--no-save', + '--no-audit', + // override any omit settings from the environment + '--include=dev', + '--include=peer', + '--include=optional', + // we need the actual things, not just the lockfile + '--no-package-lock-only', + '--no-dry-run', + ] + } + + get integrity () { + return this.opts.integrity || null + } + + set integrity (i) { + if (!i) { + return + } + + i = ssri.parse(i) + const current = this.opts.integrity + + // do not ever update an existing hash value, but do + // merge in NEW algos and hashes that we don't already have. + if (current) { + current.merge(i) + } else { + this.opts.integrity = i + } + } + + get notImplementedError () { + return new Error('not implemented in this fetcher type: ' + this.type) + } + + // override in child classes + // Returns a Promise that resolves to this.resolved string value + resolve () { + return this.resolved ? Promise.resolve(this.resolved) + : Promise.reject(this.notImplementedError) + } + + packument () { + return Promise.reject(this.notImplementedError) + } + + // override in child class + // returns a manifest containing: + // - name + // - version + // - _resolved + // - _integrity + // - plus whatever else was in there (corgi, full metadata, or pj file) + manifest () { + return Promise.reject(this.notImplementedError) + } + + // private, should be overridden. + // Note that they should *not* calculate or check integrity or cache, + // but *just* return the raw tarball data stream. 
+ [_.tarballFromResolved] () { + throw this.notImplementedError + } + + // public, should not be overridden + tarball () { + return this.tarballStream(stream => stream.concat().then(data => { + data.integrity = this.integrity && String(this.integrity) + data.resolved = this.resolved + data.from = this.from + return data + })) + } + + // private + // Note: cacache will raise a EINTEGRITY error if the integrity doesn't match + #tarballFromCache () { + const startTime = Date.now() + const stream = cacache.get.stream.byDigest(this.cache, this.integrity, this.opts) + const elapsedTime = Date.now() - startTime + // cache is good, so log it as a hit in particular since there was no fetch logged + log.http( + 'cache', + `${this.spec} ${elapsedTime}ms (cache hit)` + ) + return stream + } + + get [_.cacheFetches] () { + return true + } + + #istream (stream) { + // if not caching this, just return it + if (!this.opts.cache || !this[_.cacheFetches]) { + // instead of creating a new integrity stream, we only piggyback on the + // provided stream's events + if (stream.hasIntegrityEmitter) { + stream.on('integrity', i => this.integrity = i) + return stream + } + + const istream = ssri.integrityStream(this.opts) + istream.on('integrity', i => this.integrity = i) + stream.on('error', err => istream.emit('error', err)) + return stream.pipe(istream) + } + + // we have to return a stream that gets ALL the data, and proxies errors, + // but then pipe from the original tarball stream into the cache as well. + // To do this without losing any data, and since the cacache put stream + // is not a passthrough, we have to pipe from the original stream into + // the cache AFTER we pipe into the middleStream. Since the cache stream + // has an asynchronous flush to write its contents to disk, we need to + // defer the middleStream end until the cache stream ends. + const middleStream = new Minipass() + stream.on('error', err => middleStream.emit('error', err)) + stream.pipe(middleStream, { end: false }) + const cstream = cacache.put.stream( + this.opts.cache, + `pacote:tarball:${this.from}`, + this.opts + ) + cstream.on('integrity', i => this.integrity = i) + cstream.on('error', err => stream.emit('error', err)) + stream.pipe(cstream) + + // eslint-disable-next-line promise/catch-or-return + cstream.promise().catch(() => {}).then(() => middleStream.end()) + return middleStream + } + + pickIntegrityAlgorithm () { + return this.integrity ? this.integrity.pickAlgorithm(this.opts) + : this.defaultIntegrityAlgorithm + } + + // TODO: check error class, once those are rolled out to our deps + isDataCorruptionError (er) { + return er.code === 'EINTEGRITY' || er.code === 'Z_DATA_ERROR' + } + + // override the types getter + get types () { + return false + } + + #assertType () { + if (this.types && !this.types.includes(this.spec.type)) { + throw new TypeError(`Wrong spec type (${ + this.spec.type + }) for ${ + this.constructor.name + }. Supported types: ${this.types.join(', ')}`) + } + } + + // We allow ENOENTs from cacache, but not anywhere else. + // An ENOENT trying to read a tgz file, for example, is Right Out. + isRetriableError (er) { + // TODO: check error class, once those are rolled out to our deps + return this.isDataCorruptionError(er) || + er.code === 'ENOENT' || + er.code === 'EISDIR' + } + + // Mostly internal, but has some uses + // Pass in a function which returns a promise + // Function will be called 1 or more times with streams that may fail. 
+ // Retries: + // Function MUST handle errors on the stream by rejecting the promise, + // so that retry logic can pick it up and either retry or fail whatever + // promise it was making (ie, failing extraction, etc.) + // + // The return value of this method is a Promise that resolves the same + // as whatever the streamHandler resolves to. + // + // This should never be overridden by child classes, but it is public. + tarballStream (streamHandler) { + // Only short-circuit via cache if we have everything else we'll need, + // and the user has not expressed a preference for checking online. + + const fromCache = ( + !this.preferOnline && + this.integrity && + this.resolved + ) ? streamHandler(this.#tarballFromCache()).catch(er => { + if (this.isDataCorruptionError(er)) { + log.warn('tarball', `cached data for ${ + this.spec + } (${this.integrity}) seems to be corrupted. Refreshing cache.`) + return this.cleanupCached().then(() => { + throw er + }) + } else { + throw er + } + }) : null + + const fromResolved = er => { + if (er) { + if (!this.isRetriableError(er)) { + throw er + } + log.silly('tarball', `no local data for ${ + this.spec + }. Extracting by manifest.`) + } + return this.resolve().then(() => retry(tryAgain => + streamHandler(this.#istream(this[_.tarballFromResolved]())) + .catch(streamErr => { + // Most likely data integrity. A cache ENOENT error is unlikely + // here, since we're definitely not reading from the cache, but it + // IS possible that the fetch subsystem accessed the cache, and the + // entry got blown away or something. Try one more time to be sure. + if (this.isRetriableError(streamErr)) { + log.warn('tarball', `tarball data for ${ + this.spec + } (${this.integrity}) seems to be corrupted. Trying again.`) + return this.cleanupCached().then(() => tryAgain(streamErr)) + } + throw streamErr + }), { retries: 1, minTimeout: 0, maxTimeout: 0 })) + } + + return fromCache ? fromCache.catch(fromResolved) : fromResolved() + } + + cleanupCached () { + return cacache.rm.content(this.cache, this.integrity, this.opts) + } + + #empty (path) { + return getContents({ path, depth: 1 }).then(contents => Promise.all( + contents.map(entry => rm(entry, { recursive: true, force: true })))) + } + + async #mkdir (dest) { + await this.#empty(dest) + return await mkdir(dest, { recursive: true }) + } + + // extraction is always the same. the only difference is where + // the tarball comes from. 
+ async extract (dest) { + await this.#mkdir(dest) + return this.tarballStream((tarball) => this.#extract(dest, tarball)) + } + + #toFile (dest) { + return this.tarballStream(str => new Promise((res, rej) => { + const writer = new fsm.WriteStream(dest) + str.on('error', er => writer.emit('error', er)) + writer.on('error', er => rej(er)) + writer.on('close', () => res({ + integrity: this.integrity && String(this.integrity), + resolved: this.resolved, + from: this.from, + })) + str.pipe(writer) + })) + } + + // don't use this.#mkdir because we don't want to rimraf anything + async tarballFile (dest) { + const dir = dirname(dest) + await mkdir(dir, { recursive: true }) + return this.#toFile(dest) + } + + #extract (dest, tarball) { + const extractor = tar.x(this.#tarxOptions({ cwd: dest })) + const p = new Promise((resolve, reject) => { + extractor.on('end', () => { + resolve({ + resolved: this.resolved, + integrity: this.integrity && String(this.integrity), + from: this.from, + }) + }) + + extractor.on('error', er => { + log.warn('tar', er.message) + log.silly('tar', er) + reject(er) + }) + + tarball.on('error', er => reject(er)) + }) + + tarball.pipe(extractor) + return p + } + + // always ensure that entries are at least as permissive as our configured + // dmode/fmode, but never more permissive than the umask allows. + #entryMode (path, mode, type) { + const m = /Directory|GNUDumpDir/.test(type) ? this.dmode + : /File$/.test(type) ? this.fmode + : /* istanbul ignore next - should never happen in a pkg */ 0 + + // make sure package bins are executable + const exe = isPackageBin(this.package, path) ? 0o111 : 0 + // always ensure that files are read/writable by the owner + return ((mode | m) & ~this.umask) | exe | 0o600 + } + + #tarxOptions ({ cwd }) { + const sawIgnores = new Set() + return { + cwd, + noChmod: true, + noMtime: true, + filter: (name, entry) => { + if (/Link$/.test(entry.type)) { + return false + } + entry.mode = this.#entryMode(entry.path, entry.mode, entry.type) + // this replicates the npm pack behavior where .gitignore files + // are treated like .npmignore files, but only if a .npmignore + // file is not present. 
+ if (/File$/.test(entry.type)) { + const base = basename(entry.path) + if (base === '.npmignore') { + sawIgnores.add(entry.path) + } else if (base === '.gitignore' && !this.allowGitIgnore) { + // rename, but only if there's not already a .npmignore + const ni = entry.path.replace(/\.gitignore$/, '.npmignore') + if (sawIgnores.has(ni)) { + return false + } + entry.path = ni + } + return true + } + }, + strip: 1, + onwarn: /* istanbul ignore next - we can trust that tar logs */ + (code, msg, data) => { + log.warn('tar', code, msg) + log.silly('tar', code, msg, data) + }, + umask: this.umask, + // always ignore ownership info from tarball metadata + preserveOwner: false, + } + } +} + +module.exports = FetcherBase + +// Child classes +const GitFetcher = require('./git.js') +const RegistryFetcher = require('./registry.js') +const FileFetcher = require('./file.js') +const DirFetcher = require('./dir.js') +const RemoteFetcher = require('./remote.js') + +// Get an appropriate fetcher object from a spec and options +FetcherBase.get = (rawSpec, opts = {}) => { + const spec = npa(rawSpec, opts.where) + switch (spec.type) { + case 'git': + return new GitFetcher(spec, opts) + + case 'remote': + return new RemoteFetcher(spec, opts) + + case 'version': + case 'range': + case 'tag': + case 'alias': + return new RegistryFetcher(spec.subSpec || spec, opts) + + case 'file': + return new FileFetcher(spec, opts) + + case 'directory': + return new DirFetcher(spec, opts) + + default: + throw new TypeError('Unknown spec type: ' + spec.type) + } +} diff --git a/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/file.js b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/file.js new file mode 100644 index 00000000000000..2021325085e4f0 --- /dev/null +++ b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/file.js @@ -0,0 +1,94 @@ +const { resolve } = require('node:path') +const { stat, chmod } = require('node:fs/promises') +const cacache = require('cacache') +const fsm = require('fs-minipass') +const Fetcher = require('./fetcher.js') +const _ = require('./util/protected.js') + +class FileFetcher extends Fetcher { + constructor (spec, opts) { + super(spec, opts) + // just the fully resolved filename + this.resolved = this.spec.fetchSpec + } + + get types () { + return ['file'] + } + + manifest () { + if (this.package) { + return Promise.resolve(this.package) + } + + // have to unpack the tarball for this. + return cacache.tmp.withTmp(this.cache, this.opts, dir => + this.extract(dir) + .then(() => this[_.readPackageJson](dir)) + .then(mani => this.package = { + ...mani, + _integrity: this.integrity && String(this.integrity), + _resolved: this.resolved, + _from: this.from, + })) + } + + #exeBins (pkg, dest) { + if (!pkg.bin) { + return Promise.resolve() + } + + return Promise.all(Object.keys(pkg.bin).map(async k => { + const script = resolve(dest, pkg.bin[k]) + // Best effort. Ignore errors here, the only result is that + // a bin script is not executable. But if it's missing or + // something, we just leave it for a later stage to trip over + // when we can provide a more useful contextual error. + try { + const st = await stat(script) + const mode = st.mode | 0o111 + if (mode === st.mode) { + return + } + await chmod(script, mode) + } catch { + // Ignore errors here + } + })) + } + + extract (dest) { + // if we've already loaded the manifest, then the super got it. + // but if not, read the unpacked manifest and chmod properly. 
+ return super.extract(dest) + .then(result => this.package ? result + : this[_.readPackageJson](dest).then(pkg => + this.#exeBins(pkg, dest)).then(() => result)) + } + + [_.tarballFromResolved] () { + // create a read stream and return it + return new fsm.ReadStream(this.resolved) + } + + packument () { + // simulate based on manifest + return this.manifest().then(mani => ({ + name: mani.name, + 'dist-tags': { + [this.defaultTag]: mani.version, + }, + versions: { + [mani.version]: { + ...mani, + dist: { + tarball: `file:${this.resolved}`, + integrity: this.integrity && String(this.integrity), + }, + }, + }, + })) + } +} + +module.exports = FileFetcher diff --git a/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/git.js b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/git.js new file mode 100644 index 00000000000000..077193a86f026f --- /dev/null +++ b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/git.js @@ -0,0 +1,317 @@ +const cacache = require('cacache') +const git = require('@npmcli/git') +const npa = require('npm-package-arg') +const pickManifest = require('npm-pick-manifest') +const { Minipass } = require('minipass') +const { log } = require('proc-log') +const DirFetcher = require('./dir.js') +const Fetcher = require('./fetcher.js') +const FileFetcher = require('./file.js') +const RemoteFetcher = require('./remote.js') +const _ = require('./util/protected.js') +const addGitSha = require('./util/add-git-sha.js') +const npm = require('./util/npm.js') + +const hashre = /^[a-f0-9]{40}$/ + +// get the repository url. +// prefer https if there's auth, since ssh will drop that. +// otherwise, prefer ssh if available (more secure). +// We have to add the git+ back because npa suppresses it. +const repoUrl = (h, opts) => + h.sshurl && !(h.https && h.auth) && addGitPlus(h.sshurl(opts)) || + h.https && addGitPlus(h.https(opts)) + +// add git+ to the url, but only one time. +const addGitPlus = url => url && `git+${url}`.replace(/^(git\+)+/, 'git+') + +class GitFetcher extends Fetcher { + constructor (spec, opts) { + super(spec, opts) + + // we never want to compare integrity for git dependencies: npm/rfcs#525 + if (this.opts.integrity) { + delete this.opts.integrity + log.warn(`skipping integrity check for git dependency ${this.spec.fetchSpec}`) + } + + this.resolvedRef = null + if (this.spec.hosted) { + this.from = this.spec.hosted.shortcut({ noCommittish: false }) + } + + // shortcut: avoid full clone when we can go straight to the tgz + // if we have the full sha and it's a hosted git platform + if (this.spec.gitCommittish && hashre.test(this.spec.gitCommittish)) { + this.resolvedSha = this.spec.gitCommittish + // use hosted.tarball() when we shell to RemoteFetcher later + this.resolved = this.spec.hosted + ? repoUrl(this.spec.hosted, { noCommittish: false }) + : this.spec.rawSpec + } else { + this.resolvedSha = '' + } + + this.Arborist = opts.Arborist || null + } + + // just exposed to make it easier to test all the combinations + static repoUrl (hosted, opts) { + return repoUrl(hosted, opts) + } + + get types () { + return ['git'] + } + + resolve () { + // likely a hosted git repo with a sha, so get the tarball url + // but in general, no reason to resolve() more than necessary! + if (this.resolved) { + return super.resolve() + } + + // fetch the git repo and then look at the current hash + const h = this.spec.hosted + // try to use ssh, fall back to git. + return h + ? 
this.#resolvedFromHosted(h) + : this.#resolvedFromRepo(this.spec.fetchSpec) + } + + // first try https, since that's faster and passphrase-less for + // public repos, and supports private repos when auth is provided. + // Fall back to SSH to support private repos + // NB: we always store the https url in resolved field if auth + // is present, otherwise ssh if the hosted type provides it + #resolvedFromHosted (hosted) { + return this.#resolvedFromRepo(hosted.https && hosted.https()).catch(er => { + // Throw early since we know pathspec errors will fail again if retried + if (er instanceof git.errors.GitPathspecError) { + throw er + } + const ssh = hosted.sshurl && hosted.sshurl() + // no fallthrough if we can't fall through or have https auth + if (!ssh || hosted.auth) { + throw er + } + return this.#resolvedFromRepo(ssh) + }) + } + + #resolvedFromRepo (gitRemote) { + // XXX make this a custom error class + if (!gitRemote) { + return Promise.reject(new Error(`No git url for ${this.spec}`)) + } + const gitRange = this.spec.gitRange + const name = this.spec.name + return git.revs(gitRemote, this.opts).then(remoteRefs => { + return gitRange ? pickManifest({ + versions: remoteRefs.versions, + 'dist-tags': remoteRefs['dist-tags'], + name, + }, gitRange, this.opts) + : this.spec.gitCommittish ? + remoteRefs.refs[this.spec.gitCommittish] || + remoteRefs.refs[remoteRefs.shas[this.spec.gitCommittish]] + : remoteRefs.refs.HEAD // no git committish, get default head + }).then(revDoc => { + // the committish provided isn't in the rev list + // things like HEAD~3 or @yesterday can land here. + if (!revDoc || !revDoc.sha) { + return this.#resolvedFromClone() + } + + this.resolvedRef = revDoc + this.resolvedSha = revDoc.sha + this.#addGitSha(revDoc.sha) + return this.resolved + }) + } + + #setResolvedWithSha (withSha) { + // we haven't cloned, so a tgz download is still faster + // of course, if it's not a known host, we can't do that. + this.resolved = !this.spec.hosted ? withSha + : repoUrl(npa(withSha).hosted, { noCommittish: false }) + } + + // when we get the git sha, we affix it to our spec to build up + // either a git url with a hash, or a tarball download URL + #addGitSha (sha) { + this.#setResolvedWithSha(addGitSha(this.spec, sha)) + } + + #resolvedFromClone () { + // do a full or shallow clone, then look at the HEAD + // kind of wasteful, but no other option, really + return this.#clone(() => this.resolved) + } + + #prepareDir (dir) { + return this[_.readPackageJson](dir).then(mani => { + // no need if we aren't going to do any preparation. + const scripts = mani.scripts + if (!mani.workspaces && (!scripts || !( + scripts.postinstall || + scripts.build || + scripts.preinstall || + scripts.install || + scripts.prepack || + scripts.prepare))) { + return + } + + // to avoid cases where we have an cycle of git deps that depend + // on one another, we only ever do preparation for one instance + // of a given git dep along the chain of installations. + // Note that this does mean that a dependency MAY in theory end up + // trying to run its prepare script using a dependency that has not + // been properly prepared itself, but that edge case is smaller + // and less hazardous than a fork bomb of npm and git commands. + const noPrepare = !process.env._PACOTE_NO_PREPARE_ ? 
[] + : process.env._PACOTE_NO_PREPARE_.split('\n') + if (noPrepare.includes(this.resolved)) { + log.info('prepare', 'skip prepare, already seen', this.resolved) + return + } + noPrepare.push(this.resolved) + + // the DirFetcher will do its own preparation to run the prepare scripts + // All we have to do is put the deps in place so that it can succeed. + return npm( + this.npmBin, + [].concat(this.npmInstallCmd).concat(this.npmCliConfig), + dir, + { ...process.env, _PACOTE_NO_PREPARE_: noPrepare.join('\n') }, + { message: 'git dep preparation failed' } + ) + }) + } + + [_.tarballFromResolved] () { + const stream = new Minipass() + stream.resolved = this.resolved + stream.from = this.from + + // check it out and then shell out to the DirFetcher tarball packer + this.#clone(dir => this.#prepareDir(dir) + .then(() => new Promise((res, rej) => { + if (!this.Arborist) { + throw new Error('GitFetcher requires an Arborist constructor to pack a tarball') + } + const df = new DirFetcher(`file:${dir}`, { + ...this.opts, + Arborist: this.Arborist, + resolved: null, + integrity: null, + }) + const dirStream = df[_.tarballFromResolved]() + dirStream.on('error', rej) + dirStream.on('end', res) + dirStream.pipe(stream) + }))).catch( + /* istanbul ignore next: very unlikely and hard to test */ + er => stream.emit('error', er) + ) + return stream + } + + // clone a git repo into a temp folder (or fetch and unpack if possible) + // handler accepts a directory, and returns a promise that resolves + // when we're done with it, at which point, cacache deletes it + // + // TODO: after cloning, create a tarball of the folder, and add to the cache + // with cacache.put.stream(), using a key that's deterministic based on the + // spec and repo, so that we don't ever clone the same thing multiple times. + #clone (handler, tarballOk = true) { + const o = { tmpPrefix: 'git-clone' } + const ref = this.resolvedSha || this.spec.gitCommittish + const h = this.spec.hosted + const resolved = this.resolved + + // can be set manually to false to fall back to actual git clone + tarballOk = tarballOk && + h && resolved === repoUrl(h, { noCommittish: false }) && h.tarball + + return cacache.tmp.withTmp(this.cache, o, async tmp => { + // if we're resolved, and have a tarball url, shell out to RemoteFetcher + if (tarballOk) { + const nameat = this.spec.name ? `${this.spec.name}@` : '' + return new RemoteFetcher(h.tarball({ noCommittish: false }), { + ...this.opts, + allowGitIgnore: true, + pkgid: `git:${nameat}${this.resolved}`, + resolved: this.resolved, + integrity: null, // it'll always be different, if we have one + }).extract(tmp).then(() => handler(tmp), er => { + // fall back to ssh download if tarball fails + if (er.constructor.name.match(/^Http/)) { + return this.#clone(handler, false) + } else { + throw er + } + }) + } + + const sha = await ( + h ? this.#cloneHosted(ref, tmp) + : this.#cloneRepo(this.spec.fetchSpec, ref, tmp) + ) + this.resolvedSha = sha + if (!this.resolved) { + await this.#addGitSha(sha) + } + return handler(tmp) + }) + } + + // first try https, since that's faster and passphrase-less for + // public repos, and supports private repos when auth is provided. 
+ // Fall back to SSH to support private repos + // NB: we always store the https url in resolved field if auth + // is present, otherwise ssh if the hosted type provides it + #cloneHosted (ref, tmp) { + const hosted = this.spec.hosted + return this.#cloneRepo(hosted.https({ noCommittish: true }), ref, tmp) + .catch(er => { + // Throw early since we know pathspec errors will fail again if retried + if (er instanceof git.errors.GitPathspecError) { + throw er + } + const ssh = hosted.sshurl && hosted.sshurl({ noCommittish: true }) + // no fallthrough if we can't fall through or have https auth + if (!ssh || hosted.auth) { + throw er + } + return this.#cloneRepo(ssh, ref, tmp) + }) + } + + #cloneRepo (repo, ref, tmp) { + const { opts, spec } = this + return git.clone(repo, ref, tmp, { ...opts, spec }) + } + + manifest () { + if (this.package) { + return Promise.resolve(this.package) + } + + return this.spec.hosted && this.resolved + ? FileFetcher.prototype.manifest.apply(this) + : this.#clone(dir => + this[_.readPackageJson](dir) + .then(mani => this.package = { + ...mani, + _resolved: this.resolved, + _from: this.from, + })) + } + + packument () { + return FileFetcher.prototype.packument.apply(this) + } +} +module.exports = GitFetcher diff --git a/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/index.js b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/index.js new file mode 100644 index 00000000000000..f35314d275d5fd --- /dev/null +++ b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/index.js @@ -0,0 +1,23 @@ +const { get } = require('./fetcher.js') +const GitFetcher = require('./git.js') +const RegistryFetcher = require('./registry.js') +const FileFetcher = require('./file.js') +const DirFetcher = require('./dir.js') +const RemoteFetcher = require('./remote.js') + +const tarball = (spec, opts) => get(spec, opts).tarball() +tarball.stream = (spec, handler, opts) => get(spec, opts).tarballStream(handler) +tarball.file = (spec, dest, opts) => get(spec, opts).tarballFile(dest) + +module.exports = { + GitFetcher, + RegistryFetcher, + FileFetcher, + DirFetcher, + RemoteFetcher, + resolve: (spec, opts) => get(spec, opts).resolve(), + extract: (spec, dest, opts) => get(spec, opts).extract(dest), + manifest: (spec, opts) => get(spec, opts).manifest(), + packument: (spec, opts) => get(spec, opts).packument(), + tarball, +} diff --git a/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/registry.js b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/registry.js new file mode 100644 index 00000000000000..1ecf4ee1773499 --- /dev/null +++ b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/registry.js @@ -0,0 +1,369 @@ +const crypto = require('node:crypto') +const PackageJson = require('@npmcli/package-json') +const pickManifest = require('npm-pick-manifest') +const ssri = require('ssri') +const npa = require('npm-package-arg') +const sigstore = require('sigstore') +const fetch = require('npm-registry-fetch') +const Fetcher = require('./fetcher.js') +const RemoteFetcher = require('./remote.js') +const pacoteVersion = require('../package.json').version +const removeTrailingSlashes = require('./util/trailing-slashes.js') +const _ = require('./util/protected.js') + +// Corgis are cute. 
🐕🐶 +const corgiDoc = 'application/vnd.npm.install-v1+json; q=1.0, application/json; q=0.8, */*' +const fullDoc = 'application/json' + +// Some really old packages have no time field in their packument so we need a +// cutoff date. +const MISSING_TIME_CUTOFF = '2015-01-01T00:00:00.000Z' + +class RegistryFetcher extends Fetcher { + #cacheKey + constructor (spec, opts) { + super(spec, opts) + + // you usually don't want to fetch the same packument multiple times in + // the span of a given script or command, no matter how many pacote calls + // are made, so this lets us avoid doing that. It's only relevant for + // registry fetchers, because other types simulate their packument from + // the manifest, which they memoize on this.package, so it's very cheap + // already. + this.packumentCache = this.opts.packumentCache || null + + this.registry = fetch.pickRegistry(spec, opts) + this.packumentUrl = `${removeTrailingSlashes(this.registry)}/${this.spec.escapedName}` + this.#cacheKey = `${this.fullMetadata ? 'full' : 'corgi'}:${this.packumentUrl}` + + const parsed = new URL(this.registry) + const regKey = `//${parsed.host}${parsed.pathname}` + // unlike the nerf-darted auth keys, this one does *not* allow a mismatch + // of trailing slashes. It must match exactly. + if (this.opts[`${regKey}:_keys`]) { + this.registryKeys = this.opts[`${regKey}:_keys`] + } + + // XXX pacote <=9 has some logic to ignore opts.resolved if + // the resolved URL doesn't go to the same registry. + // Consider reproducing that here, to throw away this.resolved + // in that case. + } + + async resolve () { + // fetching the manifest sets resolved and (if present) integrity + await this.manifest() + if (!this.resolved) { + throw Object.assign( + new Error('Invalid package manifest: no `dist.tarball` field'), + { package: this.spec.toString() } + ) + } + return this.resolved + } + + #headers () { + return { + // npm will override UA, but ensure that we always send *something* + 'user-agent': this.opts.userAgent || + `pacote/${pacoteVersion} node/${process.version}`, + ...(this.opts.headers || {}), + 'pacote-version': pacoteVersion, + 'pacote-req-type': 'packument', + 'pacote-pkg-id': `registry:${this.spec.name}`, + accept: this.fullMetadata ? fullDoc : corgiDoc, + } + } + + async packument () { + // note this might be either an in-flight promise for a request, + // or the actual packument, but we never want to make more than + // one request at a time for the same thing regardless. 
+ if (this.packumentCache?.has(this.#cacheKey)) { + return this.packumentCache.get(this.#cacheKey) + } + + // npm-registry-fetch the packument + // set the appropriate header for corgis if fullMetadata isn't set + // return the res.json() promise + try { + const res = await fetch(this.packumentUrl, { + ...this.opts, + headers: this.#headers(), + spec: this.spec, + + // never check integrity for packuments themselves + integrity: null, + }) + const packument = await res.json() + const contentLength = res.headers.get('content-length') + if (contentLength) { + packument._contentLength = Number(contentLength) + } + this.packumentCache?.set(this.#cacheKey, packument) + return packument + } catch (err) { + this.packumentCache?.delete(this.#cacheKey) + if (err.code !== 'E404' || this.fullMetadata) { + throw err + } + // possible that corgis are not supported by this registry + this.fullMetadata = true + return this.packument() + } + } + + async manifest () { + if (this.package) { + return this.package + } + + // When verifying signatures, we need to fetch the full/uncompressed + // packument to get publish time as this is not included in the + // corgi/compressed packument. + if (this.opts.verifySignatures) { + this.fullMetadata = true + } + + const packument = await this.packument() + const steps = PackageJson.normalizeSteps.filter(s => s !== '_attributes') + const mani = await new PackageJson().fromContent(pickManifest(packument, this.spec.fetchSpec, { + ...this.opts, + defaultTag: this.defaultTag, + before: this.before, + })).normalize({ steps }).then(p => p.content) + + /* XXX add ETARGET and E403 revalidation of cached packuments here */ + + // add _time from packument if fetched with fullMetadata + const time = packument.time?.[mani.version] + if (time) { + mani._time = time + } + + // add _resolved and _integrity from dist object + const { dist } = mani + if (dist) { + this.resolved = mani._resolved = dist.tarball + mani._from = this.from + const distIntegrity = dist.integrity ? ssri.parse(dist.integrity) + : dist.shasum ? ssri.fromHex(dist.shasum, 'sha1', { ...this.opts }) + : null + if (distIntegrity) { + if (this.integrity && !this.integrity.match(distIntegrity)) { + // only bork if they have algos in common. + // otherwise we end up breaking if we have saved a sha512 + // previously for the tarball, but the manifest only + // provides a sha1, which is possible for older publishes. + // Otherwise, this is almost certainly a case of holding it + // wrong, and will result in weird or insecure behavior + // later on when building package tree. + for (const algo of Object.keys(this.integrity)) { + if (distIntegrity[algo]) { + throw Object.assign(new Error( + `Integrity checksum failed when using ${algo}: ` + + `wanted ${this.integrity} but got ${distIntegrity}.` + ), { code: 'EINTEGRITY' }) + } + } + } + // made it this far, the integrity is worthwhile. accept it. + // the setter here will take care of merging it into what we already + // had. 
+ this.integrity = distIntegrity + } + } + if (this.integrity) { + mani._integrity = String(this.integrity) + if (dist.signatures) { + if (this.opts.verifySignatures) { + // validate and throw on error, then set _signatures + const message = `${mani._id}:${mani._integrity}` + for (const signature of dist.signatures) { + const publicKey = this.registryKeys && + this.registryKeys.filter(key => (key.keyid === signature.keyid))[0] + if (!publicKey) { + throw Object.assign(new Error( + `${mani._id} has a registry signature with keyid: ${signature.keyid} ` + + 'but no corresponding public key can be found' + ), { code: 'EMISSINGSIGNATUREKEY' }) + } + + const publishedTime = Date.parse(mani._time || MISSING_TIME_CUTOFF) + const validPublicKey = !publicKey.expires || + publishedTime < Date.parse(publicKey.expires) + if (!validPublicKey) { + throw Object.assign(new Error( + `${mani._id} has a registry signature with keyid: ${signature.keyid} ` + + `but the corresponding public key has expired ${publicKey.expires}` + ), { code: 'EEXPIREDSIGNATUREKEY' }) + } + const verifier = crypto.createVerify('SHA256') + verifier.write(message) + verifier.end() + const valid = verifier.verify( + publicKey.pemkey, + signature.sig, + 'base64' + ) + if (!valid) { + throw Object.assign(new Error( + `${mani._id} has an invalid registry signature with ` + + `keyid: ${publicKey.keyid} and signature: ${signature.sig}` + ), { + code: 'EINTEGRITYSIGNATURE', + keyid: publicKey.keyid, + signature: signature.sig, + resolved: mani._resolved, + integrity: mani._integrity, + }) + } + } + mani._signatures = dist.signatures + } else { + mani._signatures = dist.signatures + } + } + + if (dist.attestations) { + if (this.opts.verifyAttestations) { + // Always fetch attestations from the current registry host + const attestationsPath = new URL(dist.attestations.url).pathname + const attestationsUrl = removeTrailingSlashes(this.registry) + attestationsPath + const res = await fetch(attestationsUrl, { + ...this.opts, + // disable integrity check for attestations json payload, we check the + // integrity in the verification steps below + integrity: null, + }) + const { attestations } = await res.json() + const bundles = attestations.map(({ predicateType, bundle }) => { + const statement = JSON.parse( + Buffer.from(bundle.dsseEnvelope.payload, 'base64').toString('utf8') + ) + const keyid = bundle.dsseEnvelope.signatures[0].keyid + const signature = bundle.dsseEnvelope.signatures[0].sig + + return { + predicateType, + bundle, + statement, + keyid, + signature, + } + }) + + const attestationKeyIds = bundles.map((b) => b.keyid).filter((k) => !!k) + const attestationRegistryKeys = (this.registryKeys || []) + .filter(key => attestationKeyIds.includes(key.keyid)) + if (!attestationRegistryKeys.length) { + throw Object.assign(new Error( + `${mani._id} has attestations but no corresponding public key(s) can be found` + ), { code: 'EMISSINGSIGNATUREKEY' }) + } + + for (const { predicateType, bundle, keyid, signature, statement } of bundles) { + const publicKey = attestationRegistryKeys.find(key => key.keyid === keyid) + // Publish attestations have a keyid set and a valid public key must be found + if (keyid) { + if (!publicKey) { + throw Object.assign(new Error( + `${mani._id} has attestations with keyid: ${keyid} ` + + 'but no corresponding public key can be found' + ), { code: 'EMISSINGSIGNATUREKEY' }) + } + + const integratedTime = new Date( + Number( + bundle.verificationMaterial.tlogEntries[0].integratedTime + ) * 1000 + ) + const 
validPublicKey = !publicKey.expires || + (integratedTime < Date.parse(publicKey.expires)) + if (!validPublicKey) { + throw Object.assign(new Error( + `${mani._id} has attestations with keyid: ${keyid} ` + + `but the corresponding public key has expired ${publicKey.expires}` + ), { code: 'EEXPIREDSIGNATUREKEY' }) + } + } + + const subject = { + name: statement.subject[0].name, + sha512: statement.subject[0].digest.sha512, + } + + // Only type 'version' can be turned into a PURL + const purl = this.spec.type === 'version' ? npa.toPurl(this.spec) : this.spec + // Verify the statement subject matches the package, version + if (subject.name !== purl) { + throw Object.assign(new Error( + `${mani._id} package name and version (PURL): ${purl} ` + + `doesn't match what was signed: ${subject.name}` + ), { code: 'EATTESTATIONSUBJECT' }) + } + + // Verify the statement subject matches the tarball integrity + const integrityHexDigest = ssri.parse(this.integrity).hexDigest() + if (subject.sha512 !== integrityHexDigest) { + throw Object.assign(new Error( + `${mani._id} package integrity (hex digest): ` + + `${integrityHexDigest} ` + + `doesn't match what was signed: ${subject.sha512}` + ), { code: 'EATTESTATIONSUBJECT' }) + } + + try { + // Provenance attestations are signed with a signing certificate + // (including the key) so we don't need to return a public key. + // + // Publish attestations are signed with a keyid so we need to + // specify a public key from the keys endpoint: `registry-host.tld/-/npm/v1/keys` + const options = { + tufCachePath: this.tufCache, + tufForceCache: true, + keySelector: publicKey ? () => publicKey.pemkey : undefined, + } + await sigstore.verify(bundle, options) + } catch (e) { + throw Object.assign(new Error( + `${mani._id} failed to verify attestation: ${e.message}` + ), { + code: 'EATTESTATIONVERIFY', + predicateType, + keyid, + signature, + resolved: mani._resolved, + integrity: mani._integrity, + }) + } + } + mani._attestations = dist.attestations + } else { + mani._attestations = dist.attestations + } + } + } + + this.package = mani + return this.package + } + + [_.tarballFromResolved] () { + // we use a RemoteFetcher to get the actual tarball stream + return new RemoteFetcher(this.resolved, { + ...this.opts, + resolved: this.resolved, + pkgid: `registry:${this.spec.name}@${this.resolved}`, + })[_.tarballFromResolved]() + } + + get types () { + return [ + 'tag', + 'version', + 'range', + ] + } +} +module.exports = RegistryFetcher diff --git a/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/remote.js b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/remote.js new file mode 100644 index 00000000000000..bd321e65a1f18a --- /dev/null +++ b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/remote.js @@ -0,0 +1,89 @@ +const fetch = require('npm-registry-fetch') +const { Minipass } = require('minipass') +const Fetcher = require('./fetcher.js') +const FileFetcher = require('./file.js') +const _ = require('./util/protected.js') +const pacoteVersion = require('../package.json').version + +class RemoteFetcher extends Fetcher { + constructor (spec, opts) { + super(spec, opts) + this.resolved = this.spec.fetchSpec + const resolvedURL = new URL(this.resolved) + if (this.replaceRegistryHost !== 'never' + && (this.replaceRegistryHost === 'always' + || this.replaceRegistryHost === resolvedURL.host)) { + this.resolved = new URL(resolvedURL.pathname, this.registry).href + } + + // nam is a fermented pork 
sausage that is good to eat + const nameat = this.spec.name ? `${this.spec.name}@` : '' + this.pkgid = opts.pkgid ? opts.pkgid : `remote:${nameat}${this.resolved}` + } + + // Don't need to cache tarball fetches in pacote, because make-fetch-happen + // will write into cacache anyway. + get [_.cacheFetches] () { + return false + } + + [_.tarballFromResolved] () { + const stream = new Minipass() + stream.hasIntegrityEmitter = true + + const fetchOpts = { + ...this.opts, + headers: this.#headers(), + spec: this.spec, + integrity: this.integrity, + algorithms: [this.pickIntegrityAlgorithm()], + } + + // eslint-disable-next-line promise/always-return + fetch(this.resolved, fetchOpts).then(res => { + res.body.on('error', + /* istanbul ignore next - exceedingly rare and hard to simulate */ + er => stream.emit('error', er) + ) + + res.body.on('integrity', i => { + this.integrity = i + stream.emit('integrity', i) + }) + + res.body.pipe(stream) + }).catch(er => stream.emit('error', er)) + + return stream + } + + #headers () { + return { + // npm will override this, but ensure that we always send *something* + 'user-agent': this.opts.userAgent || + `pacote/${pacoteVersion} node/${process.version}`, + ...(this.opts.headers || {}), + 'pacote-version': pacoteVersion, + 'pacote-req-type': 'tarball', + 'pacote-pkg-id': this.pkgid, + ...(this.integrity ? { 'pacote-integrity': String(this.integrity) } + : {}), + ...(this.opts.headers || {}), + } + } + + get types () { + return ['remote'] + } + + // getting a packument and/or manifest is the same as with a file: spec. + // unpack the tarball stream, and then read from the package.json file. + packument () { + return FileFetcher.prototype.packument.apply(this) + } + + manifest () { + return FileFetcher.prototype.manifest.apply(this) + } +} +module.exports = RemoteFetcher diff --git a/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/util/add-git-sha.js b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/util/add-git-sha.js new file mode 100644 index 00000000000000..843fe5b600cafa --- /dev/null +++ b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/util/add-git-sha.js @@ -0,0 +1,15 @@ +// add a sha to a git remote url spec +const addGitSha = (spec, sha) => { + if (spec.hosted) { + const h = spec.hosted + const opt = { noCommittish: true } + const base = h.https && h.auth ? h.https(opt) : h.shortcut(opt) + + return `${base}#${sha}` + } else { + // don't use new URL for this, because it doesn't handle scp urls + return spec.rawSpec.replace(/#.*$/, '') + `#${sha}` + } +} + +module.exports = addGitSha diff --git a/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/util/cache-dir.js b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/util/cache-dir.js new file mode 100644 index 00000000000000..ba5683a7bb5bf3 --- /dev/null +++ b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/util/cache-dir.js @@ -0,0 +1,15 @@ +const { resolve } = require('node:path') +const { tmpdir, homedir } = require('node:os') + +module.exports = (fakePlatform = false) => { + const temp = tmpdir() + const uidOrPid = process.getuid ? process.getuid() : process.pid + const home = homedir() || resolve(temp, 'npm-' + uidOrPid) + const platform = fakePlatform || process.platform + const cacheExtra = platform === 'win32' ? 
'npm-cache' : '.npm' + const cacheRoot = (platform === 'win32' && process.env.LOCALAPPDATA) || home + return { + cacache: resolve(cacheRoot, cacheExtra, '_cacache'), + tufcache: resolve(cacheRoot, cacheExtra, '_tuf'), + } +} diff --git a/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/util/is-package-bin.js b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/util/is-package-bin.js new file mode 100644 index 00000000000000..49a3f73f537ce9 --- /dev/null +++ b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/util/is-package-bin.js @@ -0,0 +1,25 @@ +// Function to determine whether a path is in the package.bin set. +// Used to prevent issues when people publish a package from a +// windows machine, and then install with --no-bin-links. +// +// Note: this is not possible in remote or file fetchers, since +// we don't have the manifest until AFTER we've unpacked. But the +// main use case is registry fetching with git a distant second, +// so that's an acceptable edge case to not handle. + +const binObj = (name, bin) => + typeof bin === 'string' ? { [name]: bin } : bin + +const hasBin = (pkg, path) => { + const bin = binObj(pkg.name, pkg.bin) + const p = path.replace(/^[^\\/]*\//, '') + for (const kv of Object.entries(bin)) { + if (kv[1] === p) { + return true + } + } + return false +} + +module.exports = (pkg, path) => + pkg && pkg.bin ? hasBin(pkg, path) : false diff --git a/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/util/npm.js b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/util/npm.js new file mode 100644 index 00000000000000..a3005c255565fb --- /dev/null +++ b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/util/npm.js @@ -0,0 +1,14 @@ +// run an npm command +const spawn = require('@npmcli/promise-spawn') + +module.exports = (npmBin, npmCommand, cwd, env, extra) => { + const isJS = npmBin.endsWith('.js') + const cmd = isJS ? process.execPath : npmBin + const args = (isJS ? [npmBin] : []).concat(npmCommand) + // when installing to run the `prepare` script for a git dep, we need + // to ensure that we don't run into a cycle of checking out packages + // in temp directories. this lets us link previously-seen repos that + // are also being prepared. 
+ + return spawn(cmd, args, { cwd, env }, extra) +} diff --git a/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/util/protected.js b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/util/protected.js new file mode 100644 index 00000000000000..e05203b481e6aa --- /dev/null +++ b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/util/protected.js @@ -0,0 +1,5 @@ +module.exports = { + cacheFetches: Symbol.for('pacote.Fetcher._cacheFetches'), + readPackageJson: Symbol.for('package.Fetcher._readPackageJson'), + tarballFromResolved: Symbol.for('pacote.Fetcher._tarballFromResolved'), +} diff --git a/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/util/tar-create-options.js b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/util/tar-create-options.js new file mode 100644 index 00000000000000..d070f0f7ba2d4e --- /dev/null +++ b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/util/tar-create-options.js @@ -0,0 +1,31 @@ +const isPackageBin = require('./is-package-bin.js') + +const tarCreateOptions = manifest => ({ + cwd: manifest._resolved, + prefix: 'package/', + portable: true, + gzip: { + // forcing the level to 9 seems to avoid some + // platform specific optimizations that cause + // integrity mismatch errors due to differing + // end results after compression + level: 9, + }, + + // ensure that package bins are always executable + // Note that npm-packlist is already filtering out + // anything that is not a regular file, ignored by + // .npmignore or package.json "files", etc. + filter: (path, stat) => { + if (isPackageBin(manifest, path)) { + stat.mode |= 0o111 + } + return true + }, + + // Provide a specific date in the 1980s for the benefit of zip, + // which is confounded by files dated at the Unix epoch 0. 
+ mtime: new Date('1985-10-26T08:15:00.000Z'), +}) + +module.exports = tarCreateOptions diff --git a/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/util/trailing-slashes.js b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/util/trailing-slashes.js new file mode 100644 index 00000000000000..c50cb6173b92eb --- /dev/null +++ b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/lib/util/trailing-slashes.js @@ -0,0 +1,10 @@ +const removeTrailingSlashes = (input) => { + // in order to avoid regexp redos detection + let output = input + while (output.endsWith('/')) { + output = output.slice(0, -1) + } + return output +} + +module.exports = removeTrailingSlashes diff --git a/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/package.json b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/package.json new file mode 100644 index 00000000000000..335c7a6c87bd3c --- /dev/null +++ b/deps/npm/node_modules/@npmcli/metavuln-calculator/node_modules/pacote/package.json @@ -0,0 +1,79 @@ +{ + "name": "pacote", + "version": "20.0.0", + "description": "JavaScript package downloader", + "author": "GitHub Inc.", + "bin": { + "pacote": "bin/index.js" + }, + "license": "ISC", + "main": "lib/index.js", + "scripts": { + "test": "tap", + "snap": "tap", + "lint": "npm run eslint", + "postlint": "template-oss-check", + "lintfix": "npm run eslint -- --fix", + "posttest": "npm run lint", + "template-oss-apply": "template-oss-apply --force", + "eslint": "eslint \"**/*.{js,cjs,ts,mjs,jsx,tsx}\"" + }, + "tap": { + "timeout": 300, + "nyc-arg": [ + "--exclude", + "tap-snapshots/**" + ] + }, + "devDependencies": { + "@npmcli/arborist": "^7.1.0", + "@npmcli/eslint-config": "^5.0.0", + "@npmcli/template-oss": "4.23.3", + "hosted-git-info": "^8.0.0", + "mutate-fs": "^2.1.1", + "nock": "^13.2.4", + "npm-registry-mock": "^1.3.2", + "tap": "^16.0.1" + }, + "files": [ + "bin/", + "lib/" + ], + "keywords": [ + "packages", + "npm", + "git" + ], + "dependencies": { + "@npmcli/git": "^6.0.0", + "@npmcli/installed-package-contents": "^3.0.0", + "@npmcli/package-json": "^6.0.0", + "@npmcli/promise-spawn": "^8.0.0", + "@npmcli/run-script": "^9.0.0", + "cacache": "^19.0.0", + "fs-minipass": "^3.0.0", + "minipass": "^7.0.2", + "npm-package-arg": "^12.0.0", + "npm-packlist": "^9.0.0", + "npm-pick-manifest": "^10.0.0", + "npm-registry-fetch": "^18.0.0", + "proc-log": "^5.0.0", + "promise-retry": "^2.0.1", + "sigstore": "^3.0.0", + "ssri": "^12.0.0", + "tar": "^6.1.11" + }, + "engines": { + "node": "^18.17.0 || >=20.5.0" + }, + "repository": { + "type": "git", + "url": "git+https://github.com/npm/pacote.git" + }, + "templateOSS": { + "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. 
Edits may be overwritten.", + "version": "4.23.3", + "windowsCI": false, + "publish": "true" + } +} diff --git a/deps/npm/node_modules/@npmcli/metavuln-calculator/package.json b/deps/npm/node_modules/@npmcli/metavuln-calculator/package.json index d4c3cf54d83ea7..df0b8f2f4faf1c 100644 --- a/deps/npm/node_modules/@npmcli/metavuln-calculator/package.json +++ b/deps/npm/node_modules/@npmcli/metavuln-calculator/package.json @@ -1,6 +1,6 @@ { "name": "@npmcli/metavuln-calculator", - "version": "8.0.0", + "version": "8.0.1", "main": "lib/index.js", "files": [ "bin/", @@ -41,7 +41,7 @@ "dependencies": { "cacache": "^19.0.0", "json-parse-even-better-errors": "^4.0.0", - "pacote": "^19.0.0", + "pacote": "^20.0.0", "proc-log": "^5.0.0", "semver": "^7.3.5" }, diff --git a/deps/npm/node_modules/@npmcli/package-json/lib/index.js b/deps/npm/node_modules/@npmcli/package-json/lib/index.js index f165ee23b75ab9..23f326dd59359f 100644 --- a/deps/npm/node_modules/@npmcli/package-json/lib/index.js +++ b/deps/npm/node_modules/@npmcli/package-json/lib/index.js @@ -7,6 +7,7 @@ const updateScripts = require('./update-scripts.js') const updateWorkspaces = require('./update-workspaces.js') const normalize = require('./normalize.js') const { read, parse } = require('./read-package.js') +const { packageSort } = require('./sort.js') // a list of handy specialized helper functions that take // care of special cases that are handled by the npm cli @@ -230,19 +231,23 @@ class PackageJson { return this } - async save () { + async save ({ sort } = {}) { if (!this.#canSave) { throw new Error('No package.json to save to') } const { [Symbol.for('indent')]: indent, [Symbol.for('newline')]: newline, + ...rest } = this.content const format = indent === undefined ? ' ' : indent const eol = newline === undefined ? '\n' : newline + + const content = sort ? packageSort(rest) : rest + const fileContent = `${ - JSON.stringify(this.content, null, format) + JSON.stringify(content, null, format) }\n` .replace(/\n/g, eol) diff --git a/deps/npm/node_modules/@npmcli/package-json/lib/sort.js b/deps/npm/node_modules/@npmcli/package-json/lib/sort.js new file mode 100644 index 00000000000000..0bd0d5199da583 --- /dev/null +++ b/deps/npm/node_modules/@npmcli/package-json/lib/sort.js @@ -0,0 +1,101 @@ +/** + * arbitrary sort order for package.json largely pulled from: + * https://github.com/keithamus/sort-package-json/blob/main/defaultRules.md + * + * cross checked with: + * https://github.com/npm/types/blob/main/types/index.d.ts#L104 + * https://docs.npmjs.com/cli/configuring-npm/package-json + */ +function packageSort (json) { + const { + name, + version, + private: isPrivate, + description, + keywords, + homepage, + bugs, + repository, + funding, + license, + author, + maintainers, + contributors, + type, + imports, + exports, + main, + browser, + types, + bin, + man, + directories, + files, + workspaces, + scripts, + config, + dependencies, + devDependencies, + peerDependencies, + peerDependenciesMeta, + optionalDependencies, + bundledDependencies, + bundleDependencies, + engines, + os, + cpu, + publishConfig, + devEngines, + licenses, + overrides, + ...rest + } = json + + return { + ...(typeof name !== 'undefined' ? { name } : {}), + ...(typeof version !== 'undefined' ? { version } : {}), + ...(typeof isPrivate !== 'undefined' ? { private: isPrivate } : {}), + ...(typeof description !== 'undefined' ? { description } : {}), + ...(typeof keywords !== 'undefined' ? { keywords } : {}), + ...(typeof homepage !== 'undefined' ? 
{ homepage } : {}), + ...(typeof bugs !== 'undefined' ? { bugs } : {}), + ...(typeof repository !== 'undefined' ? { repository } : {}), + ...(typeof funding !== 'undefined' ? { funding } : {}), + ...(typeof license !== 'undefined' ? { license } : {}), + ...(typeof author !== 'undefined' ? { author } : {}), + ...(typeof maintainers !== 'undefined' ? { maintainers } : {}), + ...(typeof contributors !== 'undefined' ? { contributors } : {}), + ...(typeof type !== 'undefined' ? { type } : {}), + ...(typeof imports !== 'undefined' ? { imports } : {}), + ...(typeof exports !== 'undefined' ? { exports } : {}), + ...(typeof main !== 'undefined' ? { main } : {}), + ...(typeof browser !== 'undefined' ? { browser } : {}), + ...(typeof types !== 'undefined' ? { types } : {}), + ...(typeof bin !== 'undefined' ? { bin } : {}), + ...(typeof man !== 'undefined' ? { man } : {}), + ...(typeof directories !== 'undefined' ? { directories } : {}), + ...(typeof files !== 'undefined' ? { files } : {}), + ...(typeof workspaces !== 'undefined' ? { workspaces } : {}), + ...(typeof scripts !== 'undefined' ? { scripts } : {}), + ...(typeof config !== 'undefined' ? { config } : {}), + ...(typeof dependencies !== 'undefined' ? { dependencies } : {}), + ...(typeof devDependencies !== 'undefined' ? { devDependencies } : {}), + ...(typeof peerDependencies !== 'undefined' ? { peerDependencies } : {}), + ...(typeof peerDependenciesMeta !== 'undefined' ? { peerDependenciesMeta } : {}), + ...(typeof optionalDependencies !== 'undefined' ? { optionalDependencies } : {}), + ...(typeof bundledDependencies !== 'undefined' ? { bundledDependencies } : {}), + ...(typeof bundleDependencies !== 'undefined' ? { bundleDependencies } : {}), + ...(typeof engines !== 'undefined' ? { engines } : {}), + ...(typeof os !== 'undefined' ? { os } : {}), + ...(typeof cpu !== 'undefined' ? { cpu } : {}), + ...(typeof publishConfig !== 'undefined' ? { publishConfig } : {}), + ...(typeof devEngines !== 'undefined' ? { devEngines } : {}), + ...(typeof licenses !== 'undefined' ? { licenses } : {}), + ...(typeof overrides !== 'undefined' ? 
{ overrides } : {}), + ...rest, + } +} + +module.exports = { + packageSort, +} diff --git a/deps/npm/node_modules/@npmcli/package-json/package.json b/deps/npm/node_modules/@npmcli/package-json/package.json index e766e8ccb4bbf5..97070e27d0d22e 100644 --- a/deps/npm/node_modules/@npmcli/package-json/package.json +++ b/deps/npm/node_modules/@npmcli/package-json/package.json @@ -1,7 +1,17 @@ { "name": "@npmcli/package-json", - "version": "6.0.1", + "version": "6.1.0", "description": "Programmatic API to update package.json", + "keywords": [ + "npm", + "oss" + ], + "repository": { + "type": "git", + "url": "git+https://github.com/npm/package-json.git" + }, + "license": "ISC", + "author": "GitHub Inc.", "main": "lib/index.js", "files": [ "bin/", @@ -18,19 +28,6 @@ "template-oss-apply": "template-oss-apply --force", "eslint": "eslint \"**/*.{js,cjs,ts,mjs,jsx,tsx}\"" }, - "keywords": [ - "npm", - "oss" - ], - "author": "GitHub Inc.", - "license": "ISC", - "devDependencies": { - "@npmcli/eslint-config": "^5.0.0", - "@npmcli/template-oss": "4.23.3", - "read-package-json": "^7.0.0", - "read-package-json-fast": "^4.0.0", - "tap": "^16.0.1" - }, "dependencies": { "@npmcli/git": "^6.0.0", "glob": "^10.2.2", @@ -40,16 +37,19 @@ "proc-log": "^5.0.0", "semver": "^7.5.3" }, - "repository": { - "type": "git", - "url": "git+https://github.com/npm/package-json.git" + "devDependencies": { + "@npmcli/eslint-config": "^5.0.0", + "@npmcli/template-oss": "4.23.5", + "read-package-json": "^7.0.0", + "read-package-json-fast": "^4.0.0", + "tap": "^16.0.1" }, "engines": { "node": "^18.17.0 || >=20.5.0" }, "templateOSS": { "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. Edits may be overwritten.", - "version": "4.23.3", + "version": "4.23.5", "publish": "true" }, "tap": { diff --git a/deps/npm/node_modules/@npmcli/promise-spawn/lib/index.js b/deps/npm/node_modules/@npmcli/promise-spawn/lib/index.js index e147cb8f9c746f..aa7b55d8f038d4 100644 --- a/deps/npm/node_modules/@npmcli/promise-spawn/lib/index.js +++ b/deps/npm/node_modules/@npmcli/promise-spawn/lib/index.js @@ -131,9 +131,19 @@ const open = (_args, opts = {}, extra = {}) => { let platform = process.platform // process.platform === 'linux' may actually indicate WSL, if that's the case - // we want to treat things as win32 anyway so the host can open the argument + // open the argument with sensible-browser which is pre-installed + // In WSL, set the default browser using, for example, + // export BROWSER="/mnt/c/Program Files (x86)/Google/Chrome/Application/chrome.exe" + // or + // export BROWSER="/mnt/c/Program Files (x86)/Microsoft/Edge/Application/msedge.exe" + // To permanently set the default browser, add the appropriate entry to your shell's + // RC file, e.g. .bashrc or .zshrc. 
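+  // Illustrative only (the browser path is hypothetical): with
+  //   export BROWSER="/mnt/c/Program Files/Mozilla Firefox/firefox.exe"
+  // the WSL branch below spawns `sensible-browser <url>`, and
+  // sensible-browser consults $BROWSER when picking the binary to launch.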
if (platform === 'linux' && os.release().toLowerCase().includes('microsoft')) { - platform = 'win32' + platform = 'wsl' + if (!process.env.BROWSER) { + return Promise.reject( + new Error('Set the BROWSER environment variable to your desired browser.')) + } } let command = options.command @@ -146,6 +156,8 @@ const open = (_args, opts = {}, extra = {}) => { // accidentally interpret the first arg as the title, we stick an empty // string immediately after the start command command = 'start ""' + } else if (platform === 'wsl') { + command = 'sensible-browser' } else if (platform === 'darwin') { command = 'open' } else { diff --git a/deps/npm/node_modules/@npmcli/promise-spawn/package.json b/deps/npm/node_modules/@npmcli/promise-spawn/package.json index 9914063f85156d..f5fb026be50e85 100644 --- a/deps/npm/node_modules/@npmcli/promise-spawn/package.json +++ b/deps/npm/node_modules/@npmcli/promise-spawn/package.json @@ -1,6 +1,6 @@ { "name": "@npmcli/promise-spawn", - "version": "8.0.1", + "version": "8.0.2", "files": [ "bin/", "lib/" @@ -33,7 +33,7 @@ }, "devDependencies": { "@npmcli/eslint-config": "^5.0.0", - "@npmcli/template-oss": "4.23.3", + "@npmcli/template-oss": "4.23.4", "spawk": "^1.7.1", "tap": "^16.0.1" }, @@ -42,7 +42,7 @@ }, "templateOSS": { "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. Edits may be overwritten.", - "version": "4.23.3", + "version": "4.23.4", "publish": true }, "dependencies": { diff --git a/deps/npm/node_modules/@npmcli/run-script/package.json b/deps/npm/node_modules/@npmcli/run-script/package.json index e5fd2fef62e5ce..38a2ac9f87772b 100644 --- a/deps/npm/node_modules/@npmcli/run-script/package.json +++ b/deps/npm/node_modules/@npmcli/run-script/package.json @@ -1,6 +1,6 @@ { "name": "@npmcli/run-script", - "version": "9.0.1", + "version": "9.0.2", "description": "Run a lifecycle script for a package (descendant of npm-lifecycle)", "author": "GitHub Inc.", "license": "ISC", @@ -16,7 +16,7 @@ }, "devDependencies": { "@npmcli/eslint-config": "^5.0.0", - "@npmcli/template-oss": "4.23.3", + "@npmcli/template-oss": "4.23.4", "spawk": "^1.8.1", "tap": "^16.0.1" }, @@ -24,7 +24,7 @@ "@npmcli/node-gyp": "^4.0.0", "@npmcli/package-json": "^6.0.0", "@npmcli/promise-spawn": "^8.0.0", - "node-gyp": "^10.0.0", + "node-gyp": "^11.0.0", "proc-log": "^5.0.0", "which": "^5.0.0" }, @@ -42,7 +42,7 @@ }, "templateOSS": { "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. 
Edits may be overwritten.", - "version": "4.23.3", + "version": "4.23.4", "publish": "true" }, "tap": { diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/agent/lib/agents.js b/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/agent/lib/agents.js deleted file mode 100644 index c541b93001517e..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/agent/lib/agents.js +++ /dev/null @@ -1,206 +0,0 @@ -'use strict' - -const net = require('net') -const tls = require('tls') -const { once } = require('events') -const timers = require('timers/promises') -const { normalizeOptions, cacheOptions } = require('./options') -const { getProxy, getProxyAgent, proxyCache } = require('./proxy.js') -const Errors = require('./errors.js') -const { Agent: AgentBase } = require('agent-base') - -module.exports = class Agent extends AgentBase { - #options - #timeouts - #proxy - #noProxy - #ProxyAgent - - constructor (options = {}) { - const { timeouts, proxy, noProxy, ...normalizedOptions } = normalizeOptions(options) - - super(normalizedOptions) - - this.#options = normalizedOptions - this.#timeouts = timeouts - - if (proxy) { - this.#proxy = new URL(proxy) - this.#noProxy = noProxy - this.#ProxyAgent = getProxyAgent(proxy) - } - } - - get proxy () { - return this.#proxy ? { url: this.#proxy } : {} - } - - #getProxy (options) { - if (!this.#proxy) { - return - } - - const proxy = getProxy(`${options.protocol}//${options.host}:${options.port}`, { - proxy: this.#proxy, - noProxy: this.#noProxy, - }) - - if (!proxy) { - return - } - - const cacheKey = cacheOptions({ - ...options, - ...this.#options, - timeouts: this.#timeouts, - proxy, - }) - - if (proxyCache.has(cacheKey)) { - return proxyCache.get(cacheKey) - } - - let ProxyAgent = this.#ProxyAgent - if (Array.isArray(ProxyAgent)) { - ProxyAgent = this.isSecureEndpoint(options) ? ProxyAgent[1] : ProxyAgent[0] - } - - const proxyAgent = new ProxyAgent(proxy, { - ...this.#options, - socketOptions: { family: this.#options.family }, - }) - proxyCache.set(cacheKey, proxyAgent) - - return proxyAgent - } - - // takes an array of promises and races them against the connection timeout - // which will throw the necessary error if it is hit. This will return the - // result of the promise race. - async #timeoutConnection ({ promises, options, timeout }, ac = new AbortController()) { - if (timeout) { - const connectionTimeout = timers.setTimeout(timeout, null, { signal: ac.signal }) - .then(() => { - throw new Errors.ConnectionTimeoutError(`${options.host}:${options.port}`) - }).catch((err) => { - if (err.name === 'AbortError') { - return - } - throw err - }) - promises.push(connectionTimeout) - } - - let result - try { - result = await Promise.race(promises) - ac.abort() - } catch (err) { - ac.abort() - throw err - } - return result - } - - async connect (request, options) { - // if the connection does not have its own lookup function - // set, then use the one from our options - options.lookup ??= this.#options.lookup - - let socket - let timeout = this.#timeouts.connection - const isSecureEndpoint = this.isSecureEndpoint(options) - - const proxy = this.#getProxy(options) - if (proxy) { - // some of the proxies will wait for the socket to fully connect before - // returning so we have to await this while also racing it against the - // connection timeout. 
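-      // e.g. an HTTPS CONNECT tunnel only resolves proxy.connect() once the
-      // tunnel is up, so the single `timeout` budget below has to cover both
-      // the proxy handshake and the socket connect that follows it.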
- const start = Date.now() - socket = await this.#timeoutConnection({ - options, - timeout, - promises: [proxy.connect(request, options)], - }) - // see how much time proxy.connect took and subtract it from - // the timeout - if (timeout) { - timeout = timeout - (Date.now() - start) - } - } else { - socket = (isSecureEndpoint ? tls : net).connect(options) - } - - socket.setKeepAlive(this.keepAlive, this.keepAliveMsecs) - socket.setNoDelay(this.keepAlive) - - const abortController = new AbortController() - const { signal } = abortController - - const connectPromise = socket[isSecureEndpoint ? 'secureConnecting' : 'connecting'] - ? once(socket, isSecureEndpoint ? 'secureConnect' : 'connect', { signal }) - : Promise.resolve() - - await this.#timeoutConnection({ - options, - timeout, - promises: [ - connectPromise, - once(socket, 'error', { signal }).then((err) => { - throw err[0] - }), - ], - }, abortController) - - if (this.#timeouts.idle) { - socket.setTimeout(this.#timeouts.idle, () => { - socket.destroy(new Errors.IdleTimeoutError(`${options.host}:${options.port}`)) - }) - } - - return socket - } - - addRequest (request, options) { - const proxy = this.#getProxy(options) - // it would be better to call proxy.addRequest here but this causes the - // http-proxy-agent to call its super.addRequest which causes the request - // to be added to the agent twice. since we only support 3 agents - // currently (see the required agents in proxy.js) we have manually - // checked that the only public methods we need to call are called in the - // next block. this could change in the future and presumably we would get - // failing tests until we have properly called the necessary methods on - // each of our proxy agents - if (proxy?.setRequestProps) { - proxy.setRequestProps(request, options) - } - - request.setHeader('connection', this.keepAlive ? 'keep-alive' : 'close') - - if (this.#timeouts.response) { - let responseTimeout - request.once('finish', () => { - setTimeout(() => { - request.destroy(new Errors.ResponseTimeoutError(request, this.#proxy)) - }, this.#timeouts.response) - }) - request.once('response', () => { - clearTimeout(responseTimeout) - }) - } - - if (this.#timeouts.transfer) { - let transferTimeout - request.once('response', (res) => { - setTimeout(() => { - res.destroy(new Errors.TransferTimeoutError(request, this.#proxy)) - }, this.#timeouts.transfer) - res.once('close', () => { - clearTimeout(transferTimeout) - }) - }) - } - - return super.addRequest(request, options) - } -} diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/agent/lib/dns.js b/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/agent/lib/dns.js deleted file mode 100644 index 3c6946c566d736..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/agent/lib/dns.js +++ /dev/null @@ -1,53 +0,0 @@ -'use strict' - -const { LRUCache } = require('lru-cache') -const dns = require('dns') - -// this is a factory so that each request can have its own opts (i.e. ttl) -// while still sharing the cache across all requests -const cache = new LRUCache({ max: 50 }) - -const getOptions = ({ - family = 0, - hints = dns.ADDRCONFIG, - all = false, - verbatim = undefined, - ttl = 5 * 60 * 1000, - lookup = dns.lookup, -}) => ({ - // hints and lookup are returned since both are top level properties to (net|tls).connect - hints, - lookup: (hostname, ...args) => { - const callback = args.pop() // callback is always last arg - const lookupOptions = args[0] ?? 
{} - - const options = { - family, - hints, - all, - verbatim, - ...(typeof lookupOptions === 'number' ? { family: lookupOptions } : lookupOptions), - } - - const key = JSON.stringify({ hostname, ...options }) - - if (cache.has(key)) { - const cached = cache.get(key) - return process.nextTick(callback, null, ...cached) - } - - lookup(hostname, options, (err, ...result) => { - if (err) { - return callback(err) - } - - cache.set(key, result, { ttl }) - return callback(null, ...result) - }) - }, -}) - -module.exports = { - cache, - getOptions, -} diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/agent/lib/errors.js b/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/agent/lib/errors.js deleted file mode 100644 index 70475aec8eb357..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/agent/lib/errors.js +++ /dev/null @@ -1,61 +0,0 @@ -'use strict' - -class InvalidProxyProtocolError extends Error { - constructor (url) { - super(`Invalid protocol \`${url.protocol}\` connecting to proxy \`${url.host}\``) - this.code = 'EINVALIDPROXY' - this.proxy = url - } -} - -class ConnectionTimeoutError extends Error { - constructor (host) { - super(`Timeout connecting to host \`${host}\``) - this.code = 'ECONNECTIONTIMEOUT' - this.host = host - } -} - -class IdleTimeoutError extends Error { - constructor (host) { - super(`Idle timeout reached for host \`${host}\``) - this.code = 'EIDLETIMEOUT' - this.host = host - } -} - -class ResponseTimeoutError extends Error { - constructor (request, proxy) { - let msg = 'Response timeout ' - if (proxy) { - msg += `from proxy \`${proxy.host}\` ` - } - msg += `connecting to host \`${request.host}\`` - super(msg) - this.code = 'ERESPONSETIMEOUT' - this.proxy = proxy - this.request = request - } -} - -class TransferTimeoutError extends Error { - constructor (request, proxy) { - let msg = 'Transfer timeout ' - if (proxy) { - msg += `from proxy \`${proxy.host}\` ` - } - msg += `for \`${request.host}\`` - super(msg) - this.code = 'ETRANSFERTIMEOUT' - this.proxy = proxy - this.request = request - } -} - -module.exports = { - InvalidProxyProtocolError, - ConnectionTimeoutError, - IdleTimeoutError, - ResponseTimeoutError, - TransferTimeoutError, -} diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/agent/lib/index.js b/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/agent/lib/index.js deleted file mode 100644 index b33d6eaef07a21..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/agent/lib/index.js +++ /dev/null @@ -1,56 +0,0 @@ -'use strict' - -const { LRUCache } = require('lru-cache') -const { normalizeOptions, cacheOptions } = require('./options') -const { getProxy, proxyCache } = require('./proxy.js') -const dns = require('./dns.js') -const Agent = require('./agents.js') - -const agentCache = new LRUCache({ max: 20 }) - -const getAgent = (url, { agent, proxy, noProxy, ...options } = {}) => { - // false has meaning so this can't be a simple truthiness check - if (agent != null) { - return agent - } - - url = new URL(url) - - const proxyForUrl = getProxy(url, { proxy, noProxy }) - const normalizedOptions = { - ...normalizeOptions(options), - proxy: proxyForUrl, - } - - const cacheKey = cacheOptions({ - ...normalizedOptions, - secureEndpoint: url.protocol === 'https:', - }) - - if (agentCache.has(cacheKey)) { - return agentCache.get(cacheKey) - } - - const newAgent = new Agent(normalizedOptions) - agentCache.set(cacheKey, newAgent) - - return newAgent -} - 
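-// Usage sketch (caller code, not part of this module; option values are
-// illustrative):
-//   const https = require('https')
-//   const { getAgent } = require('@npmcli/agent')
-//   const agent = getAgent('https://registry.npmjs.org', {
-//     timeouts: { connection: 5000, idle: 10000 },
-//   })
-//   https.get('https://registry.npmjs.org/npm', { agent }, res => res.resume())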
-module.exports = { - getAgent, - Agent, - // these are exported for backwards compatability - HttpAgent: Agent, - HttpsAgent: Agent, - cache: { - proxy: proxyCache, - agent: agentCache, - dns: dns.cache, - clear: () => { - proxyCache.clear() - agentCache.clear() - dns.cache.clear() - }, - }, -} diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/agent/lib/options.js b/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/agent/lib/options.js deleted file mode 100644 index 0bf53f725f0846..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/agent/lib/options.js +++ /dev/null @@ -1,86 +0,0 @@ -'use strict' - -const dns = require('./dns') - -const normalizeOptions = (opts) => { - const family = parseInt(opts.family ?? '0', 10) - const keepAlive = opts.keepAlive ?? true - - const normalized = { - // nodejs http agent options. these are all the defaults - // but kept here to increase the likelihood of cache hits - // https://nodejs.org/api/http.html#new-agentoptions - keepAliveMsecs: keepAlive ? 1000 : undefined, - maxSockets: opts.maxSockets ?? 15, - maxTotalSockets: Infinity, - maxFreeSockets: keepAlive ? 256 : undefined, - scheduling: 'fifo', - // then spread the rest of the options - ...opts, - // we already set these to their defaults that we want - family, - keepAlive, - // our custom timeout options - timeouts: { - // the standard timeout option is mapped to our idle timeout - // and then deleted below - idle: opts.timeout ?? 0, - connection: 0, - response: 0, - transfer: 0, - ...opts.timeouts, - }, - // get the dns options that go at the top level of socket connection - ...dns.getOptions({ family, ...opts.dns }), - } - - // remove timeout since we already used it to set our own idle timeout - delete normalized.timeout - - return normalized -} - -const createKey = (obj) => { - let key = '' - const sorted = Object.entries(obj).sort((a, b) => a[0] - b[0]) - for (let [k, v] of sorted) { - if (v == null) { - v = 'null' - } else if (v instanceof URL) { - v = v.toString() - } else if (typeof v === 'object') { - v = createKey(v) - } - key += `${k}:${v}:` - } - return key -} - -const cacheOptions = ({ secureEndpoint, ...options }) => createKey({ - secureEndpoint: !!secureEndpoint, - // socket connect options - family: options.family, - hints: options.hints, - localAddress: options.localAddress, - // tls specific connect options - strictSsl: secureEndpoint ? !!options.rejectUnauthorized : false, - ca: secureEndpoint ? options.ca : null, - cert: secureEndpoint ? options.cert : null, - key: secureEndpoint ? 
options.key : null, - // http agent options - keepAlive: options.keepAlive, - keepAliveMsecs: options.keepAliveMsecs, - maxSockets: options.maxSockets, - maxTotalSockets: options.maxTotalSockets, - maxFreeSockets: options.maxFreeSockets, - scheduling: options.scheduling, - // timeout options - timeouts: options.timeouts, - // proxy - proxy: options.proxy, -}) - -module.exports = { - normalizeOptions, - cacheOptions, -} diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/agent/lib/proxy.js b/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/agent/lib/proxy.js deleted file mode 100644 index 6272e929e57bcf..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/agent/lib/proxy.js +++ /dev/null @@ -1,88 +0,0 @@ -'use strict' - -const { HttpProxyAgent } = require('http-proxy-agent') -const { HttpsProxyAgent } = require('https-proxy-agent') -const { SocksProxyAgent } = require('socks-proxy-agent') -const { LRUCache } = require('lru-cache') -const { InvalidProxyProtocolError } = require('./errors.js') - -const PROXY_CACHE = new LRUCache({ max: 20 }) - -const SOCKS_PROTOCOLS = new Set(SocksProxyAgent.protocols) - -const PROXY_ENV_KEYS = new Set(['https_proxy', 'http_proxy', 'proxy', 'no_proxy']) - -const PROXY_ENV = Object.entries(process.env).reduce((acc, [key, value]) => { - key = key.toLowerCase() - if (PROXY_ENV_KEYS.has(key)) { - acc[key] = value - } - return acc -}, {}) - -const getProxyAgent = (url) => { - url = new URL(url) - - const protocol = url.protocol.slice(0, -1) - if (SOCKS_PROTOCOLS.has(protocol)) { - return SocksProxyAgent - } - if (protocol === 'https' || protocol === 'http') { - return [HttpProxyAgent, HttpsProxyAgent] - } - - throw new InvalidProxyProtocolError(url) -} - -const isNoProxy = (url, noProxy) => { - if (typeof noProxy === 'string') { - noProxy = noProxy.split(',').map((p) => p.trim()).filter(Boolean) - } - - if (!noProxy || !noProxy.length) { - return false - } - - const hostSegments = url.hostname.split('.').reverse() - - return noProxy.some((no) => { - const noSegments = no.split('.').filter(Boolean).reverse() - if (!noSegments.length) { - return false - } - - for (let i = 0; i < noSegments.length; i++) { - if (hostSegments[i] !== noSegments[i]) { - return false - } - } - - return true - }) -} - -const getProxy = (url, { proxy, noProxy }) => { - url = new URL(url) - - if (!proxy) { - proxy = url.protocol === 'https:' - ? 
PROXY_ENV.https_proxy - : PROXY_ENV.https_proxy || PROXY_ENV.http_proxy || PROXY_ENV.proxy - } - - if (!noProxy) { - noProxy = PROXY_ENV.no_proxy - } - - if (!proxy || isNoProxy(url, noProxy)) { - return null - } - - return new URL(proxy) -} - -module.exports = { - getProxyAgent, - getProxy, - proxyCache: PROXY_CACHE, -} diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/agent/package.json b/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/agent/package.json deleted file mode 100644 index ef5b4e3228cc46..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/agent/package.json +++ /dev/null @@ -1,60 +0,0 @@ -{ - "name": "@npmcli/agent", - "version": "2.2.2", - "description": "the http/https agent used by the npm cli", - "main": "lib/index.js", - "scripts": { - "gencerts": "bash scripts/create-cert.sh", - "test": "tap", - "lint": "eslint \"**/*.{js,cjs,ts,mjs,jsx,tsx}\"", - "postlint": "template-oss-check", - "template-oss-apply": "template-oss-apply --force", - "lintfix": "npm run lint -- --fix", - "snap": "tap", - "posttest": "npm run lint" - }, - "author": "GitHub Inc.", - "license": "ISC", - "bugs": { - "url": "https://github.com/npm/agent/issues" - }, - "homepage": "https://github.com/npm/agent#readme", - "files": [ - "bin/", - "lib/" - ], - "engines": { - "node": "^16.14.0 || >=18.0.0" - }, - "templateOSS": { - "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. Edits may be overwritten.", - "version": "4.21.3", - "publish": "true" - }, - "dependencies": { - "agent-base": "^7.1.0", - "http-proxy-agent": "^7.0.0", - "https-proxy-agent": "^7.0.1", - "lru-cache": "^10.0.1", - "socks-proxy-agent": "^8.0.3" - }, - "devDependencies": { - "@npmcli/eslint-config": "^4.0.0", - "@npmcli/template-oss": "4.21.3", - "minipass-fetch": "^3.0.3", - "nock": "^13.2.7", - "semver": "^7.5.4", - "simple-socks": "^3.1.0", - "tap": "^16.3.0" - }, - "repository": { - "type": "git", - "url": "https://github.com/npm/agent.git" - }, - "tap": { - "nyc-arg": [ - "--exclude", - "tap-snapshots/**" - ] - } -} diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/LICENSE.md b/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/LICENSE.md deleted file mode 100644 index 5fc208ff122e08..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/LICENSE.md +++ /dev/null @@ -1,20 +0,0 @@ - - -ISC License - -Copyright npm, Inc. - -Permission to use, copy, modify, and/or distribute this -software for any purpose with or without fee is hereby -granted, provided that the above copyright notice and this -permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND NPM DISCLAIMS ALL -WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO -EVENT SHALL NPM BE LIABLE FOR ANY SPECIAL, DIRECT, -INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, -WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER -TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE -USE OR PERFORMANCE OF THIS SOFTWARE. 
diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/common/get-options.js b/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/common/get-options.js deleted file mode 100644 index cb5982f79077ac..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/common/get-options.js +++ /dev/null @@ -1,20 +0,0 @@ -// given an input that may or may not be an object, return an object that has -// a copy of every defined property listed in 'copy'. if the input is not an -// object, assign it to the property named by 'wrap' -const getOptions = (input, { copy, wrap }) => { - const result = {} - - if (input && typeof input === 'object') { - for (const prop of copy) { - if (input[prop] !== undefined) { - result[prop] = input[prop] - } - } - } else { - result[wrap] = input - } - - return result -} - -module.exports = getOptions diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/common/node.js b/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/common/node.js deleted file mode 100644 index 4d13bc037359d7..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/common/node.js +++ /dev/null @@ -1,9 +0,0 @@ -const semver = require('semver') - -const satisfies = (range) => { - return semver.satisfies(process.version, range, { includePrerelease: true }) -} - -module.exports = { - satisfies, -} diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/cp/LICENSE b/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/cp/LICENSE deleted file mode 100644 index 93546dfb7655bf..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/cp/LICENSE +++ /dev/null @@ -1,15 +0,0 @@ -(The MIT License) - -Copyright (c) 2011-2017 JP Richardson - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files -(the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, - merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE -WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS -OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/cp/errors.js b/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/cp/errors.js deleted file mode 100644 index 1cd1e05d0c533d..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/cp/errors.js +++ /dev/null @@ -1,129 +0,0 @@ -'use strict' -const { inspect } = require('util') - -// adapted from node's internal/errors -// https://github.com/nodejs/node/blob/c8a04049/lib/internal/errors.js - -// close copy of node's internal SystemError class. 
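-// e.g. the ERR_FS_CP_EEXIST class created by E() below stringifies roughly as
-//   SystemError [ERR_FS_CP_EEXIST]: Target already exists: cp returned
-//   undefined (<dest> already exists) <dest>
-// ("undefined" because callers pass `errno`, not `code`; see the XXX note).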
-class SystemError { - constructor (code, prefix, context) { - // XXX context.code is undefined in all constructors used in cp/polyfill - // that may be a bug copied from node, maybe the constructor should use - // `code` not `errno`? nodejs/node#41104 - let message = `${prefix}: ${context.syscall} returned ` + - `${context.code} (${context.message})` - - if (context.path !== undefined) { - message += ` ${context.path}` - } - if (context.dest !== undefined) { - message += ` => ${context.dest}` - } - - this.code = code - Object.defineProperties(this, { - name: { - value: 'SystemError', - enumerable: false, - writable: true, - configurable: true, - }, - message: { - value: message, - enumerable: false, - writable: true, - configurable: true, - }, - info: { - value: context, - enumerable: true, - configurable: true, - writable: false, - }, - errno: { - get () { - return context.errno - }, - set (value) { - context.errno = value - }, - enumerable: true, - configurable: true, - }, - syscall: { - get () { - return context.syscall - }, - set (value) { - context.syscall = value - }, - enumerable: true, - configurable: true, - }, - }) - - if (context.path !== undefined) { - Object.defineProperty(this, 'path', { - get () { - return context.path - }, - set (value) { - context.path = value - }, - enumerable: true, - configurable: true, - }) - } - - if (context.dest !== undefined) { - Object.defineProperty(this, 'dest', { - get () { - return context.dest - }, - set (value) { - context.dest = value - }, - enumerable: true, - configurable: true, - }) - } - } - - toString () { - return `${this.name} [${this.code}]: ${this.message}` - } - - [Symbol.for('nodejs.util.inspect.custom')] (_recurseTimes, ctx) { - return inspect(this, { - ...ctx, - getters: true, - customInspect: false, - }) - } -} - -function E (code, message) { - module.exports[code] = class NodeError extends SystemError { - constructor (ctx) { - super(code, message, ctx) - } - } -} - -E('ERR_FS_CP_DIR_TO_NON_DIR', 'Cannot overwrite directory with non-directory') -E('ERR_FS_CP_EEXIST', 'Target already exists') -E('ERR_FS_CP_EINVAL', 'Invalid src or dest') -E('ERR_FS_CP_FIFO_PIPE', 'Cannot copy a FIFO pipe') -E('ERR_FS_CP_NON_DIR_TO_DIR', 'Cannot overwrite non-directory with directory') -E('ERR_FS_CP_SOCKET', 'Cannot copy a socket file') -E('ERR_FS_CP_SYMLINK_TO_SUBDIRECTORY', 'Cannot overwrite symlink in subdirectory of self') -E('ERR_FS_CP_UNKNOWN', 'Cannot copy an unknown file type') -E('ERR_FS_EISDIR', 'Path is a directory') - -module.exports.ERR_INVALID_ARG_TYPE = class ERR_INVALID_ARG_TYPE extends Error { - constructor (name, expected, actual) { - super() - this.code = 'ERR_INVALID_ARG_TYPE' - this.message = `The ${name} argument must be ${expected}. 
Received ${typeof actual}` - } -} diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/cp/index.js b/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/cp/index.js deleted file mode 100644 index 972ce7aa12abef..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/cp/index.js +++ /dev/null @@ -1,22 +0,0 @@ -const fs = require('fs/promises') -const getOptions = require('../common/get-options.js') -const node = require('../common/node.js') -const polyfill = require('./polyfill.js') - -// node 16.7.0 added fs.cp -const useNative = node.satisfies('>=16.7.0') - -const cp = async (src, dest, opts) => { - const options = getOptions(opts, { - copy: ['dereference', 'errorOnExist', 'filter', 'force', 'preserveTimestamps', 'recursive'], - }) - - // the polyfill is tested separately from this module, no need to hack - // process.version to try to trigger it just for coverage - // istanbul ignore next - return useNative - ? fs.cp(src, dest, options) - : polyfill(src, dest, options) -} - -module.exports = cp diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/cp/polyfill.js b/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/cp/polyfill.js deleted file mode 100644 index 80eb10de971918..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/cp/polyfill.js +++ /dev/null @@ -1,428 +0,0 @@ -// this file is a modified version of the code in node 17.2.0 -// which is, in turn, a modified version of the fs-extra module on npm -// node core changes: -// - Use of the assert module has been replaced with core's error system. -// - All code related to the glob dependency has been removed. -// - Bring your own custom fs module is not currently supported. -// - Some basic code cleanup. -// changes here: -// - remove all callback related code -// - drop sync support -// - change assertions back to non-internal methods (see options.js) -// - throws ENOTDIR when rmdir gets an ENOENT for a path that exists in Windows -'use strict' - -const { - ERR_FS_CP_DIR_TO_NON_DIR, - ERR_FS_CP_EEXIST, - ERR_FS_CP_EINVAL, - ERR_FS_CP_FIFO_PIPE, - ERR_FS_CP_NON_DIR_TO_DIR, - ERR_FS_CP_SOCKET, - ERR_FS_CP_SYMLINK_TO_SUBDIRECTORY, - ERR_FS_CP_UNKNOWN, - ERR_FS_EISDIR, - ERR_INVALID_ARG_TYPE, -} = require('./errors.js') -const { - constants: { - errno: { - EEXIST, - EISDIR, - EINVAL, - ENOTDIR, - }, - }, -} = require('os') -const { - chmod, - copyFile, - lstat, - mkdir, - readdir, - readlink, - stat, - symlink, - unlink, - utimes, -} = require('fs/promises') -const { - dirname, - isAbsolute, - join, - parse, - resolve, - sep, - toNamespacedPath, -} = require('path') -const { fileURLToPath } = require('url') - -const defaultOptions = { - dereference: false, - errorOnExist: false, - filter: undefined, - force: true, - preserveTimestamps: false, - recursive: false, -} - -async function cp (src, dest, opts) { - if (opts != null && typeof opts !== 'object') { - throw new ERR_INVALID_ARG_TYPE('options', ['Object'], opts) - } - return cpFn( - toNamespacedPath(getValidatedPath(src)), - toNamespacedPath(getValidatedPath(dest)), - { ...defaultOptions, ...opts }) -} - -function getValidatedPath (fileURLOrPath) { - const path = fileURLOrPath != null && fileURLOrPath.href - && fileURLOrPath.origin - ? 
fileURLToPath(fileURLOrPath) - : fileURLOrPath - return path -} - -async function cpFn (src, dest, opts) { - // Warn about using preserveTimestamps on 32-bit node - // istanbul ignore next - if (opts.preserveTimestamps && process.arch === 'ia32') { - const warning = 'Using the preserveTimestamps option in 32-bit ' + - 'node is not recommended' - process.emitWarning(warning, 'TimestampPrecisionWarning') - } - const stats = await checkPaths(src, dest, opts) - const { srcStat, destStat } = stats - await checkParentPaths(src, srcStat, dest) - if (opts.filter) { - return handleFilter(checkParentDir, destStat, src, dest, opts) - } - return checkParentDir(destStat, src, dest, opts) -} - -async function checkPaths (src, dest, opts) { - const { 0: srcStat, 1: destStat } = await getStats(src, dest, opts) - if (destStat) { - if (areIdentical(srcStat, destStat)) { - throw new ERR_FS_CP_EINVAL({ - message: 'src and dest cannot be the same', - path: dest, - syscall: 'cp', - errno: EINVAL, - }) - } - if (srcStat.isDirectory() && !destStat.isDirectory()) { - throw new ERR_FS_CP_DIR_TO_NON_DIR({ - message: `cannot overwrite directory ${src} ` + - `with non-directory ${dest}`, - path: dest, - syscall: 'cp', - errno: EISDIR, - }) - } - if (!srcStat.isDirectory() && destStat.isDirectory()) { - throw new ERR_FS_CP_NON_DIR_TO_DIR({ - message: `cannot overwrite non-directory ${src} ` + - `with directory ${dest}`, - path: dest, - syscall: 'cp', - errno: ENOTDIR, - }) - } - } - - if (srcStat.isDirectory() && isSrcSubdir(src, dest)) { - throw new ERR_FS_CP_EINVAL({ - message: `cannot copy ${src} to a subdirectory of self ${dest}`, - path: dest, - syscall: 'cp', - errno: EINVAL, - }) - } - return { srcStat, destStat } -} - -function areIdentical (srcStat, destStat) { - return destStat.ino && destStat.dev && destStat.ino === srcStat.ino && - destStat.dev === srcStat.dev -} - -function getStats (src, dest, opts) { - const statFunc = opts.dereference ? - (file) => stat(file, { bigint: true }) : - (file) => lstat(file, { bigint: true }) - return Promise.all([ - statFunc(src), - statFunc(dest).catch((err) => { - // istanbul ignore next: unsure how to cover. - if (err.code === 'ENOENT') { - return null - } - // istanbul ignore next: unsure how to cover. - throw err - }), - ]) -} - -async function checkParentDir (destStat, src, dest, opts) { - const destParent = dirname(dest) - const dirExists = await pathExists(destParent) - if (dirExists) { - return getStatsForCopy(destStat, src, dest, opts) - } - await mkdir(destParent, { recursive: true }) - return getStatsForCopy(destStat, src, dest, opts) -} - -function pathExists (dest) { - return stat(dest).then( - () => true, - // istanbul ignore next: not sure when this would occur - (err) => (err.code === 'ENOENT' ? false : Promise.reject(err))) -} - -// Recursively check if dest parent is a subdirectory of src. -// It works for all file types including symlinks since it -// checks the src and dest inodes. It starts from the deepest -// parent and stops once it reaches the src parent or the root path. 
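-// e.g. after `ln -s /a /b`, cp('/a', '/b/c') slips past the string-based
-// isSrcSubdir() check, but stat('/b') shares an inode with stat('/a'), so
-// the walk below still rejects the copy with ERR_FS_CP_EINVAL.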
-async function checkParentPaths (src, srcStat, dest) { - const srcParent = resolve(dirname(src)) - const destParent = resolve(dirname(dest)) - if (destParent === srcParent || destParent === parse(destParent).root) { - return - } - let destStat - try { - destStat = await stat(destParent, { bigint: true }) - } catch (err) { - // istanbul ignore else: not sure when this would occur - if (err.code === 'ENOENT') { - return - } - // istanbul ignore next: not sure when this would occur - throw err - } - if (areIdentical(srcStat, destStat)) { - throw new ERR_FS_CP_EINVAL({ - message: `cannot copy ${src} to a subdirectory of self ${dest}`, - path: dest, - syscall: 'cp', - errno: EINVAL, - }) - } - return checkParentPaths(src, srcStat, destParent) -} - -const normalizePathToArray = (path) => - resolve(path).split(sep).filter(Boolean) - -// Return true if dest is a subdir of src, otherwise false. -// It only checks the path strings. -function isSrcSubdir (src, dest) { - const srcArr = normalizePathToArray(src) - const destArr = normalizePathToArray(dest) - return srcArr.every((cur, i) => destArr[i] === cur) -} - -async function handleFilter (onInclude, destStat, src, dest, opts, cb) { - const include = await opts.filter(src, dest) - if (include) { - return onInclude(destStat, src, dest, opts, cb) - } -} - -function startCopy (destStat, src, dest, opts) { - if (opts.filter) { - return handleFilter(getStatsForCopy, destStat, src, dest, opts) - } - return getStatsForCopy(destStat, src, dest, opts) -} - -async function getStatsForCopy (destStat, src, dest, opts) { - const statFn = opts.dereference ? stat : lstat - const srcStat = await statFn(src) - // istanbul ignore else: can't portably test FIFO - if (srcStat.isDirectory() && opts.recursive) { - return onDir(srcStat, destStat, src, dest, opts) - } else if (srcStat.isDirectory()) { - throw new ERR_FS_EISDIR({ - message: `${src} is a directory (not copied)`, - path: src, - syscall: 'cp', - errno: EINVAL, - }) - } else if (srcStat.isFile() || - srcStat.isCharacterDevice() || - srcStat.isBlockDevice()) { - return onFile(srcStat, destStat, src, dest, opts) - } else if (srcStat.isSymbolicLink()) { - return onLink(destStat, src, dest) - } else if (srcStat.isSocket()) { - throw new ERR_FS_CP_SOCKET({ - message: `cannot copy a socket file: ${dest}`, - path: dest, - syscall: 'cp', - errno: EINVAL, - }) - } else if (srcStat.isFIFO()) { - throw new ERR_FS_CP_FIFO_PIPE({ - message: `cannot copy a FIFO pipe: ${dest}`, - path: dest, - syscall: 'cp', - errno: EINVAL, - }) - } - // istanbul ignore next: should be unreachable - throw new ERR_FS_CP_UNKNOWN({ - message: `cannot copy an unknown file type: ${dest}`, - path: dest, - syscall: 'cp', - errno: EINVAL, - }) -} - -function onFile (srcStat, destStat, src, dest, opts) { - if (!destStat) { - return _copyFile(srcStat, src, dest, opts) - } - return mayCopyFile(srcStat, src, dest, opts) -} - -async function mayCopyFile (srcStat, src, dest, opts) { - if (opts.force) { - await unlink(dest) - return _copyFile(srcStat, src, dest, opts) - } else if (opts.errorOnExist) { - throw new ERR_FS_CP_EEXIST({ - message: `${dest} already exists`, - path: dest, - syscall: 'cp', - errno: EEXIST, - }) - } -} - -async function _copyFile (srcStat, src, dest, opts) { - await copyFile(src, dest) - if (opts.preserveTimestamps) { - return handleTimestampsAndMode(srcStat.mode, src, dest) - } - return setDestMode(dest, srcStat.mode) -} - -async function handleTimestampsAndMode (srcMode, src, dest) { - // Make sure the file is writable before 
setting the timestamp - // otherwise open fails with EPERM when invoked with 'r+' - // (through utimes call) - if (fileIsNotWritable(srcMode)) { - await makeFileWritable(dest, srcMode) - return setDestTimestampsAndMode(srcMode, src, dest) - } - return setDestTimestampsAndMode(srcMode, src, dest) -} - -function fileIsNotWritable (srcMode) { - return (srcMode & 0o200) === 0 -} - -function makeFileWritable (dest, srcMode) { - return setDestMode(dest, srcMode | 0o200) -} - -async function setDestTimestampsAndMode (srcMode, src, dest) { - await setDestTimestamps(src, dest) - return setDestMode(dest, srcMode) -} - -function setDestMode (dest, srcMode) { - return chmod(dest, srcMode) -} - -async function setDestTimestamps (src, dest) { - // The initial srcStat.atime cannot be trusted - // because it is modified by the read(2) system call - // (See https://nodejs.org/api/fs.html#fs_stat_time_values) - const updatedSrcStat = await stat(src) - return utimes(dest, updatedSrcStat.atime, updatedSrcStat.mtime) -} - -function onDir (srcStat, destStat, src, dest, opts) { - if (!destStat) { - return mkDirAndCopy(srcStat.mode, src, dest, opts) - } - return copyDir(src, dest, opts) -} - -async function mkDirAndCopy (srcMode, src, dest, opts) { - await mkdir(dest) - await copyDir(src, dest, opts) - return setDestMode(dest, srcMode) -} - -async function copyDir (src, dest, opts) { - const dir = await readdir(src) - for (let i = 0; i < dir.length; i++) { - const item = dir[i] - const srcItem = join(src, item) - const destItem = join(dest, item) - const { destStat } = await checkPaths(srcItem, destItem, opts) - await startCopy(destStat, srcItem, destItem, opts) - } -} - -async function onLink (destStat, src, dest) { - let resolvedSrc = await readlink(src) - if (!isAbsolute(resolvedSrc)) { - resolvedSrc = resolve(dirname(src), resolvedSrc) - } - if (!destStat) { - return symlink(resolvedSrc, dest) - } - let resolvedDest - try { - resolvedDest = await readlink(dest) - } catch (err) { - // Dest exists and is a regular file or directory, - // Windows may throw UNKNOWN error. If dest already exists, - // fs throws error anyway, so no need to guard against it here. - // istanbul ignore next: can only test on windows - if (err.code === 'EINVAL' || err.code === 'UNKNOWN') { - return symlink(resolvedSrc, dest) - } - // istanbul ignore next: should not be possible - throw err - } - if (!isAbsolute(resolvedDest)) { - resolvedDest = resolve(dirname(dest), resolvedDest) - } - if (isSrcSubdir(resolvedSrc, resolvedDest)) { - throw new ERR_FS_CP_EINVAL({ - message: `cannot copy ${resolvedSrc} to a subdirectory of self ` + - `${resolvedDest}`, - path: dest, - syscall: 'cp', - errno: EINVAL, - }) - } - // Do not copy if src is a subdir of dest since unlinking - // dest in this case would result in removing src contents - // and therefore a broken symlink would be created. 
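-  // e.g. a dest link resolving to /data with a src link resolving to
-  // /data/sub trips this guard: overwriting /data's link with one pointing
-  // inside itself is refused with ERR_FS_CP_SYMLINK_TO_SUBDIRECTORY.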
- const srcStat = await stat(src) - if (srcStat.isDirectory() && isSrcSubdir(resolvedDest, resolvedSrc)) { - throw new ERR_FS_CP_SYMLINK_TO_SUBDIRECTORY({ - message: `cannot overwrite ${resolvedDest} with ${resolvedSrc}`, - path: dest, - syscall: 'cp', - errno: EINVAL, - }) - } - return copyLink(resolvedSrc, dest) -} - -async function copyLink (resolvedSrc, dest) { - await unlink(dest) - return symlink(resolvedSrc, dest) -} - -module.exports = cp diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/index.js b/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/index.js deleted file mode 100644 index 81c746304cc428..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/index.js +++ /dev/null @@ -1,13 +0,0 @@ -'use strict' - -const cp = require('./cp/index.js') -const withTempDir = require('./with-temp-dir.js') -const readdirScoped = require('./readdir-scoped.js') -const moveFile = require('./move-file.js') - -module.exports = { - cp, - withTempDir, - readdirScoped, - moveFile, -} diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/move-file.js b/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/move-file.js deleted file mode 100644 index d56e06d384659a..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/move-file.js +++ /dev/null @@ -1,78 +0,0 @@ -const { dirname, join, resolve, relative, isAbsolute } = require('path') -const fs = require('fs/promises') - -const pathExists = async path => { - try { - await fs.access(path) - return true - } catch (er) { - return er.code !== 'ENOENT' - } -} - -const moveFile = async (source, destination, options = {}, root = true, symlinks = []) => { - if (!source || !destination) { - throw new TypeError('`source` and `destination` file required') - } - - options = { - overwrite: true, - ...options, - } - - if (!options.overwrite && await pathExists(destination)) { - throw new Error(`The destination file exists: ${destination}`) - } - - await fs.mkdir(dirname(destination), { recursive: true }) - - try { - await fs.rename(source, destination) - } catch (error) { - if (error.code === 'EXDEV' || error.code === 'EPERM') { - const sourceStat = await fs.lstat(source) - if (sourceStat.isDirectory()) { - const files = await fs.readdir(source) - await Promise.all(files.map((file) => - moveFile(join(source, file), join(destination, file), options, false, symlinks) - )) - } else if (sourceStat.isSymbolicLink()) { - symlinks.push({ source, destination }) - } else { - await fs.copyFile(source, destination) - } - } else { - throw error - } - } - - if (root) { - await Promise.all(symlinks.map(async ({ source: symSource, destination: symDestination }) => { - let target = await fs.readlink(symSource) - // junction symlinks in windows will be absolute paths, so we need to - // make sure they point to the symlink destination - if (isAbsolute(target)) { - target = resolve(symDestination, relative(symSource, target)) - } - // try to determine what the actual file is so we can create the correct - // type of symlink in windows - let targetStat = 'file' - try { - targetStat = await fs.stat(resolve(dirname(symSource), target)) - if (targetStat.isDirectory()) { - targetStat = 'junction' - } - } catch { - // targetStat remains 'file' - } - await fs.symlink( - target, - symDestination, - targetStat - ) - })) - await fs.rm(source, { recursive: true, force: true }) - } -} - -module.exports = moveFile diff --git 
a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/readdir-scoped.js b/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/readdir-scoped.js deleted file mode 100644 index cd601dfbe7486b..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/readdir-scoped.js +++ /dev/null @@ -1,20 +0,0 @@ -const { readdir } = require('fs/promises') -const { join } = require('path') - -const readdirScoped = async (dir) => { - const results = [] - - for (const item of await readdir(dir)) { - if (item.startsWith('@')) { - for (const scopedItem of await readdir(join(dir, item))) { - results.push(join(item, scopedItem)) - } - } else { - results.push(item) - } - } - - return results -} - -module.exports = readdirScoped diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/with-temp-dir.js b/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/with-temp-dir.js deleted file mode 100644 index 0738ac4f29e1be..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/lib/with-temp-dir.js +++ /dev/null @@ -1,39 +0,0 @@ -const { join, sep } = require('path') - -const getOptions = require('./common/get-options.js') -const { mkdir, mkdtemp, rm } = require('fs/promises') - -// create a temp directory, ensure its permissions match its parent, then call -// the supplied function passing it the path to the directory. clean up after -// the function finishes, whether it throws or not -const withTempDir = async (root, fn, opts) => { - const options = getOptions(opts, { - copy: ['tmpPrefix'], - }) - // create the directory - await mkdir(root, { recursive: true }) - - const target = await mkdtemp(join(`${root}${sep}`, options.tmpPrefix || '')) - let err - let result - - try { - result = await fn(target) - } catch (_err) { - err = _err - } - - try { - await rm(target, { force: true, recursive: true }) - } catch { - // ignore errors - } - - if (err) { - throw err - } - - return result -} - -module.exports = withTempDir diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/package.json b/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/package.json deleted file mode 100644 index 5261a11b78000e..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/@npmcli/fs/package.json +++ /dev/null @@ -1,52 +0,0 @@ -{ - "name": "@npmcli/fs", - "version": "3.1.1", - "description": "filesystem utilities for the npm cli", - "main": "lib/index.js", - "files": [ - "bin/", - "lib/" - ], - "scripts": { - "snap": "tap", - "test": "tap", - "npmclilint": "npmcli-lint", - "lint": "eslint \"**/*.{js,cjs,ts,mjs,jsx,tsx}\"", - "lintfix": "npm run lint -- --fix", - "posttest": "npm run lint", - "postsnap": "npm run lintfix --", - "postlint": "template-oss-check", - "template-oss-apply": "template-oss-apply --force" - }, - "repository": { - "type": "git", - "url": "git+https://github.com/npm/fs.git" - }, - "keywords": [ - "npm", - "oss" - ], - "author": "GitHub Inc.", - "license": "ISC", - "devDependencies": { - "@npmcli/eslint-config": "^4.0.0", - "@npmcli/template-oss": "4.22.0", - "tap": "^16.0.1" - }, - "dependencies": { - "semver": "^7.3.5" - }, - "engines": { - "node": "^14.17.0 || ^16.13.0 || >=18.0.0" - }, - "templateOSS": { - "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. 
Edits may be overwritten.", - "version": "4.22.0" - }, - "tap": { - "nyc-arg": [ - "--exclude", - "tap-snapshots/**" - ] - } -} diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/LICENSE.md b/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/LICENSE.md deleted file mode 100644 index 8d28acf866d932..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/LICENSE.md +++ /dev/null @@ -1,16 +0,0 @@ -ISC License - -Copyright (c) npm, Inc. - -Permission to use, copy, modify, and/or distribute this software for -any purpose with or without fee is hereby granted, provided that the -above copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE COPYRIGHT HOLDER DISCLAIMS -ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE -COPYRIGHT HOLDER BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR -CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS -OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE -OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE -USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/content/path.js b/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/content/path.js deleted file mode 100644 index ad5a76a4f73f26..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/content/path.js +++ /dev/null @@ -1,29 +0,0 @@ -'use strict' - -const contentVer = require('../../package.json')['cache-version'].content -const hashToSegments = require('../util/hash-to-segments') -const path = require('path') -const ssri = require('ssri') - -// Current format of content file path: -// -// sha512-BaSE64Hex= -> -// ~/.my-cache/content-v2/sha512/ba/da/55deadbeefc0ffee -// -module.exports = contentPath - -function contentPath (cache, integrity) { - const sri = ssri.parse(integrity, { single: true }) - // contentPath is the *strongest* algo given - return path.join( - contentDir(cache), - sri.algorithm, - ...hashToSegments(sri.hexDigest()) - ) -} - -module.exports.contentDir = contentDir - -function contentDir (cache) { - return path.join(cache, `content-v${contentVer}`) -} diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/content/read.js b/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/content/read.js deleted file mode 100644 index 5f6192c3cec566..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/content/read.js +++ /dev/null @@ -1,165 +0,0 @@ -'use strict' - -const fs = require('fs/promises') -const fsm = require('fs-minipass') -const ssri = require('ssri') -const contentPath = require('./path') -const Pipeline = require('minipass-pipeline') - -module.exports = read - -const MAX_SINGLE_READ_SIZE = 64 * 1024 * 1024 -async function read (cache, integrity, opts = {}) { - const { size } = opts - const { stat, cpath, sri } = await withContentSri(cache, integrity, async (cpath, sri) => { - // get size - const stat = size ? 
{ size } : await fs.stat(cpath) - return { stat, cpath, sri } - }) - - if (stat.size > MAX_SINGLE_READ_SIZE) { - return readPipeline(cpath, stat.size, sri, new Pipeline()).concat() - } - - const data = await fs.readFile(cpath, { encoding: null }) - - if (stat.size !== data.length) { - throw sizeError(stat.size, data.length) - } - - if (!ssri.checkData(data, sri)) { - throw integrityError(sri, cpath) - } - - return data -} - -const readPipeline = (cpath, size, sri, stream) => { - stream.push( - new fsm.ReadStream(cpath, { - size, - readSize: MAX_SINGLE_READ_SIZE, - }), - ssri.integrityStream({ - integrity: sri, - size, - }) - ) - return stream -} - -module.exports.stream = readStream -module.exports.readStream = readStream - -function readStream (cache, integrity, opts = {}) { - const { size } = opts - const stream = new Pipeline() - // Set all this up to run on the stream and then just return the stream - Promise.resolve().then(async () => { - const { stat, cpath, sri } = await withContentSri(cache, integrity, async (cpath, sri) => { - // get size - const stat = size ? { size } : await fs.stat(cpath) - return { stat, cpath, sri } - }) - - return readPipeline(cpath, stat.size, sri, stream) - }).catch(err => stream.emit('error', err)) - - return stream -} - -module.exports.copy = copy - -function copy (cache, integrity, dest) { - return withContentSri(cache, integrity, (cpath) => { - return fs.copyFile(cpath, dest) - }) -} - -module.exports.hasContent = hasContent - -async function hasContent (cache, integrity) { - if (!integrity) { - return false - } - - try { - return await withContentSri(cache, integrity, async (cpath, sri) => { - const stat = await fs.stat(cpath) - return { size: stat.size, sri, stat } - }) - } catch (err) { - if (err.code === 'ENOENT') { - return false - } - - if (err.code === 'EPERM') { - /* istanbul ignore else */ - if (process.platform !== 'win32') { - throw err - } else { - return false - } - } - } -} - -async function withContentSri (cache, integrity, fn) { - const sri = ssri.parse(integrity) - // If `integrity` has multiple entries, pick the first digest - // with available local data. 
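-  // e.g. an integrity string carrying two sha512 digests yields two entries
-  // for the picked algorithm; both are probed in parallel below and the
-  // first non-error result (content present on disk) is returned.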
- const algo = sri.pickAlgorithm() - const digests = sri[algo] - - if (digests.length <= 1) { - const cpath = contentPath(cache, digests[0]) - return fn(cpath, digests[0]) - } else { - // Can't use race here because a generic error can happen before - // a ENOENT error, and can happen before a valid result - const results = await Promise.all(digests.map(async (meta) => { - try { - return await withContentSri(cache, meta, fn) - } catch (err) { - if (err.code === 'ENOENT') { - return Object.assign( - new Error('No matching content found for ' + sri.toString()), - { code: 'ENOENT' } - ) - } - return err - } - })) - // Return the first non error if it is found - const result = results.find((r) => !(r instanceof Error)) - if (result) { - return result - } - - // Throw the No matching content found error - const enoentError = results.find((r) => r.code === 'ENOENT') - if (enoentError) { - throw enoentError - } - - // Throw generic error - throw results.find((r) => r instanceof Error) - } -} - -function sizeError (expected, found) { - /* eslint-disable-next-line max-len */ - const err = new Error(`Bad data size: expected inserted data to be ${expected} bytes, but got ${found} instead`) - err.expected = expected - err.found = found - err.code = 'EBADSIZE' - return err -} - -function integrityError (sri, path) { - const err = new Error(`Integrity verification failed for ${sri} (${path})`) - err.code = 'EINTEGRITY' - err.sri = sri - err.path = path - return err -} diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/content/rm.js b/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/content/rm.js deleted file mode 100644 index ce58d679e4cb25..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/content/rm.js +++ /dev/null @@ -1,18 +0,0 @@ -'use strict' - -const fs = require('fs/promises') -const contentPath = require('./path') -const { hasContent } = require('./read') - -module.exports = rm - -async function rm (cache, integrity) { - const content = await hasContent(cache, integrity) - // ~pretty~ sure we can't end up with a content lacking sri, but be safe - if (content && content.sri) { - await fs.rm(contentPath(cache, content.sri), { recursive: true, force: true }) - return true - } else { - return false - } -} diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/content/write.js b/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/content/write.js deleted file mode 100644 index e7187abca8788a..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/content/write.js +++ /dev/null @@ -1,206 +0,0 @@ -'use strict' - -const events = require('events') - -const contentPath = require('./path') -const fs = require('fs/promises') -const { moveFile } = require('@npmcli/fs') -const { Minipass } = require('minipass') -const Pipeline = require('minipass-pipeline') -const Flush = require('minipass-flush') -const path = require('path') -const ssri = require('ssri') -const uniqueFilename = require('unique-filename') -const fsm = require('fs-minipass') - -module.exports = write - -// Cache of move operations in process so we don't duplicate -const moveOperations = new Map() - -async function write (cache, data, opts = {}) { - const { algorithms, size, integrity } = opts - - if (typeof size === 'number' && data.length !== size) { - throw sizeError(size, data.length) - } - - const sri = ssri.fromData(data, algorithms ? 
{ algorithms } : {}) - if (integrity && !ssri.checkData(data, integrity, opts)) { - throw checksumError(integrity, sri) - } - - for (const algo in sri) { - const tmp = await makeTmp(cache, opts) - const hash = sri[algo].toString() - try { - await fs.writeFile(tmp.target, data, { flag: 'wx' }) - await moveToDestination(tmp, cache, hash, opts) - } finally { - if (!tmp.moved) { - await fs.rm(tmp.target, { recursive: true, force: true }) - } - } - } - return { integrity: sri, size: data.length } -} - -module.exports.stream = writeStream - -// writes proxied to the 'inputStream' that is passed to the Promise -// 'end' is deferred until content is handled. -class CacacheWriteStream extends Flush { - constructor (cache, opts) { - super() - this.opts = opts - this.cache = cache - this.inputStream = new Minipass() - this.inputStream.on('error', er => this.emit('error', er)) - this.inputStream.on('drain', () => this.emit('drain')) - this.handleContentP = null - } - - write (chunk, encoding, cb) { - if (!this.handleContentP) { - this.handleContentP = handleContent( - this.inputStream, - this.cache, - this.opts - ) - this.handleContentP.catch(error => this.emit('error', error)) - } - return this.inputStream.write(chunk, encoding, cb) - } - - flush (cb) { - this.inputStream.end(() => { - if (!this.handleContentP) { - const e = new Error('Cache input stream was empty') - e.code = 'ENODATA' - // empty streams are probably emitting end right away. - // defer this one tick by rejecting a promise on it. - return Promise.reject(e).catch(cb) - } - // eslint-disable-next-line promise/catch-or-return - this.handleContentP.then( - (res) => { - res.integrity && this.emit('integrity', res.integrity) - // eslint-disable-next-line promise/always-return - res.size !== null && this.emit('size', res.size) - cb() - }, - (er) => cb(er) - ) - }) - } -} - -function writeStream (cache, opts = {}) { - return new CacacheWriteStream(cache, opts) -} - -async function handleContent (inputStream, cache, opts) { - const tmp = await makeTmp(cache, opts) - try { - const res = await pipeToTmp(inputStream, cache, tmp.target, opts) - await moveToDestination( - tmp, - cache, - res.integrity, - opts - ) - return res - } finally { - if (!tmp.moved) { - await fs.rm(tmp.target, { recursive: true, force: true }) - } - } -} - -async function pipeToTmp (inputStream, cache, tmpTarget, opts) { - const outStream = new fsm.WriteStream(tmpTarget, { - flags: 'wx', - }) - - if (opts.integrityEmitter) { - // we need to create these all simultaneously since they can fire in any order - const [integrity, size] = await Promise.all([ - events.once(opts.integrityEmitter, 'integrity').then(res => res[0]), - events.once(opts.integrityEmitter, 'size').then(res => res[0]), - new Pipeline(inputStream, outStream).promise(), - ]) - return { integrity, size } - } - - let integrity - let size - const hashStream = ssri.integrityStream({ - integrity: opts.integrity, - algorithms: opts.algorithms, - size: opts.size, - }) - hashStream.on('integrity', i => { - integrity = i - }) - hashStream.on('size', s => { - size = s - }) - - const pipeline = new Pipeline(inputStream, hashStream, outStream) - await pipeline.promise() - return { integrity, size } -} - -async function makeTmp (cache, opts) { - const tmpTarget = uniqueFilename(path.join(cache, 'tmp'), opts.tmpPrefix) - await fs.mkdir(path.dirname(tmpTarget), { recursive: true }) - return { - target: tmpTarget, - moved: false, - } -} - -async function moveToDestination (tmp, cache, sri) { - const destination = 
contentPath(cache, sri) - const destDir = path.dirname(destination) - if (moveOperations.has(destination)) { - return moveOperations.get(destination) - } - moveOperations.set( - destination, - fs.mkdir(destDir, { recursive: true }) - .then(async () => { - await moveFile(tmp.target, destination, { overwrite: false }) - tmp.moved = true - return tmp.moved - }) - .catch(err => { - if (!err.message.startsWith('The destination file exists')) { - throw Object.assign(err, { code: 'EEXIST' }) - } - }).finally(() => { - moveOperations.delete(destination) - }) - - ) - return moveOperations.get(destination) -} - -function sizeError (expected, found) { - /* eslint-disable-next-line max-len */ - const err = new Error(`Bad data size: expected inserted data to be ${expected} bytes, but got ${found} instead`) - err.expected = expected - err.found = found - err.code = 'EBADSIZE' - return err -} - -function checksumError (expected, found) { - const err = new Error(`Integrity check failed: - Wanted: ${expected} - Found: ${found}`) - err.code = 'EINTEGRITY' - err.expected = expected - err.found = found - return err -} diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/entry-index.js b/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/entry-index.js deleted file mode 100644 index 89c28f2f257d48..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/entry-index.js +++ /dev/null @@ -1,336 +0,0 @@ -'use strict' - -const crypto = require('crypto') -const { - appendFile, - mkdir, - readFile, - readdir, - rm, - writeFile, -} = require('fs/promises') -const { Minipass } = require('minipass') -const path = require('path') -const ssri = require('ssri') -const uniqueFilename = require('unique-filename') - -const contentPath = require('./content/path') -const hashToSegments = require('./util/hash-to-segments') -const indexV = require('../package.json')['cache-version'].index -const { moveFile } = require('@npmcli/fs') - -const pMap = require('p-map') -const lsStreamConcurrency = 5 - -module.exports.NotFoundError = class NotFoundError extends Error { - constructor (cache, key) { - super(`No cache entry for ${key} found in ${cache}`) - this.code = 'ENOENT' - this.cache = cache - this.key = key - } -} - -module.exports.compact = compact - -async function compact (cache, key, matchFn, opts = {}) { - const bucket = bucketPath(cache, key) - const entries = await bucketEntries(bucket) - const newEntries = [] - // we loop backwards because the bottom-most result is the newest - // since we add new entries with appendFile - for (let i = entries.length - 1; i >= 0; --i) { - const entry = entries[i] - // a null integrity could mean either a delete was appended - // or the user has simply stored an index that does not map - // to any content. we determine if the user wants to keep the - // null integrity based on the validateEntry function passed in options. - // if the integrity is null and no validateEntry is provided, we break - // as we consider the null integrity to be a deletion of everything - // that came before it. 
- if (entry.integrity === null && !opts.validateEntry) { - break - } - - // if this entry is valid, and it is either the first entry or - // the newEntries array doesn't already include an entry that - // matches this one based on the provided matchFn, then we add - // it to the beginning of our list - if ((!opts.validateEntry || opts.validateEntry(entry) === true) && - (newEntries.length === 0 || - !newEntries.find((oldEntry) => matchFn(oldEntry, entry)))) { - newEntries.unshift(entry) - } - } - - const newIndex = '\n' + newEntries.map((entry) => { - const stringified = JSON.stringify(entry) - const hash = hashEntry(stringified) - return `${hash}\t${stringified}` - }).join('\n') - - const setup = async () => { - const target = uniqueFilename(path.join(cache, 'tmp'), opts.tmpPrefix) - await mkdir(path.dirname(target), { recursive: true }) - return { - target, - moved: false, - } - } - - const teardown = async (tmp) => { - if (!tmp.moved) { - return rm(tmp.target, { recursive: true, force: true }) - } - } - - const write = async (tmp) => { - await writeFile(tmp.target, newIndex, { flag: 'wx' }) - await mkdir(path.dirname(bucket), { recursive: true }) - // we use @npmcli/move-file directly here because we - // want to overwrite the existing file - await moveFile(tmp.target, bucket) - tmp.moved = true - } - - // write the file atomically - const tmp = await setup() - try { - await write(tmp) - } finally { - await teardown(tmp) - } - - // we reverse the list we generated such that the newest - // entries come first in order to make looping through them easier - // the true passed to formatEntry tells it to keep null - // integrity values, if they made it this far it's because - // validateEntry returned true, and as such we should return it - return newEntries.reverse().map((entry) => formatEntry(cache, entry, true)) -} - -module.exports.insert = insert - -async function insert (cache, key, integrity, opts = {}) { - const { metadata, size, time } = opts - const bucket = bucketPath(cache, key) - const entry = { - key, - integrity: integrity && ssri.stringify(integrity), - time: time || Date.now(), - size, - metadata, - } - try { - await mkdir(path.dirname(bucket), { recursive: true }) - const stringified = JSON.stringify(entry) - // NOTE - Cleverness ahoy! - // - // This works because it's tremendously unlikely for an entry to corrupt - // another while still preserving the string length of the JSON in - // question. So, we just slap the length in there and verify it on read. - // - // Thanks to @isaacs for the whiteboarding session that ended up with - // this. 
- await appendFile(bucket, `\n${hashEntry(stringified)}\t${stringified}`) - } catch (err) { - if (err.code === 'ENOENT') { - return undefined - } - - throw err - } - return formatEntry(cache, entry) -} - -module.exports.find = find - -async function find (cache, key) { - const bucket = bucketPath(cache, key) - try { - const entries = await bucketEntries(bucket) - return entries.reduce((latest, next) => { - if (next && next.key === key) { - return formatEntry(cache, next) - } else { - return latest - } - }, null) - } catch (err) { - if (err.code === 'ENOENT') { - return null - } else { - throw err - } - } -} - -module.exports.delete = del - -function del (cache, key, opts = {}) { - if (!opts.removeFully) { - return insert(cache, key, null, opts) - } - - const bucket = bucketPath(cache, key) - return rm(bucket, { recursive: true, force: true }) -} - -module.exports.lsStream = lsStream - -function lsStream (cache) { - const indexDir = bucketDir(cache) - const stream = new Minipass({ objectMode: true }) - - // Set all this up to run on the stream and then just return the stream - Promise.resolve().then(async () => { - const buckets = await readdirOrEmpty(indexDir) - await pMap(buckets, async (bucket) => { - const bucketPath = path.join(indexDir, bucket) - const subbuckets = await readdirOrEmpty(bucketPath) - await pMap(subbuckets, async (subbucket) => { - const subbucketPath = path.join(bucketPath, subbucket) - - // "/cachename//./*" - const subbucketEntries = await readdirOrEmpty(subbucketPath) - await pMap(subbucketEntries, async (entry) => { - const entryPath = path.join(subbucketPath, entry) - try { - const entries = await bucketEntries(entryPath) - // using a Map here prevents duplicate keys from showing up - // twice, I guess? - const reduced = entries.reduce((acc, entry) => { - acc.set(entry.key, entry) - return acc - }, new Map()) - // reduced is a map of key => entry - for (const entry of reduced.values()) { - const formatted = formatEntry(cache, entry) - if (formatted) { - stream.write(formatted) - } - } - } catch (err) { - if (err.code === 'ENOENT') { - return undefined - } - throw err - } - }, - { concurrency: lsStreamConcurrency }) - }, - { concurrency: lsStreamConcurrency }) - }, - { concurrency: lsStreamConcurrency }) - stream.end() - return stream - }).catch(err => stream.emit('error', err)) - - return stream -} - -module.exports.ls = ls - -async function ls (cache) { - const entries = await lsStream(cache).collect() - return entries.reduce((acc, xs) => { - acc[xs.key] = xs - return acc - }, {}) -} - -module.exports.bucketEntries = bucketEntries - -async function bucketEntries (bucket, filter) { - const data = await readFile(bucket, 'utf8') - return _bucketEntries(data, filter) -} - -function _bucketEntries (data) { - const entries = [] - data.split('\n').forEach((entry) => { - if (!entry) { - return - } - - const pieces = entry.split('\t') - if (!pieces[1] || hashEntry(pieces[1]) !== pieces[0]) { - // Hash is no good! Corruption or malice? Doesn't matter! 
- // EJECT EJECT - return - } - let obj - try { - obj = JSON.parse(pieces[1]) - } catch (_) { - // eslint-ignore-next-line no-empty-block - } - // coverage disabled here, no need to test with an entry that parses to something falsey - // istanbul ignore else - if (obj) { - entries.push(obj) - } - }) - return entries -} - -module.exports.bucketDir = bucketDir - -function bucketDir (cache) { - return path.join(cache, `index-v${indexV}`) -} - -module.exports.bucketPath = bucketPath - -function bucketPath (cache, key) { - const hashed = hashKey(key) - return path.join.apply( - path, - [bucketDir(cache)].concat(hashToSegments(hashed)) - ) -} - -module.exports.hashKey = hashKey - -function hashKey (key) { - return hash(key, 'sha256') -} - -module.exports.hashEntry = hashEntry - -function hashEntry (str) { - return hash(str, 'sha1') -} - -function hash (str, digest) { - return crypto - .createHash(digest) - .update(str) - .digest('hex') -} - -function formatEntry (cache, entry, keepAll) { - // Treat null digests as deletions. They'll shadow any previous entries. - if (!entry.integrity && !keepAll) { - return null - } - - return { - key: entry.key, - integrity: entry.integrity, - path: entry.integrity ? contentPath(cache, entry.integrity) : undefined, - size: entry.size, - time: entry.time, - metadata: entry.metadata, - } -} - -function readdirOrEmpty (dir) { - return readdir(dir).catch((err) => { - if (err.code === 'ENOENT' || err.code === 'ENOTDIR') { - return [] - } - - throw err - }) -} diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/get.js b/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/get.js deleted file mode 100644 index 80ec206c7ecaaa..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/get.js +++ /dev/null @@ -1,170 +0,0 @@ -'use strict' - -const Collect = require('minipass-collect') -const { Minipass } = require('minipass') -const Pipeline = require('minipass-pipeline') - -const index = require('./entry-index') -const memo = require('./memoization') -const read = require('./content/read') - -async function getData (cache, key, opts = {}) { - const { integrity, memoize, size } = opts - const memoized = memo.get(cache, key, opts) - if (memoized && memoize !== false) { - return { - metadata: memoized.entry.metadata, - data: memoized.data, - integrity: memoized.entry.integrity, - size: memoized.entry.size, - } - } - - const entry = await index.find(cache, key, opts) - if (!entry) { - throw new index.NotFoundError(cache, key) - } - const data = await read(cache, entry.integrity, { integrity, size }) - if (memoize) { - memo.put(cache, entry, data, opts) - } - - return { - data, - metadata: entry.metadata, - size: entry.size, - integrity: entry.integrity, - } -} -module.exports = getData - -async function getDataByDigest (cache, key, opts = {}) { - const { integrity, memoize, size } = opts - const memoized = memo.get.byDigest(cache, key, opts) - if (memoized && memoize !== false) { - return memoized - } - - const res = await read(cache, key, { integrity, size }) - if (memoize) { - memo.put.byDigest(cache, key, res, opts) - } - return res -} -module.exports.byDigest = getDataByDigest - -const getMemoizedStream = (memoized) => { - const stream = new Minipass() - stream.on('newListener', function (ev, cb) { - ev === 'metadata' && cb(memoized.entry.metadata) - ev === 'integrity' && cb(memoized.entry.integrity) - ev === 'size' && cb(memoized.entry.size) - }) - stream.end(memoized.data) - return stream -} - -function getStream 
(cache, key, opts = {}) { - const { memoize, size } = opts - const memoized = memo.get(cache, key, opts) - if (memoized && memoize !== false) { - return getMemoizedStream(memoized) - } - - const stream = new Pipeline() - // Set all this up to run on the stream and then just return the stream - Promise.resolve().then(async () => { - const entry = await index.find(cache, key) - if (!entry) { - throw new index.NotFoundError(cache, key) - } - - stream.emit('metadata', entry.metadata) - stream.emit('integrity', entry.integrity) - stream.emit('size', entry.size) - stream.on('newListener', function (ev, cb) { - ev === 'metadata' && cb(entry.metadata) - ev === 'integrity' && cb(entry.integrity) - ev === 'size' && cb(entry.size) - }) - - const src = read.readStream( - cache, - entry.integrity, - { ...opts, size: typeof size !== 'number' ? entry.size : size } - ) - - if (memoize) { - const memoStream = new Collect.PassThrough() - memoStream.on('collect', data => memo.put(cache, entry, data, opts)) - stream.unshift(memoStream) - } - stream.unshift(src) - return stream - }).catch((err) => stream.emit('error', err)) - - return stream -} - -module.exports.stream = getStream - -function getStreamDigest (cache, integrity, opts = {}) { - const { memoize } = opts - const memoized = memo.get.byDigest(cache, integrity, opts) - if (memoized && memoize !== false) { - const stream = new Minipass() - stream.end(memoized) - return stream - } else { - const stream = read.readStream(cache, integrity, opts) - if (!memoize) { - return stream - } - - const memoStream = new Collect.PassThrough() - memoStream.on('collect', data => memo.put.byDigest( - cache, - integrity, - data, - opts - )) - return new Pipeline(stream, memoStream) - } -} - -module.exports.stream.byDigest = getStreamDigest - -function info (cache, key, opts = {}) { - const { memoize } = opts - const memoized = memo.get(cache, key, opts) - if (memoized && memoize !== false) { - return Promise.resolve(memoized.entry) - } else { - return index.find(cache, key) - } -} -module.exports.info = info - -async function copy (cache, key, dest, opts = {}) { - const entry = await index.find(cache, key, opts) - if (!entry) { - throw new index.NotFoundError(cache, key) - } - await read.copy(cache, entry.integrity, dest, opts) - return { - metadata: entry.metadata, - size: entry.size, - integrity: entry.integrity, - } -} - -module.exports.copy = copy - -async function copyByDigest (cache, key, dest, opts = {}) { - await read.copy(cache, key, dest, opts) - return key -} - -module.exports.copy.byDigest = copyByDigest - -module.exports.hasContent = read.hasContent diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/index.js b/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/index.js deleted file mode 100644 index c9b0da5f3a271b..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/index.js +++ /dev/null @@ -1,42 +0,0 @@ -'use strict' - -const get = require('./get.js') -const put = require('./put.js') -const rm = require('./rm.js') -const verify = require('./verify.js') -const { clearMemoized } = require('./memoization.js') -const tmp = require('./util/tmp.js') -const index = require('./entry-index.js') - -module.exports.index = {} -module.exports.index.compact = index.compact -module.exports.index.insert = index.insert - -module.exports.ls = index.ls -module.exports.ls.stream = index.lsStream - -module.exports.get = get -module.exports.get.byDigest = get.byDigest -module.exports.get.stream = get.stream 
-module.exports.get.stream.byDigest = get.stream.byDigest -module.exports.get.copy = get.copy -module.exports.get.copy.byDigest = get.copy.byDigest -module.exports.get.info = get.info -module.exports.get.hasContent = get.hasContent - -module.exports.put = put -module.exports.put.stream = put.stream - -module.exports.rm = rm.entry -module.exports.rm.all = rm.all -module.exports.rm.entry = module.exports.rm -module.exports.rm.content = rm.content - -module.exports.clearMemoized = clearMemoized - -module.exports.tmp = {} -module.exports.tmp.mkdir = tmp.mkdir -module.exports.tmp.withTmp = tmp.withTmp - -module.exports.verify = verify -module.exports.verify.lastRun = verify.lastRun diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/memoization.js b/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/memoization.js deleted file mode 100644 index 2ecc60912e4563..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/memoization.js +++ /dev/null @@ -1,72 +0,0 @@ -'use strict' - -const { LRUCache } = require('lru-cache') - -const MEMOIZED = new LRUCache({ - max: 500, - maxSize: 50 * 1024 * 1024, // 50MB - ttl: 3 * 60 * 1000, // 3 minutes - sizeCalculation: (entry, key) => key.startsWith('key:') ? entry.data.length : entry.length, -}) - -module.exports.clearMemoized = clearMemoized - -function clearMemoized () { - const old = {} - MEMOIZED.forEach((v, k) => { - old[k] = v - }) - MEMOIZED.clear() - return old -} - -module.exports.put = put - -function put (cache, entry, data, opts) { - pickMem(opts).set(`key:${cache}:${entry.key}`, { entry, data }) - putDigest(cache, entry.integrity, data, opts) -} - -module.exports.put.byDigest = putDigest - -function putDigest (cache, integrity, data, opts) { - pickMem(opts).set(`digest:${cache}:${integrity}`, data) -} - -module.exports.get = get - -function get (cache, key, opts) { - return pickMem(opts).get(`key:${cache}:${key}`) -} - -module.exports.get.byDigest = getDigest - -function getDigest (cache, integrity, opts) { - return pickMem(opts).get(`digest:${cache}:${integrity}`) -} - -class ObjProxy { - constructor (obj) { - this.obj = obj - } - - get (key) { - return this.obj[key] - } - - set (key, val) { - this.obj[key] = val - } -} - -function pickMem (opts) { - if (!opts || !opts.memoize) { - return MEMOIZED - } else if (opts.memoize.get && opts.memoize.set) { - return opts.memoize - } else if (typeof opts.memoize === 'object') { - return new ObjProxy(opts.memoize) - } else { - return MEMOIZED - } -} diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/put.js b/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/put.js deleted file mode 100644 index 9fc932d5f6dec5..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/put.js +++ /dev/null @@ -1,80 +0,0 @@ -'use strict' - -const index = require('./entry-index') -const memo = require('./memoization') -const write = require('./content/write') -const Flush = require('minipass-flush') -const { PassThrough } = require('minipass-collect') -const Pipeline = require('minipass-pipeline') - -const putOpts = (opts) => ({ - algorithms: ['sha512'], - ...opts, -}) - -module.exports = putData - -async function putData (cache, key, data, opts = {}) { - const { memoize } = opts - opts = putOpts(opts) - const res = await write(cache, data, opts) - const entry = await index.insert(cache, key, res.integrity, { ...opts, size: res.size }) - if (memoize) { - memo.put(cache, entry, data, opts) - } - - return 
res.integrity -} - -module.exports.stream = putStream - -function putStream (cache, key, opts = {}) { - const { memoize } = opts - opts = putOpts(opts) - let integrity - let size - let error - - let memoData - const pipeline = new Pipeline() - // first item in the pipeline is the memoizer, because we need - // that to end first and get the collected data. - if (memoize) { - const memoizer = new PassThrough().on('collect', data => { - memoData = data - }) - pipeline.push(memoizer) - } - - // contentStream is a write-only, not a passthrough - // no data comes out of it. - const contentStream = write.stream(cache, opts) - .on('integrity', (int) => { - integrity = int - }) - .on('size', (s) => { - size = s - }) - .on('error', (err) => { - error = err - }) - - pipeline.push(contentStream) - - // last but not least, we write the index and emit hash and size, - // and memoize if we're doing that - pipeline.push(new Flush({ - async flush () { - if (!error) { - const entry = await index.insert(cache, key, integrity, { ...opts, size }) - if (memoize && memoData) { - memo.put(cache, entry, memoData, opts) - } - pipeline.emit('integrity', integrity) - pipeline.emit('size', size) - } - }, - })) - - return pipeline -} diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/rm.js b/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/rm.js deleted file mode 100644 index a94760c7cf2430..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/rm.js +++ /dev/null @@ -1,31 +0,0 @@ -'use strict' - -const { rm } = require('fs/promises') -const glob = require('./util/glob.js') -const index = require('./entry-index') -const memo = require('./memoization') -const path = require('path') -const rmContent = require('./content/rm') - -module.exports = entry -module.exports.entry = entry - -function entry (cache, key, opts) { - memo.clearMemoized() - return index.delete(cache, key, opts) -} - -module.exports.content = content - -function content (cache, integrity) { - memo.clearMemoized() - return rmContent(cache, integrity) -} - -module.exports.all = all - -async function all (cache) { - memo.clearMemoized() - const paths = await glob(path.join(cache, '*(content-*|index-*)'), { silent: true, nosort: true }) - return Promise.all(paths.map((p) => rm(p, { recursive: true, force: true }))) -} diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/util/glob.js b/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/util/glob.js deleted file mode 100644 index 8500c1c16a429f..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/util/glob.js +++ /dev/null @@ -1,7 +0,0 @@ -'use strict' - -const { glob } = require('glob') -const path = require('path') - -const globify = (pattern) => pattern.split(path.win32.sep).join(path.posix.sep) -module.exports = (path, options) => glob(globify(path), options) diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/util/hash-to-segments.js b/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/util/hash-to-segments.js deleted file mode 100644 index 445599b5038088..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/util/hash-to-segments.js +++ /dev/null @@ -1,7 +0,0 @@ -'use strict' - -module.exports = hashToSegments - -function hashToSegments (hash) { - return [hash.slice(0, 2), hash.slice(2, 4), hash.slice(4)] -} diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/util/tmp.js 
b/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/util/tmp.js deleted file mode 100644 index 0bf5302136ebeb..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/util/tmp.js +++ /dev/null @@ -1,26 +0,0 @@ -'use strict' - -const { withTempDir } = require('@npmcli/fs') -const fs = require('fs/promises') -const path = require('path') - -module.exports.mkdir = mktmpdir - -async function mktmpdir (cache, opts = {}) { - const { tmpPrefix } = opts - const tmpDir = path.join(cache, 'tmp') - await fs.mkdir(tmpDir, { recursive: true, owner: 'inherit' }) - // do not use path.join(), it drops the trailing / if tmpPrefix is unset - const target = `${tmpDir}${path.sep}${tmpPrefix || ''}` - return fs.mkdtemp(target, { owner: 'inherit' }) -} - -module.exports.withTmp = withTmp - -function withTmp (cache, opts, cb) { - if (!cb) { - cb = opts - opts = {} - } - return withTempDir(path.join(cache, 'tmp'), cb, opts) -} diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/verify.js b/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/verify.js deleted file mode 100644 index d7423da1295b68..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/lib/verify.js +++ /dev/null @@ -1,257 +0,0 @@ -'use strict' - -const { - mkdir, - readFile, - rm, - stat, - truncate, - writeFile, -} = require('fs/promises') -const pMap = require('p-map') -const contentPath = require('./content/path') -const fsm = require('fs-minipass') -const glob = require('./util/glob.js') -const index = require('./entry-index') -const path = require('path') -const ssri = require('ssri') - -const hasOwnProperty = (obj, key) => - Object.prototype.hasOwnProperty.call(obj, key) - -const verifyOpts = (opts) => ({ - concurrency: 20, - log: { silly () {} }, - ...opts, -}) - -module.exports = verify - -async function verify (cache, opts) { - opts = verifyOpts(opts) - opts.log.silly('verify', 'verifying cache at', cache) - - const steps = [ - markStartTime, - fixPerms, - garbageCollect, - rebuildIndex, - cleanTmp, - writeVerifile, - markEndTime, - ] - - const stats = {} - for (const step of steps) { - const label = step.name - const start = new Date() - const s = await step(cache, opts) - if (s) { - Object.keys(s).forEach((k) => { - stats[k] = s[k] - }) - } - const end = new Date() - if (!stats.runTime) { - stats.runTime = {} - } - stats.runTime[label] = end - start - } - stats.runTime.total = stats.endTime - stats.startTime - opts.log.silly( - 'verify', - 'verification finished for', - cache, - 'in', - `${stats.runTime.total}ms` - ) - return stats -} - -async function markStartTime () { - return { startTime: new Date() } -} - -async function markEndTime () { - return { endTime: new Date() } -} - -async function fixPerms (cache, opts) { - opts.log.silly('verify', 'fixing cache permissions') - await mkdir(cache, { recursive: true }) - return null -} - -// Implements a naive mark-and-sweep tracing garbage collector. -// -// The algorithm is basically as follows: -// 1. Read (and filter) all index entries ("pointers") -// 2. Mark each integrity value as "live" -// 3. Read entire filesystem tree in `content-vX/` dir -// 4. If content is live, verify its checksum and delete it if it fails -// 5. If content is not marked as live, rm it. 
-// -async function garbageCollect (cache, opts) { - opts.log.silly('verify', 'garbage collecting content') - const indexStream = index.lsStream(cache) - const liveContent = new Set() - indexStream.on('data', (entry) => { - if (opts.filter && !opts.filter(entry)) { - return - } - - // integrity is stringified, re-parse it so we can get each hash - const integrity = ssri.parse(entry.integrity) - for (const algo in integrity) { - liveContent.add(integrity[algo].toString()) - } - }) - await new Promise((resolve, reject) => { - indexStream.on('end', resolve).on('error', reject) - }) - const contentDir = contentPath.contentDir(cache) - const files = await glob(path.join(contentDir, '**'), { - follow: false, - nodir: true, - nosort: true, - }) - const stats = { - verifiedContent: 0, - reclaimedCount: 0, - reclaimedSize: 0, - badContentCount: 0, - keptSize: 0, - } - await pMap( - files, - async (f) => { - const split = f.split(/[/\\]/) - const digest = split.slice(split.length - 3).join('') - const algo = split[split.length - 4] - const integrity = ssri.fromHex(digest, algo) - if (liveContent.has(integrity.toString())) { - const info = await verifyContent(f, integrity) - if (!info.valid) { - stats.reclaimedCount++ - stats.badContentCount++ - stats.reclaimedSize += info.size - } else { - stats.verifiedContent++ - stats.keptSize += info.size - } - } else { - // No entries refer to this content. We can delete. - stats.reclaimedCount++ - const s = await stat(f) - await rm(f, { recursive: true, force: true }) - stats.reclaimedSize += s.size - } - return stats - }, - { concurrency: opts.concurrency } - ) - return stats -} - -async function verifyContent (filepath, sri) { - const contentInfo = {} - try { - const { size } = await stat(filepath) - contentInfo.size = size - contentInfo.valid = true - await ssri.checkStream(new fsm.ReadStream(filepath), sri) - } catch (err) { - if (err.code === 'ENOENT') { - return { size: 0, valid: false } - } - if (err.code !== 'EINTEGRITY') { - throw err - } - - await rm(filepath, { recursive: true, force: true }) - contentInfo.valid = false - } - return contentInfo -} - -async function rebuildIndex (cache, opts) { - opts.log.silly('verify', 'rebuilding index') - const entries = await index.ls(cache) - const stats = { - missingContent: 0, - rejectedEntries: 0, - totalEntries: 0, - } - const buckets = {} - for (const k in entries) { - /* istanbul ignore else */ - if (hasOwnProperty(entries, k)) { - const hashed = index.hashKey(k) - const entry = entries[k] - const excluded = opts.filter && !opts.filter(entry) - excluded && stats.rejectedEntries++ - if (buckets[hashed] && !excluded) { - buckets[hashed].push(entry) - } else if (buckets[hashed] && excluded) { - // skip - } else if (excluded) { - buckets[hashed] = [] - buckets[hashed]._path = index.bucketPath(cache, k) - } else { - buckets[hashed] = [entry] - buckets[hashed]._path = index.bucketPath(cache, k) - } - } - } - await pMap( - Object.keys(buckets), - (key) => { - return rebuildBucket(cache, buckets[key], stats, opts) - }, - { concurrency: opts.concurrency } - ) - return stats -} - -async function rebuildBucket (cache, bucket, stats) { - await truncate(bucket._path) - // This needs to be serialized because cacache explicitly - // lets very racy bucket conflicts clobber each other. 
- for (const entry of bucket) { - const content = contentPath(cache, entry.integrity) - try { - await stat(content) - await index.insert(cache, entry.key, entry.integrity, { - metadata: entry.metadata, - size: entry.size, - time: entry.time, - }) - stats.totalEntries++ - } catch (err) { - if (err.code === 'ENOENT') { - stats.rejectedEntries++ - stats.missingContent++ - } else { - throw err - } - } - } -} - -function cleanTmp (cache, opts) { - opts.log.silly('verify', 'cleaning tmp directory') - return rm(path.join(cache, 'tmp'), { recursive: true, force: true }) -} - -async function writeVerifile (cache, opts) { - const verifile = path.join(cache, '_lastverified') - opts.log.silly('verify', 'writing verifile to ' + verifile) - return writeFile(verifile, `${Date.now()}`) -} - -module.exports.lastRun = lastRun - -async function lastRun (cache) { - const data = await readFile(path.join(cache, '_lastverified'), { encoding: 'utf8' }) - return new Date(+data) -} diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/package.json b/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/package.json deleted file mode 100644 index 6e6219158ed759..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/cacache/package.json +++ /dev/null @@ -1,82 +0,0 @@ -{ - "name": "cacache", - "version": "18.0.4", - "cache-version": { - "content": "2", - "index": "5" - }, - "description": "Fast, fault-tolerant, cross-platform, disk-based, data-agnostic, content-addressable cache.", - "main": "lib/index.js", - "files": [ - "bin/", - "lib/" - ], - "scripts": { - "test": "tap", - "snap": "tap", - "coverage": "tap", - "test-docker": "docker run -it --rm --name pacotest -v \"$PWD\":/tmp -w /tmp node:latest npm test", - "lint": "eslint \"**/*.{js,cjs,ts,mjs,jsx,tsx}\"", - "npmclilint": "npmcli-lint", - "lintfix": "npm run lint -- --fix", - "postsnap": "npm run lintfix --", - "postlint": "template-oss-check", - "posttest": "npm run lint", - "template-oss-apply": "template-oss-apply --force" - }, - "repository": { - "type": "git", - "url": "git+https://github.com/npm/cacache.git" - }, - "keywords": [ - "cache", - "caching", - "content-addressable", - "sri", - "sri hash", - "subresource integrity", - "cache", - "storage", - "store", - "file store", - "filesystem", - "disk cache", - "disk storage" - ], - "license": "ISC", - "dependencies": { - "@npmcli/fs": "^3.1.0", - "fs-minipass": "^3.0.0", - "glob": "^10.2.2", - "lru-cache": "^10.0.1", - "minipass": "^7.0.3", - "minipass-collect": "^2.0.1", - "minipass-flush": "^1.0.5", - "minipass-pipeline": "^1.2.4", - "p-map": "^4.0.0", - "ssri": "^10.0.0", - "tar": "^6.1.11", - "unique-filename": "^3.0.0" - }, - "devDependencies": { - "@npmcli/eslint-config": "^4.0.0", - "@npmcli/template-oss": "4.22.0", - "tap": "^16.0.0" - }, - "engines": { - "node": "^16.14.0 || >=18.0.0" - }, - "templateOSS": { - "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. 
Edits may be overwritten.", - "windowsCI": false, - "version": "4.22.0", - "publish": "true" - }, - "author": "GitHub Inc.", - "tap": { - "nyc-arg": [ - "--exclude", - "tap-snapshots/**" - ] - } -} diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/LICENSE b/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/LICENSE deleted file mode 100644 index 1808eb2844231c..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/LICENSE +++ /dev/null @@ -1,16 +0,0 @@ -ISC License - -Copyright 2017-2022 (c) npm, Inc. - -Permission to use, copy, modify, and/or distribute this software for -any purpose with or without fee is hereby granted, provided that the -above copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE COPYRIGHT HOLDER DISCLAIMS -ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE -COPYRIGHT HOLDER BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR -CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS -OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE -OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE -USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/cache/entry.js b/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/cache/entry.js deleted file mode 100644 index bfcfacbcc95e18..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/cache/entry.js +++ /dev/null @@ -1,471 +0,0 @@ -const { Request, Response } = require('minipass-fetch') -const { Minipass } = require('minipass') -const MinipassFlush = require('minipass-flush') -const cacache = require('cacache') -const url = require('url') - -const CachingMinipassPipeline = require('../pipeline.js') -const CachePolicy = require('./policy.js') -const cacheKey = require('./key.js') -const remote = require('../remote.js') - -const hasOwnProperty = (obj, prop) => Object.prototype.hasOwnProperty.call(obj, prop) - -// allow list for request headers that will be written to the cache index -// note: we will also store any request headers -// that are named in a response's vary header -const KEEP_REQUEST_HEADERS = [ - 'accept-charset', - 'accept-encoding', - 'accept-language', - 'accept', - 'cache-control', -] - -// allow list for response headers that will be written to the cache index -// note: we must not store the real response's age header, or when we load -// a cache policy based on the metadata it will think the cached response -// is always stale -const KEEP_RESPONSE_HEADERS = [ - 'cache-control', - 'content-encoding', - 'content-language', - 'content-type', - 'date', - 'etag', - 'expires', - 'last-modified', - 'link', - 'location', - 'pragma', - 'vary', -] - -// return an object containing all metadata to be written to the index -const getMetadata = (request, response, options) => { - const metadata = { - time: Date.now(), - url: request.url, - reqHeaders: {}, - resHeaders: {}, - - // options on which we must match the request and vary the response - options: { - compress: options.compress != null ? 
options.compress : request.compress, - }, - } - - // only save the status if it's not a 200 or 304 - if (response.status !== 200 && response.status !== 304) { - metadata.status = response.status - } - - for (const name of KEEP_REQUEST_HEADERS) { - if (request.headers.has(name)) { - metadata.reqHeaders[name] = request.headers.get(name) - } - } - - // if the request's host header differs from the host in the url - // we need to keep it, otherwise it's just noise and we ignore it - const host = request.headers.get('host') - const parsedUrl = new url.URL(request.url) - if (host && parsedUrl.host !== host) { - metadata.reqHeaders.host = host - } - - // if the response has a vary header, make sure - // we store the relevant request headers too - if (response.headers.has('vary')) { - const vary = response.headers.get('vary') - // a vary of "*" means every header causes a different response. - // in that scenario, we do not include any additional headers - // as the freshness check will always fail anyway and we don't - // want to bloat the cache indexes - if (vary !== '*') { - // copy any other request headers that will vary the response - const varyHeaders = vary.trim().toLowerCase().split(/\s*,\s*/) - for (const name of varyHeaders) { - if (request.headers.has(name)) { - metadata.reqHeaders[name] = request.headers.get(name) - } - } - } - } - - for (const name of KEEP_RESPONSE_HEADERS) { - if (response.headers.has(name)) { - metadata.resHeaders[name] = response.headers.get(name) - } - } - - for (const name of options.cacheAdditionalHeaders) { - if (response.headers.has(name)) { - metadata.resHeaders[name] = response.headers.get(name) - } - } - - return metadata -} - -// symbols used to hide objects that may be lazily evaluated in a getter -const _request = Symbol('request') -const _response = Symbol('response') -const _policy = Symbol('policy') - -class CacheEntry { - constructor ({ entry, request, response, options }) { - if (entry) { - this.key = entry.key - this.entry = entry - // previous versions of this module didn't write an explicit timestamp in - // the metadata, so fall back to the entry's timestamp. 
we can't use the - // entry timestamp to determine staleness because cacache will update it - // when it verifies its data - this.entry.metadata.time = this.entry.metadata.time || this.entry.time - } else { - this.key = cacheKey(request) - } - - this.options = options - - // these properties are behind getters that lazily evaluate - this[_request] = request - this[_response] = response - this[_policy] = null - } - - // returns a CacheEntry instance that satisfies the given request - // or undefined if no existing entry satisfies - static async find (request, options) { - try { - // compacts the index and returns an array of unique entries - var matches = await cacache.index.compact(options.cachePath, cacheKey(request), (A, B) => { - const entryA = new CacheEntry({ entry: A, options }) - const entryB = new CacheEntry({ entry: B, options }) - return entryA.policy.satisfies(entryB.request) - }, { - validateEntry: (entry) => { - // clean out entries with a buggy content-encoding value - if (entry.metadata && - entry.metadata.resHeaders && - entry.metadata.resHeaders['content-encoding'] === null) { - return false - } - - // if an integrity is null, it needs to have a status specified - if (entry.integrity === null) { - return !!(entry.metadata && entry.metadata.status) - } - - return true - }, - }) - } catch (err) { - // if the compact request fails, ignore the error and return - return - } - - // a cache mode of 'reload' means to behave as though we have no cache - // on the way to the network. return undefined to allow cacheFetch to - // create a brand new request no matter what. - if (options.cache === 'reload') { - return - } - - // find the specific entry that satisfies the request - let match - for (const entry of matches) { - const _entry = new CacheEntry({ - entry, - options, - }) - - if (_entry.policy.satisfies(request)) { - match = _entry - break - } - } - - return match - } - - // if the user made a PUT/POST/PATCH then we invalidate our - // cache for the same url by deleting the index entirely - static async invalidate (request, options) { - const key = cacheKey(request) - try { - await cacache.rm.entry(options.cachePath, key, { removeFully: true }) - } catch (err) { - // ignore errors - } - } - - get request () { - if (!this[_request]) { - this[_request] = new Request(this.entry.metadata.url, { - method: 'GET', - headers: this.entry.metadata.reqHeaders, - ...this.entry.metadata.options, - }) - } - - return this[_request] - } - - get response () { - if (!this[_response]) { - this[_response] = new Response(null, { - url: this.entry.metadata.url, - counter: this.options.counter, - status: this.entry.metadata.status || 200, - headers: { - ...this.entry.metadata.resHeaders, - 'content-length': this.entry.size, - }, - }) - } - - return this[_response] - } - - get policy () { - if (!this[_policy]) { - this[_policy] = new CachePolicy({ - entry: this.entry, - request: this.request, - response: this.response, - options: this.options, - }) - } - - return this[_policy] - } - - // wraps the response in a pipeline that stores the data - // in the cache while the user consumes it - async store (status) { - // if we got a status other than 200, 301, or 308, - // or the CachePolicy forbid storage, append the - // cache status header and return it untouched - if ( - this.request.method !== 'GET' || - ![200, 301, 308].includes(this.response.status) || - !this.policy.storable() - ) { - this.response.headers.set('x-local-cache-status', 'skip') - return this.response - } - - const size = 
this.response.headers.get('content-length') - const cacheOpts = { - algorithms: this.options.algorithms, - metadata: getMetadata(this.request, this.response, this.options), - size, - integrity: this.options.integrity, - integrityEmitter: this.response.body.hasIntegrityEmitter && this.response.body, - } - - let body = null - // we only set a body if the status is a 200, redirects are - // stored as metadata only - if (this.response.status === 200) { - let cacheWriteResolve, cacheWriteReject - const cacheWritePromise = new Promise((resolve, reject) => { - cacheWriteResolve = resolve - cacheWriteReject = reject - }).catch((err) => { - body.emit('error', err) - }) - - body = new CachingMinipassPipeline({ events: ['integrity', 'size'] }, new MinipassFlush({ - flush () { - return cacheWritePromise - }, - })) - // this is always true since if we aren't reusing the one from the remote fetch, we - // are using the one from cacache - body.hasIntegrityEmitter = true - - const onResume = () => { - const tee = new Minipass() - const cacheStream = cacache.put.stream(this.options.cachePath, this.key, cacheOpts) - // re-emit the integrity and size events on our new response body so they can be reused - cacheStream.on('integrity', i => body.emit('integrity', i)) - cacheStream.on('size', s => body.emit('size', s)) - // stick a flag on here so downstream users will know if they can expect integrity events - tee.pipe(cacheStream) - // TODO if the cache write fails, log a warning but return the response anyway - // eslint-disable-next-line promise/catch-or-return - cacheStream.promise().then(cacheWriteResolve, cacheWriteReject) - body.unshift(tee) - body.unshift(this.response.body) - } - - body.once('resume', onResume) - body.once('end', () => body.removeListener('resume', onResume)) - } else { - await cacache.index.insert(this.options.cachePath, this.key, null, cacheOpts) - } - - // note: we do not set the x-local-cache-hash header because we do not know - // the hash value until after the write to the cache completes, which doesn't - // happen until after the response has been sent and it's too late to write - // the header anyway - this.response.headers.set('x-local-cache', encodeURIComponent(this.options.cachePath)) - this.response.headers.set('x-local-cache-key', encodeURIComponent(this.key)) - this.response.headers.set('x-local-cache-mode', 'stream') - this.response.headers.set('x-local-cache-status', status) - this.response.headers.set('x-local-cache-time', new Date().toISOString()) - const newResponse = new Response(body, { - url: this.response.url, - status: this.response.status, - headers: this.response.headers, - counter: this.options.counter, - }) - return newResponse - } - - // use the cached data to create a response and return it - async respond (method, options, status) { - let response - if (method === 'HEAD' || [301, 308].includes(this.response.status)) { - // if the request is a HEAD, or the response is a redirect, - // then the metadata in the entry already includes everything - // we need to build a response - response = this.response - } else { - // we're responding with a full cached response, so create a body - // that reads from cacache and attach it to a new Response - const body = new Minipass() - const headers = { ...this.policy.responseHeaders() } - - const onResume = () => { - const cacheStream = cacache.get.stream.byDigest( - this.options.cachePath, this.entry.integrity, { memoize: this.options.memoize } - ) - cacheStream.on('error', async (err) => { - cacheStream.pause() - if 
(err.code === 'EINTEGRITY') { - await cacache.rm.content( - this.options.cachePath, this.entry.integrity, { memoize: this.options.memoize } - ) - } - if (err.code === 'ENOENT' || err.code === 'EINTEGRITY') { - await CacheEntry.invalidate(this.request, this.options) - } - body.emit('error', err) - cacheStream.resume() - }) - // emit the integrity and size events based on our metadata so we're consistent - body.emit('integrity', this.entry.integrity) - body.emit('size', Number(headers['content-length'])) - cacheStream.pipe(body) - } - - body.once('resume', onResume) - body.once('end', () => body.removeListener('resume', onResume)) - response = new Response(body, { - url: this.entry.metadata.url, - counter: options.counter, - status: 200, - headers, - }) - } - - response.headers.set('x-local-cache', encodeURIComponent(this.options.cachePath)) - response.headers.set('x-local-cache-hash', encodeURIComponent(this.entry.integrity)) - response.headers.set('x-local-cache-key', encodeURIComponent(this.key)) - response.headers.set('x-local-cache-mode', 'stream') - response.headers.set('x-local-cache-status', status) - response.headers.set('x-local-cache-time', new Date(this.entry.metadata.time).toUTCString()) - return response - } - - // use the provided request along with this cache entry to - // revalidate the stored response. returns a response, either - // from the cache or from the update - async revalidate (request, options) { - const revalidateRequest = new Request(request, { - headers: this.policy.revalidationHeaders(request), - }) - - try { - // NOTE: be sure to remove the headers property from the - // user supplied options, since we have already defined - // them on the new request object. if they're still in the - // options then those will overwrite the ones from the policy - var response = await remote(revalidateRequest, { - ...options, - headers: undefined, - }) - } catch (err) { - // if the network fetch fails, return the stale - // cached response unless it has a cache-control - // of 'must-revalidate' - if (!this.policy.mustRevalidate) { - return this.respond(request.method, options, 'stale') - } - - throw err - } - - if (this.policy.revalidated(revalidateRequest, response)) { - // we got a 304, write a new index to the cache and respond from cache - const metadata = getMetadata(request, response, options) - // 304 responses do not include headers that are specific to the response data - // since they do not include a body, so we copy values for headers that were - // in the old cache entry to the new one, if the new metadata does not already - // include that header - for (const name of KEEP_RESPONSE_HEADERS) { - if ( - !hasOwnProperty(metadata.resHeaders, name) && - hasOwnProperty(this.entry.metadata.resHeaders, name) - ) { - metadata.resHeaders[name] = this.entry.metadata.resHeaders[name] - } - } - - for (const name of options.cacheAdditionalHeaders) { - const inMeta = hasOwnProperty(metadata.resHeaders, name) - const inEntry = hasOwnProperty(this.entry.metadata.resHeaders, name) - const inPolicy = hasOwnProperty(this.policy.response.headers, name) - - // if the header is in the existing entry, but it is not in the metadata - // then we need to write it to the metadata as this will refresh the on-disk cache - if (!inMeta && inEntry) { - metadata.resHeaders[name] = this.entry.metadata.resHeaders[name] - } - // if the header is in the metadata, but not in the policy, then we need to set - // it in the policy so that it's included in the immediate response. 
future - // responses will load a new cache entry, so we don't need to change that - if (!inPolicy && inMeta) { - this.policy.response.headers[name] = metadata.resHeaders[name] - } - } - - try { - await cacache.index.insert(options.cachePath, this.key, this.entry.integrity, { - size: this.entry.size, - metadata, - }) - } catch (err) { - // if updating the cache index fails, we ignore it and - // respond anyway - } - return this.respond(request.method, options, 'revalidated') - } - - // if we got a modified response, create a new entry based on it - const newEntry = new CacheEntry({ - request, - response, - options, - }) - - // respond with the new entry while writing it to the cache - return newEntry.store('updated') - } -} - -module.exports = CacheEntry diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/cache/errors.js b/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/cache/errors.js deleted file mode 100644 index 67a66573bebe66..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/cache/errors.js +++ /dev/null @@ -1,11 +0,0 @@ -class NotCachedError extends Error { - constructor (url) { - /* eslint-disable-next-line max-len */ - super(`request to ${url} failed: cache mode is 'only-if-cached' but no cached response is available.`) - this.code = 'ENOTCACHED' - } -} - -module.exports = { - NotCachedError, -} diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/cache/index.js b/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/cache/index.js deleted file mode 100644 index 0de49d23fb9336..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/cache/index.js +++ /dev/null @@ -1,49 +0,0 @@ -const { NotCachedError } = require('./errors.js') -const CacheEntry = require('./entry.js') -const remote = require('../remote.js') - -// do whatever is necessary to get a Response and return it -const cacheFetch = async (request, options) => { - // try to find a cached entry that satisfies this request - const entry = await CacheEntry.find(request, options) - if (!entry) { - // no cached result, if the cache mode is 'only-if-cached' that's a failure - if (options.cache === 'only-if-cached') { - throw new NotCachedError(request.url) - } - - // otherwise, we make a request, store it and return it - const response = await remote(request, options) - const newEntry = new CacheEntry({ request, response, options }) - return newEntry.store('miss') - } - - // we have a cached response that satisfies this request, however if the cache - // mode is 'no-cache' then we send the revalidation request no matter what - if (options.cache === 'no-cache') { - return entry.revalidate(request, options) - } - - // if the cached entry is not stale, or if the cache mode is 'force-cache' or - // 'only-if-cached' we can respond with the cached entry. set the status - // based on the result of needsRevalidation and respond - const _needsRevalidation = entry.policy.needsRevalidation(request) - if (options.cache === 'force-cache' || - options.cache === 'only-if-cached' || - !_needsRevalidation) { - return entry.respond(request.method, options, _needsRevalidation ? 
'stale' : 'hit') - } - - // if we got here, the cache entry is stale so revalidate it - return entry.revalidate(request, options) -} - -cacheFetch.invalidate = async (request, options) => { - if (!options.cachePath) { - return - } - - return CacheEntry.invalidate(request, options) -} - -module.exports = cacheFetch diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/cache/key.js b/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/cache/key.js deleted file mode 100644 index f7684d562b7fae..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/cache/key.js +++ /dev/null @@ -1,17 +0,0 @@ -const { URL, format } = require('url') - -// options passed to url.format() when generating a key -const formatOptions = { - auth: false, - fragment: false, - search: true, - unicode: false, -} - -// returns a string to be used as the cache key for the Request -const cacheKey = (request) => { - const parsed = new URL(request.url) - return `make-fetch-happen:request-cache:${format(parsed, formatOptions)}` -} - -module.exports = cacheKey diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/cache/policy.js b/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/cache/policy.js deleted file mode 100644 index ada3c8600dae92..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/cache/policy.js +++ /dev/null @@ -1,161 +0,0 @@ -const CacheSemantics = require('http-cache-semantics') -const Negotiator = require('negotiator') -const ssri = require('ssri') - -// options passed to http-cache-semantics constructor -const policyOptions = { - shared: false, - ignoreCargoCult: true, -} - -// a fake empty response, used when only testing the -// request for storability -const emptyResponse = { status: 200, headers: {} } - -// returns a plain object representation of the Request -const requestObject = (request) => { - const _obj = { - method: request.method, - url: request.url, - headers: {}, - compress: request.compress, - } - - request.headers.forEach((value, key) => { - _obj.headers[key] = value - }) - - return _obj -} - -// returns a plain object representation of the Response -const responseObject = (response) => { - const _obj = { - status: response.status, - headers: {}, - } - - response.headers.forEach((value, key) => { - _obj.headers[key] = value - }) - - return _obj -} - -class CachePolicy { - constructor ({ entry, request, response, options }) { - this.entry = entry - this.request = requestObject(request) - this.response = responseObject(response) - this.options = options - this.policy = new CacheSemantics(this.request, this.response, policyOptions) - - if (this.entry) { - // if we have an entry, copy the timestamp to the _responseTime - // this is necessary because the CacheSemantics constructor forces - // the value to Date.now() which means a policy created from a - // cache entry is likely to always identify itself as stale - this.policy._responseTime = this.entry.metadata.time - } - } - - // static method to quickly determine if a request alone is storable - static storable (request, options) { - // no cachePath means no caching - if (!options.cachePath) { - return false - } - - // user explicitly asked not to cache - if (options.cache === 'no-store') { - return false - } - - // we only cache GET and HEAD requests - if (!['GET', 'HEAD'].includes(request.method)) { - return false - } - - // otherwise, let http-cache-semantics make 
the decision - // based on the request's headers - const policy = new CacheSemantics(requestObject(request), emptyResponse, policyOptions) - return policy.storable() - } - - // returns true if the policy satisfies the request - satisfies (request) { - const _req = requestObject(request) - if (this.request.headers.host !== _req.headers.host) { - return false - } - - if (this.request.compress !== _req.compress) { - return false - } - - const negotiatorA = new Negotiator(this.request) - const negotiatorB = new Negotiator(_req) - - if (JSON.stringify(negotiatorA.mediaTypes()) !== JSON.stringify(negotiatorB.mediaTypes())) { - return false - } - - if (JSON.stringify(negotiatorA.languages()) !== JSON.stringify(negotiatorB.languages())) { - return false - } - - if (JSON.stringify(negotiatorA.encodings()) !== JSON.stringify(negotiatorB.encodings())) { - return false - } - - if (this.options.integrity) { - return ssri.parse(this.options.integrity).match(this.entry.integrity) - } - - return true - } - - // returns true if the request and response allow caching - storable () { - return this.policy.storable() - } - - // NOTE: this is a hack to avoid parsing the cache-control - // header ourselves, it returns true if the response's - // cache-control contains must-revalidate - get mustRevalidate () { - return !!this.policy._rescc['must-revalidate'] - } - - // returns true if the cached response requires revalidation - // for the given request - needsRevalidation (request) { - const _req = requestObject(request) - // force method to GET because we only cache GETs - // but can serve a HEAD from a cached GET - _req.method = 'GET' - return !this.policy.satisfiesWithoutRevalidation(_req) - } - - responseHeaders () { - return this.policy.responseHeaders() - } - - // returns a new object containing the appropriate headers - // to send a revalidation request - revalidationHeaders (request) { - const _req = requestObject(request) - return this.policy.revalidationHeaders(_req) - } - - // returns true if the request/response was revalidated - // successfully. returns false if a new response was received - revalidated (request, response) { - const _req = requestObject(request) - const _res = responseObject(response) - const policy = this.policy.revalidatedPolicy(_req, _res) - return !policy.modified - } -} - -module.exports = CachePolicy diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/fetch.js b/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/fetch.js deleted file mode 100644 index 233ba67e165502..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/fetch.js +++ /dev/null @@ -1,118 +0,0 @@ -'use strict' - -const { FetchError, Request, isRedirect } = require('minipass-fetch') -const url = require('url') - -const CachePolicy = require('./cache/policy.js') -const cache = require('./cache/index.js') -const remote = require('./remote.js') - -// given a Request, a Response and user options -// return true if the response is a redirect that -// can be followed. 
we throw errors that will result -// in the fetch being rejected if the redirect is -// possible but invalid for some reason -const canFollowRedirect = (request, response, options) => { - if (!isRedirect(response.status)) { - return false - } - - if (options.redirect === 'manual') { - return false - } - - if (options.redirect === 'error') { - throw new FetchError(`redirect mode is set to error: ${request.url}`, - 'no-redirect', { code: 'ENOREDIRECT' }) - } - - if (!response.headers.has('location')) { - throw new FetchError(`redirect location header missing for: ${request.url}`, - 'no-location', { code: 'EINVALIDREDIRECT' }) - } - - if (request.counter >= request.follow) { - throw new FetchError(`maximum redirect reached at: ${request.url}`, - 'max-redirect', { code: 'EMAXREDIRECT' }) - } - - return true -} - -// given a Request, a Response, and the user's options return an object -// with a new Request and a new options object that will be used for -// following the redirect -const getRedirect = (request, response, options) => { - const _opts = { ...options } - const location = response.headers.get('location') - const redirectUrl = new url.URL(location, /^https?:/.test(location) ? undefined : request.url) - // Comment below is used under the following license: - /** - * @license - * Copyright (c) 2010-2012 Mikeal Rogers - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * http://www.apache.org/licenses/LICENSE-2.0 - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an "AS - * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language - * governing permissions and limitations under the License. - */ - - // Remove authorization if changing hostnames (but not if just - // changing ports or protocols). This matches the behavior of request: - // https://github.com/request/request/blob/b12a6245/lib/redirect.js#L134-L138 - if (new url.URL(request.url).hostname !== redirectUrl.hostname) { - request.headers.delete('authorization') - request.headers.delete('cookie') - } - - // for POST request with 301/302 response, or any request with 303 response, - // use GET when following redirect - if ( - response.status === 303 || - (request.method === 'POST' && [301, 302].includes(response.status)) - ) { - _opts.method = 'GET' - _opts.body = null - request.headers.delete('content-length') - } - - _opts.headers = {} - request.headers.forEach((value, key) => { - _opts.headers[key] = value - }) - - _opts.counter = ++request.counter - const redirectReq = new Request(url.format(redirectUrl), _opts) - return { - request: redirectReq, - options: _opts, - } -} - -const fetch = async (request, options) => { - const response = CachePolicy.storable(request, options) - ? 
await cache(request, options) - : await remote(request, options) - - // if the request wasn't a GET or HEAD, and the response - // status is between 200 and 399 inclusive, invalidate the - // request url - if (!['GET', 'HEAD'].includes(request.method) && - response.status >= 200 && - response.status <= 399) { - await cache.invalidate(request, options) - } - - if (!canFollowRedirect(request, response, options)) { - return response - } - - const redirect = getRedirect(request, response, options) - return fetch(redirect.request, redirect.options) -} - -module.exports = fetch diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/index.js b/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/index.js deleted file mode 100644 index 2f12e8e1b61131..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/index.js +++ /dev/null @@ -1,41 +0,0 @@ -const { FetchError, Headers, Request, Response } = require('minipass-fetch') - -const configureOptions = require('./options.js') -const fetch = require('./fetch.js') - -const makeFetchHappen = (url, opts) => { - const options = configureOptions(opts) - - const request = new Request(url, options) - return fetch(request, options) -} - -makeFetchHappen.defaults = (defaultUrl, defaultOptions = {}, wrappedFetch = makeFetchHappen) => { - if (typeof defaultUrl === 'object') { - defaultOptions = defaultUrl - defaultUrl = null - } - - const defaultedFetch = (url, options = {}) => { - const finalUrl = url || defaultUrl - const finalOptions = { - ...defaultOptions, - ...options, - headers: { - ...defaultOptions.headers, - ...options.headers, - }, - } - return wrappedFetch(finalUrl, finalOptions) - } - - defaultedFetch.defaults = (defaultUrl1, defaultOptions1 = {}) => - makeFetchHappen.defaults(defaultUrl1, defaultOptions1, defaultedFetch) - return defaultedFetch -} - -module.exports = makeFetchHappen -module.exports.FetchError = FetchError -module.exports.Headers = Headers -module.exports.Request = Request -module.exports.Response = Response diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/options.js b/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/options.js deleted file mode 100644 index f77511279f831d..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/options.js +++ /dev/null @@ -1,54 +0,0 @@ -const dns = require('dns') - -const conditionalHeaders = [ - 'if-modified-since', - 'if-none-match', - 'if-unmodified-since', - 'if-match', - 'if-range', -] - -const configureOptions = (opts) => { - const { strictSSL, ...options } = { ...opts } - options.method = options.method ? 
options.method.toUpperCase() : 'GET' - options.rejectUnauthorized = strictSSL !== false - - if (!options.retry) { - options.retry = { retries: 0 } - } else if (typeof options.retry === 'string') { - const retries = parseInt(options.retry, 10) - if (isFinite(retries)) { - options.retry = { retries } - } else { - options.retry = { retries: 0 } - } - } else if (typeof options.retry === 'number') { - options.retry = { retries: options.retry } - } else { - options.retry = { retries: 0, ...options.retry } - } - - options.dns = { ttl: 5 * 60 * 1000, lookup: dns.lookup, ...options.dns } - - options.cache = options.cache || 'default' - if (options.cache === 'default') { - const hasConditionalHeader = Object.keys(options.headers || {}).some((name) => { - return conditionalHeaders.includes(name.toLowerCase()) - }) - if (hasConditionalHeader) { - options.cache = 'no-store' - } - } - - options.cacheAdditionalHeaders = options.cacheAdditionalHeaders || [] - - // cacheManager is deprecated, but if it's set and - // cachePath is not we should copy it to the new field - if (options.cacheManager && !options.cachePath) { - options.cachePath = options.cacheManager - } - - return options -} - -module.exports = configureOptions diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/pipeline.js b/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/pipeline.js deleted file mode 100644 index b1d221b2d0ce31..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/pipeline.js +++ /dev/null @@ -1,41 +0,0 @@ -'use strict' - -const MinipassPipeline = require('minipass-pipeline') - -class CachingMinipassPipeline extends MinipassPipeline { - #events = [] - #data = new Map() - - constructor (opts, ...streams) { - // CRITICAL: do NOT pass the streams to the call to super(), this will start - // the flow of data and potentially cause the events we need to catch to emit - // before we've finished our own setup. 
instead we call super() with no args, - // finish our setup, and then push the streams into ourselves to start the - // data flow - super() - this.#events = opts.events - - /* istanbul ignore next - coverage disabled because this is pointless to test here */ - if (streams.length) { - this.push(...streams) - } - } - - on (event, handler) { - if (this.#events.includes(event) && this.#data.has(event)) { - return handler(...this.#data.get(event)) - } - - return super.on(event, handler) - } - - emit (event, ...data) { - if (this.#events.includes(event)) { - this.#data.set(event, data) - } - - return super.emit(event, ...data) - } -} - -module.exports = CachingMinipassPipeline diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/remote.js b/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/remote.js deleted file mode 100644 index 8554564074de6e..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/lib/remote.js +++ /dev/null @@ -1,131 +0,0 @@ -const { Minipass } = require('minipass') -const fetch = require('minipass-fetch') -const promiseRetry = require('promise-retry') -const ssri = require('ssri') -const { log } = require('proc-log') - -const CachingMinipassPipeline = require('./pipeline.js') -const { getAgent } = require('@npmcli/agent') -const pkg = require('../package.json') - -const USER_AGENT = `${pkg.name}/${pkg.version} (+https://npm.im/${pkg.name})` - -const RETRY_ERRORS = [ - 'ECONNRESET', // remote socket closed on us - 'ECONNREFUSED', // remote host refused to open connection - 'EADDRINUSE', // failed to bind to a local port (proxy?) - 'ETIMEDOUT', // someone in the transaction is WAY TOO SLOW - // from @npmcli/agent - 'ECONNECTIONTIMEOUT', - 'EIDLETIMEOUT', - 'ERESPONSETIMEOUT', - 'ETRANSFERTIMEOUT', - // Known codes we do NOT retry on: - // ENOTFOUND (getaddrinfo failure. Either bad hostname, or offline) - // EINVALIDPROXY // invalid protocol from @npmcli/agent - // EINVALIDRESPONSE // invalid status code from @npmcli/agent -] - -const RETRY_TYPES = [ - 'request-timeout', -] - -// make a request directly to the remote source, -// retrying certain classes of errors as well as -// following redirects (through the cache if necessary) -// and verifying response integrity -const remoteFetch = (request, options) => { - const agent = getAgent(request.url, options) - if (!request.headers.has('connection')) { - request.headers.set('connection', agent ? 
'keep-alive' : 'close') - } - - if (!request.headers.has('user-agent')) { - request.headers.set('user-agent', USER_AGENT) - } - - // keep our own options since we're overriding the agent - // and the redirect mode - const _opts = { - ...options, - agent, - redirect: 'manual', - } - - return promiseRetry(async (retryHandler, attemptNum) => { - const req = new fetch.Request(request, _opts) - try { - let res = await fetch(req, _opts) - if (_opts.integrity && res.status === 200) { - // we got a 200 response and the user has specified an expected - // integrity value, so wrap the response in an ssri stream to verify it - const integrityStream = ssri.integrityStream({ - algorithms: _opts.algorithms, - integrity: _opts.integrity, - size: _opts.size, - }) - const pipeline = new CachingMinipassPipeline({ - events: ['integrity', 'size'], - }, res.body, integrityStream) - // we also propagate the integrity and size events out to the pipeline so we can use - // this new response body as an integrityEmitter for cacache - integrityStream.on('integrity', i => pipeline.emit('integrity', i)) - integrityStream.on('size', s => pipeline.emit('size', s)) - res = new fetch.Response(pipeline, res) - // set an explicit flag so we know if our response body will emit integrity and size - res.body.hasIntegrityEmitter = true - } - - res.headers.set('x-fetch-attempts', attemptNum) - - // do not retry POST requests, or requests with a streaming body - // do retry requests with a 408, 420, 429 or 500+ status in the response - const isStream = Minipass.isStream(req.body) - const isRetriable = req.method !== 'POST' && - !isStream && - ([408, 420, 429].includes(res.status) || res.status >= 500) - - if (isRetriable) { - if (typeof options.onRetry === 'function') { - options.onRetry(res) - } - - /* eslint-disable-next-line max-len */ - log.http('fetch', `${req.method} ${req.url} attempt ${attemptNum} failed with ${res.status}`) - return retryHandler(res) - } - - return res - } catch (err) { - const code = (err.code === 'EPROMISERETRY') - ? 
err.retried.code - : err.code - - // err.retried will be the thing that was thrown from above - // if it's a response, we just got a bad status code and we - // can re-throw to allow the retry - const isRetryError = err.retried instanceof fetch.Response || - (RETRY_ERRORS.includes(code) && RETRY_TYPES.includes(err.type)) - - if (req.method === 'POST' || isRetryError) { - throw err - } - - if (typeof options.onRetry === 'function') { - options.onRetry(err) - } - - log.http('fetch', `${req.method} ${req.url} attempt ${attemptNum} failed with ${err.code}`) - return retryHandler(err) - } - }, options.retry).catch((err) => { - // don't reject for http errors, just return them - if (err.status >= 400 && err.type !== 'system') { - return err - } - - throw err - }) -} - -module.exports = remoteFetch diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/package.json b/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/package.json deleted file mode 100644 index 7adb4d1e7f9719..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/make-fetch-happen/package.json +++ /dev/null @@ -1,75 +0,0 @@ -{ - "name": "make-fetch-happen", - "version": "13.0.1", - "description": "Opinionated, caching, retrying fetch client", - "main": "lib/index.js", - "files": [ - "bin/", - "lib/" - ], - "scripts": { - "test": "tap", - "posttest": "npm run lint", - "eslint": "eslint", - "lint": "eslint \"**/*.{js,cjs,ts,mjs,jsx,tsx}\"", - "lintfix": "npm run lint -- --fix", - "postlint": "template-oss-check", - "snap": "tap", - "template-oss-apply": "template-oss-apply --force" - }, - "repository": { - "type": "git", - "url": "https://github.com/npm/make-fetch-happen.git" - }, - "keywords": [ - "http", - "request", - "fetch", - "mean girls", - "caching", - "cache", - "subresource integrity" - ], - "author": "GitHub Inc.", - "license": "ISC", - "dependencies": { - "@npmcli/agent": "^2.0.0", - "cacache": "^18.0.0", - "http-cache-semantics": "^4.1.1", - "is-lambda": "^1.0.1", - "minipass": "^7.0.2", - "minipass-fetch": "^3.0.0", - "minipass-flush": "^1.0.5", - "minipass-pipeline": "^1.2.4", - "negotiator": "^0.6.3", - "proc-log": "^4.2.0", - "promise-retry": "^2.0.1", - "ssri": "^10.0.0" - }, - "devDependencies": { - "@npmcli/eslint-config": "^4.0.0", - "@npmcli/template-oss": "4.21.4", - "nock": "^13.2.4", - "safe-buffer": "^5.2.1", - "standard-version": "^9.3.2", - "tap": "^16.0.0" - }, - "engines": { - "node": "^16.14.0 || >=18.0.0" - }, - "tap": { - "color": 1, - "files": "test/*.js", - "check-coverage": true, - "timeout": 60, - "nyc-arg": [ - "--exclude", - "tap-snapshots/**" - ] - }, - "templateOSS": { - "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. Edits may be overwritten.", - "version": "4.21.4", - "publish": "true" - } -} diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/minipass-fetch/LICENSE b/deps/npm/node_modules/@sigstore/sign/node_modules/minipass-fetch/LICENSE deleted file mode 100644 index 3c3410cdc12ee3..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/minipass-fetch/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -The MIT License (MIT) - -Copyright (c) Isaac Z. 
Schlueter and Contributors -Copyright (c) 2016 David Frank - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ---- - -Note: This is a derivative work based on "node-fetch" by David Frank, -modified and distributed under the terms of the MIT license above. -https://github.com/bitinn/node-fetch diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/minipass-fetch/lib/abort-error.js b/deps/npm/node_modules/@sigstore/sign/node_modules/minipass-fetch/lib/abort-error.js deleted file mode 100644 index b18f643269e375..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/minipass-fetch/lib/abort-error.js +++ /dev/null @@ -1,17 +0,0 @@ -'use strict' -class AbortError extends Error { - constructor (message) { - super(message) - this.code = 'FETCH_ABORTED' - this.type = 'aborted' - Error.captureStackTrace(this, this.constructor) - } - - get name () { - return 'AbortError' - } - - // don't allow name to be overridden, but don't throw either - set name (s) {} -} -module.exports = AbortError diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/minipass-fetch/lib/blob.js b/deps/npm/node_modules/@sigstore/sign/node_modules/minipass-fetch/lib/blob.js deleted file mode 100644 index 121b1730102e72..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/minipass-fetch/lib/blob.js +++ /dev/null @@ -1,97 +0,0 @@ -'use strict' -const { Minipass } = require('minipass') -const TYPE = Symbol('type') -const BUFFER = Symbol('buffer') - -class Blob { - constructor (blobParts, options) { - this[TYPE] = '' - - const buffers = [] - let size = 0 - - if (blobParts) { - const a = blobParts - const length = Number(a.length) - for (let i = 0; i < length; i++) { - const element = a[i] - const buffer = element instanceof Buffer ? element - : ArrayBuffer.isView(element) - ? Buffer.from(element.buffer, element.byteOffset, element.byteLength) - : element instanceof ArrayBuffer ? Buffer.from(element) - : element instanceof Blob ? element[BUFFER] - : typeof element === 'string' ? 
Buffer.from(element) - : Buffer.from(String(element)) - size += buffer.length - buffers.push(buffer) - } - } - - this[BUFFER] = Buffer.concat(buffers, size) - - const type = options && options.type !== undefined - && String(options.type).toLowerCase() - if (type && !/[^\u0020-\u007E]/.test(type)) { - this[TYPE] = type - } - } - - get size () { - return this[BUFFER].length - } - - get type () { - return this[TYPE] - } - - text () { - return Promise.resolve(this[BUFFER].toString()) - } - - arrayBuffer () { - const buf = this[BUFFER] - const off = buf.byteOffset - const len = buf.byteLength - const ab = buf.buffer.slice(off, off + len) - return Promise.resolve(ab) - } - - stream () { - return new Minipass().end(this[BUFFER]) - } - - slice (start, end, type) { - const size = this.size - const relativeStart = start === undefined ? 0 - : start < 0 ? Math.max(size + start, 0) - : Math.min(start, size) - const relativeEnd = end === undefined ? size - : end < 0 ? Math.max(size + end, 0) - : Math.min(end, size) - const span = Math.max(relativeEnd - relativeStart, 0) - - const buffer = this[BUFFER] - const slicedBuffer = buffer.slice( - relativeStart, - relativeStart + span - ) - const blob = new Blob([], { type }) - blob[BUFFER] = slicedBuffer - return blob - } - - get [Symbol.toStringTag] () { - return 'Blob' - } - - static get BUFFER () { - return BUFFER - } -} - -Object.defineProperties(Blob.prototype, { - size: { enumerable: true }, - type: { enumerable: true }, -}) - -module.exports = Blob diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/minipass-fetch/lib/body.js b/deps/npm/node_modules/@sigstore/sign/node_modules/minipass-fetch/lib/body.js deleted file mode 100644 index 62286bd1de0d91..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/minipass-fetch/lib/body.js +++ /dev/null @@ -1,350 +0,0 @@ -'use strict' -const { Minipass } = require('minipass') -const MinipassSized = require('minipass-sized') - -const Blob = require('./blob.js') -const { BUFFER } = Blob -const FetchError = require('./fetch-error.js') - -// optional dependency on 'encoding' -let convert -try { - convert = require('encoding').convert -} catch (e) { - // defer error until textConverted is called -} - -const INTERNALS = Symbol('Body internals') -const CONSUME_BODY = Symbol('consumeBody') - -class Body { - constructor (bodyArg, options = {}) { - const { size = 0, timeout = 0 } = options - const body = bodyArg === undefined || bodyArg === null ? null - : isURLSearchParams(bodyArg) ? Buffer.from(bodyArg.toString()) - : isBlob(bodyArg) ? bodyArg - : Buffer.isBuffer(bodyArg) ? bodyArg - : Object.prototype.toString.call(bodyArg) === '[object ArrayBuffer]' - ? Buffer.from(bodyArg) - : ArrayBuffer.isView(bodyArg) - ? Buffer.from(bodyArg.buffer, bodyArg.byteOffset, bodyArg.byteLength) - : Minipass.isStream(bodyArg) ? bodyArg - : Buffer.from(String(bodyArg)) - - this[INTERNALS] = { - body, - disturbed: false, - error: null, - } - - this.size = size - this.timeout = timeout - - if (Minipass.isStream(body)) { - body.on('error', er => { - const error = er.name === 'AbortError' ? 
er - : new FetchError(`Invalid response while trying to fetch ${ - this.url}: ${er.message}`, 'system', er) - this[INTERNALS].error = error - }) - } - } - - get body () { - return this[INTERNALS].body - } - - get bodyUsed () { - return this[INTERNALS].disturbed - } - - arrayBuffer () { - return this[CONSUME_BODY]().then(buf => - buf.buffer.slice(buf.byteOffset, buf.byteOffset + buf.byteLength)) - } - - blob () { - const ct = this.headers && this.headers.get('content-type') || '' - return this[CONSUME_BODY]().then(buf => Object.assign( - new Blob([], { type: ct.toLowerCase() }), - { [BUFFER]: buf } - )) - } - - async json () { - const buf = await this[CONSUME_BODY]() - try { - return JSON.parse(buf.toString()) - } catch (er) { - throw new FetchError( - `invalid json response body at ${this.url} reason: ${er.message}`, - 'invalid-json' - ) - } - } - - text () { - return this[CONSUME_BODY]().then(buf => buf.toString()) - } - - buffer () { - return this[CONSUME_BODY]() - } - - textConverted () { - return this[CONSUME_BODY]().then(buf => convertBody(buf, this.headers)) - } - - [CONSUME_BODY] () { - if (this[INTERNALS].disturbed) { - return Promise.reject(new TypeError(`body used already for: ${ - this.url}`)) - } - - this[INTERNALS].disturbed = true - - if (this[INTERNALS].error) { - return Promise.reject(this[INTERNALS].error) - } - - // body is null - if (this.body === null) { - return Promise.resolve(Buffer.alloc(0)) - } - - if (Buffer.isBuffer(this.body)) { - return Promise.resolve(this.body) - } - - const upstream = isBlob(this.body) ? this.body.stream() : this.body - - /* istanbul ignore if: should never happen */ - if (!Minipass.isStream(upstream)) { - return Promise.resolve(Buffer.alloc(0)) - } - - const stream = this.size && upstream instanceof MinipassSized ? upstream - : !this.size && upstream instanceof Minipass && - !(upstream instanceof MinipassSized) ? upstream - : this.size ? new MinipassSized({ size: this.size }) - : new Minipass() - - // allow timeout on slow response body, but only if the stream is still writable. this - // makes the timeout center on the socket stream from lib/index.js rather than the - // intermediary minipass stream we create to receive the data - const resTimeout = this.timeout && stream.writable ? setTimeout(() => { - stream.emit('error', new FetchError( - `Response timeout while trying to fetch ${ - this.url} (over ${this.timeout}ms)`, 'body-timeout')) - }, this.timeout) : null - - // do not keep the process open just for this timeout, even - // though we expect it'll get cleared eventually. - if (resTimeout && resTimeout.unref) { - resTimeout.unref() - } - - // do the pipe in the promise, because the pipe() can send too much - // data through right away and upset the MP Sized object - return new Promise((resolve) => { - // if the stream is some other kind of stream, then pipe through a MP - // so we can collect it more easily. 
- if (stream !== upstream) { - upstream.on('error', er => stream.emit('error', er)) - upstream.pipe(stream) - } - resolve() - }).then(() => stream.concat()).then(buf => { - clearTimeout(resTimeout) - return buf - }).catch(er => { - clearTimeout(resTimeout) - // request was aborted, reject with this Error - if (er.name === 'AbortError' || er.name === 'FetchError') { - throw er - } else if (er.name === 'RangeError') { - throw new FetchError(`Could not create Buffer from response body for ${ - this.url}: ${er.message}`, 'system', er) - } else { - // other errors, such as incorrect content-encoding or content-length - throw new FetchError(`Invalid response body while trying to fetch ${ - this.url}: ${er.message}`, 'system', er) - } - }) - } - - static clone (instance) { - if (instance.bodyUsed) { - throw new Error('cannot clone body after it is used') - } - - const body = instance.body - - // check that body is a stream and not form-data object - // NB: can't clone the form-data object without having it as a dependency - if (Minipass.isStream(body) && typeof body.getBoundary !== 'function') { - // create a dedicated tee stream so that we don't lose data - // potentially sitting in the body stream's buffer by writing it - // immediately to p1 and not having it for p2. - const tee = new Minipass() - const p1 = new Minipass() - const p2 = new Minipass() - tee.on('error', er => { - p1.emit('error', er) - p2.emit('error', er) - }) - body.on('error', er => tee.emit('error', er)) - tee.pipe(p1) - tee.pipe(p2) - body.pipe(tee) - // set instance body to one fork, return the other - instance[INTERNALS].body = p1 - return p2 - } else { - return instance.body - } - } - - static extractContentType (body) { - return body === null || body === undefined ? null - : typeof body === 'string' ? 'text/plain;charset=UTF-8' - : isURLSearchParams(body) - ? 'application/x-www-form-urlencoded;charset=UTF-8' - : isBlob(body) ? body.type || null - : Buffer.isBuffer(body) ? null - : Object.prototype.toString.call(body) === '[object ArrayBuffer]' ? null - : ArrayBuffer.isView(body) ? null - : typeof body.getBoundary === 'function' - ? `multipart/form-data;boundary=${body.getBoundary()}` - : Minipass.isStream(body) ? null - : 'text/plain;charset=UTF-8' - } - - static getTotalBytes (instance) { - const { body } = instance - return (body === null || body === undefined) ? 0 - : isBlob(body) ? body.size - : Buffer.isBuffer(body) ? body.length - : body && typeof body.getLengthSync === 'function' && ( - // detect form data input from form-data module - body._lengthRetrievers && - /* istanbul ignore next */ body._lengthRetrievers.length === 0 || // 1.x - body.hasKnownLength && body.hasKnownLength()) // 2.x - ? body.getLengthSync() - : null - } - - static writeToStream (dest, instance) { - const { body } = instance - - if (body === null || body === undefined) { - dest.end() - } else if (Buffer.isBuffer(body) || typeof body === 'string') { - dest.end(body) - } else { - // body is stream or blob - const stream = isBlob(body) ? body.stream() : body - stream.on('error', er => dest.emit('error', er)).pipe(dest) - } - - return dest - } -} - -Object.defineProperties(Body.prototype, { - body: { enumerable: true }, - bodyUsed: { enumerable: true }, - arrayBuffer: { enumerable: true }, - blob: { enumerable: true }, - json: { enumerable: true }, - text: { enumerable: true }, -}) - -const isURLSearchParams = obj => - // Duck-typing as a necessary condition. 
(typeof obj !== 'object' || - typeof obj.append !== 'function' || - typeof obj.delete !== 'function' || - typeof obj.get !== 'function' || - typeof obj.getAll !== 'function' || - typeof obj.has !== 'function' || - typeof obj.set !== 'function') ? false - // Brand-checking and more duck-typing as optional condition. - : obj.constructor.name === 'URLSearchParams' || - Object.prototype.toString.call(obj) === '[object URLSearchParams]' || - typeof obj.sort === 'function' - -const isBlob = obj => - typeof obj === 'object' && - typeof obj.arrayBuffer === 'function' && - typeof obj.type === 'string' && - typeof obj.stream === 'function' && - typeof obj.constructor === 'function' && - typeof obj.constructor.name === 'string' && - /^(Blob|File)$/.test(obj.constructor.name) && - /^(Blob|File)$/.test(obj[Symbol.toStringTag]) - -const convertBody = (buffer, headers) => { - /* istanbul ignore if */ - if (typeof convert !== 'function') { - throw new Error('The package `encoding` must be installed to use the textConverted() function') - } - - const ct = headers && headers.get('content-type') - let charset = 'utf-8' - let res - - // header - if (ct) { - res = /charset=([^;]*)/i.exec(ct) - } - - // no charset in content type, peek at response body for at most 1024 bytes - const str = buffer.slice(0, 1024).toString() - - // html5 - if (!res && str) { - res = /<meta.+?charset=(['"])(.+?)\1/i.exec(str) - } - - // html4 - if (!res && str) { - res = /<meta[\s]+?http-equiv=(['"])content-type\1[\s]+?content=(['"])(.+?)\2/i.exec(str) - - if (res) { - res = /charset=(.*)/i.exec(res.pop()) - } - } - - // xml - if (!res && str) { - res = /<\?xml.+?encoding=(['"])(.+?)\1/.exec(str) - } - - // found charset - if (res) { - charset = res.pop() - - // prevent decode issues when sites use incorrect encoding - // ref: https://hsivonen.fi/encoding-menu/ - if (charset === 'gb2312' || charset === 'gbk') { - charset = 'gb18030' - } - } - - // turn raw buffers into a single utf-8 buffer - return convert( - buffer, - 'UTF-8', - charset - ).toString() -} - -module.exports = Body diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/minipass-fetch/lib/fetch-error.js b/deps/npm/node_modules/@sigstore/sign/node_modules/minipass-fetch/lib/fetch-error.js deleted file mode 100644 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/minipass-fetch/lib/fetch-error.js +++ /dev/null @@ -1,32 +0,0 @@ -'use strict' -class FetchError extends Error { - constructor (message, type, systemError) { - super(message) - this.code = 'FETCH_ERROR' - - // pick up code, expected, path, ... - if (systemError) { - Object.assign(this, systemError) - } - - this.errno = this.code - - // normalize what type means - this.type = this.code === 'EBADSIZE' && this.found > this.expect - ? 'max-size' : type - this.message = message - Error.captureStackTrace(this, this.constructor) - } - - get name () { - return 'FetchError' - } - - // don't allow name to be overwritten - set name (n) {} - - get [Symbol.toStringTag] () { - return 'FetchError' - } -} -module.exports = FetchError diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/minipass-fetch/lib/headers.js b/deps/npm/node_modules/@sigstore/sign/node_modules/minipass-fetch/lib/headers.js deleted file mode 100644 index dd6e854d5ba399..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/minipass-fetch/lib/headers.js +++ /dev/null @@ -1,267 +0,0 @@ -'use strict' -const invalidTokenRegex = /[^^_`a-zA-Z\-0-9!#$%&'*+.|~]/ -const invalidHeaderCharRegex = /[^\t\x20-\x7e\x80-\xff]/ - -const validateName = name => { - name = `${name}` - if (invalidTokenRegex.test(name) || name === '') { - throw new TypeError(`${name} is not a legal HTTP header name`) - } -} - -const validateValue = value => { - value = `${value}` - if (invalidHeaderCharRegex.test(value)) { - throw new TypeError(`${value} is not a legal HTTP header value`) - } -} - -const find = (map, name) => { - name = name.toLowerCase() - for (const key in map) { - if (key.toLowerCase() === name) { - return key - } - } - return undefined -} - -const MAP = Symbol('map') -class Headers { - constructor (init = undefined) { - this[MAP] = Object.create(null) - if (init instanceof Headers) { - const rawHeaders = init.raw() - const headerNames = Object.keys(rawHeaders) - for (const headerName of headerNames) { - for (const value of rawHeaders[headerName]) { - this.append(headerName, value) - } - } - return - } - - // no-op - if (init === undefined || init === null) { - return - } - - if (typeof init === 'object') { - const method = init[Symbol.iterator] - if (method !== null && method !== undefined) { - if (typeof method !== 'function') { - throw new TypeError('Header pairs must be iterable') - } - - // sequence<sequence<ByteString>> - // Note: per spec we have to first exhaust the lists then process them - const pairs = [] - for (const pair of init) { - if (typeof pair !== 'object' || - typeof pair[Symbol.iterator] !==
'function') { - throw new TypeError('Each header pair must be iterable') - } - const arrPair = Array.from(pair) - if (arrPair.length !== 2) { - throw new TypeError('Each header pair must be a name/value tuple') - } - pairs.push(arrPair) - } - - for (const pair of pairs) { - this.append(pair[0], pair[1]) - } - } else { - // record - for (const key of Object.keys(init)) { - this.append(key, init[key]) - } - } - } else { - throw new TypeError('Provided initializer must be an object') - } - } - - get (name) { - name = `${name}` - validateName(name) - const key = find(this[MAP], name) - if (key === undefined) { - return null - } - - return this[MAP][key].join(', ') - } - - forEach (callback, thisArg = undefined) { - let pairs = getHeaders(this) - for (let i = 0; i < pairs.length; i++) { - const [name, value] = pairs[i] - callback.call(thisArg, value, name, this) - // refresh in case the callback added more headers - pairs = getHeaders(this) - } - } - - set (name, value) { - name = `${name}` - value = `${value}` - validateName(name) - validateValue(value) - const key = find(this[MAP], name) - this[MAP][key !== undefined ? key : name] = [value] - } - - append (name, value) { - name = `${name}` - value = `${value}` - validateName(name) - validateValue(value) - const key = find(this[MAP], name) - if (key !== undefined) { - this[MAP][key].push(value) - } else { - this[MAP][name] = [value] - } - } - - has (name) { - name = `${name}` - validateName(name) - return find(this[MAP], name) !== undefined - } - - delete (name) { - name = `${name}` - validateName(name) - const key = find(this[MAP], name) - if (key !== undefined) { - delete this[MAP][key] - } - } - - raw () { - return this[MAP] - } - - keys () { - return new HeadersIterator(this, 'key') - } - - values () { - return new HeadersIterator(this, 'value') - } - - [Symbol.iterator] () { - return new HeadersIterator(this, 'key+value') - } - - entries () { - return new HeadersIterator(this, 'key+value') - } - - get [Symbol.toStringTag] () { - return 'Headers' - } - - static exportNodeCompatibleHeaders (headers) { - const obj = Object.assign(Object.create(null), headers[MAP]) - - // http.request() only supports string as Host header. This hack makes - // specifying custom Host header possible. - const hostHeaderKey = find(headers[MAP], 'Host') - if (hostHeaderKey !== undefined) { - obj[hostHeaderKey] = obj[hostHeaderKey][0] - } - - return obj - } - - static createHeadersLenient (obj) { - const headers = new Headers() - for (const name of Object.keys(obj)) { - if (invalidTokenRegex.test(name)) { - continue - } - - if (Array.isArray(obj[name])) { - for (const val of obj[name]) { - if (invalidHeaderCharRegex.test(val)) { - continue - } - - if (headers[MAP][name] === undefined) { - headers[MAP][name] = [val] - } else { - headers[MAP][name].push(val) - } - } - } else if (!invalidHeaderCharRegex.test(obj[name])) { - headers[MAP][name] = [obj[name]] - } - } - return headers - } -} - -Object.defineProperties(Headers.prototype, { - get: { enumerable: true }, - forEach: { enumerable: true }, - set: { enumerable: true }, - append: { enumerable: true }, - has: { enumerable: true }, - delete: { enumerable: true }, - keys: { enumerable: true }, - values: { enumerable: true }, - entries: { enumerable: true }, -}) - -const getHeaders = (headers, kind = 'key+value') => - Object.keys(headers[MAP]).sort().map( - kind === 'key' ? k => k.toLowerCase() - : kind === 'value' ? 
k => headers[MAP][k].join(', ') - : k => [k.toLowerCase(), headers[MAP][k].join(', ')] - ) - -const INTERNAL = Symbol('internal') - -class HeadersIterator { - constructor (target, kind) { - this[INTERNAL] = { - target, - kind, - index: 0, - } - } - - get [Symbol.toStringTag] () { - return 'HeadersIterator' - } - - next () { - /* istanbul ignore if: should be impossible */ - if (!this || Object.getPrototypeOf(this) !== HeadersIterator.prototype) { - throw new TypeError('Value of `this` is not a HeadersIterator') - } - - const { target, kind, index } = this[INTERNAL] - const values = getHeaders(target, kind) - const len = values.length - if (index >= len) { - return { - value: undefined, - done: true, - } - } - - this[INTERNAL].index++ - - return { value: values[index], done: false } - } -} - -// manually extend because 'extends' requires a ctor -Object.setPrototypeOf(HeadersIterator.prototype, - Object.getPrototypeOf(Object.getPrototypeOf([][Symbol.iterator]()))) - -module.exports = Headers diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/minipass-fetch/lib/index.js b/deps/npm/node_modules/@sigstore/sign/node_modules/minipass-fetch/lib/index.js deleted file mode 100644 index da402161670e65..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/minipass-fetch/lib/index.js +++ /dev/null @@ -1,377 +0,0 @@ -'use strict' -const { URL } = require('url') -const http = require('http') -const https = require('https') -const zlib = require('minizlib') -const { Minipass } = require('minipass') - -const Body = require('./body.js') -const { writeToStream, getTotalBytes } = Body -const Response = require('./response.js') -const Headers = require('./headers.js') -const { createHeadersLenient } = Headers -const Request = require('./request.js') -const { getNodeRequestOptions } = Request -const FetchError = require('./fetch-error.js') -const AbortError = require('./abort-error.js') - -// XXX this should really be split up and unit-ized for easier testing -// and better DRY implementation of data/http request aborting -const fetch = async (url, opts) => { - if (/^data:/.test(url)) { - const request = new Request(url, opts) - // delay 1 promise tick so that the consumer can abort right away - return Promise.resolve().then(() => new Promise((resolve, reject) => { - let type, data - try { - const { pathname, search } = new URL(url) - const split = pathname.split(',') - if (split.length < 2) { - throw new Error('invalid data: URI') - } - const mime = split.shift() - const base64 = /;base64$/.test(mime) - type = base64 ? mime.slice(0, -1 * ';base64'.length) : mime - const rawData = decodeURIComponent(split.join(',') + search) - data = base64 ? Buffer.from(rawData, 'base64') : Buffer.from(rawData) - } catch (er) { - return reject(new FetchError(`[${request.method}] ${ - request.url} invalid URL, ${er.message}`, 'system', er)) - } - - const { signal } = request - if (signal && signal.aborted) { - return reject(new AbortError('The user aborted a request.')) - } - - const headers = { 'Content-Length': data.length } - if (type) { - headers['Content-Type'] = type - } - return resolve(new Response(data, { headers })) - })) - } - - return new Promise((resolve, reject) => { - // build request object - const request = new Request(url, opts) - let options - try { - options = getNodeRequestOptions(request) - } catch (er) { - return reject(er) - } - - const send = (options.protocol === 'https:' ? 
https : http).request - const { signal } = request - let response = null - const abort = () => { - const error = new AbortError('The user aborted a request.') - reject(error) - if (Minipass.isStream(request.body) && - typeof request.body.destroy === 'function') { - request.body.destroy(error) - } - if (response && response.body) { - response.body.emit('error', error) - } - } - - if (signal && signal.aborted) { - return abort() - } - - const abortAndFinalize = () => { - abort() - finalize() - } - - const finalize = () => { - req.abort() - if (signal) { - signal.removeEventListener('abort', abortAndFinalize) - } - clearTimeout(reqTimeout) - } - - // send request - const req = send(options) - - if (signal) { - signal.addEventListener('abort', abortAndFinalize) - } - - let reqTimeout = null - if (request.timeout) { - req.once('socket', () => { - reqTimeout = setTimeout(() => { - reject(new FetchError(`network timeout at: ${ - request.url}`, 'request-timeout')) - finalize() - }, request.timeout) - }) - } - - req.on('error', er => { - // if a 'response' event is emitted before the 'error' event, then by the - // time this handler is run it's too late to reject the Promise for the - // response. instead, we forward the error event to the response stream - // so that the error will surface to the user when they try to consume - // the body. this is done as a side effect of aborting the request except - // for in windows, where we must forward the event manually, otherwise - // there is no longer a ref'd socket attached to the request and the - // stream never ends so the event loop runs out of work and the process - // exits without warning. - // coverage skipped here due to the difficulty in testing - // istanbul ignore next - if (req.res) { - req.res.emit('error', er) - } - reject(new FetchError(`request to ${request.url} failed, reason: ${ - er.message}`, 'system', er)) - finalize() - }) - - req.on('response', res => { - clearTimeout(reqTimeout) - - const headers = createHeadersLenient(res.headers) - - // HTTP fetch step 5 - if (fetch.isRedirect(res.statusCode)) { - // HTTP fetch step 5.2 - const location = headers.get('Location') - - // HTTP fetch step 5.3 - let locationURL = null - try { - locationURL = location === null ? null : new URL(location, request.url).toString() - } catch { - // error here can only be invalid URL in Location: header - // do not throw when options.redirect == manual - // let the user extract the errorneous redirect URL - if (request.redirect !== 'manual') { - /* eslint-disable-next-line max-len */ - reject(new FetchError(`uri requested responds with an invalid redirect URL: ${location}`, 'invalid-redirect')) - finalize() - return - } - } - - // HTTP fetch step 5.5 - if (request.redirect === 'error') { - reject(new FetchError('uri requested responds with a redirect, ' + - `redirect mode is set to error: ${request.url}`, 'no-redirect')) - finalize() - return - } else if (request.redirect === 'manual') { - // node-fetch-specific step: make manual redirect a bit easier to - // use by setting the Location header value to the resolved URL. 
- if (locationURL !== null) { - // handle corrupted header - try { - headers.set('Location', locationURL) - } catch (err) { - /* istanbul ignore next: nodejs server prevent invalid - response headers, we can't test this through normal - request */ - reject(err) - } - } - } else if (request.redirect === 'follow' && locationURL !== null) { - // HTTP-redirect fetch step 5 - if (request.counter >= request.follow) { - reject(new FetchError(`maximum redirect reached at: ${ - request.url}`, 'max-redirect')) - finalize() - return - } - - // HTTP-redirect fetch step 9 - if (res.statusCode !== 303 && - request.body && - getTotalBytes(request) === null) { - reject(new FetchError( - 'Cannot follow redirect with body being a readable stream', - 'unsupported-redirect' - )) - finalize() - return - } - - // Update host due to redirection - request.headers.set('host', (new URL(locationURL)).host) - - // HTTP-redirect fetch step 6 (counter increment) - // Create a new Request object. - const requestOpts = { - headers: new Headers(request.headers), - follow: request.follow, - counter: request.counter + 1, - agent: request.agent, - compress: request.compress, - method: request.method, - body: request.body, - signal: request.signal, - timeout: request.timeout, - } - - // if the redirect is to a new hostname, strip the authorization and cookie headers - const parsedOriginal = new URL(request.url) - const parsedRedirect = new URL(locationURL) - if (parsedOriginal.hostname !== parsedRedirect.hostname) { - requestOpts.headers.delete('authorization') - requestOpts.headers.delete('cookie') - } - - // HTTP-redirect fetch step 11 - if (res.statusCode === 303 || ( - (res.statusCode === 301 || res.statusCode === 302) && - request.method === 'POST' - )) { - requestOpts.method = 'GET' - requestOpts.body = undefined - requestOpts.headers.delete('content-length') - } - - // HTTP-redirect fetch step 15 - resolve(fetch(new Request(locationURL, requestOpts))) - finalize() - return - } - } // end if(isRedirect) - - // prepare response - res.once('end', () => - signal && signal.removeEventListener('abort', abortAndFinalize)) - - const body = new Minipass() - // if an error occurs, either on the response stream itself, on one of the - // decoder streams, or a response length timeout from the Body class, we - // forward the error through to our internal body stream. If we see an - // error event on that, we call finalize to abort the request and ensure - // we don't leave a socket believing a request is in flight. - // this is difficult to test, so lacks specific coverage. - body.on('error', finalize) - // exceedingly rare that the stream would have an error, - // but just in case we proxy it to the stream in use. - res.on('error', /* istanbul ignore next */ er => body.emit('error', er)) - res.on('data', (chunk) => body.write(chunk)) - res.on('end', () => body.end()) - - const responseOptions = { - url: request.url, - status: res.statusCode, - statusText: res.statusMessage, - headers: headers, - size: request.size, - timeout: request.timeout, - counter: request.counter, - trailer: new Promise(resolveTrailer => - res.on('end', () => resolveTrailer(createHeadersLenient(res.trailers)))), - } - - // HTTP-network fetch step 12.1.1.3 - const codings = headers.get('Content-Encoding') - - // HTTP-network fetch step 12.1.1.4: handle content codings - - // in following scenarios we ignore compression support - // 1. compression support is disabled - // 2. HEAD request - // 3. no Content-Encoding header - // 4. 
no content response (204) - // 5. content not modified response (304) - if (!request.compress || - request.method === 'HEAD' || - codings === null || - res.statusCode === 204 || - res.statusCode === 304) { - response = new Response(body, responseOptions) - resolve(response) - return - } - - // Be less strict when decoding compressed responses, since sometimes - // servers send slightly invalid responses that are still accepted - // by common browsers. - // Always using Z_SYNC_FLUSH is what cURL does. - const zlibOptions = { - flush: zlib.constants.Z_SYNC_FLUSH, - finishFlush: zlib.constants.Z_SYNC_FLUSH, - } - - // for gzip - if (codings === 'gzip' || codings === 'x-gzip') { - const unzip = new zlib.Gunzip(zlibOptions) - response = new Response( - // exceedingly rare that the stream would have an error, - // but just in case we proxy it to the stream in use. - body.on('error', /* istanbul ignore next */ er => unzip.emit('error', er)).pipe(unzip), - responseOptions - ) - resolve(response) - return - } - - // for deflate - if (codings === 'deflate' || codings === 'x-deflate') { - // handle the infamous raw deflate response from old servers - // a hack for old IIS and Apache servers - const raw = res.pipe(new Minipass()) - raw.once('data', chunk => { - // see http://stackoverflow.com/questions/37519828 - const decoder = (chunk[0] & 0x0F) === 0x08 - ? new zlib.Inflate() - : new zlib.InflateRaw() - // exceedingly rare that the stream would have an error, - // but just in case we proxy it to the stream in use. - body.on('error', /* istanbul ignore next */ er => decoder.emit('error', er)).pipe(decoder) - response = new Response(decoder, responseOptions) - resolve(response) - }) - return - } - - // for br - if (codings === 'br') { - // ignoring coverage so tests don't have to fake support (or lack of) for brotli - // istanbul ignore next - try { - var decoder = new zlib.BrotliDecompress() - } catch (err) { - reject(err) - finalize() - return - } - // exceedingly rare that the stream would have an error, - // but just in case we proxy it to the stream in use. 
- body.on('error', /* istanbul ignore next */ er => decoder.emit('error', er)).pipe(decoder) - response = new Response(decoder, responseOptions) - resolve(response) - return - } - - // otherwise, use response as-is - response = new Response(body, responseOptions) - resolve(response) - }) - - writeToStream(req, request) - }) -} - -module.exports = fetch - -fetch.isRedirect = code => - code === 301 || - code === 302 || - code === 303 || - code === 307 || - code === 308 - -fetch.Headers = Headers -fetch.Request = Request -fetch.Response = Response -fetch.FetchError = FetchError -fetch.AbortError = AbortError diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/minipass-fetch/lib/request.js b/deps/npm/node_modules/@sigstore/sign/node_modules/minipass-fetch/lib/request.js deleted file mode 100644 index 054439e6699107..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/minipass-fetch/lib/request.js +++ /dev/null @@ -1,282 +0,0 @@ -'use strict' -const { URL } = require('url') -const { Minipass } = require('minipass') -const Headers = require('./headers.js') -const { exportNodeCompatibleHeaders } = Headers -const Body = require('./body.js') -const { clone, extractContentType, getTotalBytes } = Body - -const version = require('../package.json').version -const defaultUserAgent = - `minipass-fetch/${version} (+https://github.com/isaacs/minipass-fetch)` - -const INTERNALS = Symbol('Request internals') - -const isRequest = input => - typeof input === 'object' && typeof input[INTERNALS] === 'object' - -const isAbortSignal = signal => { - const proto = ( - signal - && typeof signal === 'object' - && Object.getPrototypeOf(signal) - ) - return !!(proto && proto.constructor.name === 'AbortSignal') -} - -class Request extends Body { - constructor (input, init = {}) { - const parsedURL = isRequest(input) ? new URL(input.url) - : input && input.href ? new URL(input.href) - : new URL(`${input}`) - - if (isRequest(input)) { - init = { ...input[INTERNALS], ...init } - } else if (!input || typeof input === 'string') { - input = {} - } - - const method = (init.method || input.method || 'GET').toUpperCase() - const isGETHEAD = method === 'GET' || method === 'HEAD' - - if ((init.body !== null && init.body !== undefined || - isRequest(input) && input.body !== null) && isGETHEAD) { - throw new TypeError('Request with GET/HEAD method cannot have body') - } - - const inputBody = init.body !== null && init.body !== undefined ? init.body - : isRequest(input) && input.body !== null ? clone(input) - : null - - super(inputBody, { - timeout: init.timeout || input.timeout || 0, - size: init.size || input.size || 0, - }) - - const headers = new Headers(init.headers || input.headers || {}) - - if (inputBody !== null && inputBody !== undefined && - !headers.has('Content-Type')) { - const contentType = extractContentType(inputBody) - if (contentType) { - headers.append('Content-Type', contentType) - } - } - - const signal = 'signal' in init ? 
init.signal - : null - - if (signal !== null && signal !== undefined && !isAbortSignal(signal)) { - throw new TypeError('Expected signal must be an instanceof AbortSignal') - } - - // TLS specific options that are handled by node - const { - ca, - cert, - ciphers, - clientCertEngine, - crl, - dhparam, - ecdhCurve, - family, - honorCipherOrder, - key, - passphrase, - pfx, - rejectUnauthorized = process.env.NODE_TLS_REJECT_UNAUTHORIZED !== '0', - secureOptions, - secureProtocol, - servername, - sessionIdContext, - } = init - - this[INTERNALS] = { - method, - redirect: init.redirect || input.redirect || 'follow', - headers, - parsedURL, - signal, - ca, - cert, - ciphers, - clientCertEngine, - crl, - dhparam, - ecdhCurve, - family, - honorCipherOrder, - key, - passphrase, - pfx, - rejectUnauthorized, - secureOptions, - secureProtocol, - servername, - sessionIdContext, - } - - // node-fetch-only options - this.follow = init.follow !== undefined ? init.follow - : input.follow !== undefined ? input.follow - : 20 - this.compress = init.compress !== undefined ? init.compress - : input.compress !== undefined ? input.compress - : true - this.counter = init.counter || input.counter || 0 - this.agent = init.agent || input.agent - } - - get method () { - return this[INTERNALS].method - } - - get url () { - return this[INTERNALS].parsedURL.toString() - } - - get headers () { - return this[INTERNALS].headers - } - - get redirect () { - return this[INTERNALS].redirect - } - - get signal () { - return this[INTERNALS].signal - } - - clone () { - return new Request(this) - } - - get [Symbol.toStringTag] () { - return 'Request' - } - - static getNodeRequestOptions (request) { - const parsedURL = request[INTERNALS].parsedURL - const headers = new Headers(request[INTERNALS].headers) - - // fetch step 1.3 - if (!headers.has('Accept')) { - headers.set('Accept', '*/*') - } - - // Basic fetch - if (!/^https?:$/.test(parsedURL.protocol)) { - throw new TypeError('Only HTTP(S) protocols are supported') - } - - if (request.signal && - Minipass.isStream(request.body) && - typeof request.body.destroy !== 'function') { - throw new Error( - 'Cancellation of streamed requests with AbortSignal is not supported') - } - - // HTTP-network-or-cache fetch steps 2.4-2.7 - const contentLengthValue = - (request.body === null || request.body === undefined) && - /^(POST|PUT)$/i.test(request.method) ? '0' - : request.body !== null && request.body !== undefined - ? getTotalBytes(request) - : null - - if (contentLengthValue) { - headers.set('Content-Length', contentLengthValue + '') - } - - // HTTP-network-or-cache fetch step 2.11 - if (!headers.has('User-Agent')) { - headers.set('User-Agent', defaultUserAgent) - } - - // HTTP-network-or-cache fetch step 2.15 - if (request.compress && !headers.has('Accept-Encoding')) { - headers.set('Accept-Encoding', 'gzip,deflate') - } - - const agent = typeof request.agent === 'function' - ? 
request.agent(parsedURL) - : request.agent - - if (!headers.has('Connection') && !agent) { - headers.set('Connection', 'close') - } - - // TLS specific options that are handled by node - const { - ca, - cert, - ciphers, - clientCertEngine, - crl, - dhparam, - ecdhCurve, - family, - honorCipherOrder, - key, - passphrase, - pfx, - rejectUnauthorized, - secureOptions, - secureProtocol, - servername, - sessionIdContext, - } = request[INTERNALS] - - // HTTP-network fetch step 4.2 - // chunked encoding is handled by Node.js - - // we cannot spread parsedURL directly, so we have to read each property one-by-one - // and map them to the equivalent https?.request() method options - const urlProps = { - auth: parsedURL.username || parsedURL.password - ? `${parsedURL.username}:${parsedURL.password}` - : '', - host: parsedURL.host, - hostname: parsedURL.hostname, - path: `${parsedURL.pathname}${parsedURL.search}`, - port: parsedURL.port, - protocol: parsedURL.protocol, - } - - return { - ...urlProps, - method: request.method, - headers: exportNodeCompatibleHeaders(headers), - agent, - ca, - cert, - ciphers, - clientCertEngine, - crl, - dhparam, - ecdhCurve, - family, - honorCipherOrder, - key, - passphrase, - pfx, - rejectUnauthorized, - secureOptions, - secureProtocol, - servername, - sessionIdContext, - timeout: request.timeout, - } - } -} - -module.exports = Request - -Object.defineProperties(Request.prototype, { - method: { enumerable: true }, - url: { enumerable: true }, - headers: { enumerable: true }, - redirect: { enumerable: true }, - clone: { enumerable: true }, - signal: { enumerable: true }, -}) diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/minipass-fetch/lib/response.js b/deps/npm/node_modules/@sigstore/sign/node_modules/minipass-fetch/lib/response.js deleted file mode 100644 index 54cb52db3594a7..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/minipass-fetch/lib/response.js +++ /dev/null @@ -1,90 +0,0 @@ -'use strict' -const http = require('http') -const { STATUS_CODES } = http - -const Headers = require('./headers.js') -const Body = require('./body.js') -const { clone, extractContentType } = Body - -const INTERNALS = Symbol('Response internals') - -class Response extends Body { - constructor (body = null, opts = {}) { - super(body, opts) - - const status = opts.status || 200 - const headers = new Headers(opts.headers) - - if (body !== null && body !== undefined && !headers.has('Content-Type')) { - const contentType = extractContentType(body) - if (contentType) { - headers.append('Content-Type', contentType) - } - } - - this[INTERNALS] = { - url: opts.url, - status, - statusText: opts.statusText || STATUS_CODES[status], - headers, - counter: opts.counter, - trailer: Promise.resolve(opts.trailer || new Headers()), - } - } - - get trailer () { - return this[INTERNALS].trailer - } - - get url () { - return this[INTERNALS].url || '' - } - - get status () { - return this[INTERNALS].status - } - - get ok () { - return this[INTERNALS].status >= 200 && this[INTERNALS].status < 300 - } - - get redirected () { - return this[INTERNALS].counter > 0 - } - - get statusText () { - return this[INTERNALS].statusText - } - - get headers () { - return this[INTERNALS].headers - } - - clone () { - return new Response(clone(this), { - url: this.url, - status: this.status, - statusText: this.statusText, - headers: this.headers, - ok: this.ok, - redirected: this.redirected, - trailer: this.trailer, - }) - } - - get [Symbol.toStringTag] () { - return 'Response' - } -} - 
-module.exports = Response - -Object.defineProperties(Response.prototype, { - url: { enumerable: true }, - status: { enumerable: true }, - ok: { enumerable: true }, - redirected: { enumerable: true }, - statusText: { enumerable: true }, - headers: { enumerable: true }, - clone: { enumerable: true }, -}) diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/minipass-fetch/package.json b/deps/npm/node_modules/@sigstore/sign/node_modules/minipass-fetch/package.json deleted file mode 100644 index d491a7fba126d0..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/minipass-fetch/package.json +++ /dev/null @@ -1,69 +0,0 @@ -{ - "name": "minipass-fetch", - "version": "3.0.5", - "description": "An implementation of window.fetch in Node.js using Minipass streams", - "license": "MIT", - "main": "lib/index.js", - "scripts": { - "test:tls-fixtures": "./test/fixtures/tls/setup.sh", - "test": "tap", - "snap": "tap", - "lint": "eslint \"**/*.{js,cjs,ts,mjs,jsx,tsx}\"", - "postlint": "template-oss-check", - "lintfix": "npm run lint -- --fix", - "posttest": "npm run lint", - "template-oss-apply": "template-oss-apply --force" - }, - "tap": { - "coverage-map": "map.js", - "check-coverage": true, - "nyc-arg": [ - "--exclude", - "tap-snapshots/**" - ] - }, - "devDependencies": { - "@npmcli/eslint-config": "^4.0.0", - "@npmcli/template-oss": "4.22.0", - "@ungap/url-search-params": "^0.2.2", - "abort-controller": "^3.0.0", - "abortcontroller-polyfill": "~1.7.3", - "encoding": "^0.1.13", - "form-data": "^4.0.0", - "nock": "^13.2.4", - "parted": "^0.1.1", - "string-to-arraybuffer": "^1.0.2", - "tap": "^16.0.0" - }, - "dependencies": { - "minipass": "^7.0.3", - "minipass-sized": "^1.0.3", - "minizlib": "^2.1.2" - }, - "optionalDependencies": { - "encoding": "^0.1.13" - }, - "repository": { - "type": "git", - "url": "git+https://github.com/npm/minipass-fetch.git" - }, - "keywords": [ - "fetch", - "minipass", - "node-fetch", - "window.fetch" - ], - "files": [ - "bin/", - "lib/" - ], - "engines": { - "node": "^14.17.0 || ^16.13.0 || >=18.0.0" - }, - "author": "GitHub Inc.", - "templateOSS": { - "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. Edits may be overwritten.", - "version": "4.22.0", - "publish": "true" - } -} diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/proc-log/LICENSE b/deps/npm/node_modules/@sigstore/sign/node_modules/proc-log/LICENSE deleted file mode 100644 index 83837797202b70..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/proc-log/LICENSE +++ /dev/null @@ -1,15 +0,0 @@ -The ISC License - -Copyright (c) GitHub, Inc. - -Permission to use, copy, modify, and/or distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR -IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
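For context on what is removed above: this vendored copy of minipass-fetch exposes a node-fetch-style API (fetch, Headers, Request, Response, plus the FetchError/AbortError classes and the isRedirect helper). A minimal usage sketch, assuming minipass-fetch is installed as a direct dependency; the URL is illustrative:

const fetch = require('minipass-fetch')

async function main () {
  // Returns a Response whose body is a Minipass stream.
  const res = await fetch('https://registry.npmjs.org/ssri/latest')
  if (!res.ok) {
    throw new Error(`unexpected status: ${res.status}`)
  }
  const pkg = await res.json() // Body helpers: .json(), .text(), .buffer()
  console.log(pkg.version)
}

main().catch(console.error)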
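Similarly, the proc-log module removed just below holds no state of its own: producers emit 'log' (and 'output', 'time', 'input') events on the process object, and whoever owns the terminal subscribes. A sketch of that contract, assuming proc-log v4 as shown in the deleted source:

const { log } = require('proc-log')

// A consumer (e.g. the npm CLI) subscribes once on the process object...
process.on('log', (level, ...args) => {
  console.error(`[${level}]`, ...args)
})

// ...so any producer can log without holding a logger reference.
log.info('demo', 'hello')   // emits process.emit('log', 'info', 'demo', 'hello')
log.warn('demo', 'careful') // printed by the subscriber as: [warn] demo careful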
diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/proc-log/lib/index.js b/deps/npm/node_modules/@sigstore/sign/node_modules/proc-log/lib/index.js deleted file mode 100644 index 86d90861078dab..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/proc-log/lib/index.js +++ /dev/null @@ -1,153 +0,0 @@ -const META = Symbol('proc-log.meta') -module.exports = { - META: META, - output: { - LEVELS: [ - 'standard', - 'error', - 'buffer', - 'flush', - ], - KEYS: { - standard: 'standard', - error: 'error', - buffer: 'buffer', - flush: 'flush', - }, - standard: function (...args) { - return process.emit('output', 'standard', ...args) - }, - error: function (...args) { - return process.emit('output', 'error', ...args) - }, - buffer: function (...args) { - return process.emit('output', 'buffer', ...args) - }, - flush: function (...args) { - return process.emit('output', 'flush', ...args) - }, - }, - log: { - LEVELS: [ - 'notice', - 'error', - 'warn', - 'info', - 'verbose', - 'http', - 'silly', - 'timing', - 'pause', - 'resume', - ], - KEYS: { - notice: 'notice', - error: 'error', - warn: 'warn', - info: 'info', - verbose: 'verbose', - http: 'http', - silly: 'silly', - timing: 'timing', - pause: 'pause', - resume: 'resume', - }, - error: function (...args) { - return process.emit('log', 'error', ...args) - }, - notice: function (...args) { - return process.emit('log', 'notice', ...args) - }, - warn: function (...args) { - return process.emit('log', 'warn', ...args) - }, - info: function (...args) { - return process.emit('log', 'info', ...args) - }, - verbose: function (...args) { - return process.emit('log', 'verbose', ...args) - }, - http: function (...args) { - return process.emit('log', 'http', ...args) - }, - silly: function (...args) { - return process.emit('log', 'silly', ...args) - }, - timing: function (...args) { - return process.emit('log', 'timing', ...args) - }, - pause: function () { - return process.emit('log', 'pause') - }, - resume: function () { - return process.emit('log', 'resume') - }, - }, - time: { - LEVELS: [ - 'start', - 'end', - ], - KEYS: { - start: 'start', - end: 'end', - }, - start: function (name, fn) { - process.emit('time', 'start', name) - function end () { - return process.emit('time', 'end', name) - } - if (typeof fn === 'function') { - const res = fn() - if (res && res.finally) { - return res.finally(end) - } - end() - return res - } - return end - }, - end: function (name) { - return process.emit('time', 'end', name) - }, - }, - input: { - LEVELS: [ - 'start', - 'end', - 'read', - ], - KEYS: { - start: 'start', - end: 'end', - read: 'read', - }, - start: function (fn) { - process.emit('input', 'start') - function end () { - return process.emit('input', 'end') - } - if (typeof fn === 'function') { - const res = fn() - if (res && res.finally) { - return res.finally(end) - } - end() - return res - } - return end - }, - end: function () { - return process.emit('input', 'end') - }, - read: function (...args) { - let resolve, reject - const promise = new Promise((_resolve, _reject) => { - resolve = _resolve - reject = _reject - }) - process.emit('input', 'read', resolve, reject, ...args) - return promise - }, - }, -} diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/proc-log/package.json b/deps/npm/node_modules/@sigstore/sign/node_modules/proc-log/package.json deleted file mode 100644 index 4ab89102ecc9b5..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/proc-log/package.json +++ /dev/null @@ -1,45 +0,0 @@ -{ 
- "name": "proc-log", - "version": "4.2.0", - "files": [ - "bin/", - "lib/" - ], - "main": "lib/index.js", - "description": "just emit 'log' events on the process object", - "repository": { - "type": "git", - "url": "https://github.com/npm/proc-log.git" - }, - "author": "GitHub Inc.", - "license": "ISC", - "scripts": { - "test": "tap", - "snap": "tap", - "posttest": "npm run lint", - "postsnap": "eslint index.js test/*.js --fix", - "lint": "eslint \"**/*.{js,cjs,ts,mjs,jsx,tsx}\"", - "postlint": "template-oss-check", - "lintfix": "npm run lint -- --fix", - "template-oss-apply": "template-oss-apply --force" - }, - "devDependencies": { - "@npmcli/eslint-config": "^4.0.0", - "@npmcli/template-oss": "4.21.3", - "tap": "^16.0.1" - }, - "engines": { - "node": "^14.17.0 || ^16.13.0 || >=18.0.0" - }, - "templateOSS": { - "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. Edits may be overwritten.", - "version": "4.21.3", - "publish": true - }, - "tap": { - "nyc-arg": [ - "--exclude", - "tap-snapshots/**" - ] - } -} diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/ssri/LICENSE.md b/deps/npm/node_modules/@sigstore/sign/node_modules/ssri/LICENSE.md deleted file mode 100644 index e335388869f50f..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/ssri/LICENSE.md +++ /dev/null @@ -1,16 +0,0 @@ -ISC License - -Copyright 2021 (c) npm, Inc. - -Permission to use, copy, modify, and/or distribute this software for -any purpose with or without fee is hereby granted, provided that the -above copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE COPYRIGHT HOLDER DISCLAIMS -ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE -COPYRIGHT HOLDER BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR -CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS -OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE -OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE -USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/ssri/lib/index.js b/deps/npm/node_modules/@sigstore/sign/node_modules/ssri/lib/index.js deleted file mode 100644 index 7d749ed480fb98..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/ssri/lib/index.js +++ /dev/null @@ -1,580 +0,0 @@ -'use strict' - -const crypto = require('crypto') -const { Minipass } = require('minipass') - -const SPEC_ALGORITHMS = ['sha512', 'sha384', 'sha256'] -const DEFAULT_ALGORITHMS = ['sha512'] - -// TODO: this should really be a hardcoded list of algorithms we support, -// rather than [a-z0-9]. -const BASE64_REGEX = /^[a-z0-9+/]+(?:=?=?)$/i -const SRI_REGEX = /^([a-z0-9]+)-([^?]+)([?\S*]*)$/ -const STRICT_SRI_REGEX = /^([a-z0-9]+)-([A-Za-z0-9+/=]{44,88})(\?[\x21-\x7E]*)?$/ -const VCHAR_REGEX = /^[\x21-\x7E]+$/ - -const getOptString = options => options?.length ? `?${options.join('?')}` : '' - -class IntegrityStream extends Minipass { - #emittedIntegrity - #emittedSize - #emittedVerified - - constructor (opts) { - super() - this.size = 0 - this.opts = opts - - // may be overridden later, but set now for class consistency - this.#getOptions() - - // options used for calculating stream. can't be changed. 
- if (opts?.algorithms) { - this.algorithms = [...opts.algorithms] - } else { - this.algorithms = [...DEFAULT_ALGORITHMS] - } - if (this.algorithm !== null && !this.algorithms.includes(this.algorithm)) { - this.algorithms.push(this.algorithm) - } - - this.hashes = this.algorithms.map(crypto.createHash) - } - - #getOptions () { - // For verification - this.sri = this.opts?.integrity ? parse(this.opts?.integrity, this.opts) : null - this.expectedSize = this.opts?.size - - if (!this.sri) { - this.algorithm = null - } else if (this.sri.isHash) { - this.goodSri = true - this.algorithm = this.sri.algorithm - } else { - this.goodSri = !this.sri.isEmpty() - this.algorithm = this.sri.pickAlgorithm(this.opts) - } - - this.digests = this.goodSri ? this.sri[this.algorithm] : null - this.optString = getOptString(this.opts?.options) - } - - on (ev, handler) { - if (ev === 'size' && this.#emittedSize) { - return handler(this.#emittedSize) - } - - if (ev === 'integrity' && this.#emittedIntegrity) { - return handler(this.#emittedIntegrity) - } - - if (ev === 'verified' && this.#emittedVerified) { - return handler(this.#emittedVerified) - } - - return super.on(ev, handler) - } - - emit (ev, data) { - if (ev === 'end') { - this.#onEnd() - } - return super.emit(ev, data) - } - - write (data) { - this.size += data.length - this.hashes.forEach(h => h.update(data)) - return super.write(data) - } - - #onEnd () { - if (!this.goodSri) { - this.#getOptions() - } - const newSri = parse(this.hashes.map((h, i) => { - return `${this.algorithms[i]}-${h.digest('base64')}${this.optString}` - }).join(' '), this.opts) - // Integrity verification mode - const match = this.goodSri && newSri.match(this.sri, this.opts) - if (typeof this.expectedSize === 'number' && this.size !== this.expectedSize) { - /* eslint-disable-next-line max-len */ - const err = new Error(`stream size mismatch when checking ${this.sri}.\n Wanted: ${this.expectedSize}\n Found: ${this.size}`) - err.code = 'EBADSIZE' - err.found = this.size - err.expected = this.expectedSize - err.sri = this.sri - this.emit('error', err) - } else if (this.sri && !match) { - /* eslint-disable-next-line max-len */ - const err = new Error(`${this.sri} integrity checksum failed when using ${this.algorithm}: wanted ${this.digests} but got ${newSri}. (${this.size} bytes)`) - err.code = 'EINTEGRITY' - err.found = newSri - err.expected = this.digests - err.algorithm = this.algorithm - err.sri = this.sri - this.emit('error', err) - } else { - this.#emittedSize = this.size - this.emit('size', this.size) - this.#emittedIntegrity = newSri - this.emit('integrity', newSri) - if (match) { - this.#emittedVerified = match - this.emit('verified', match) - } - } - } -} - -class Hash { - get isHash () { - return true - } - - constructor (hash, opts) { - const strict = opts?.strict - this.source = hash.trim() - - // set default values so that we make V8 happy to - // always see a familiar object template. - this.digest = '' - this.algorithm = '' - this.options = [] - - // 3.1. Integrity metadata (called "Hash" by ssri) - // https://w3c.github.io/webappsec-subresource-integrity/#integrity-metadata-description - const match = this.source.match( - strict - ? 
STRICT_SRI_REGEX - : SRI_REGEX - ) - if (!match) { - return - } - if (strict && !SPEC_ALGORITHMS.includes(match[1])) { - return - } - this.algorithm = match[1] - this.digest = match[2] - - const rawOpts = match[3] - if (rawOpts) { - this.options = rawOpts.slice(1).split('?') - } - } - - hexDigest () { - return this.digest && Buffer.from(this.digest, 'base64').toString('hex') - } - - toJSON () { - return this.toString() - } - - match (integrity, opts) { - const other = parse(integrity, opts) - if (!other) { - return false - } - if (other.isIntegrity) { - const algo = other.pickAlgorithm(opts, [this.algorithm]) - - if (!algo) { - return false - } - - const foundHash = other[algo].find(hash => hash.digest === this.digest) - - if (foundHash) { - return foundHash - } - - return false - } - return other.digest === this.digest ? other : false - } - - toString (opts) { - if (opts?.strict) { - // Strict mode enforces the standard as close to the foot of the - // letter as it can. - if (!( - // The spec has very restricted productions for algorithms. - // https://www.w3.org/TR/CSP2/#source-list-syntax - SPEC_ALGORITHMS.includes(this.algorithm) && - // Usually, if someone insists on using a "different" base64, we - // leave it as-is, since there's multiple standards, and the - // specified is not a URL-safe variant. - // https://www.w3.org/TR/CSP2/#base64_value - this.digest.match(BASE64_REGEX) && - // Option syntax is strictly visual chars. - // https://w3c.github.io/webappsec-subresource-integrity/#grammardef-option-expression - // https://tools.ietf.org/html/rfc5234#appendix-B.1 - this.options.every(opt => opt.match(VCHAR_REGEX)) - )) { - return '' - } - } - return `${this.algorithm}-${this.digest}${getOptString(this.options)}` - } -} - -function integrityHashToString (toString, sep, opts, hashes) { - const toStringIsNotEmpty = toString !== '' - - let shouldAddFirstSep = false - let complement = '' - - const lastIndex = hashes.length - 1 - - for (let i = 0; i < lastIndex; i++) { - const hashString = Hash.prototype.toString.call(hashes[i], opts) - - if (hashString) { - shouldAddFirstSep = true - - complement += hashString - complement += sep - } - } - - const finalHashString = Hash.prototype.toString.call(hashes[lastIndex], opts) - - if (finalHashString) { - shouldAddFirstSep = true - complement += finalHashString - } - - if (toStringIsNotEmpty && shouldAddFirstSep) { - return toString + sep + complement - } - - return toString + complement -} - -class Integrity { - get isIntegrity () { - return true - } - - toJSON () { - return this.toString() - } - - isEmpty () { - return Object.keys(this).length === 0 - } - - toString (opts) { - let sep = opts?.sep || ' ' - let toString = '' - - if (opts?.strict) { - // Entries must be separated by whitespace, according to spec. - sep = sep.replace(/\S+/g, ' ') - - for (const hash of SPEC_ALGORITHMS) { - if (this[hash]) { - toString = integrityHashToString(toString, sep, opts, this[hash]) - } - } - } else { - for (const hash of Object.keys(this)) { - toString = integrityHashToString(toString, sep, opts, this[hash]) - } - } - - return toString - } - - concat (integrity, opts) { - const other = typeof integrity === 'string' - ? integrity - : stringify(integrity, opts) - return parse(`${this.toString(opts)} ${other}`, opts) - } - - hexDigest () { - return parse(this, { single: true }).hexDigest() - } - - // add additional hashes to an integrity value, but prevent - // *changing* an existing integrity hash. 
- merge (integrity, opts) { - const other = parse(integrity, opts) - for (const algo in other) { - if (this[algo]) { - if (!this[algo].find(hash => - other[algo].find(otherhash => - hash.digest === otherhash.digest))) { - throw new Error('hashes do not match, cannot update integrity') - } - } else { - this[algo] = other[algo] - } - } - } - - match (integrity, opts) { - const other = parse(integrity, opts) - if (!other) { - return false - } - const algo = other.pickAlgorithm(opts, Object.keys(this)) - return ( - !!algo && - this[algo] && - other[algo] && - this[algo].find(hash => - other[algo].find(otherhash => - hash.digest === otherhash.digest - ) - ) - ) || false - } - - // Pick the highest priority algorithm present, optionally also limited to a - // set of hashes found in another integrity. When limiting it may return - // nothing. - pickAlgorithm (opts, hashes) { - const pickAlgorithm = opts?.pickAlgorithm || getPrioritizedHash - const keys = Object.keys(this).filter(k => { - if (hashes?.length) { - return hashes.includes(k) - } - return true - }) - if (keys.length) { - return keys.reduce((acc, algo) => pickAlgorithm(acc, algo) || acc) - } - // no intersection between this and hashes, - return null - } -} - -module.exports.parse = parse -function parse (sri, opts) { - if (!sri) { - return null - } - if (typeof sri === 'string') { - return _parse(sri, opts) - } else if (sri.algorithm && sri.digest) { - const fullSri = new Integrity() - fullSri[sri.algorithm] = [sri] - return _parse(stringify(fullSri, opts), opts) - } else { - return _parse(stringify(sri, opts), opts) - } -} - -function _parse (integrity, opts) { - // 3.4.3. Parse metadata - // https://w3c.github.io/webappsec-subresource-integrity/#parse-metadata - if (opts?.single) { - return new Hash(integrity, opts) - } - const hashes = integrity.trim().split(/\s+/).reduce((acc, string) => { - const hash = new Hash(string, opts) - if (hash.algorithm && hash.digest) { - const algo = hash.algorithm - if (!acc[algo]) { - acc[algo] = [] - } - acc[algo].push(hash) - } - return acc - }, new Integrity()) - return hashes.isEmpty() ? null : hashes -} - -module.exports.stringify = stringify -function stringify (obj, opts) { - if (obj.algorithm && obj.digest) { - return Hash.prototype.toString.call(obj, opts) - } else if (typeof obj === 'string') { - return stringify(parse(obj, opts), opts) - } else { - return Integrity.prototype.toString.call(obj, opts) - } -} - -module.exports.fromHex = fromHex -function fromHex (hexDigest, algorithm, opts) { - const optString = getOptString(opts?.options) - return parse( - `${algorithm}-${ - Buffer.from(hexDigest, 'hex').toString('base64') - }${optString}`, opts - ) -} - -module.exports.fromData = fromData -function fromData (data, opts) { - const algorithms = opts?.algorithms || [...DEFAULT_ALGORITHMS] - const optString = getOptString(opts?.options) - return algorithms.reduce((acc, algo) => { - const digest = crypto.createHash(algo).update(data).digest('base64') - const hash = new Hash( - `${algo}-${digest}${optString}`, - opts - ) - /* istanbul ignore else - it would be VERY strange if the string we - * just calculated with an algo did not have an algo or digest. 
- */ - if (hash.algorithm && hash.digest) { - const hashAlgo = hash.algorithm - if (!acc[hashAlgo]) { - acc[hashAlgo] = [] - } - acc[hashAlgo].push(hash) - } - return acc - }, new Integrity()) -} - -module.exports.fromStream = fromStream -function fromStream (stream, opts) { - const istream = integrityStream(opts) - return new Promise((resolve, reject) => { - stream.pipe(istream) - stream.on('error', reject) - istream.on('error', reject) - let sri - istream.on('integrity', s => { - sri = s - }) - istream.on('end', () => resolve(sri)) - istream.resume() - }) -} - -module.exports.checkData = checkData -function checkData (data, sri, opts) { - sri = parse(sri, opts) - if (!sri || !Object.keys(sri).length) { - if (opts?.error) { - throw Object.assign( - new Error('No valid integrity hashes to check against'), { - code: 'EINTEGRITY', - } - ) - } else { - return false - } - } - const algorithm = sri.pickAlgorithm(opts) - const digest = crypto.createHash(algorithm).update(data).digest('base64') - const newSri = parse({ algorithm, digest }) - const match = newSri.match(sri, opts) - opts = opts || {} - if (match || !(opts.error)) { - return match - } else if (typeof opts.size === 'number' && (data.length !== opts.size)) { - /* eslint-disable-next-line max-len */ - const err = new Error(`data size mismatch when checking ${sri}.\n Wanted: ${opts.size}\n Found: ${data.length}`) - err.code = 'EBADSIZE' - err.found = data.length - err.expected = opts.size - err.sri = sri - throw err - } else { - /* eslint-disable-next-line max-len */ - const err = new Error(`Integrity checksum failed when using ${algorithm}: Wanted ${sri}, but got ${newSri}. (${data.length} bytes)`) - err.code = 'EINTEGRITY' - err.found = newSri - err.expected = sri - err.algorithm = algorithm - err.sri = sri - throw err - } -} - -module.exports.checkStream = checkStream -function checkStream (stream, sri, opts) { - opts = opts || Object.create(null) - opts.integrity = sri - sri = parse(sri, opts) - if (!sri || !Object.keys(sri).length) { - return Promise.reject(Object.assign( - new Error('No valid integrity hashes to check against'), { - code: 'EINTEGRITY', - } - )) - } - const checker = integrityStream(opts) - return new Promise((resolve, reject) => { - stream.pipe(checker) - stream.on('error', reject) - checker.on('error', reject) - let verified - checker.on('verified', s => { - verified = s - }) - checker.on('end', () => resolve(verified)) - checker.resume() - }) -} - -module.exports.integrityStream = integrityStream -function integrityStream (opts = Object.create(null)) { - return new IntegrityStream(opts) -} - -module.exports.create = createIntegrity -function createIntegrity (opts) { - const algorithms = opts?.algorithms || [...DEFAULT_ALGORITHMS] - const optString = getOptString(opts?.options) - - const hashes = algorithms.map(crypto.createHash) - - return { - update: function (chunk, enc) { - hashes.forEach(h => h.update(chunk, enc)) - return this - }, - digest: function () { - const integrity = algorithms.reduce((acc, algo) => { - const digest = hashes.shift().digest('base64') - const hash = new Hash( - `${algo}-${digest}${optString}`, - opts - ) - /* istanbul ignore else - it would be VERY strange if the hash we - * just calculated with an algo did not have an algo or digest. 
- */ - if (hash.algorithm && hash.digest) { - const hashAlgo = hash.algorithm - if (!acc[hashAlgo]) { - acc[hashAlgo] = [] - } - acc[hashAlgo].push(hash) - } - return acc - }, new Integrity()) - - return integrity - }, - } -} - -const NODE_HASHES = crypto.getHashes() - -// This is a Best Effort™ at a reasonable priority for hash algos -const DEFAULT_PRIORITY = [ - 'md5', 'whirlpool', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512', - // TODO - it's unclear _which_ of these Node will actually use as its name - // for the algorithm, so we guesswork it based on the OpenSSL names. - 'sha3', - 'sha3-256', 'sha3-384', 'sha3-512', - 'sha3_256', 'sha3_384', 'sha3_512', -].filter(algo => NODE_HASHES.includes(algo)) - -function getPrioritizedHash (algo1, algo2) { - /* eslint-disable-next-line max-len */ - return DEFAULT_PRIORITY.indexOf(algo1.toLowerCase()) >= DEFAULT_PRIORITY.indexOf(algo2.toLowerCase()) - ? algo1 - : algo2 -} diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/ssri/package.json b/deps/npm/node_modules/@sigstore/sign/node_modules/ssri/package.json deleted file mode 100644 index 28395414e4643c..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/ssri/package.json +++ /dev/null @@ -1,65 +0,0 @@ -{ - "name": "ssri", - "version": "10.0.6", - "description": "Standard Subresource Integrity library -- parses, serializes, generates, and verifies integrity metadata according to the SRI spec.", - "main": "lib/index.js", - "files": [ - "bin/", - "lib/" - ], - "scripts": { - "prerelease": "npm t", - "postrelease": "npm publish", - "posttest": "npm run lint", - "test": "tap", - "coverage": "tap", - "lint": "eslint \"**/*.{js,cjs,ts,mjs,jsx,tsx}\"", - "postlint": "template-oss-check", - "template-oss-apply": "template-oss-apply --force", - "lintfix": "npm run lint -- --fix", - "snap": "tap" - }, - "tap": { - "check-coverage": true, - "nyc-arg": [ - "--exclude", - "tap-snapshots/**" - ] - }, - "repository": { - "type": "git", - "url": "git+https://github.com/npm/ssri.git" - }, - "keywords": [ - "w3c", - "web", - "security", - "integrity", - "checksum", - "hashing", - "subresource integrity", - "sri", - "sri hash", - "sri string", - "sri generator", - "html" - ], - "author": "GitHub Inc.", - "license": "ISC", - "dependencies": { - "minipass": "^7.0.3" - }, - "devDependencies": { - "@npmcli/eslint-config": "^4.0.0", - "@npmcli/template-oss": "4.22.0", - "tap": "^16.0.1" - }, - "engines": { - "node": "^14.17.0 || ^16.13.0 || >=18.0.0" - }, - "templateOSS": { - "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. Edits may be overwritten.", - "version": "4.22.0", - "publish": "true" - } -} diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/unique-filename/LICENSE b/deps/npm/node_modules/@sigstore/sign/node_modules/unique-filename/LICENSE deleted file mode 100644 index 69619c125ea7ef..00000000000000 --- a/deps/npm/node_modules/@sigstore/sign/node_modules/unique-filename/LICENSE +++ /dev/null @@ -1,5 +0,0 @@ -Copyright npm, Inc - -Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/unique-filename/lib/index.js b/deps/npm/node_modules/@sigstore/sign/node_modules/unique-filename/lib/index.js
deleted file mode 100644
index d067d2e709809a..00000000000000
--- a/deps/npm/node_modules/@sigstore/sign/node_modules/unique-filename/lib/index.js
+++ /dev/null
@@ -1,7 +0,0 @@
-var path = require('path')
-
-var uniqueSlug = require('unique-slug')
-
-module.exports = function (filepath, prefix, uniq) {
-  return path.join(filepath, (prefix ? prefix + '-' : '') + uniqueSlug(uniq))
-}
diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/unique-filename/package.json b/deps/npm/node_modules/@sigstore/sign/node_modules/unique-filename/package.json
deleted file mode 100644
index b2fbf0666489a6..00000000000000
--- a/deps/npm/node_modules/@sigstore/sign/node_modules/unique-filename/package.json
+++ /dev/null
@@ -1,51 +0,0 @@
-{
-  "name": "unique-filename",
-  "version": "3.0.0",
-  "description": "Generate a unique filename for use in temporary directories or caches.",
-  "main": "lib/index.js",
-  "scripts": {
-    "test": "tap",
-    "lint": "eslint \"**/*.js\"",
-    "postlint": "template-oss-check",
-    "template-oss-apply": "template-oss-apply --force",
-    "lintfix": "npm run lint -- --fix",
-    "snap": "tap",
-    "posttest": "npm run lint"
-  },
-  "repository": {
-    "type": "git",
-    "url": "https://github.com/npm/unique-filename.git"
-  },
-  "keywords": [],
-  "author": "GitHub Inc.",
-  "license": "ISC",
-  "bugs": {
-    "url": "https://github.com/iarna/unique-filename/issues"
-  },
-  "homepage": "https://github.com/iarna/unique-filename",
-  "devDependencies": {
-    "@npmcli/eslint-config": "^4.0.0",
-    "@npmcli/template-oss": "4.5.1",
-    "tap": "^16.3.0"
-  },
-  "dependencies": {
-    "unique-slug": "^4.0.0"
-  },
-  "files": [
-    "bin/",
-    "lib/"
-  ],
-  "engines": {
-    "node": "^14.17.0 || ^16.13.0 || >=18.0.0"
-  },
-  "templateOSS": {
-    "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. Edits may be overwritten.",
-    "version": "4.5.1"
-  },
-  "tap": {
-    "nyc-arg": [
-      "--exclude",
-      "tap-snapshots/**"
-    ]
-  }
-}
diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/unique-slug/LICENSE b/deps/npm/node_modules/@sigstore/sign/node_modules/unique-slug/LICENSE
deleted file mode 100644
index 7953647e7760b8..00000000000000
--- a/deps/npm/node_modules/@sigstore/sign/node_modules/unique-slug/LICENSE
+++ /dev/null
@@ -1,15 +0,0 @@
-The ISC License
-
-Copyright npm, Inc
-
-Permission to use, copy, modify, and/or distribute this software for any
-purpose with or without fee is hereby granted, provided that the above
-copyright notice and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
-IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
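The ssri copy removed above is the integrity workhorse of this dependency tree: it parses, generates, and verifies Subresource Integrity strings. A short sketch of its two most common entry points, fromData and checkData; per DEFAULT_ALGORITHMS in the deleted source, sha512 is the default algorithm:

const ssri = require('ssri')

const data = Buffer.from('hello world')

// Generate integrity metadata; defaults to a single sha512 hash.
const integrity = ssri.fromData(data)
console.log(integrity.toString()) // "sha512-MJ7MSJwS1utM..."

// Verify data against it: returns the matching Hash, or false
// (it only throws when opts.error is set, per checkData above).
console.log(Boolean(ssri.checkData(data, integrity)))           // true
console.log(ssri.checkData(Buffer.from('tampered'), integrity)) // false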
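unique-filename (removed above) and unique-slug (whose implementation follows below) round out these nested copies; together they produce collision-resistant temp-file paths. A sketch, with illustrative output values:

const os = require('os')
const uniqueFilename = require('unique-filename')
const uniqueSlug = require('unique-slug')

// Without a seed: a random 8-char hex slug. With one: a stable murmur hash.
console.log(uniqueSlug())            // e.g. "f2e58ab1"
console.log(uniqueSlug('/my/thing')) // same output every time for the same seed

// Joins directory, optional prefix, and slug into one path.
console.log(uniqueFilename(os.tmpdir(), 'npm-cache'))
// e.g. /tmp/npm-cache-f2e58ab1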
diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/unique-slug/lib/index.js b/deps/npm/node_modules/@sigstore/sign/node_modules/unique-slug/lib/index.js
deleted file mode 100644
index 1bac84d95d7307..00000000000000
--- a/deps/npm/node_modules/@sigstore/sign/node_modules/unique-slug/lib/index.js
+++ /dev/null
@@ -1,11 +0,0 @@
-'use strict'
-var MurmurHash3 = require('imurmurhash')
-
-module.exports = function (uniq) {
-  if (uniq) {
-    var hash = new MurmurHash3(uniq)
-    return ('00000000' + hash.result().toString(16)).slice(-8)
-  } else {
-    return (Math.random().toString(16) + '0000000').slice(2, 10)
-  }
-}
diff --git a/deps/npm/node_modules/@sigstore/sign/node_modules/unique-slug/package.json b/deps/npm/node_modules/@sigstore/sign/node_modules/unique-slug/package.json
deleted file mode 100644
index 33732cdbb42859..00000000000000
--- a/deps/npm/node_modules/@sigstore/sign/node_modules/unique-slug/package.json
+++ /dev/null
@@ -1,47 +0,0 @@
-{
-  "name": "unique-slug",
-  "version": "4.0.0",
-  "description": "Generate a unique character string suitible for use in files and URLs.",
-  "main": "lib/index.js",
-  "scripts": {
-    "test": "tap",
-    "lint": "eslint \"**/*.js\"",
-    "postlint": "template-oss-check",
-    "template-oss-apply": "template-oss-apply --force",
-    "lintfix": "npm run lint -- --fix",
-    "snap": "tap",
-    "posttest": "npm run lint"
-  },
-  "keywords": [],
-  "author": "GitHub Inc.",
-  "license": "ISC",
-  "devDependencies": {
-    "@npmcli/eslint-config": "^3.1.0",
-    "@npmcli/template-oss": "4.5.1",
-    "tap": "^16.3.0"
-  },
-  "repository": {
-    "type": "git",
-    "url": "https://github.com/npm/unique-slug.git"
-  },
-  "dependencies": {
-    "imurmurhash": "^0.1.4"
-  },
-  "files": [
-    "bin/",
-    "lib/"
-  ],
-  "engines": {
-    "node": "^14.17.0 || ^16.13.0 || >=18.0.0"
-  },
-  "templateOSS": {
-    "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. Edits may be overwritten.",
-    "version": "4.5.1"
-  },
-  "tap": {
-    "nyc-arg": [
-      "--exclude",
-      "tap-snapshots/**"
-    ]
-  }
-}
diff --git a/deps/npm/node_modules/@sigstore/tuf/dist/appdata.js b/deps/npm/node_modules/@sigstore/tuf/dist/appdata.js
index c9a8ee92b531eb..06a8143e70da2f 100644
--- a/deps/npm/node_modules/@sigstore/tuf/dist/appdata.js
+++ b/deps/npm/node_modules/@sigstore/tuf/dist/appdata.js
@@ -3,7 +3,7 @@ var __importDefault = (this && this.__importDefault) || function (mod) {
     return (mod && mod.__esModule) ? mod : { "default": mod };
 };
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.appDataPath = void 0;
+exports.appDataPath = appDataPath;
 /*
 Copyright 2023 The Sigstore Authors.
@@ -41,4 +41,3 @@ function appDataPath(name) {
         }
     }
 }
-exports.appDataPath = appDataPath;
diff --git a/deps/npm/node_modules/@sigstore/tuf/dist/client.js b/deps/npm/node_modules/@sigstore/tuf/dist/client.js
index 2019c1fd30f886..328f49e40dbbd7 100644
--- a/deps/npm/node_modules/@sigstore/tuf/dist/client.js
+++ b/deps/npm/node_modules/@sigstore/tuf/dist/client.js
@@ -79,7 +79,6 @@ function seedCache({ cachePath, mirrorURL, tufRootPath, forceInit, }) {
         fs_1.default.copyFileSync(tufRootPath, cachedRootPath);
     }
     else {
-        /* eslint-disable @typescript-eslint/no-var-requires */
         const seeds = require('../seeds.json');
         const repoSeed = seeds[mirrorURL];
         if (!repoSeed) {
diff --git a/deps/npm/node_modules/@sigstore/tuf/dist/index.js b/deps/npm/node_modules/@sigstore/tuf/dist/index.js
index 678c81d45d21ed..2af5de93ec5d2f 100644
--- a/deps/npm/node_modules/@sigstore/tuf/dist/index.js
+++ b/deps/npm/node_modules/@sigstore/tuf/dist/index.js
@@ -1,6 +1,8 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.TUFError = exports.initTUF = exports.getTrustedRoot = exports.DEFAULT_MIRROR_URL = void 0;
+exports.TUFError = exports.DEFAULT_MIRROR_URL = void 0;
+exports.getTrustedRoot = getTrustedRoot;
+exports.initTUF = initTUF;
 /*
 Copyright 2023 The Sigstore Authors.
@@ -31,14 +33,12 @@ options = {}) {
     const trustedRoot = await client.getTarget(TRUSTED_ROOT_TARGET);
     return protobuf_specs_1.TrustedRoot.fromJSON(JSON.parse(trustedRoot));
 }
-exports.getTrustedRoot = getTrustedRoot;
 async function initTUF(
 /* istanbul ignore next */
 options = {}) {
     const client = createClient(options);
     return client.refresh().then(() => client);
 }
-exports.initTUF = initTUF;
 // Create a TUF client with default options
 function createClient(options) {
     /* istanbul ignore next */
diff --git a/deps/npm/node_modules/@sigstore/tuf/dist/target.js b/deps/npm/node_modules/@sigstore/tuf/dist/target.js
index 29eaf99a7e721c..5c6675bdfbf5fe 100644
--- a/deps/npm/node_modules/@sigstore/tuf/dist/target.js
+++ b/deps/npm/node_modules/@sigstore/tuf/dist/target.js
@@ -3,7 +3,7 @@ var __importDefault = (this && this.__importDefault) || function (mod) {
     return (mod && mod.__esModule) ? mod : { "default": mod };
 };
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.readTarget = void 0;
+exports.readTarget = readTarget;
 /*
 Copyright 2023 The Sigstore Authors.
@@ -39,7 +39,6 @@ async function readTarget(tuf, targetPath) {
         });
     });
 }
-exports.readTarget = readTarget;
 // Returns the local path to the specified target. If the target is not yet
 // cached locally, the provided TUF Updater will be used to download and
 // cache the target.
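The @sigstore/tuf dist hunks above change how the compiled functions are exported (direct assignment up front instead of a trailing exports.X = X) and drop a stale eslint-disable; the public API is unchanged. A sketch of that API, assuming @sigstore/tuf is installed directly; the tlogs/baseUrl field access is an assumption based on the trusted-root shape visible in seeds.json below:

const { getTrustedRoot, DEFAULT_MIRROR_URL } = require('@sigstore/tuf')

async function main () {
  console.log(DEFAULT_MIRROR_URL) // the Sigstore TUF CDN, per seeds.json below

  // Downloads (or reads from the local cache) the trusted_root.json target
  // and parses it into a TrustedRoot message; options default to {}.
  const trustedRoot = await getTrustedRoot()
  console.log(trustedRoot.tlogs.map(t => t.baseUrl))
}

main().catch(console.error)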
diff --git a/deps/npm/node_modules/@sigstore/tuf/package.json b/deps/npm/node_modules/@sigstore/tuf/package.json
index b7fd34ac9674eb..808689dfddf92f 100644
--- a/deps/npm/node_modules/@sigstore/tuf/package.json
+++ b/deps/npm/node_modules/@sigstore/tuf/package.json
@@ -1,6 +1,6 @@
 {
   "name": "@sigstore/tuf",
-  "version": "2.3.4",
+  "version": "3.0.0",
   "description": "Client for the Sigstore TUF repository",
   "main": "dist/index.js",
   "types": "dist/index.d.ts",
@@ -28,14 +28,14 @@
   },
   "devDependencies": {
     "@sigstore/jest": "^0.0.0",
-    "@tufjs/repo-mock": "^2.0.1",
+    "@tufjs/repo-mock": "^3.0.1",
     "@types/make-fetch-happen": "^10.0.4"
   },
   "dependencies": {
     "@sigstore/protobuf-specs": "^0.3.2",
-    "tuf-js": "^2.2.1"
+    "tuf-js": "^3.0.1"
   },
   "engines": {
-    "node": "^16.14.0 || >=18.0.0"
+    "node": "^18.17.0 || >=20.5.0"
   }
 }
diff --git a/deps/npm/node_modules/@sigstore/tuf/seeds.json b/deps/npm/node_modules/@sigstore/tuf/seeds.json
index e8d97d5fa7a672..d1d3c6b5c46040 100644
--- a/deps/npm/node_modules/@sigstore/tuf/seeds.json
+++ b/deps/npm/node_modules/@sigstore/tuf/seeds.json
@@ -1 +1 @@
-{"https://tuf-repo-cdn.sigstore.dev":{"root.json":"{
	"signed": {
		"_type": "root",
		"spec_version": "1.0",
		"version": 9,
		"expires": "2024-09-12T06:53:10Z",
		"keys": {
			"1e1d65ce98b10addad4764febf7dda2d0436b3d3a3893579c0dddaea20e54849": {
				"keytype": "ecdsa",
				"scheme": "ecdsa-sha2-nistp256",
				"keyid_hash_algorithms": [
					"sha256",
					"sha512"
				],
				"keyval": {
					"public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEzBzVOmHCPojMVLSI364WiiV8NPrD\n6IgRxVliskz/v+y3JER5mcVGcONliDcWMC5J2lfHmjPNPhb4H7xm8LzfSA==\n-----END PUBLIC KEY-----\n"
				}
			},
			"230e212616274a4195cdc28e9fce782c20e6c720f1a811b40f98228376bdd3ac": {
				"keytype": "ecdsa",
				"scheme": "ecdsa-sha2-nistp256",
				"keyid_hash_algorithms": [
					"sha256",
					"sha512"
				],
				"keyval": {
					"public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAELrWvNt94v4R085ELeeCMxHp7PldF\n0/T1GxukUh2ODuggLGJE0pc1e8CSBf6CS91Fwo9FUOuRsjBUld+VqSyCdQ==\n-----END PUBLIC KEY-----\n"
				}
			},
			"3c344aa068fd4cc4e87dc50b612c02431fbc771e95003993683a2b0bf260cf0e": {
				"keytype": "ecdsa",
				"scheme": "ecdsa-sha2-nistp256",
				"keyid_hash_algorithms": [
					"sha256",
					"sha512"
				],
				"keyval": {
					"public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEy8XKsmhBYDI8Jc0GwzBxeKax0cm5\nSTKEU65HPFunUn41sT8pi0FjM4IkHz/YUmwmLUO0Wt7lxhj6BkLIK4qYAw==\n-----END PUBLIC KEY-----\n"
				}
			},
			"923bb39e60dd6fa2c31e6ea55473aa93b64dd4e53e16fbe42f6a207d3f97de2d": {
				"keytype": "ecdsa",
				"scheme": "ecdsa-sha2-nistp256",
				"keyid_hash_algorithms": [
					"sha256",
					"sha512"
				],
				"keyval": {
					"public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEWRiGr5+j+3J5SsH+Ztr5nE2H2wO7\nBV+nO3s93gLca18qTOzHY1oWyAGDykMSsGTUBSt9D+An0KfKsD2mfSM42Q==\n-----END PUBLIC KEY-----\n"
				}
			},
			"e2f59acb9488519407e18cbfc9329510be03c04aca9929d2f0301343fec85523": {
				"keytype": "ecdsa",
				"scheme": "ecdsa-sha2-nistp256",
				"keyid_hash_algorithms": [
					"sha256",
					"sha512"
				],
				"keyval": {
					"public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEinikSsAQmYkNeH5eYq/CnIzLaacO\nxlSaawQDOwqKy/tCqxq5xxPSJc21K4WIhs9GyOkKfzueY3GILzcMJZ4cWw==\n-----END PUBLIC KEY-----\n"
				}
			},
			"ec81669734e017996c5b85f3d02c3de1dd4637a152019fe1af125d2f9368b95e": {
				"keytype": "ecdsa",
				"scheme": "ecdsa-sha2-nistp256",
				"keyid_hash_algorithms": [
					"sha256",
					"sha512"
				],
				"keyval": {
					"public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEEXsz3SZXFb8jMV42j6pJlyjbjR8K\nN3Bwocexq6LMIb5qsWKOQvLN16NUefLc4HswOoumRsVVaajSpQS6fobkRw==\n-----END PUBLIC KEY-----\n"
				}
			},
			"fdfa83a07b5a83589b87ded41f77f39d232ad91f7cce52868dacd06ba089849f": {
				"keytype": "ecdsa",
				"scheme": "ecdsa-sha2-nistp256",
				"keyid_hash_algorithms": [
					"sha256",
					"sha512"
				],
				"keyval": {
					"public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE0ghrh92Lw1Yr3idGV5WqCtMDB8Cx\n+D8hdC4w2ZLNIplVRoVGLskYa3gheMyOjiJ8kPi15aQ2//7P+oj7UvJPGw==\n-----END PUBLIC KEY-----\n"
				}
			}
		},
		"roles": {
			"root": {
				"keyids": [
					"3c344aa068fd4cc4e87dc50b612c02431fbc771e95003993683a2b0bf260cf0e",
					"ec81669734e017996c5b85f3d02c3de1dd4637a152019fe1af125d2f9368b95e",
					"1e1d65ce98b10addad4764febf7dda2d0436b3d3a3893579c0dddaea20e54849",
					"e2f59acb9488519407e18cbfc9329510be03c04aca9929d2f0301343fec85523",
					"fdfa83a07b5a83589b87ded41f77f39d232ad91f7cce52868dacd06ba089849f"
				],
				"threshold": 3
			},
			"snapshot": {
				"keyids": [
					"230e212616274a4195cdc28e9fce782c20e6c720f1a811b40f98228376bdd3ac"
				],
				"threshold": 1
			},
			"targets": {
				"keyids": [
					"3c344aa068fd4cc4e87dc50b612c02431fbc771e95003993683a2b0bf260cf0e",
					"ec81669734e017996c5b85f3d02c3de1dd4637a152019fe1af125d2f9368b95e",
					"1e1d65ce98b10addad4764febf7dda2d0436b3d3a3893579c0dddaea20e54849",
					"e2f59acb9488519407e18cbfc9329510be03c04aca9929d2f0301343fec85523",
					"fdfa83a07b5a83589b87ded41f77f39d232ad91f7cce52868dacd06ba089849f"
				],
				"threshold": 3
			},
			"timestamp": {
				"keyids": [
					"923bb39e60dd6fa2c31e6ea55473aa93b64dd4e53e16fbe42f6a207d3f97de2d"
				],
				"threshold": 1
			}
		},
		"consistent_snapshot": true
	},
	"signatures": [
		{
			"keyid": "ff51e17fcf253119b7033f6f57512631da4a0969442afcf9fc8b141c7f2be99c",
			"sig": "30450221008b78f894c3cfed3bd486379c4e0e0dfb3e7dd8cbc4d5598d2818eea1ba3c7550022029d3d06e89d04d37849985dc46c0e10dc5b1fc68dc70af1ec9910303a1f3ee2f"
		},
		{
			"keyid": "25a0eb450fd3ee2bd79218c963dce3f1cc6118badf251bf149f0bd07d5cabe99",
			"sig": "30450221009e6b90b935e09b837a90d4402eaa27d5ea26eb7891948ba0ed7090841248f436022003dc2251c4d4a7999b91e9ad0868765ae09ac7269279f2a7899bafef7a2d9260"
		},
		{
			"keyid": "f5312f542c21273d9485a49394386c4575804770667f2ddb59b3bf0669fddd2f",
			"sig": "30440220099e907dcf90b7b6e109fd1d6e442006fccbb48894aaaff47ab824b03fb35d0d02202aa0a06c21a4233f37900a48bc8777d3b47f59e3a38616ce631a04df57f96736"
		},
		{
			"keyid": "3c344aa068fd4cc4e87dc50b612c02431fbc771e95003993683a2b0bf260cf0e",
			"sig": "30450221008b78f894c3cfed3bd486379c4e0e0dfb3e7dd8cbc4d5598d2818eea1ba3c7550022029d3d06e89d04d37849985dc46c0e10dc5b1fc68dc70af1ec9910303a1f3ee2f"
		},
		{
			"keyid": "ec81669734e017996c5b85f3d02c3de1dd4637a152019fe1af125d2f9368b95e",
			"sig": "30450221009e6b90b935e09b837a90d4402eaa27d5ea26eb7891948ba0ed7090841248f436022003dc2251c4d4a7999b91e9ad0868765ae09ac7269279f2a7899bafef7a2d9260"
		},
		{
			"keyid": "e2f59acb9488519407e18cbfc9329510be03c04aca9929d2f0301343fec85523",
			"sig": "304502200e5613b901e0f3e08eceabddc73f98b50ddf892e998d0b369c6e3d451ac48875022100940cf92d1f43ee2e5cdbb22572bb52925ed3863a688f7ffdd4bd2e2e56f028b3"
		},
		{
			"keyid": "2e61cd0cbf4a8f45809bda9f7f78c0d33ad11842ff94ae340873e2664dc843de",
			"sig": "304502202cff44f2215d7a47b28b8f5f580c2cfbbd1bfcfcbbe78de323045b2c0badc5e9022100c743949eb3f4ea5a4b9ae27ac6eddea1f0ff9bfd004f8a9a9d18c6e4142b6e75"
		},
		{
			"keyid": "1e1d65ce98b10addad4764febf7dda2d0436b3d3a3893579c0dddaea20e54849",
			"sig": "30440220099e907dcf90b7b6e109fd1d6e442006fccbb48894aaaff47ab824b03fb35d0d02202aa0a06c21a4233f37900a48bc8777d3b47f59e3a38616ce631a04df57f96736"
		},
		{
			"keyid": "fdfa83a07b5a83589b87ded41f77f39d232ad91f7cce52868dacd06ba089849f",
			"sig": "304502202cff44f2215d7a47b28b8f5f580c2cfbbd1bfcfcbbe78de323045b2c0badc5e9022100c743949eb3f4ea5a4b9ae27ac6eddea1f0ff9bfd004f8a9a9d18c6e4142b6e75"
		},
		{
			"keyid": "7f7513b25429a64473e10ce3ad2f3da372bbdd14b65d07bbaf547e7c8bbbe62b",
			"sig": "304502200e5613b901e0f3e08eceabddc73f98b50ddf892e998d0b369c6e3d451ac48875022100940cf92d1f43ee2e5cdbb22572bb52925ed3863a688f7ffdd4bd2e2e56f028b3"
		}
	]
}","targets":{"trusted_root.json":"{
  "mediaType": "application/vnd.dev.sigstore.trustedroot+json;version=0.1",
  "tlogs": [
    {
      "baseUrl": "https://rekor.sigstore.dev",
      "hashAlgorithm": "SHA2_256",
      "publicKey": {
        "rawBytes": "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE2G2Y+2tabdTV5BcGiBIx0a9fAFwrkBbmLSGtks4L3qX6yYY0zufBnhC8Ur/iy55GhWP/9A/bY2LhC30M9+RYtw==",
        "keyDetails": "PKIX_ECDSA_P256_SHA_256",
        "validFor": {
          "start": "2021-01-12T11:53:27.000Z"
        }
      },
      "logId": {
        "keyId": "wNI9atQGlz+VWfO6LRygH4QUfY/8W4RFwiT5i5WRgB0="
      }
    }
  ],
  "certificateAuthorities": [
    {
      "subject": {
        "organization": "sigstore.dev",
        "commonName": "sigstore"
      },
      "uri": "https://fulcio.sigstore.dev",
      "certChain": {
        "certificates": [
          {
            "rawBytes": "MIIB+DCCAX6gAwIBAgITNVkDZoCiofPDsy7dfm6geLbuhzAKBggqhkjOPQQDAzAqMRUwEwYDVQQKEwxzaWdzdG9yZS5kZXYxETAPBgNVBAMTCHNpZ3N0b3JlMB4XDTIxMDMwNzAzMjAyOVoXDTMxMDIyMzAzMjAyOVowKjEVMBMGA1UEChMMc2lnc3RvcmUuZGV2MREwDwYDVQQDEwhzaWdzdG9yZTB2MBAGByqGSM49AgEGBSuBBAAiA2IABLSyA7Ii5k+pNO8ZEWY0ylemWDowOkNa3kL+GZE5Z5GWehL9/A9bRNA3RbrsZ5i0JcastaRL7Sp5fp/jD5dxqc/UdTVnlvS16an+2Yfswe/QuLolRUCrcOE2+2iA5+tzd6NmMGQwDgYDVR0PAQH/BAQDAgEGMBIGA1UdEwEB/wQIMAYBAf8CAQEwHQYDVR0OBBYEFMjFHQBBmiQpMlEk6w2uSu1KBtPsMB8GA1UdIwQYMBaAFMjFHQBBmiQpMlEk6w2uSu1KBtPsMAoGCCqGSM49BAMDA2gAMGUCMH8liWJfMui6vXXBhjDgY4MwslmN/TJxVe/83WrFomwmNf056y1X48F9c4m3a3ozXAIxAKjRay5/aj/jsKKGIkmQatjI8uupHr/+CxFvaJWmpYqNkLDGRU+9orzh5hI2RrcuaQ=="
          }
        ]
      },
      "validFor": {
        "start": "2021-03-07T03:20:29.000Z",
        "end": "2022-12-31T23:59:59.999Z"
      }
    },
    {
      "subject": {
        "organization": "sigstore.dev",
        "commonName": "sigstore"
      },
      "uri": "https://fulcio.sigstore.dev",
      "certChain": {
        "certificates": [
          {
            "rawBytes": "MIICGjCCAaGgAwIBAgIUALnViVfnU0brJasmRkHrn/UnfaQwCgYIKoZIzj0EAwMwKjEVMBMGA1UEChMMc2lnc3RvcmUuZGV2MREwDwYDVQQDEwhzaWdzdG9yZTAeFw0yMjA0MTMyMDA2MTVaFw0zMTEwMDUxMzU2NThaMDcxFTATBgNVBAoTDHNpZ3N0b3JlLmRldjEeMBwGA1UEAxMVc2lnc3RvcmUtaW50ZXJtZWRpYXRlMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAE8RVS/ysH+NOvuDZyPIZtilgUF9NlarYpAd9HP1vBBH1U5CV77LSS7s0ZiH4nE7Hv7ptS6LvvR/STk798LVgMzLlJ4HeIfF3tHSaexLcYpSASr1kS0N/RgBJz/9jWCiXno3sweTAOBgNVHQ8BAf8EBAMCAQYwEwYDVR0lBAwwCgYIKwYBBQUHAwMwEgYDVR0TAQH/BAgwBgEB/wIBADAdBgNVHQ4EFgQU39Ppz1YkEZb5qNjpKFWixi4YZD8wHwYDVR0jBBgwFoAUWMAeX5FFpWapesyQoZMi0CrFxfowCgYIKoZIzj0EAwMDZwAwZAIwPCsQK4DYiZYDPIaDi5HFKnfxXx6ASSVmERfsynYBiX2X6SJRnZU84/9DZdnFvvxmAjBOt6QpBlc4J/0DxvkTCqpclvziL6BCCPnjdlIB3Pu3BxsPmygUY7Ii2zbdCdliiow="
          },
          {
            "rawBytes": "MIIB9zCCAXygAwIBAgIUALZNAPFdxHPwjeDloDwyYChAO/4wCgYIKoZIzj0EAwMwKjEVMBMGA1UEChMMc2lnc3RvcmUuZGV2MREwDwYDVQQDEwhzaWdzdG9yZTAeFw0yMTEwMDcxMzU2NTlaFw0zMTEwMDUxMzU2NThaMCoxFTATBgNVBAoTDHNpZ3N0b3JlLmRldjERMA8GA1UEAxMIc2lnc3RvcmUwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAT7XeFT4rb3PQGwS4IajtLk3/OlnpgangaBclYpsYBr5i+4ynB07ceb3LP0OIOZdxexX69c5iVuyJRQ+Hz05yi+UF3uBWAlHpiS5sh0+H2GHE7SXrk1EC5m1Tr19L9gg92jYzBhMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRYwB5fkUWlZql6zJChkyLQKsXF+jAfBgNVHSMEGDAWgBRYwB5fkUWlZql6zJChkyLQKsXF+jAKBggqhkjOPQQDAwNpADBmAjEAj1nHeXZp+13NWBNa+EDsDP8G1WWg1tCMWP/WHPqpaVo0jhsweNFZgSs0eE7wYI4qAjEA2WB9ot98sIkoF3vZYdd3/VtWB5b9TNMea7Ix/stJ5TfcLLeABLE4BNJOsQ4vnBHJ"
          }
        ]
      },
      "validFor": {
        "start": "2022-04-13T20:06:15.000Z"
      }
    }
  ],
  "ctlogs": [
    {
      "baseUrl": "https://ctfe.sigstore.dev/test",
      "hashAlgorithm": "SHA2_256",
      "publicKey": {
        "rawBytes": "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEbfwR+RJudXscgRBRpKX1XFDy3PyudDxz/SfnRi1fT8ekpfBd2O1uoz7jr3Z8nKzxA69EUQ+eFCFI3zeubPWU7w==",
        "keyDetails": "PKIX_ECDSA_P256_SHA_256",
        "validFor": {
          "start": "2021-03-14T00:00:00.000Z",
          "end": "2022-10-31T23:59:59.999Z"
        }
      },
      "logId": {
        "keyId": "CGCS8ChS/2hF0dFrJ4ScRWcYrBY9wzjSbea8IgY2b3I="
      }
    },
    {
      "baseUrl": "https://ctfe.sigstore.dev/2022",
      "hashAlgorithm": "SHA2_256",
      "publicKey": {
        "rawBytes": "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEiPSlFi0CmFTfEjCUqF9HuCEcYXNKAaYalIJmBZ8yyezPjTqhxrKBpMnaocVtLJBI1eM3uXnQzQGAJdJ4gs9Fyw==",
        "keyDetails": "PKIX_ECDSA_P256_SHA_256",
        "validFor": {
          "start": "2022-10-20T00:00:00.000Z"
        }
      },
      "logId": {
        "keyId": "3T0wasbHETJjGR4cmWc3AqJKXrjePK3/h4pygC8p7o4="
      }
    }
  ],
  "timestampAuthorities": [
    {
      "subject": {
        "organization": "GitHub, Inc.",
        "commonName": "Internal Services Root"
      },
      "certChain": {
        "certificates": [
          {
            "rawBytes": "MIIB3DCCAWKgAwIBAgIUchkNsH36Xa04b1LqIc+qr9DVecMwCgYIKoZIzj0EAwMwMjEVMBMGA1UEChMMR2l0SHViLCBJbmMuMRkwFwYDVQQDExBUU0EgaW50ZXJtZWRpYXRlMB4XDTIzMDQxNDAwMDAwMFoXDTI0MDQxMzAwMDAwMFowMjEVMBMGA1UEChMMR2l0SHViLCBJbmMuMRkwFwYDVQQDExBUU0EgVGltZXN0YW1waW5nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEUD5ZNbSqYMd6r8qpOOEX9ibGnZT9GsuXOhr/f8U9FJugBGExKYp40OULS0erjZW7xV9xV52NnJf5OeDq4e5ZKqNWMFQwDgYDVR0PAQH/BAQDAgeAMBMGA1UdJQQMMAoGCCsGAQUFBwMIMAwGA1UdEwEB/wQCMAAwHwYDVR0jBBgwFoAUaW1RudOgVt0leqY0WKYbuPr47wAwCgYIKoZIzj0EAwMDaAAwZQIwbUH9HvD4ejCZJOWQnqAlkqURllvu9M8+VqLbiRK+zSfZCZwsiljRn8MQQRSkXEE5AjEAg+VxqtojfVfu8DhzzhCx9GKETbJHb19iV72mMKUbDAFmzZ6bQ8b54Zb8tidy5aWe"
          },
          {
            "rawBytes": "MIICEDCCAZWgAwIBAgIUX8ZO5QXP7vN4dMQ5e9sU3nub8OgwCgYIKoZIzj0EAwMwODEVMBMGA1UEChMMR2l0SHViLCBJbmMuMR8wHQYDVQQDExZJbnRlcm5hbCBTZXJ2aWNlcyBSb290MB4XDTIzMDQxNDAwMDAwMFoXDTI4MDQxMjAwMDAwMFowMjEVMBMGA1UEChMMR2l0SHViLCBJbmMuMRkwFwYDVQQDExBUU0EgaW50ZXJtZWRpYXRlMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEvMLY/dTVbvIJYANAuszEwJnQE1llftynyMKIMhh48HmqbVr5ygybzsLRLVKbBWOdZ21aeJz+gZiytZetqcyF9WlER5NEMf6JV7ZNojQpxHq4RHGoGSceQv/qvTiZxEDKo2YwZDAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB/wIBADAdBgNVHQ4EFgQUaW1RudOgVt0leqY0WKYbuPr47wAwHwYDVR0jBBgwFoAU9NYYlobnAG4c0/qjxyH/lq/wz+QwCgYIKoZIzj0EAwMDaQAwZgIxAK1B185ygCrIYFlIs3GjswjnwSMG6LY8woLVdakKDZxVa8f8cqMs1DhcxJ0+09w95QIxAO+tBzZk7vjUJ9iJgD4R6ZWTxQWKqNm74jO99o+o9sv4FI/SZTZTFyMn0IJEHdNmyA=="
          },
          {
            "rawBytes": "MIIB9DCCAXqgAwIBAgIUa/JAkdUjK4JUwsqtaiRJGWhqLSowCgYIKoZIzj0EAwMwODEVMBMGA1UEChMMR2l0SHViLCBJbmMuMR8wHQYDVQQDExZJbnRlcm5hbCBTZXJ2aWNlcyBSb290MB4XDTIzMDQxNDAwMDAwMFoXDTMzMDQxMTAwMDAwMFowODEVMBMGA1UEChMMR2l0SHViLCBJbmMuMR8wHQYDVQQDExZJbnRlcm5hbCBTZXJ2aWNlcyBSb290MHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEf9jFAXxz4kx68AHRMOkFBhflDcMTvzaXz4x/FCcXjJ/1qEKon/qPIGnaURskDtyNbNDOpeJTDDFqt48iMPrnzpx6IZwqemfUJN4xBEZfza+pYt/iyod+9tZr20RRWSv/o0UwQzAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB/wIBAjAdBgNVHQ4EFgQU9NYYlobnAG4c0/qjxyH/lq/wz+QwCgYIKoZIzj0EAwMDaAAwZQIxALZLZ8BgRXzKxLMMN9VIlO+e4hrBnNBgF7tz7Hnrowv2NetZErIACKFymBlvWDvtMAIwZO+ki6ssQ1bsZo98O8mEAf2NZ7iiCgDDU0Vwjeco6zyeh0zBTs9/7gV6AHNQ53xD"
          }
        ]
      },
      "validFor": {
        "start": "2023-04-14T00:00:00.000Z"
      }
    }
  ]
}
","registry.npmjs.org%2Fkeys.json":"ewogICAgImtleXMiOiBbCiAgICAgICAgewogICAgICAgICAgICAia2V5SWQiOiAiU0hBMjU2OmpsM2J3c3d1ODBQampva0NnaDBvMnc1YzJVNExoUUFFNTdnajljejFrekEiLAogICAgICAgICAgICAia2V5VXNhZ2UiOiAibnBtOnNpZ25hdHVyZXMiLAogICAgICAgICAgICAicHVibGljS2V5IjogewogICAgICAgICAgICAgICAgInJhd0J5dGVzIjogIk1Ga3dFd1lIS29aSXpqMENBUVlJS29aSXpqMERBUWNEUWdBRTFPbGIzek1BRkZ4WEtIaUlrUU81Y0ozWWhsNWk2VVBwK0lodXRlQkpidUhjQTVVb2dLbzBFV3RsV3dXNktTYUtvVE5FWUw3SmxDUWlWbmtoQmt0VWdnPT0iLAogICAgICAgICAgICAgICAgImtleURldGFpbHMiOiAiUEtJWF9FQ0RTQV9QMjU2X1NIQV8yNTYiLAogICAgICAgICAgICAgICAgInZhbGlkRm9yIjogewogICAgICAgICAgICAgICAgICAgICJzdGFydCI6ICIxOTk5LTAxLTAxVDAwOjAwOjAwLjAwMFoiCiAgICAgICAgICAgICAgICB9CiAgICAgICAgICAgIH0KICAgICAgICB9LAogICAgICAgIHsKICAgICAgICAgICAgImtleUlkIjogIlNIQTI1NjpqbDNid3N3dTgwUGpqb2tDZ2gwbzJ3NWMyVTRMaFFBRTU3Z2o5Y3oxa3pBIiwKICAgICAgICAgICAgImtleVVzYWdlIjogIm5wbTphdHRlc3RhdGlvbnMiLAogICAgICAgICAgICAicHVibGljS2V5IjogewogICAgICAgICAgICAgICAgInJhd0J5dGVzIjogIk1Ga3dFd1lIS29aSXpqMENBUVlJS29aSXpqMERBUWNEUWdBRTFPbGIzek1BRkZ4WEtIaUlrUU81Y0ozWWhsNWk2VVBwK0lodXRlQkpidUhjQTVVb2dLbzBFV3RsV3dXNktTYUtvVE5FWUw3SmxDUWlWbmtoQmt0VWdnPT0iLAogICAgICAgICAgICAgICAgImtleURldGFpbHMiOiAiUEtJWF9FQ0RTQV9QMjU2X1NIQV8yNTYiLAogICAgICAgICAgICAgICAgInZhbGlkRm9yIjogewogICAgICAgICAgICAgICAgICAgICJzdGFydCI6ICIyMDIyLTEyLTAxVDAwOjAwOjAwLjAwMFoiCiAgICAgICAgICAgICAgICB9CiAgICAgICAgICAgIH0KICAgICAgICB9CiAgICBdCn0K"}}} +{"https://tuf-repo-cdn.sigstore.dev":{"root.json":"{
 "signatures": [
  {
   "keyid": "6f260089d5923daf20166ca657c543af618346ab971884a99962b01988bbe0c3",
   "sig": "30460221008ab1f6f17d4f9e6d7dcf1c88912b6b53cc10388644ae1f09bc37a082cd06003e022100e145ef4c7b782d4e8107b53437e669d0476892ce999903ae33d14448366996e7"
  },
  {
   "keyid": "e71a54d543835ba86adad9460379c7641fb8726d164ea766801a1c522aba7ea2",
   "sig": "3045022100c768b2f86da99569019c160a081da54ae36c34c0a3120d3cb69b53b7d113758e02204f671518f617b20d46537fae6c3b63bae8913f4f1962156105cc4f019ac35c6a"
  },
  {
   "keyid": "22f4caec6d8e6f9555af66b3d4c3cb06a3bb23fdc7e39c916c61f462e6f52b06",
   "sig": "3045022100b4434e6995d368d23e74759acd0cb9013c83a5d3511f0f997ec54c456ae4350a022015b0e265d182d2b61dc74e155d98b3c3fbe564ba05286aa14c8df02c9b756516"
  },
  {
   "keyid": "61643838125b440b40db6942f5cb5a31c0dc04368316eb2aaa58b95904a58222",
   "sig": "304502210082c58411d989eb9f861410857d42381590ec9424dbdaa51e78ed13515431904e0220118185da6a6c2947131c17797e2bb7620ce26e5f301d1ceac5f2a7e58f9dcf2e"
  },
  {
   "keyid": "a687e5bf4fab82b0ee58d46e05c9535145a2c9afb458f43d42b45ca0fdce2a70",
   "sig": "3046022100c78513854cae9c32eaa6b88e18912f48006c2757a258f917312caba75948eb9e022100d9e1b4ce0adfe9fd2e2148d7fa27a2f40ba1122bd69da7612d8d1776b013c91d"
  },
  {
   "keyid": "fdfa83a07b5a83589b87ded41f77f39d232ad91f7cce52868dacd06ba089849f",
   "sig": "3045022056483a2d5d9ea9cec6e11eadfb33c484b614298faca15acf1c431b11ed7f734c022100d0c1d726af92a87e4e66459ca5adf38a05b44e1f94318423f954bae8bca5bb2e"
  },
  {
   "keyid": "e2f59acb9488519407e18cbfc9329510be03c04aca9929d2f0301343fec85523",
   "sig": "3046022100d004de88024c32dc5653a9f4843cfc5215427048ad9600d2cf9c969e6edff3d2022100d9ebb798f5fc66af10899dece014a8628ccf3c5402cd4a4270207472f8f6e712"
  },
  {
   "keyid": "3c344aa068fd4cc4e87dc50b612c02431fbc771e95003993683a2b0bf260cf0e",
   "sig": "3046022100b7b09996c45ca2d4b05603e56baefa29718a0b71147cf8c6e66349baa61477df022100c4da80c717b4fa7bba0fd5c72da8a0499358b01358b2309f41d1456ea1e7e1d9"
  },
  {
   "keyid": "ec81669734e017996c5b85f3d02c3de1dd4637a152019fe1af125d2f9368b95e",
   "sig": "3046022100be9782c30744e411a82fa85b5138d601ce148bc19258aec64e7ec24478f38812022100caef63dcaf1a4b9a500d3bd0e3f164ec18f1b63d7a9460d9acab1066db0f016d"
  },
  {
   "keyid": "1e1d65ce98b10addad4764febf7dda2d0436b3d3a3893579c0dddaea20e54849",
   "sig": "30450220746ec3f8534ce55531d0d01ff64964ef440d1e7d2c4c142409b8e9769f1ada6f022100e3b929fcd93ea18feaa0825887a7210489879a66780c07a83f4bd46e2f09ab3b"
  }
 ],
 "signed": {
  "_type": "root",
  "consistent_snapshot": true,
  "expires": "2025-02-19T08:04:32Z",
  "keys": {
   "22f4caec6d8e6f9555af66b3d4c3cb06a3bb23fdc7e39c916c61f462e6f52b06": {
    "keyid_hash_algorithms": [
     "sha256",
     "sha512"
    ],
    "keytype": "ecdsa",
    "keyval": {
     "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEzBzVOmHCPojMVLSI364WiiV8NPrD\n6IgRxVliskz/v+y3JER5mcVGcONliDcWMC5J2lfHmjPNPhb4H7xm8LzfSA==\n-----END PUBLIC KEY-----\n"
    },
    "scheme": "ecdsa-sha2-nistp256",
    "x-tuf-on-ci-keyowner": "@santiagotorres"
   },
   "61643838125b440b40db6942f5cb5a31c0dc04368316eb2aaa58b95904a58222": {
    "keyid_hash_algorithms": [
     "sha256",
     "sha512"
    ],
    "keytype": "ecdsa",
    "keyval": {
     "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEinikSsAQmYkNeH5eYq/CnIzLaacO\nxlSaawQDOwqKy/tCqxq5xxPSJc21K4WIhs9GyOkKfzueY3GILzcMJZ4cWw==\n-----END PUBLIC KEY-----\n"
    },
    "scheme": "ecdsa-sha2-nistp256",
    "x-tuf-on-ci-keyowner": "@bobcallaway"
   },
   "6f260089d5923daf20166ca657c543af618346ab971884a99962b01988bbe0c3": {
    "keyid_hash_algorithms": [
     "sha256",
     "sha512"
    ],
    "keytype": "ecdsa",
    "keyval": {
     "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEy8XKsmhBYDI8Jc0GwzBxeKax0cm5\nSTKEU65HPFunUn41sT8pi0FjM4IkHz/YUmwmLUO0Wt7lxhj6BkLIK4qYAw==\n-----END PUBLIC KEY-----\n"
    },
    "scheme": "ecdsa-sha2-nistp256",
    "x-tuf-on-ci-keyowner": "@dlorenc"
   },
   "7247f0dbad85b147e1863bade761243cc785dcb7aa410e7105dd3d2b61a36d2c": {
    "keyid_hash_algorithms": [
     "sha256",
     "sha512"
    ],
    "keytype": "ecdsa",
    "keyval": {
     "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEWRiGr5+j+3J5SsH+Ztr5nE2H2wO7\nBV+nO3s93gLca18qTOzHY1oWyAGDykMSsGTUBSt9D+An0KfKsD2mfSM42Q==\n-----END PUBLIC KEY-----\n"
    },
    "scheme": "ecdsa-sha2-nistp256",
    "x-tuf-on-ci-online-uri": "gcpkms://projects/sigstore-root-signing/locations/global/keyRings/root/cryptoKeys/timestamp"
   },
   "a687e5bf4fab82b0ee58d46e05c9535145a2c9afb458f43d42b45ca0fdce2a70": {
    "keyid_hash_algorithms": [
     "sha256",
     "sha512"
    ],
    "keytype": "ecdsa",
    "keyval": {
     "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE0ghrh92Lw1Yr3idGV5WqCtMDB8Cx\n+D8hdC4w2ZLNIplVRoVGLskYa3gheMyOjiJ8kPi15aQ2//7P+oj7UvJPGw==\n-----END PUBLIC KEY-----\n"
    },
    "scheme": "ecdsa-sha2-nistp256",
    "x-tuf-on-ci-keyowner": "@joshuagl"
   },
   "e71a54d543835ba86adad9460379c7641fb8726d164ea766801a1c522aba7ea2": {
    "keyid_hash_algorithms": [
     "sha256",
     "sha512"
    ],
    "keytype": "ecdsa",
    "keyval": {
     "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEEXsz3SZXFb8jMV42j6pJlyjbjR8K\nN3Bwocexq6LMIb5qsWKOQvLN16NUefLc4HswOoumRsVVaajSpQS6fobkRw==\n-----END PUBLIC KEY-----\n"
    },
    "scheme": "ecdsa-sha2-nistp256",
    "x-tuf-on-ci-keyowner": "@mnm678"
   }
  },
  "roles": {
   "root": {
    "keyids": [
     "6f260089d5923daf20166ca657c543af618346ab971884a99962b01988bbe0c3",
     "e71a54d543835ba86adad9460379c7641fb8726d164ea766801a1c522aba7ea2",
     "22f4caec6d8e6f9555af66b3d4c3cb06a3bb23fdc7e39c916c61f462e6f52b06",
     "61643838125b440b40db6942f5cb5a31c0dc04368316eb2aaa58b95904a58222",
     "a687e5bf4fab82b0ee58d46e05c9535145a2c9afb458f43d42b45ca0fdce2a70"
    ],
    "threshold": 3
   },
   "snapshot": {
    "keyids": [
     "7247f0dbad85b147e1863bade761243cc785dcb7aa410e7105dd3d2b61a36d2c"
    ],
    "threshold": 1,
    "x-tuf-on-ci-expiry-period": 3650,
    "x-tuf-on-ci-signing-period": 365
   },
   "targets": {
    "keyids": [
     "6f260089d5923daf20166ca657c543af618346ab971884a99962b01988bbe0c3",
     "e71a54d543835ba86adad9460379c7641fb8726d164ea766801a1c522aba7ea2",
     "22f4caec6d8e6f9555af66b3d4c3cb06a3bb23fdc7e39c916c61f462e6f52b06",
     "61643838125b440b40db6942f5cb5a31c0dc04368316eb2aaa58b95904a58222",
     "a687e5bf4fab82b0ee58d46e05c9535145a2c9afb458f43d42b45ca0fdce2a70"
    ],
    "threshold": 3
   },
   "timestamp": {
    "keyids": [
     "7247f0dbad85b147e1863bade761243cc785dcb7aa410e7105dd3d2b61a36d2c"
    ],
    "threshold": 1,
    "x-tuf-on-ci-expiry-period": 7,
    "x-tuf-on-ci-signing-period": 4
   }
  },
  "spec_version": "1.0",
  "version": 10,
  "x-tuf-on-ci-expiry-period": 182,
  "x-tuf-on-ci-signing-period": 31
 }
}","targets":{"trusted_root.json":"{
  "mediaType": "application/vnd.dev.sigstore.trustedroot+json;version=0.1",
  "tlogs": [
    {
      "baseUrl": "https://rekor.sigstore.dev",
      "hashAlgorithm": "SHA2_256",
      "publicKey": {
        "rawBytes": "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE2G2Y+2tabdTV5BcGiBIx0a9fAFwrkBbmLSGtks4L3qX6yYY0zufBnhC8Ur/iy55GhWP/9A/bY2LhC30M9+RYtw==",
        "keyDetails": "PKIX_ECDSA_P256_SHA_256",
        "validFor": {
          "start": "2021-01-12T11:53:27.000Z"
        }
      },
      "logId": {
        "keyId": "wNI9atQGlz+VWfO6LRygH4QUfY/8W4RFwiT5i5WRgB0="
      }
    }
  ],
  "certificateAuthorities": [
    {
      "subject": {
        "organization": "sigstore.dev",
        "commonName": "sigstore"
      },
      "uri": "https://fulcio.sigstore.dev",
      "certChain": {
        "certificates": [
          {
            "rawBytes": "MIIB+DCCAX6gAwIBAgITNVkDZoCiofPDsy7dfm6geLbuhzAKBggqhkjOPQQDAzAqMRUwEwYDVQQKEwxzaWdzdG9yZS5kZXYxETAPBgNVBAMTCHNpZ3N0b3JlMB4XDTIxMDMwNzAzMjAyOVoXDTMxMDIyMzAzMjAyOVowKjEVMBMGA1UEChMMc2lnc3RvcmUuZGV2MREwDwYDVQQDEwhzaWdzdG9yZTB2MBAGByqGSM49AgEGBSuBBAAiA2IABLSyA7Ii5k+pNO8ZEWY0ylemWDowOkNa3kL+GZE5Z5GWehL9/A9bRNA3RbrsZ5i0JcastaRL7Sp5fp/jD5dxqc/UdTVnlvS16an+2Yfswe/QuLolRUCrcOE2+2iA5+tzd6NmMGQwDgYDVR0PAQH/BAQDAgEGMBIGA1UdEwEB/wQIMAYBAf8CAQEwHQYDVR0OBBYEFMjFHQBBmiQpMlEk6w2uSu1KBtPsMB8GA1UdIwQYMBaAFMjFHQBBmiQpMlEk6w2uSu1KBtPsMAoGCCqGSM49BAMDA2gAMGUCMH8liWJfMui6vXXBhjDgY4MwslmN/TJxVe/83WrFomwmNf056y1X48F9c4m3a3ozXAIxAKjRay5/aj/jsKKGIkmQatjI8uupHr/+CxFvaJWmpYqNkLDGRU+9orzh5hI2RrcuaQ=="
          }
        ]
      },
      "validFor": {
        "start": "2021-03-07T03:20:29.000Z",
        "end": "2022-12-31T23:59:59.999Z"
      }
    },
    {
      "subject": {
        "organization": "sigstore.dev",
        "commonName": "sigstore"
      },
      "uri": "https://fulcio.sigstore.dev",
      "certChain": {
        "certificates": [
          {
            "rawBytes": "MIICGjCCAaGgAwIBAgIUALnViVfnU0brJasmRkHrn/UnfaQwCgYIKoZIzj0EAwMwKjEVMBMGA1UEChMMc2lnc3RvcmUuZGV2MREwDwYDVQQDEwhzaWdzdG9yZTAeFw0yMjA0MTMyMDA2MTVaFw0zMTEwMDUxMzU2NThaMDcxFTATBgNVBAoTDHNpZ3N0b3JlLmRldjEeMBwGA1UEAxMVc2lnc3RvcmUtaW50ZXJtZWRpYXRlMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAE8RVS/ysH+NOvuDZyPIZtilgUF9NlarYpAd9HP1vBBH1U5CV77LSS7s0ZiH4nE7Hv7ptS6LvvR/STk798LVgMzLlJ4HeIfF3tHSaexLcYpSASr1kS0N/RgBJz/9jWCiXno3sweTAOBgNVHQ8BAf8EBAMCAQYwEwYDVR0lBAwwCgYIKwYBBQUHAwMwEgYDVR0TAQH/BAgwBgEB/wIBADAdBgNVHQ4EFgQU39Ppz1YkEZb5qNjpKFWixi4YZD8wHwYDVR0jBBgwFoAUWMAeX5FFpWapesyQoZMi0CrFxfowCgYIKoZIzj0EAwMDZwAwZAIwPCsQK4DYiZYDPIaDi5HFKnfxXx6ASSVmERfsynYBiX2X6SJRnZU84/9DZdnFvvxmAjBOt6QpBlc4J/0DxvkTCqpclvziL6BCCPnjdlIB3Pu3BxsPmygUY7Ii2zbdCdliiow="
          },
          {
            "rawBytes": "MIIB9zCCAXygAwIBAgIUALZNAPFdxHPwjeDloDwyYChAO/4wCgYIKoZIzj0EAwMwKjEVMBMGA1UEChMMc2lnc3RvcmUuZGV2MREwDwYDVQQDEwhzaWdzdG9yZTAeFw0yMTEwMDcxMzU2NTlaFw0zMTEwMDUxMzU2NThaMCoxFTATBgNVBAoTDHNpZ3N0b3JlLmRldjERMA8GA1UEAxMIc2lnc3RvcmUwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAT7XeFT4rb3PQGwS4IajtLk3/OlnpgangaBclYpsYBr5i+4ynB07ceb3LP0OIOZdxexX69c5iVuyJRQ+Hz05yi+UF3uBWAlHpiS5sh0+H2GHE7SXrk1EC5m1Tr19L9gg92jYzBhMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRYwB5fkUWlZql6zJChkyLQKsXF+jAfBgNVHSMEGDAWgBRYwB5fkUWlZql6zJChkyLQKsXF+jAKBggqhkjOPQQDAwNpADBmAjEAj1nHeXZp+13NWBNa+EDsDP8G1WWg1tCMWP/WHPqpaVo0jhsweNFZgSs0eE7wYI4qAjEA2WB9ot98sIkoF3vZYdd3/VtWB5b9TNMea7Ix/stJ5TfcLLeABLE4BNJOsQ4vnBHJ"
          }
        ]
      },
      "validFor": {
        "start": "2022-04-13T20:06:15.000Z"
      }
    }
  ],
  "ctlogs": [
    {
      "baseUrl": "https://ctfe.sigstore.dev/test",
      "hashAlgorithm": "SHA2_256",
      "publicKey": {
        "rawBytes": "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEbfwR+RJudXscgRBRpKX1XFDy3PyudDxz/SfnRi1fT8ekpfBd2O1uoz7jr3Z8nKzxA69EUQ+eFCFI3zeubPWU7w==",
        "keyDetails": "PKIX_ECDSA_P256_SHA_256",
        "validFor": {
          "start": "2021-03-14T00:00:00.000Z",
          "end": "2022-10-31T23:59:59.999Z"
        }
      },
      "logId": {
        "keyId": "CGCS8ChS/2hF0dFrJ4ScRWcYrBY9wzjSbea8IgY2b3I="
      }
    },
    {
      "baseUrl": "https://ctfe.sigstore.dev/2022",
      "hashAlgorithm": "SHA2_256",
      "publicKey": {
        "rawBytes": "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEiPSlFi0CmFTfEjCUqF9HuCEcYXNKAaYalIJmBZ8yyezPjTqhxrKBpMnaocVtLJBI1eM3uXnQzQGAJdJ4gs9Fyw==",
        "keyDetails": "PKIX_ECDSA_P256_SHA_256",
        "validFor": {
          "start": "2022-10-20T00:00:00.000Z"
        }
      },
      "logId": {
        "keyId": "3T0wasbHETJjGR4cmWc3AqJKXrjePK3/h4pygC8p7o4="
      }
    }
  ],
  "timestampAuthorities": [
    {
      "subject": {
        "organization": "GitHub, Inc.",
        "commonName": "Internal Services Root"
      },
      "certChain": {
        "certificates": [
          {
            "rawBytes": "MIIB3DCCAWKgAwIBAgIUchkNsH36Xa04b1LqIc+qr9DVecMwCgYIKoZIzj0EAwMwMjEVMBMGA1UEChMMR2l0SHViLCBJbmMuMRkwFwYDVQQDExBUU0EgaW50ZXJtZWRpYXRlMB4XDTIzMDQxNDAwMDAwMFoXDTI0MDQxMzAwMDAwMFowMjEVMBMGA1UEChMMR2l0SHViLCBJbmMuMRkwFwYDVQQDExBUU0EgVGltZXN0YW1waW5nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEUD5ZNbSqYMd6r8qpOOEX9ibGnZT9GsuXOhr/f8U9FJugBGExKYp40OULS0erjZW7xV9xV52NnJf5OeDq4e5ZKqNWMFQwDgYDVR0PAQH/BAQDAgeAMBMGA1UdJQQMMAoGCCsGAQUFBwMIMAwGA1UdEwEB/wQCMAAwHwYDVR0jBBgwFoAUaW1RudOgVt0leqY0WKYbuPr47wAwCgYIKoZIzj0EAwMDaAAwZQIwbUH9HvD4ejCZJOWQnqAlkqURllvu9M8+VqLbiRK+zSfZCZwsiljRn8MQQRSkXEE5AjEAg+VxqtojfVfu8DhzzhCx9GKETbJHb19iV72mMKUbDAFmzZ6bQ8b54Zb8tidy5aWe"
          },
          {
            "rawBytes": "MIICEDCCAZWgAwIBAgIUX8ZO5QXP7vN4dMQ5e9sU3nub8OgwCgYIKoZIzj0EAwMwODEVMBMGA1UEChMMR2l0SHViLCBJbmMuMR8wHQYDVQQDExZJbnRlcm5hbCBTZXJ2aWNlcyBSb290MB4XDTIzMDQxNDAwMDAwMFoXDTI4MDQxMjAwMDAwMFowMjEVMBMGA1UEChMMR2l0SHViLCBJbmMuMRkwFwYDVQQDExBUU0EgaW50ZXJtZWRpYXRlMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEvMLY/dTVbvIJYANAuszEwJnQE1llftynyMKIMhh48HmqbVr5ygybzsLRLVKbBWOdZ21aeJz+gZiytZetqcyF9WlER5NEMf6JV7ZNojQpxHq4RHGoGSceQv/qvTiZxEDKo2YwZDAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB/wIBADAdBgNVHQ4EFgQUaW1RudOgVt0leqY0WKYbuPr47wAwHwYDVR0jBBgwFoAU9NYYlobnAG4c0/qjxyH/lq/wz+QwCgYIKoZIzj0EAwMDaQAwZgIxAK1B185ygCrIYFlIs3GjswjnwSMG6LY8woLVdakKDZxVa8f8cqMs1DhcxJ0+09w95QIxAO+tBzZk7vjUJ9iJgD4R6ZWTxQWKqNm74jO99o+o9sv4FI/SZTZTFyMn0IJEHdNmyA=="
          },
          {
            "rawBytes": "MIIB9DCCAXqgAwIBAgIUa/JAkdUjK4JUwsqtaiRJGWhqLSowCgYIKoZIzj0EAwMwODEVMBMGA1UEChMMR2l0SHViLCBJbmMuMR8wHQYDVQQDExZJbnRlcm5hbCBTZXJ2aWNlcyBSb290MB4XDTIzMDQxNDAwMDAwMFoXDTMzMDQxMTAwMDAwMFowODEVMBMGA1UEChMMR2l0SHViLCBJbmMuMR8wHQYDVQQDExZJbnRlcm5hbCBTZXJ2aWNlcyBSb290MHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEf9jFAXxz4kx68AHRMOkFBhflDcMTvzaXz4x/FCcXjJ/1qEKon/qPIGnaURskDtyNbNDOpeJTDDFqt48iMPrnzpx6IZwqemfUJN4xBEZfza+pYt/iyod+9tZr20RRWSv/o0UwQzAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB/wIBAjAdBgNVHQ4EFgQU9NYYlobnAG4c0/qjxyH/lq/wz+QwCgYIKoZIzj0EAwMDaAAwZQIxALZLZ8BgRXzKxLMMN9VIlO+e4hrBnNBgF7tz7Hnrowv2NetZErIACKFymBlvWDvtMAIwZO+ki6ssQ1bsZo98O8mEAf2NZ7iiCgDDU0Vwjeco6zyeh0zBTs9/7gV6AHNQ53xD"
          }
        ]
      },
      "validFor": {
        "start": "2023-04-14T00:00:00.000Z"
      }
    }
  ]
}
","registry.npmjs.org%2Fkeys.json":"ewogICAgImtleXMiOiBbCiAgICAgICAgewogICAgICAgICAgICAia2V5SWQiOiAiU0hBMjU2OmpsM2J3c3d1ODBQampva0NnaDBvMnc1YzJVNExoUUFFNTdnajljejFrekEiLAogICAgICAgICAgICAia2V5VXNhZ2UiOiAibnBtOnNpZ25hdHVyZXMiLAogICAgICAgICAgICAicHVibGljS2V5IjogewogICAgICAgICAgICAgICAgInJhd0J5dGVzIjogIk1Ga3dFd1lIS29aSXpqMENBUVlJS29aSXpqMERBUWNEUWdBRTFPbGIzek1BRkZ4WEtIaUlrUU81Y0ozWWhsNWk2VVBwK0lodXRlQkpidUhjQTVVb2dLbzBFV3RsV3dXNktTYUtvVE5FWUw3SmxDUWlWbmtoQmt0VWdnPT0iLAogICAgICAgICAgICAgICAgImtleURldGFpbHMiOiAiUEtJWF9FQ0RTQV9QMjU2X1NIQV8yNTYiLAogICAgICAgICAgICAgICAgInZhbGlkRm9yIjogewogICAgICAgICAgICAgICAgICAgICJzdGFydCI6ICIxOTk5LTAxLTAxVDAwOjAwOjAwLjAwMFoiCiAgICAgICAgICAgICAgICB9CiAgICAgICAgICAgIH0KICAgICAgICB9LAogICAgICAgIHsKICAgICAgICAgICAgImtleUlkIjogIlNIQTI1NjpqbDNid3N3dTgwUGpqb2tDZ2gwbzJ3NWMyVTRMaFFBRTU3Z2o5Y3oxa3pBIiwKICAgICAgICAgICAgImtleVVzYWdlIjogIm5wbTphdHRlc3RhdGlvbnMiLAogICAgICAgICAgICAicHVibGljS2V5IjogewogICAgICAgICAgICAgICAgInJhd0J5dGVzIjogIk1Ga3dFd1lIS29aSXpqMENBUVlJS29aSXpqMERBUWNEUWdBRTFPbGIzek1BRkZ4WEtIaUlrUU81Y0ozWWhsNWk2VVBwK0lodXRlQkpidUhjQTVVb2dLbzBFV3RsV3dXNktTYUtvVE5FWUw3SmxDUWlWbmtoQmt0VWdnPT0iLAogICAgICAgICAgICAgICAgImtleURldGFpbHMiOiAiUEtJWF9FQ0RTQV9QMjU2X1NIQV8yNTYiLAogICAgICAgICAgICAgICAgInZhbGlkRm9yIjogewogICAgICAgICAgICAgICAgICAgICJzdGFydCI6ICIyMDIyLTEyLTAxVDAwOjAwOjAwLjAwMFoiCiAgICAgICAgICAgICAgICB9CiAgICAgICAgICAgIH0KICAgICAgICB9CiAgICBdCn0K"}}}
diff --git a/deps/npm/node_modules/ci-info/index.js b/deps/npm/node_modules/ci-info/index.js
index 47907264581eb1..9eba6940c4147e 100644
--- a/deps/npm/node_modules/ci-info/index.js
+++ b/deps/npm/node_modules/ci-info/index.js
@@ -13,6 +13,7 @@ Object.defineProperty(exports, '_vendors', {
 
 exports.name = null
 exports.isPR = null
+exports.id = null
 
 vendors.forEach(function (vendor) {
   const envs = Array.isArray(vendor.env) ? vendor.env : [vendor.env]
@@ -27,45 +28,23 @@ vendors.forEach(function (vendor) {
   }
 
   exports.name = vendor.name
-
-  switch (typeof vendor.pr) {
-    case 'string':
-      // "pr": "CIRRUS_PR"
-      exports.isPR = !!env[vendor.pr]
-      break
-    case 'object':
-      if ('env' in vendor.pr) {
-        // "pr": { "env": "BUILDKITE_PULL_REQUEST", "ne": "false" }
-        exports.isPR = vendor.pr.env in env && env[vendor.pr.env] !== vendor.pr.ne
-      } else if ('any' in vendor.pr) {
-        // "pr": { "any": ["ghprbPullId", "CHANGE_ID"] }
-        exports.isPR = vendor.pr.any.some(function (key) {
-          return !!env[key]
-        })
-      } else {
-        // "pr": { "DRONE_BUILD_EVENT": "pull_request" }
-        exports.isPR = checkEnv(vendor.pr)
-      }
-      break
-    default:
-      // PR detection not supported for this vendor
-      exports.isPR = null
-  }
+  exports.isPR = checkPR(vendor)
+  exports.id = vendor.constant
 })
 
 exports.isCI = !!(
   env.CI !== 'false' && // Bypass all checks if CI env is explicitly set to 'false'
   (env.BUILD_ID || // Jenkins, Cloudbees
-  env.BUILD_NUMBER || // Jenkins, TeamCity
-  env.CI || // Travis CI, CircleCI, Cirrus CI, Gitlab CI, Appveyor, CodeShip, dsari
-  env.CI_APP_ID || // Appflow
-  env.CI_BUILD_ID || // Appflow
-  env.CI_BUILD_NUMBER || // Appflow
-  env.CI_NAME || // Codeship and others
-  env.CONTINUOUS_INTEGRATION || // Travis CI, Cirrus CI
-  env.RUN_ID || // TaskCluster, dsari
-  exports.name ||
-  false)
+    env.BUILD_NUMBER || // Jenkins, TeamCity
+    env.CI || // Travis CI, CircleCI, Cirrus CI, Gitlab CI, Appveyor, CodeShip, dsari
+    env.CI_APP_ID || // Appflow
+    env.CI_BUILD_ID || // Appflow
+    env.CI_BUILD_NUMBER || // Appflow
+    env.CI_NAME || // Codeship and others
+    env.CONTINUOUS_INTEGRATION || // Travis CI, Cirrus CI
+    env.RUN_ID || // TaskCluster, dsari
+    exports.name ||
+    false)
 )
 
 function checkEnv (obj) {
@@ -79,12 +58,45 @@ function checkEnv (obj) {
     return env[obj.env] && env[obj.env].includes(obj.includes)
   // }
   }
+
   if ('any' in obj) {
     return obj.any.some(function (k) {
       return !!env[k]
     })
   }
+
   return Object.keys(obj).every(function (k) {
     return env[k] === obj[k]
  })
 }
+
+function checkPR (vendor) {
+  switch (typeof vendor.pr) {
+    case 'string':
+      // "pr": "CIRRUS_PR"
+      return !!env[vendor.pr]
+    case 'object':
+      if ('env' in vendor.pr) {
+        if ('any' in vendor.pr) {
+          // "pr": { "env": "CODEBUILD_WEBHOOK_EVENT", "any": ["PULL_REQUEST_CREATED", "PULL_REQUEST_UPDATED"] }
+          return vendor.pr.any.some(function (key) {
+            return env[vendor.pr.env] === key
+          })
+        } else {
+          // "pr": { "env": "BUILDKITE_PULL_REQUEST", "ne": "false" }
+          return vendor.pr.env in env && env[vendor.pr.env] !== vendor.pr.ne
+        }
+      } else if ('any' in vendor.pr) {
+        // "pr": { "any": ["ghprbPullId", "CHANGE_ID"] }
+        return vendor.pr.any.some(function (key) {
+          return !!env[key]
+        })
+      } else {
+        // "pr": { "DRONE_BUILD_EVENT": "pull_request" }
+        return checkEnv(vendor.pr)
+      }
+    default:
+      // PR detection not supported for this vendor
+      return null
+  }
+}
diff --git a/deps/npm/node_modules/ci-info/package.json b/deps/npm/node_modules/ci-info/package.json
index 3c6b9e4adac8e8..156329d2ce379c 100644
--- a/deps/npm/node_modules/ci-info/package.json
+++ b/deps/npm/node_modules/ci-info/package.json
@@ -1,6 +1,6 @@
 {
   "name": "ci-info",
-  "version": "4.0.0",
+  "version": "4.1.0",
   "description": "Get details about the current Continuous Integration environment",
   "main": "index.js",
   "typings": "index.d.ts",
@@ -9,6 +9,18 @@
   "repository": "https://github.com/watson/ci-info.git",
   "bugs": "https://github.com/watson/ci-info/issues",
   "homepage": "https://github.com/watson/ci-info",
+  "contributors": [
+    {
+      "name": "Sibiraj",
+      "url": "https://github.com/sibiraj-s"
+    }
+  ],
+  "funding": [
+    {
+      "type": "github",
+      "url": "https://github.com/sponsors/sibiraj-s"
+    }
+  ],
   "keywords": [
     "ci",
     "continuous",
@@ -22,22 +34,16 @@
     "index.d.ts",
     "CHANGELOG.md"
   ],
-  "funding": [
-    {
-      "type": "github",
-      "url": "https://github.com/sponsors/sibiraj-s"
-    }
-  ],
   "scripts": {
     "lint:fix": "standard --fix",
     "test": "standard && node test.js",
-    "prepare": "husky install"
+    "prepare": "husky install || true"
   },
   "devDependencies": {
     "clear-module": "^4.1.2",
-    "husky": "^8.0.3",
-    "standard": "^17.1.0",
-    "tape": "^5.7.0"
+    "husky": "^9.1.6",
+    "standard": "^17.1.2",
+    "tape": "^5.9.0"
   },
   "engines": {
     "node": ">=8"
diff --git a/deps/npm/node_modules/ci-info/vendors.json b/deps/npm/node_modules/ci-info/vendors.json
index 6b65e3f9b541f8..64d5924d1a557e 100644
--- a/deps/npm/node_modules/ci-info/vendors.json
+++ b/deps/npm/node_modules/ci-info/vendors.json
@@ -8,7 +8,11 @@
   {
     "name": "Appcircle",
     "constant": "APPCIRCLE",
-    "env": "AC_APPCIRCLE"
+    "env": "AC_APPCIRCLE",
+    "pr": {
+      "env": "AC_GIT_PR",
+      "ne": "false"
+    }
   },
   {
     "name": "AppVeyor",
@@ -19,7 +23,15 @@
   {
     "name": "AWS CodeBuild",
     "constant": "CODEBUILD",
-    "env": "CODEBUILD_BUILD_ARN"
+    "env": "CODEBUILD_BUILD_ARN",
+    "pr": {
+      "env": "CODEBUILD_WEBHOOK_EVENT",
+      "any": [
+        "PULL_REQUEST_CREATED",
+        "PULL_REQUEST_UPDATED",
+        "PULL_REQUEST_REOPENED"
+      ]
+    }
   },
   {
     "name": "Azure Pipelines",
diff --git a/deps/npm/node_modules/cross-spawn/lib/enoent.js b/deps/npm/node_modules/cross-spawn/lib/enoent.js
index 14df9b623d0a20..da33471369c23f 100644
--- a/deps/npm/node_modules/cross-spawn/lib/enoent.js
+++ b/deps/npm/node_modules/cross-spawn/lib/enoent.js
@@ -24,7 +24,7 @@ function hookChildProcess(cp, parsed) {
         // the command exists and emit an "error" instead
         // See https://github.com/IndigoUnited/node-cross-spawn/issues/16
         if (name === 'exit') {
-            const err = verifyENOENT(arg1, parsed, 'spawn');
+            const err = verifyENOENT(arg1, parsed);
 
             if (err) {
                 return originalEmit.call(cp, 'error', err);
diff --git a/deps/npm/node_modules/cross-spawn/lib/util/escape.js b/deps/npm/node_modules/cross-spawn/lib/util/escape.js
index b0bb84c3a14092..7bf2905cd035ad 100644
--- a/deps/npm/node_modules/cross-spawn/lib/util/escape.js
+++ b/deps/npm/node_modules/cross-spawn/lib/util/escape.js
@@ -15,15 +15,17 @@ function escapeArgument(arg, doubleEscapeMetaChars) {
     arg = `${arg}`;
 
     // Algorithm below is based on https://qntm.org/cmd
+    // It's slightly altered to disable JS backtracking to avoid hanging on specially crafted input
+    // Please see https://github.com/moxystudio/node-cross-spawn/pull/160 for more information
 
     // Sequence of backslashes followed by a double quote:
     // double up all the backslashes and escape the double quote
-    arg = arg.replace(/(\\*)"/g, '$1$1\\"');
+    arg = arg.replace(/(?=(\\+?)?)\1"/g, '$1$1\\"');
 
     // Sequence of backslashes followed by the end of the string
     // (which will become a double quote later):
     // double up all the backslashes
-    arg = arg.replace(/(\\*)$/, '$1$1');
+    arg = arg.replace(/(?=(\\+?)?)\1$/, '$1$1');
 
     // All other backslashes occur literally
diff --git a/deps/npm/node_modules/cross-spawn/package.json b/deps/npm/node_modules/cross-spawn/package.json
index 232ff97e04b213..24b2eb4c9900cf 100644
--- a/deps/npm/node_modules/cross-spawn/package.json
+++ b/deps/npm/node_modules/cross-spawn/package.json
@@ -1,6 +1,6 @@
 {
   "name": "cross-spawn",
-  "version": "7.0.3",
+  "version": "7.0.6",
   "description": "Cross platform child_process#spawn and child_process#spawnSync",
   "keywords": [
     "spawn",
@@ -65,7 +65,7 @@
     "lint-staged": "^9.2.5",
     "mkdirp": "^0.5.1",
     "rimraf": "^3.0.0",
-    "standard-version": "^7.0.0"
+    "standard-version": "^9.5.0"
   },
   "engines": {
     "node": ">= 8"
diff --git a/deps/npm/node_modules/debug/node_modules/ms/index.js b/deps/npm/node_modules/debug/node_modules/ms/index.js
deleted file mode 100644
index c4498bcc212589..00000000000000
--- a/deps/npm/node_modules/debug/node_modules/ms/index.js
+++ /dev/null
@@ -1,162 +0,0 @@
-/**
- * Helpers.
- */
-
-var s = 1000;
-var m = s * 60;
-var h = m * 60;
-var d = h * 24;
-var w = d * 7;
-var y = d * 365.25;
-
-/**
- * Parse or format the given `val`.
- *
- * Options:
- *
- *  - `long` verbose formatting [false]
- *
- * @param {String|Number} val
- * @param {Object} [options]
- * @throws {Error} throw an error if val is not a non-empty string or a number
- * @return {String|Number}
- * @api public
- */
-
-module.exports = function(val, options) {
-  options = options || {};
-  var type = typeof val;
-  if (type === 'string' && val.length > 0) {
-    return parse(val);
-  } else if (type === 'number' && isFinite(val)) {
-    return options.long ? fmtLong(val) : fmtShort(val);
-  }
-  throw new Error(
-    'val is not a non-empty string or a valid number. val=' +
-      JSON.stringify(val)
-  );
-};
-
-/**
- * Parse the given `str` and return milliseconds.
- *
- * @param {String} str
- * @return {Number}
- * @api private
- */
-
-function parse(str) {
-  str = String(str);
-  if (str.length > 100) {
-    return;
-  }
-  var match = /^(-?(?:\d+)?\.?\d+) *(milliseconds?|msecs?|ms|seconds?|secs?|s|minutes?|mins?|m|hours?|hrs?|h|days?|d|weeks?|w|years?|yrs?|y)?$/i.exec(
-    str
-  );
-  if (!match) {
-    return;
-  }
-  var n = parseFloat(match[1]);
-  var type = (match[2] || 'ms').toLowerCase();
-  switch (type) {
-    case 'years':
-    case 'year':
-    case 'yrs':
-    case 'yr':
-    case 'y':
-      return n * y;
-    case 'weeks':
-    case 'week':
-    case 'w':
-      return n * w;
-    case 'days':
-    case 'day':
-    case 'd':
-      return n * d;
-    case 'hours':
-    case 'hour':
-    case 'hrs':
-    case 'hr':
-    case 'h':
-      return n * h;
-    case 'minutes':
-    case 'minute':
-    case 'mins':
-    case 'min':
-    case 'm':
-      return n * m;
-    case 'seconds':
-    case 'second':
-    case 'secs':
-    case 'sec':
-    case 's':
-      return n * s;
-    case 'milliseconds':
-    case 'millisecond':
-    case 'msecs':
-    case 'msec':
-    case 'ms':
-      return n;
-    default:
-      return undefined;
-  }
-}
-
-/**
- * Short format for `ms`.
- *
- * @param {Number} ms
- * @return {String}
- * @api private
- */
-
-function fmtShort(ms) {
-  var msAbs = Math.abs(ms);
-  if (msAbs >= d) {
-    return Math.round(ms / d) + 'd';
-  }
-  if (msAbs >= h) {
-    return Math.round(ms / h) + 'h';
-  }
-  if (msAbs >= m) {
-    return Math.round(ms / m) + 'm';
-  }
-  if (msAbs >= s) {
-    return Math.round(ms / s) + 's';
-  }
-  return ms + 'ms';
-}
-
-/**
- * Long format for `ms`.
- *
- * @param {Number} ms
- * @return {String}
- * @api private
- */
-
-function fmtLong(ms) {
-  var msAbs = Math.abs(ms);
-  if (msAbs >= d) {
-    return plural(ms, msAbs, d, 'day');
-  }
-  if (msAbs >= h) {
-    return plural(ms, msAbs, h, 'hour');
-  }
-  if (msAbs >= m) {
-    return plural(ms, msAbs, m, 'minute');
-  }
-  if (msAbs >= s) {
-    return plural(ms, msAbs, s, 'second');
-  }
-  return ms + ' ms';
-}
-
-/**
- * Pluralization helper.
- */
-
-function plural(ms, msAbs, n, name) {
-  var isPlural = msAbs >= n * 1.5;
-  return Math.round(ms / n) + ' ' + name + (isPlural ? 's' : '');
-}
diff --git a/deps/npm/node_modules/debug/node_modules/ms/license.md b/deps/npm/node_modules/debug/node_modules/ms/license.md
deleted file mode 100644
index 69b61253a38926..00000000000000
--- a/deps/npm/node_modules/debug/node_modules/ms/license.md
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2016 Zeit, Inc.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/deps/npm/node_modules/debug/node_modules/ms/package.json b/deps/npm/node_modules/debug/node_modules/ms/package.json
deleted file mode 100644
index eea666e1fb03d6..00000000000000
--- a/deps/npm/node_modules/debug/node_modules/ms/package.json
+++ /dev/null
@@ -1,37 +0,0 @@
-{
-  "name": "ms",
-  "version": "2.1.2",
-  "description": "Tiny millisecond conversion utility",
-  "repository": "zeit/ms",
-  "main": "./index",
-  "files": [
-    "index.js"
-  ],
-  "scripts": {
-    "precommit": "lint-staged",
-    "lint": "eslint lib/* bin/*",
-    "test": "mocha tests.js"
-  },
-  "eslintConfig": {
-    "extends": "eslint:recommended",
-    "env": {
-      "node": true,
-      "es6": true
-    }
-  },
-  "lint-staged": {
-    "*.js": [
-      "npm run lint",
-      "prettier --single-quote --write",
-      "git add"
-    ]
-  },
-  "license": "MIT",
-  "devDependencies": {
-    "eslint": "4.12.1",
-    "expect.js": "0.3.1",
-    "husky": "0.14.3",
-    "lint-staged": "5.0.0",
-    "mocha": "4.0.1"
-  }
-}
diff --git a/deps/npm/node_modules/debug/package.json b/deps/npm/node_modules/debug/package.json
index 8eea05520554eb..2f782eb9aef450 100644
--- a/deps/npm/node_modules/debug/package.json
+++ b/deps/npm/node_modules/debug/package.json
@@ -1,6 +1,6 @@
 {
   "name": "debug",
-  "version": "4.3.6",
+  "version": "4.3.7",
   "repository": {
     "type": "git",
     "url": "git://github.com/debug-js/debug.git"
@@ -31,7 +31,7 @@
     "test:coverage": "cat ./coverage/lcov.info | coveralls"
   },
   "dependencies": {
-    "ms": "2.1.2"
+    "ms": "^2.1.3"
  },
   "devDependencies": {
     "brfs": "^2.0.1",
diff --git a/deps/npm/node_modules/hosted-git-info/lib/hosts.js b/deps/npm/node_modules/hosted-git-info/lib/hosts.js
index 9a08efd1b2d7e9..2a88e95927772a 100644
--- a/deps/npm/node_modules/hosted-git-info/lib/hosts.js
+++ b/deps/npm/node_modules/hosted-git-info/lib/hosts.js
@@ -4,7 +4,11 @@ const maybeJoin = (...args) => args.every(arg => arg) ? args.join('') : ''
 
 const maybeEncode = (arg) => arg ? encodeURIComponent(arg) : ''
 
-const formatHashFragment = (f) => f.toLowerCase().replace(/^\W+|\/|\W+$/g, '').replace(/\W+/g, '-')
+const formatHashFragment = (f) => f.toLowerCase()
+  .replace(/^\W+/g, '') // strip leading non-characters
+  .replace(/(?
diff --git a/deps/npm/node_modules/hosted-git-info/package.json b/deps/npm/node_modules/hosted-git-info/package.json
index 3bb8bcd1f49825..78356159af7723 100644
--- a/deps/npm/node_modules/hosted-git-info/package.json
+++ b/deps/npm/node_modules/hosted-git-info/package.json
@@ -1,6 +1,6 @@
 {
   "name": "hosted-git-info",
-  "version": "8.0.0",
+  "version": "8.0.2",
   "description": "Provides metadata and conversions from repository urls for GitHub, Bitbucket and GitLab",
   "main": "./lib/index.js",
   "repository": {
@@ -35,7 +35,7 @@
   },
   "devDependencies": {
     "@npmcli/eslint-config": "^5.0.0",
-    "@npmcli/template-oss": "4.23.3",
+    "@npmcli/template-oss": "4.23.4",
     "tap": "^16.0.1"
   },
   "files": [
@@ -55,7 +55,7 @@
   },
   "templateOSS": {
     "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. Edits may be overwritten.",
-    "version": "4.23.3",
+    "version": "4.23.4",
     "publish": "true"
   }
 }
diff --git a/deps/npm/node_modules/init-package-json/lib/default-input.js b/deps/npm/node_modules/init-package-json/lib/default-input.js
index 490e83c139887f..0a01bfa05fa45c 100644
--- a/deps/npm/node_modules/init-package-json/lib/default-input.js
+++ b/deps/npm/node_modules/init-package-json/lib/default-input.js
@@ -199,16 +199,17 @@ if (!package.scripts) {
 
 if (!package.repository) {
   exports.repository = async () => {
-    const gconf = await fs.readFile('.git/config', 'utf8').catch(() => '')
+    const gitConfigPath = path.resolve(dirname, '.git', 'config')
+    const gconf = await fs.readFile(gitConfigPath, 'utf8').catch(() => '')
 
     const lines = gconf.split(/\r?\n/)
     let url
     const i = lines.indexOf('[remote "origin"]')
 
     if (i !== -1) {
-      url = gconf[i + 1]
+      url = lines[i + 1]
       if (!url.match(/^\s*url =/)) {
-        url = gconf[i + 2]
+        url = lines[i + 2]
       }
       if (!url.match(/^\s*url =/)) {
         url = null
diff --git a/deps/npm/node_modules/init-package-json/package.json b/deps/npm/node_modules/init-package-json/package.json
index 21021ab701244b..d1de96476a9f14 100644
--- a/deps/npm/node_modules/init-package-json/package.json
+++ b/deps/npm/node_modules/init-package-json/package.json
@@ -1,6 +1,6 @@
 {
   "name": "init-package-json",
-  "version": "7.0.1",
+  "version": "7.0.2",
   "main": "lib/init-package-json.js",
   "scripts": {
     "test": "tap",
diff --git a/deps/npm/node_modules/is-lambda/index.js b/deps/npm/node_modules/is-lambda/index.js
deleted file mode 100644
index b245ab1c68df1a..00000000000000
--- a/deps/npm/node_modules/is-lambda/index.js
+++ /dev/null
@@ -1,6 +0,0 @@
-'use strict'
-
-module.exports = !!(
-  (process.env.LAMBDA_TASK_ROOT && process.env.AWS_EXECUTION_ENV) ||
-  false
-)
diff --git a/deps/npm/node_modules/is-lambda/package.json b/deps/npm/node_modules/is-lambda/package.json
deleted file mode 100644
index d8550898b4e4d9..00000000000000
--- a/deps/npm/node_modules/is-lambda/package.json
+++ /dev/null
@@ -1,35 +0,0 @@
-{
-  "name": "is-lambda",
-  "version": "1.0.1",
-  "description": "Detect if your code is running on an AWS Lambda server",
-  "main": "index.js",
-  "dependencies": {},
-  "devDependencies": {
-    "clear-require": "^1.0.1",
-    "standard": "^10.0.2"
-  },
-  "scripts": {
-    "test": "standard && node test.js"
-  },
-  "repository": {
-    "type": "git",
-    "url": "https://github.com/watson/is-lambda.git"
-  },
-  "keywords": [
-    "aws",
-    "hosting",
-    "hosted",
-    "lambda",
-    "detect"
-  ],
-  "author": "Thomas Watson Steen (https://twitter.com/wa7son)",
-  "license": "MIT",
-  "bugs": {
-    "url": "https://github.com/watson/is-lambda/issues"
-  },
-  "homepage": "https://github.com/watson/is-lambda",
-  "coordinates": [
-    37.3859955,
-    -122.0838831
-  ]
-}
diff --git a/deps/npm/node_modules/is-lambda/test.js b/deps/npm/node_modules/is-lambda/test.js
deleted file mode 100644
index e8e73257ad4ad7..00000000000000
--- a/deps/npm/node_modules/is-lambda/test.js
+++ /dev/null
@@ -1,16 +0,0 @@
-'use strict'
-
-var assert = require('assert')
-var clearRequire = require('clear-require')
-
-process.env.AWS_EXECUTION_ENV = 'AWS_Lambda_nodejs6.10'
-process.env.LAMBDA_TASK_ROOT = '/var/task'
-
-var isCI = require('./')
-assert(isCI)
-
-delete process.env.AWS_EXECUTION_ENV
-
-clearRequire('./')
-isCI = require('./')
-assert(!isCI)
diff --git a/deps/npm/node_modules/libnpmpublish/lib/publish.js b/deps/npm/node_modules/libnpmpublish/lib/publish.js
index 93d546efb5f0e1..2bcab4f3ba304e 100644
--- a/deps/npm/node_modules/libnpmpublish/lib/publish.js
+++ b/deps/npm/node_modules/libnpmpublish/lib/publish.js
@@ -137,7 +137,7 @@ const buildMetadata = async (registry, manifest, tarballData, spec, opts) => {
 
     if (provenance === true) {
       await ensureProvenanceGeneration(registry, spec, opts)
-      provenanceBundle = await generateProvenance([subject], opts)
+      provenanceBundle = await generateProvenance([subject], { legacyCompatibility: true, ...opts })
 
       /* eslint-disable-next-line max-len */
       log.notice('publish', `Signed provenance statement with source and build information from ${ciInfo.name}`)
diff --git a/deps/npm/node_modules/libnpmpublish/package.json b/deps/npm/node_modules/libnpmpublish/package.json
index f63d50f4e7b9c1..594f5041480b4a 100644
--- a/deps/npm/node_modules/libnpmpublish/package.json
+++ b/deps/npm/node_modules/libnpmpublish/package.json
@@ -1,6 +1,6 @@
 {
   "name": "libnpmpublish",
-  "version": "10.0.0",
+  "version": "10.0.1",
   "description": "Programmatic API for the bits behind npm publish and unpublish",
   "author": "GitHub Inc.",
   "main": "lib/index.js",
@@ -45,7 +45,7 @@
     "npm-registry-fetch": "^18.0.1",
     "proc-log": "^5.0.0",
     "semver": "^7.3.7",
-    "sigstore": "^2.2.0",
+    "sigstore": "^3.0.0",
     "ssri": "^12.0.0"
   },
   "engines": {
diff --git a/deps/npm/node_modules/make-fetch-happen/lib/options.js b/deps/npm/node_modules/make-fetch-happen/lib/options.js
index f77511279f831d..db51cc63248176 100644
--- a/deps/npm/node_modules/make-fetch-happen/lib/options.js
+++ b/deps/npm/node_modules/make-fetch-happen/lib/options.js
@@ -11,7 +11,12 @@ const conditionalHeaders = [
 const configureOptions = (opts) => {
   const { strictSSL, ...options } = { ...opts }
   options.method = options.method ? options.method.toUpperCase() : 'GET'
-  options.rejectUnauthorized = strictSSL !== false
+
+  if (strictSSL === undefined || strictSSL === null) {
+    options.rejectUnauthorized = process.env.NODE_TLS_REJECT_UNAUTHORIZED !== '0'
+  } else {
+    options.rejectUnauthorized = strictSSL !== false
+  }
 
   if (!options.retry) {
     options.retry = { retries: 0 }
diff --git a/deps/npm/node_modules/make-fetch-happen/lib/remote.js b/deps/npm/node_modules/make-fetch-happen/lib/remote.js
index 8554564074de6e..1d640e5380baaf 100644
--- a/deps/npm/node_modules/make-fetch-happen/lib/remote.js
+++ b/deps/npm/node_modules/make-fetch-happen/lib/remote.js
@@ -35,7 +35,8 @@ const RETRY_TYPES = [
 // following redirects (through the cache if necessary)
 // and verifying response integrity
 const remoteFetch = (request, options) => {
-  const agent = getAgent(request.url, options)
+  // options.signal is intended for the fetch itself, not the agent. Attaching it to the agent will re-use that signal across multiple requests, which prevents any connections beyond the first one.
+  const agent = getAgent(request.url, { ...options, signal: undefined })
   if (!request.headers.has('connection')) {
     request.headers.set('connection', agent ? 'keep-alive' : 'close')
   }
diff --git a/deps/npm/node_modules/negotiator/HISTORY.md b/deps/npm/node_modules/make-fetch-happen/node_modules/negotiator/HISTORY.md
similarity index 95%
rename from deps/npm/node_modules/negotiator/HISTORY.md
rename to deps/npm/node_modules/make-fetch-happen/node_modules/negotiator/HISTORY.md
index a9a544914c43bb..63d537d3f68114 100644
--- a/deps/npm/node_modules/negotiator/HISTORY.md
+++ b/deps/npm/node_modules/make-fetch-happen/node_modules/negotiator/HISTORY.md
@@ -1,3 +1,9 @@
+1.0.0 / 2024-08-31
+==================
+
+  * Drop support for node <18
+  * Added an option preferred encodings array #59
+
 0.6.3 / 2022-01-22
 ==================
 
diff --git a/deps/npm/node_modules/negotiator/LICENSE b/deps/npm/node_modules/make-fetch-happen/node_modules/negotiator/LICENSE
similarity index 100%
rename from deps/npm/node_modules/negotiator/LICENSE
rename to deps/npm/node_modules/make-fetch-happen/node_modules/negotiator/LICENSE
diff --git a/deps/npm/node_modules/negotiator/index.js b/deps/npm/node_modules/make-fetch-happen/node_modules/negotiator/index.js
similarity index 90%
rename from deps/npm/node_modules/negotiator/index.js
rename to deps/npm/node_modules/make-fetch-happen/node_modules/negotiator/index.js
index 4788264b16c9f2..4f51315d6af4bd 100644
--- a/deps/npm/node_modules/negotiator/index.js
+++ b/deps/npm/node_modules/make-fetch-happen/node_modules/negotiator/index.js
@@ -44,13 +44,14 @@ Negotiator.prototype.charsets = function charsets(available) {
   return preferredCharsets(this.request.headers['accept-charset'], available);
 };
 
-Negotiator.prototype.encoding = function encoding(available) {
-  var set = this.encodings(available);
+Negotiator.prototype.encoding = function encoding(available, opts) {
+  var set = this.encodings(available, opts);
   return set && set[0];
 };
 
-Negotiator.prototype.encodings = function encodings(available) {
-  return preferredEncodings(this.request.headers['accept-encoding'], available);
+Negotiator.prototype.encodings = function encodings(available, options) {
+  var opts = options || {};
+  return preferredEncodings(this.request.headers['accept-encoding'], available, opts.preferred);
 };
 
 Negotiator.prototype.language = function language(available) {
diff --git a/deps/npm/node_modules/negotiator/lib/charset.js b/deps/npm/node_modules/make-fetch-happen/node_modules/negotiator/lib/charset.js
similarity index 100%
rename from deps/npm/node_modules/negotiator/lib/charset.js
rename to deps/npm/node_modules/make-fetch-happen/node_modules/negotiator/lib/charset.js
diff --git a/deps/npm/node_modules/negotiator/lib/encoding.js b/deps/npm/node_modules/make-fetch-happen/node_modules/negotiator/lib/encoding.js
similarity index 79%
rename from deps/npm/node_modules/negotiator/lib/encoding.js
rename to deps/npm/node_modules/make-fetch-happen/node_modules/negotiator/lib/encoding.js
index 8432cd77b8a969..9ebb633d677433 100644
--- a/deps/npm/node_modules/negotiator/lib/encoding.js
+++ b/deps/npm/node_modules/make-fetch-happen/node_modules/negotiator/lib/encoding.js
@@ -96,7 +96,7 @@ function parseEncoding(str, i) {
  */
 
 function getEncodingPriority(encoding, accepted, index) {
-  var priority = {o: -1, q: 0, s: 0};
+  var priority = {encoding: encoding, o: -1, q: 0, s: 0};
 
   for (var i = 0; i < accepted.length; i++) {
     var spec = specify(encoding, accepted[i], index);
@@ -123,6 +123,7 @@ function specify(encoding, spec, index) {
   }
 
   return {
+    encoding: encoding,
     i: index,
     o: spec.i,
     q: spec.q,
@@ -135,14 +136,34 @@ function specify(encoding, spec, index) {
  * @public
  */
 
-function preferredEncodings(accept, provided) {
+function preferredEncodings(accept, provided, preferred) {
   var accepts = parseAcceptEncoding(accept || '');
 
+  var comparator = preferred ? function comparator (a, b) {
+    if (a.q !== b.q) {
+      return b.q - a.q // higher quality first
+    }
+
+    var aPreferred = preferred.indexOf(a.encoding)
+    var bPreferred = preferred.indexOf(b.encoding)
+
+    if (aPreferred === -1 && bPreferred === -1) {
+      // consider the original specifity/order
+      return (b.s - a.s) || (a.o - b.o) || (a.i - b.i)
+    }
+
+    if (aPreferred !== -1 && bPreferred !== -1) {
+      return aPreferred - bPreferred // consider the preferred order
+    }
+
+    return aPreferred === -1 ? 1 : -1 // preferred first
+  } : compareSpecs;
+
   if (!provided) {
     // sorted list of all encodings
     return accepts
       .filter(isQuality)
-      .sort(compareSpecs)
+      .sort(comparator)
       .map(getFullEncoding);
   }
 
@@ -151,7 +172,7 @@ function preferredEncodings(accept, provided) {
   });
 
   // sorted list of accepted encodings
-  return priorities.filter(isQuality).sort(compareSpecs).map(function getEncoding(priority) {
+  return priorities.filter(isQuality).sort(comparator).map(function getEncoding(priority) {
     return provided[priorities.indexOf(priority)];
   });
 }
@@ -162,7 +183,7 @@ function preferredEncodings(accept, provided) {
  */
 
 function compareSpecs(a, b) {
-  return (b.q - a.q) || (b.s - a.s) || (a.o - b.o) || (a.i - b.i) || 0;
+  return (b.q - a.q) || (b.s - a.s) || (a.o - b.o) || (a.i - b.i);
 }
 
 /**
diff --git a/deps/npm/node_modules/negotiator/lib/language.js b/deps/npm/node_modules/make-fetch-happen/node_modules/negotiator/lib/language.js
similarity index 100%
rename from deps/npm/node_modules/negotiator/lib/language.js
rename to deps/npm/node_modules/make-fetch-happen/node_modules/negotiator/lib/language.js
diff --git a/deps/npm/node_modules/negotiator/lib/mediaType.js b/deps/npm/node_modules/make-fetch-happen/node_modules/negotiator/lib/mediaType.js
similarity index 98%
rename from deps/npm/node_modules/negotiator/lib/mediaType.js
rename to deps/npm/node_modules/make-fetch-happen/node_modules/negotiator/lib/mediaType.js
index 67309dd75f1b62..8e402ea88394c0 100644
--- a/deps/npm/node_modules/negotiator/lib/mediaType.js
+++ b/deps/npm/node_modules/make-fetch-happen/node_modules/negotiator/lib/mediaType.js
@@ -69,7 +69,7 @@ function parseMediaType(str, i) {
 
   // get the value, unwrapping quotes
   var value = val && val[0] === '"' && val[val.length - 1] === '"'
-    ? val.substr(1, val.length - 2)
+    ? val.slice(1, -1)
     : val;
 
   if (key === 'q') {
@@ -238,8 +238,8 @@ function splitKeyValuePair(str) {
   if (index === -1) {
     key = str;
   } else {
-    key = str.substr(0, index);
-    val = str.substr(index + 1);
+    key = str.slice(0, index);
+    val = str.slice(index + 1);
   }
 
   return [key, val];
diff --git a/deps/npm/node_modules/negotiator/package.json b/deps/npm/node_modules/make-fetch-happen/node_modules/negotiator/package.json
similarity index 89%
rename from deps/npm/node_modules/negotiator/package.json
rename to deps/npm/node_modules/make-fetch-happen/node_modules/negotiator/package.json
index 297635f6d34177..e4bdc1ef4f7481 100644
--- a/deps/npm/node_modules/negotiator/package.json
+++ b/deps/npm/node_modules/make-fetch-happen/node_modules/negotiator/package.json
@@ -1,7 +1,7 @@
 {
   "name": "negotiator",
   "description": "HTTP content negotiation",
-  "version": "0.6.3",
+  "version": "1.0.0",
   "contributors": [
     "Douglas Christopher Wilson ",
     "Federico Romero ",
@@ -36,6 +36,7 @@
   "scripts": {
     "lint": "eslint .",
     "test": "mocha --reporter spec --check-leaks --bail test/",
+    "test:debug": "mocha --reporter spec --check-leaks --inspect --inspect-brk test/",
     "test-ci": "nyc --reporter=lcov --reporter=text npm test",
     "test-cov": "nyc --reporter=html --reporter=text npm test"
   }
diff --git a/deps/npm/node_modules/make-fetch-happen/package.json b/deps/npm/node_modules/make-fetch-happen/package.json
index 0868ff6d7efa54..054fe841f13b73 100644
--- a/deps/npm/node_modules/make-fetch-happen/package.json
+++ b/deps/npm/node_modules/make-fetch-happen/package.json
@@ -1,6 +1,6 @@
 {
   "name": "make-fetch-happen",
-  "version": "14.0.1",
+  "version": "14.0.3",
   "description": "Opinionated, caching, retrying fetch client",
   "main": "lib/index.js",
   "files": [
@@ -40,14 +40,14 @@
     "minipass-fetch": "^4.0.0",
     "minipass-flush": "^1.0.5",
     "minipass-pipeline": "^1.2.4",
-    "negotiator": "^0.6.3",
+    "negotiator": "^1.0.0",
     "proc-log": "^5.0.0",
     "promise-retry": "^2.0.1",
     "ssri": "^12.0.0"
   },
   "devDependencies": {
     "@npmcli/eslint-config": "^5.0.0",
-    "@npmcli/template-oss": "4.23.3",
+    "@npmcli/template-oss": "4.23.4",
     "nock": "^13.2.4",
     "safe-buffer": "^5.2.1",
     "standard-version": "^9.3.2",
@@ -68,7 +68,7 @@
   },
   "templateOSS": {
     "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. Edits may be overwritten.",
-    "version": "4.23.3",
+    "version": "4.23.4",
     "publish": "true"
   }
 }
diff --git a/deps/npm/node_modules/node-gyp/.release-please-manifest.json b/deps/npm/node_modules/node-gyp/.release-please-manifest.json
index 01db3293b948bd..26a3463a2e0bb3 100644
--- a/deps/npm/node_modules/node-gyp/.release-please-manifest.json
+++ b/deps/npm/node_modules/node-gyp/.release-please-manifest.json
@@ -1,3 +1,3 @@
 {
-  ".": "10.2.0"
+  ".": "11.0.0"
 }
diff --git a/deps/npm/node_modules/node-gyp/CHANGELOG.md b/deps/npm/node_modules/node-gyp/CHANGELOG.md
index 1d4e4185320bd6..8374a920b7caaa 100644
--- a/deps/npm/node_modules/node-gyp/CHANGELOG.md
+++ b/deps/npm/node_modules/node-gyp/CHANGELOG.md
@@ -1,5 +1,46 @@
 # Changelog
 
+## [11.0.0](https://github.com/nodejs/node-gyp/compare/v10.3.1...v11.0.0) (2024-12-03)
+
+
+### ⚠ BREAKING CHANGES
+
+* drop node 16 support ([#3102](https://github.com/nodejs/node-gyp/issues/3102))
+
+### Features
+
+* drop node 16 support ([#3102](https://github.com/nodejs/node-gyp/issues/3102)) ([0e6b6f8](https://github.com/nodejs/node-gyp/commit/0e6b6f8bea615cf031d76ecff9102a38e5474c72))
+
+
+### Miscellaneous
+
+* migrate from standard to neostandard ([#3103](https://github.com/nodejs/node-gyp/issues/3103)) ([a130178](https://github.com/nodejs/node-gyp/commit/a13017807d0ae7da8fa076b0bcf23153af7c60a6))
+
+## [10.3.1](https://github.com/nodejs/node-gyp/compare/v10.3.0...v10.3.1) (2024-12-02)
+
+
+### Miscellaneous
+
+* fix npm-publish dependencies and add provenance ([#3099](https://github.com/nodejs/node-gyp/issues/3099)) ([6dded88](https://github.com/nodejs/node-gyp/commit/6dded88065872a32f44114e60731ba4b701ec057))
+
+## [10.3.0](https://github.com/nodejs/node-gyp/compare/v10.2.0...v10.3.0) (2024-11-29)
+
+
+### Features
+
+* prohibit compiling with ClangCL on Windows ([#3098](https://github.com/nodejs/node-gyp/issues/3098)) ([88260bf](https://github.com/nodejs/node-gyp/commit/88260bf86aeb4c39959b78104a5edc3dc88d3aef))
+
+
+### Bug Fixes
+
+* **ci:** use correct release-please-action domain after organization url was changed ([#3032](https://github.com/nodejs/node-gyp/issues/3032)) ([d1ed3d4](https://github.com/nodejs/node-gyp/commit/d1ed3d4dc3a53b8ccab4093d002e43945bbece0e))
+
+
+### Miscellaneous
+
+* add links to Code of Conduct from root file ([#2196](https://github.com/nodejs/node-gyp/issues/2196)) ([d22e2eb](https://github.com/nodejs/node-gyp/commit/d22e2eb080807c6290533a67249c343a7605a989))
+* publish to npm with release-please ([#3051](https://github.com/nodejs/node-gyp/issues/3051)) ([8319847](https://github.com/nodejs/node-gyp/commit/831984736393a3ea8417efec5255f95d53a70785))
+
 ## [10.2.0](https://github.com/nodejs/node-gyp/compare/v10.1.0...v10.2.0) (2024-07-09)
diff --git a/deps/npm/node_modules/node-gyp/CODE_OF_CONDUCT.md b/deps/npm/node_modules/node-gyp/CODE_OF_CONDUCT.md
new file mode 100644
index 00000000000000..4c211405596cb4
--- /dev/null
+++ b/deps/npm/node_modules/node-gyp/CODE_OF_CONDUCT.md
@@ -0,0 +1,4 @@
+# Code of Conduct
+
+* [Node.js Code of Conduct](https://github.com/nodejs/admin/blob/master/CODE_OF_CONDUCT.md)
+* [Node.js Moderation Policy](https://github.com/nodejs/admin/blob/master/Moderation-Policy.md)
diff --git a/deps/npm/node_modules/node-gyp/eslint.config.js b/deps/npm/node_modules/node-gyp/eslint.config.js
new file mode 100644
index 00000000000000..5212dc93d5304d
--- /dev/null
+++ b/deps/npm/node_modules/node-gyp/eslint.config.js
@@ -0,0 +1,3 @@
+'use strict'
+
+module.exports = require('neostandard')({})
diff --git a/deps/npm/node_modules/node-gyp/lib/create-config-gypi.js b/deps/npm/node_modules/node-gyp/lib/create-config-gypi.js
index d598dea6e2e7fa..01a820e9f2f312 100644
--- a/deps/npm/node_modules/node-gyp/lib/create-config-gypi.js
+++ b/deps/npm/node_modules/node-gyp/lib/create-config-gypi.js
@@ -96,6 +96,9 @@ async function getCurrentConfigGypi ({ gyp, nodeDir, vsInfo, python }) {
       }
     }
     variables.msbuild_path = vsInfo.msBuild
+    if (config.variables.clang === 1) {
+      config.variables.clang = 0
+    }
   }
 
   // loop through the rest of the opts and add the unknown ones as variables.
diff --git a/deps/npm/node_modules/node-gyp/node_modules/@npmcli/agent/lib/agents.js b/deps/npm/node_modules/node-gyp/node_modules/@npmcli/agent/lib/agents.js
deleted file mode 100644
index c541b93001517e..00000000000000
--- a/deps/npm/node_modules/node-gyp/node_modules/@npmcli/agent/lib/agents.js
+++ /dev/null
@@ -1,206 +0,0 @@
-'use strict'
-
-const net = require('net')
-const tls = require('tls')
-const { once } = require('events')
-const timers = require('timers/promises')
-const { normalizeOptions, cacheOptions } = require('./options')
-const { getProxy, getProxyAgent, proxyCache } = require('./proxy.js')
-const Errors = require('./errors.js')
-const { Agent: AgentBase } = require('agent-base')
-
-module.exports = class Agent extends AgentBase {
-  #options
-  #timeouts
-  #proxy
-  #noProxy
-  #ProxyAgent
-
-  constructor (options = {}) {
-    const { timeouts, proxy, noProxy, ...normalizedOptions } = normalizeOptions(options)
-
-    super(normalizedOptions)
-
-    this.#options = normalizedOptions
-    this.#timeouts = timeouts
-
-    if (proxy) {
-      this.#proxy = new URL(proxy)
-      this.#noProxy = noProxy
-      this.#ProxyAgent = getProxyAgent(proxy)
-    }
-  }
-
-  get proxy () {
-    return this.#proxy ? { url: this.#proxy } : {}
-  }
-
-  #getProxy (options) {
-    if (!this.#proxy) {
-      return
-    }
-
-    const proxy = getProxy(`${options.protocol}//${options.host}:${options.port}`, {
-      proxy: this.#proxy,
-      noProxy: this.#noProxy,
-    })
-
-    if (!proxy) {
-      return
-    }
-
-    const cacheKey = cacheOptions({
-      ...options,
-      ...this.#options,
-      timeouts: this.#timeouts,
-      proxy,
-    })
-
-    if (proxyCache.has(cacheKey)) {
-      return proxyCache.get(cacheKey)
-    }
-
-    let ProxyAgent = this.#ProxyAgent
-    if (Array.isArray(ProxyAgent)) {
-      ProxyAgent = this.isSecureEndpoint(options) ? ProxyAgent[1] : ProxyAgent[0]
-    }
-
-    const proxyAgent = new ProxyAgent(proxy, {
-      ...this.#options,
-      socketOptions: { family: this.#options.family },
-    })
-    proxyCache.set(cacheKey, proxyAgent)
-
-    return proxyAgent
-  }
-
-  // takes an array of promises and races them against the connection timeout
-  // which will throw the necessary error if it is hit. This will return the
-  // result of the promise race.
-  async #timeoutConnection ({ promises, options, timeout }, ac = new AbortController()) {
-    if (timeout) {
-      const connectionTimeout = timers.setTimeout(timeout, null, { signal: ac.signal })
-        .then(() => {
-          throw new Errors.ConnectionTimeoutError(`${options.host}:${options.port}`)
-        }).catch((err) => {
-          if (err.name === 'AbortError') {
-            return
-          }
-          throw err
-        })
-      promises.push(connectionTimeout)
-    }
-
-    let result
-    try {
-      result = await Promise.race(promises)
-      ac.abort()
-    } catch (err) {
-      ac.abort()
-      throw err
-    }
-    return result
-  }
-
-  async connect (request, options) {
-    // if the connection does not have its own lookup function
-    // set, then use the one from our options
-    options.lookup ??= this.#options.lookup
-
-    let socket
-    let timeout = this.#timeouts.connection
-    const isSecureEndpoint = this.isSecureEndpoint(options)
-
-    const proxy = this.#getProxy(options)
-    if (proxy) {
-      // some of the proxies will wait for the socket to fully connect before
-      // returning so we have to await this while also racing it against the
-      // connection timeout.
-      const start = Date.now()
-      socket = await this.#timeoutConnection({
-        options,
-        timeout,
-        promises: [proxy.connect(request, options)],
-      })
-      // see how much time proxy.connect took and subtract it from
-      // the timeout
-      if (timeout) {
-        timeout = timeout - (Date.now() - start)
-      }
-    } else {
-      socket = (isSecureEndpoint ? tls : net).connect(options)
-    }
-
-    socket.setKeepAlive(this.keepAlive, this.keepAliveMsecs)
-    socket.setNoDelay(this.keepAlive)
-
-    const abortController = new AbortController()
-    const { signal } = abortController
-
-    const connectPromise = socket[isSecureEndpoint ? 'secureConnecting' : 'connecting']
-      ? once(socket, isSecureEndpoint ? 'secureConnect' : 'connect', { signal })
-      : Promise.resolve()
-
-    await this.#timeoutConnection({
-      options,
-      timeout,
-      promises: [
-        connectPromise,
-        once(socket, 'error', { signal }).then((err) => {
-          throw err[0]
-        }),
-      ],
-    }, abortController)
-
-    if (this.#timeouts.idle) {
-      socket.setTimeout(this.#timeouts.idle, () => {
-        socket.destroy(new Errors.IdleTimeoutError(`${options.host}:${options.port}`))
-      })
-    }
-
-    return socket
-  }
-
-  addRequest (request, options) {
-    const proxy = this.#getProxy(options)
-    // it would be better to call proxy.addRequest here but this causes the
-    // http-proxy-agent to call its super.addRequest which causes the request
-    // to be added to the agent twice. since we only support 3 agents
-    // currently (see the required agents in proxy.js) we have manually
-    // checked that the only public methods we need to call are called in the
-    // next block. this could change in the future and presumably we would get
-    // failing tests until we have properly called the necessary methods on
-    // each of our proxy agents
-    if (proxy?.setRequestProps) {
-      proxy.setRequestProps(request, options)
-    }
-
-    request.setHeader('connection', this.keepAlive ? 'keep-alive' : 'close')
-
-    if (this.#timeouts.response) {
-      let responseTimeout
-      request.once('finish', () => {
-        setTimeout(() => {
-          request.destroy(new Errors.ResponseTimeoutError(request, this.#proxy))
-        }, this.#timeouts.response)
-      })
-      request.once('response', () => {
-        clearTimeout(responseTimeout)
-      })
-    }
-
-    if (this.#timeouts.transfer) {
-      let transferTimeout
-      request.once('response', (res) => {
-        setTimeout(() => {
-          res.destroy(new Errors.TransferTimeoutError(request, this.#proxy))
-        }, this.#timeouts.transfer)
-        res.once('close', () => {
-          clearTimeout(transferTimeout)
-        })
-      })
-    }
-
-    return super.addRequest(request, options)
-  }
-}
diff --git a/deps/npm/node_modules/node-gyp/node_modules/@npmcli/agent/lib/dns.js b/deps/npm/node_modules/node-gyp/node_modules/@npmcli/agent/lib/dns.js
deleted file mode 100644
index 3c6946c566d736..00000000000000
--- a/deps/npm/node_modules/node-gyp/node_modules/@npmcli/agent/lib/dns.js
+++ /dev/null
@@ -1,53 +0,0 @@
-'use strict'
-
-const { LRUCache } = require('lru-cache')
-const dns = require('dns')
-
-// this is a factory so that each request can have its own opts (i.e. ttl)
-// while still sharing the cache across all requests
-const cache = new LRUCache({ max: 50 })
-
-const getOptions = ({
-  family = 0,
-  hints = dns.ADDRCONFIG,
-  all = false,
-  verbatim = undefined,
-  ttl = 5 * 60 * 1000,
-  lookup = dns.lookup,
-}) => ({
-  // hints and lookup are returned since both are top level properties to (net|tls).connect
-  hints,
-  lookup: (hostname, ...args) => {
-    const callback = args.pop() // callback is always last arg
-    const lookupOptions = args[0] ?? {}
-
-    const options = {
-      family,
-      hints,
-      all,
-      verbatim,
-      ...(typeof lookupOptions === 'number' ? { family: lookupOptions } : lookupOptions),
-    }
-
-    const key = JSON.stringify({ hostname, ...options })
-
-    if (cache.has(key)) {
-      const cached = cache.get(key)
-      return process.nextTick(callback, null, ...cached)
-    }
-
-    lookup(hostname, options, (err, ...result) => {
-      if (err) {
-        return callback(err)
-      }
-
-      cache.set(key, result, { ttl })
-      return callback(null, ...result)
-    })
-  },
-})
-
-module.exports = {
-  cache,
-  getOptions,
-}
diff --git a/deps/npm/node_modules/node-gyp/node_modules/@npmcli/agent/lib/errors.js b/deps/npm/node_modules/node-gyp/node_modules/@npmcli/agent/lib/errors.js
deleted file mode 100644
index 70475aec8eb357..00000000000000
--- a/deps/npm/node_modules/node-gyp/node_modules/@npmcli/agent/lib/errors.js
+++ /dev/null
@@ -1,61 +0,0 @@
-'use strict'
-
-class InvalidProxyProtocolError extends Error {
-  constructor (url) {
-    super(`Invalid protocol \`${url.protocol}\` connecting to proxy \`${url.host}\``)
-    this.code = 'EINVALIDPROXY'
-    this.proxy = url
-  }
-}
-
-class ConnectionTimeoutError extends Error {
-  constructor (host) {
-    super(`Timeout connecting to host \`${host}\``)
-    this.code = 'ECONNECTIONTIMEOUT'
-    this.host = host
-  }
-}
-
-class IdleTimeoutError extends Error {
-  constructor (host) {
-    super(`Idle timeout reached for host \`${host}\``)
-    this.code = 'EIDLETIMEOUT'
-    this.host = host
-  }
-}
-
-class ResponseTimeoutError extends Error {
-  constructor (request, proxy) {
-    let msg = 'Response timeout '
-    if (proxy) {
-      msg += `from proxy \`${proxy.host}\` `
-    }
-    msg += `connecting to host \`${request.host}\``
-    super(msg)
-    this.code = 'ERESPONSETIMEOUT'
-    this.proxy = proxy
-    this.request = request
-  }
-}
-
-class TransferTimeoutError extends Error {
-  constructor (request, proxy) {
-    let msg = 'Transfer timeout '
-    if (proxy) {
-      msg += `from proxy \`${proxy.host}\` `
`from proxy \`${proxy.host}\` ` - } - msg += `for \`${request.host}\`` - super(msg) - this.code = 'ETRANSFERTIMEOUT' - this.proxy = proxy - this.request = request - } -} - -module.exports = { - InvalidProxyProtocolError, - ConnectionTimeoutError, - IdleTimeoutError, - ResponseTimeoutError, - TransferTimeoutError, -} diff --git a/deps/npm/node_modules/node-gyp/node_modules/@npmcli/agent/lib/index.js b/deps/npm/node_modules/node-gyp/node_modules/@npmcli/agent/lib/index.js deleted file mode 100644 index b33d6eaef07a21..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/@npmcli/agent/lib/index.js +++ /dev/null @@ -1,56 +0,0 @@ -'use strict' - -const { LRUCache } = require('lru-cache') -const { normalizeOptions, cacheOptions } = require('./options') -const { getProxy, proxyCache } = require('./proxy.js') -const dns = require('./dns.js') -const Agent = require('./agents.js') - -const agentCache = new LRUCache({ max: 20 }) - -const getAgent = (url, { agent, proxy, noProxy, ...options } = {}) => { - // false has meaning so this can't be a simple truthiness check - if (agent != null) { - return agent - } - - url = new URL(url) - - const proxyForUrl = getProxy(url, { proxy, noProxy }) - const normalizedOptions = { - ...normalizeOptions(options), - proxy: proxyForUrl, - } - - const cacheKey = cacheOptions({ - ...normalizedOptions, - secureEndpoint: url.protocol === 'https:', - }) - - if (agentCache.has(cacheKey)) { - return agentCache.get(cacheKey) - } - - const newAgent = new Agent(normalizedOptions) - agentCache.set(cacheKey, newAgent) - - return newAgent -} - -module.exports = { - getAgent, - Agent, - // these are exported for backwards compatibility - HttpAgent: Agent, - HttpsAgent: Agent, - cache: { - proxy: proxyCache, - agent: agentCache, - dns: dns.cache, - clear: () => { - proxyCache.clear() - agentCache.clear() - dns.cache.clear() - }, - }, -} diff --git a/deps/npm/node_modules/node-gyp/node_modules/@npmcli/agent/lib/options.js b/deps/npm/node_modules/node-gyp/node_modules/@npmcli/agent/lib/options.js deleted file mode 100644 index 0bf53f725f0846..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/@npmcli/agent/lib/options.js +++ /dev/null @@ -1,86 +0,0 @@ -'use strict' - -const dns = require('./dns') - -const normalizeOptions = (opts) => { - const family = parseInt(opts.family ?? '0', 10) - const keepAlive = opts.keepAlive ?? true - - const normalized = { - // nodejs http agent options. these are all the defaults - // but kept here to increase the likelihood of cache hits - // https://nodejs.org/api/http.html#new-agentoptions - keepAliveMsecs: keepAlive ? 1000 : undefined, - maxSockets: opts.maxSockets ?? 15, - maxTotalSockets: Infinity, - maxFreeSockets: keepAlive ? 256 : undefined, - scheduling: 'fifo', - // then spread the rest of the options - ...opts, - // we already set these to their defaults that we want - family, - keepAlive, - // our custom timeout options - timeouts: { - // the standard timeout option is mapped to our idle timeout - // and then deleted below - idle: opts.timeout ?? 
0, - connection: 0, - response: 0, - transfer: 0, - ...opts.timeouts, - }, - // get the dns options that go at the top level of socket connection - ...dns.getOptions({ family, ...opts.dns }), - } - - // remove timeout since we already used it to set our own idle timeout - delete normalized.timeout - - return normalized -} - -const createKey = (obj) => { - let key = '' - const sorted = Object.entries(obj).sort((a, b) => a[0].localeCompare(b[0])) - for (let [k, v] of sorted) { - if (v == null) { - v = 'null' - } else if (v instanceof URL) { - v = v.toString() - } else if (typeof v === 'object') { - v = createKey(v) - } - key += `${k}:${v}:` - } - return key -} - -const cacheOptions = ({ secureEndpoint, ...options }) => createKey({ - secureEndpoint: !!secureEndpoint, - // socket connect options - family: options.family, - hints: options.hints, - localAddress: options.localAddress, - // tls specific connect options - strictSsl: secureEndpoint ? !!options.rejectUnauthorized : false, - ca: secureEndpoint ? options.ca : null, - cert: secureEndpoint ? options.cert : null, - key: secureEndpoint ? options.key : null, - // http agent options - keepAlive: options.keepAlive, - keepAliveMsecs: options.keepAliveMsecs, - maxSockets: options.maxSockets, - maxTotalSockets: options.maxTotalSockets, - maxFreeSockets: options.maxFreeSockets, - scheduling: options.scheduling, - // timeout options - timeouts: options.timeouts, - // proxy - proxy: options.proxy, -}) - -module.exports = { - normalizeOptions, - cacheOptions, -} diff --git a/deps/npm/node_modules/node-gyp/node_modules/@npmcli/agent/lib/proxy.js b/deps/npm/node_modules/node-gyp/node_modules/@npmcli/agent/lib/proxy.js deleted file mode 100644 index 6272e929e57bcf..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/@npmcli/agent/lib/proxy.js +++ /dev/null @@ -1,88 +0,0 @@ -'use strict' - -const { HttpProxyAgent } = require('http-proxy-agent') -const { HttpsProxyAgent } = require('https-proxy-agent') -const { SocksProxyAgent } = require('socks-proxy-agent') -const { LRUCache } = require('lru-cache') -const { InvalidProxyProtocolError } = require('./errors.js') - -const PROXY_CACHE = new LRUCache({ max: 20 }) - -const SOCKS_PROTOCOLS = new Set(SocksProxyAgent.protocols) - -const PROXY_ENV_KEYS = new Set(['https_proxy', 'http_proxy', 'proxy', 'no_proxy']) - -const PROXY_ENV = Object.entries(process.env).reduce((acc, [key, value]) => { - key = key.toLowerCase() - if (PROXY_ENV_KEYS.has(key)) { - acc[key] = value - } - return acc -}, {}) - -const getProxyAgent = (url) => { - url = new URL(url) - - const protocol = url.protocol.slice(0, -1) - if (SOCKS_PROTOCOLS.has(protocol)) { - return SocksProxyAgent - } - if (protocol === 'https' || protocol === 'http') { - return [HttpProxyAgent, HttpsProxyAgent] - } - - throw new InvalidProxyProtocolError(url) -} - -const isNoProxy = (url, noProxy) => { - if (typeof noProxy === 'string') { - noProxy = noProxy.split(',').map((p) => p.trim()).filter(Boolean) - } - - if (!noProxy || !noProxy.length) { - return false - } - - const hostSegments = url.hostname.split('.').reverse() - - return noProxy.some((no) => { - const noSegments = no.split('.').filter(Boolean).reverse() - if (!noSegments.length) { - return false - } - - for (let i = 0; i < noSegments.length; i++) { - if (hostSegments[i] !== noSegments[i]) { - return false - } - } - - return true - }) -} - -const getProxy = (url, { proxy, noProxy }) => { - url = new URL(url) - - if (!proxy) { - proxy = url.protocol === 'https:' - ? 
PROXY_ENV.https_proxy - : PROXY_ENV.https_proxy || PROXY_ENV.http_proxy || PROXY_ENV.proxy - } - - if (!noProxy) { - noProxy = PROXY_ENV.no_proxy - } - - if (!proxy || isNoProxy(url, noProxy)) { - return null - } - - return new URL(proxy) -} - -module.exports = { - getProxyAgent, - getProxy, - proxyCache: PROXY_CACHE, -} diff --git a/deps/npm/node_modules/node-gyp/node_modules/@npmcli/agent/package.json b/deps/npm/node_modules/node-gyp/node_modules/@npmcli/agent/package.json deleted file mode 100644 index ef5b4e3228cc46..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/@npmcli/agent/package.json +++ /dev/null @@ -1,60 +0,0 @@ -{ - "name": "@npmcli/agent", - "version": "2.2.2", - "description": "the http/https agent used by the npm cli", - "main": "lib/index.js", - "scripts": { - "gencerts": "bash scripts/create-cert.sh", - "test": "tap", - "lint": "eslint \"**/*.{js,cjs,ts,mjs,jsx,tsx}\"", - "postlint": "template-oss-check", - "template-oss-apply": "template-oss-apply --force", - "lintfix": "npm run lint -- --fix", - "snap": "tap", - "posttest": "npm run lint" - }, - "author": "GitHub Inc.", - "license": "ISC", - "bugs": { - "url": "https://github.com/npm/agent/issues" - }, - "homepage": "https://github.com/npm/agent#readme", - "files": [ - "bin/", - "lib/" - ], - "engines": { - "node": "^16.14.0 || >=18.0.0" - }, - "templateOSS": { - "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. Edits may be overwritten.", - "version": "4.21.3", - "publish": "true" - }, - "dependencies": { - "agent-base": "^7.1.0", - "http-proxy-agent": "^7.0.0", - "https-proxy-agent": "^7.0.1", - "lru-cache": "^10.0.1", - "socks-proxy-agent": "^8.0.3" - }, - "devDependencies": { - "@npmcli/eslint-config": "^4.0.0", - "@npmcli/template-oss": "4.21.3", - "minipass-fetch": "^3.0.3", - "nock": "^13.2.7", - "semver": "^7.5.4", - "simple-socks": "^3.1.0", - "tap": "^16.3.0" - }, - "repository": { - "type": "git", - "url": "https://github.com/npm/agent.git" - }, - "tap": { - "nyc-arg": [ - "--exclude", - "tap-snapshots/**" - ] - } -} diff --git a/deps/npm/node_modules/node-gyp/node_modules/@npmcli/fs/LICENSE.md b/deps/npm/node_modules/node-gyp/node_modules/@npmcli/fs/LICENSE.md deleted file mode 100644 index 5fc208ff122e08..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/@npmcli/fs/LICENSE.md +++ /dev/null @@ -1,20 +0,0 @@ - - -ISC License - -Copyright npm, Inc. - -Permission to use, copy, modify, and/or distribute this -software for any purpose with or without fee is hereby -granted, provided that the above copyright notice and this -permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND NPM DISCLAIMS ALL -WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO -EVENT SHALL NPM BE LIABLE FOR ANY SPECIAL, DIRECT, -INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, -WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER -TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE -USE OR PERFORMANCE OF THIS SOFTWARE. 
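The `@npmcli/agent` copy removed above is built around one recurring pattern, seen in its `#timeoutConnection` helper: race a pending connect against a cancellable timer, then abort whichever promise lost. Below is a minimal standalone sketch of that pattern, not the removed module itself; `connectWithTimeout` and its error text are invented for the example, which only assumes Node's `net` and `timers/promises` APIs.

```js
'use strict'

const net = require('node:net')
const timers = require('node:timers/promises')

// Race a TCP connect against a cancellable timer (illustrative sketch).
async function connectWithTimeout (options, timeout) {
  const ac = new AbortController()
  const socket = net.connect(options)
  const connected = new Promise((resolve, reject) => {
    socket.once('connect', () => resolve(socket))
    socket.once('error', reject)
  })

  const promises = [connected]
  if (timeout) {
    promises.push(
      timers.setTimeout(timeout, null, { signal: ac.signal })
        .then(() => {
          throw new Error(`Timeout connecting to \`${options.host}:${options.port}\``)
        })
        .catch((err) => {
          // if the connect wins the race, the timer is aborted below;
          // swallow only that AbortError, rethrow real timeouts
          if (err.name !== 'AbortError') {
            throw err
          }
        })
    )
  }

  try {
    return await Promise.race(promises)
  } catch (err) {
    socket.destroy()
    throw err
  } finally {
    // settle the timer promise so it cannot keep the event loop alive
    ac.abort()
  }
}

// usage: connectWithTimeout({ host: 'example.com', port: 80 }, 1000)
//   .then((socket) => socket.end())
```

The AbortController here serves the same purpose as in the deleted code: without it, a lost timer promise would either fire later against a socket that already connected, or keep the process alive until its delay elapsed.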
diff --git a/deps/npm/node_modules/node-gyp/node_modules/@npmcli/fs/lib/common/get-options.js b/deps/npm/node_modules/node-gyp/node_modules/@npmcli/fs/lib/common/get-options.js deleted file mode 100644 index cb5982f79077ac..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/@npmcli/fs/lib/common/get-options.js +++ /dev/null @@ -1,20 +0,0 @@ -// given an input that may or may not be an object, return an object that has -// a copy of every defined property listed in 'copy'. if the input is not an -// object, assign it to the property named by 'wrap' -const getOptions = (input, { copy, wrap }) => { - const result = {} - - if (input && typeof input === 'object') { - for (const prop of copy) { - if (input[prop] !== undefined) { - result[prop] = input[prop] - } - } - } else { - result[wrap] = input - } - - return result -} - -module.exports = getOptions diff --git a/deps/npm/node_modules/node-gyp/node_modules/@npmcli/fs/lib/common/node.js b/deps/npm/node_modules/node-gyp/node_modules/@npmcli/fs/lib/common/node.js deleted file mode 100644 index 4d13bc037359d7..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/@npmcli/fs/lib/common/node.js +++ /dev/null @@ -1,9 +0,0 @@ -const semver = require('semver') - -const satisfies = (range) => { - return semver.satisfies(process.version, range, { includePrerelease: true }) -} - -module.exports = { - satisfies, -} diff --git a/deps/npm/node_modules/node-gyp/node_modules/@npmcli/fs/lib/cp/LICENSE b/deps/npm/node_modules/node-gyp/node_modules/@npmcli/fs/lib/cp/LICENSE deleted file mode 100644 index 93546dfb7655bf..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/@npmcli/fs/lib/cp/LICENSE +++ /dev/null @@ -1,15 +0,0 @@ -(The MIT License) - -Copyright (c) 2011-2017 JP Richardson - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files -(the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, - merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE -WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS -OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/deps/npm/node_modules/node-gyp/node_modules/@npmcli/fs/lib/cp/errors.js b/deps/npm/node_modules/node-gyp/node_modules/@npmcli/fs/lib/cp/errors.js deleted file mode 100644 index 1cd1e05d0c533d..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/@npmcli/fs/lib/cp/errors.js +++ /dev/null @@ -1,129 +0,0 @@ -'use strict' -const { inspect } = require('util') - -// adapted from node's internal/errors -// https://github.com/nodejs/node/blob/c8a04049/lib/internal/errors.js - -// close copy of node's internal SystemError class. 
-class SystemError { - constructor (code, prefix, context) { - // XXX context.code is undefined in all constructors used in cp/polyfill - // that may be a bug copied from node, maybe the constructor should use - // `code` not `errno`? nodejs/node#41104 - let message = `${prefix}: ${context.syscall} returned ` + - `${context.code} (${context.message})` - - if (context.path !== undefined) { - message += ` ${context.path}` - } - if (context.dest !== undefined) { - message += ` => ${context.dest}` - } - - this.code = code - Object.defineProperties(this, { - name: { - value: 'SystemError', - enumerable: false, - writable: true, - configurable: true, - }, - message: { - value: message, - enumerable: false, - writable: true, - configurable: true, - }, - info: { - value: context, - enumerable: true, - configurable: true, - writable: false, - }, - errno: { - get () { - return context.errno - }, - set (value) { - context.errno = value - }, - enumerable: true, - configurable: true, - }, - syscall: { - get () { - return context.syscall - }, - set (value) { - context.syscall = value - }, - enumerable: true, - configurable: true, - }, - }) - - if (context.path !== undefined) { - Object.defineProperty(this, 'path', { - get () { - return context.path - }, - set (value) { - context.path = value - }, - enumerable: true, - configurable: true, - }) - } - - if (context.dest !== undefined) { - Object.defineProperty(this, 'dest', { - get () { - return context.dest - }, - set (value) { - context.dest = value - }, - enumerable: true, - configurable: true, - }) - } - } - - toString () { - return `${this.name} [${this.code}]: ${this.message}` - } - - [Symbol.for('nodejs.util.inspect.custom')] (_recurseTimes, ctx) { - return inspect(this, { - ...ctx, - getters: true, - customInspect: false, - }) - } -} - -function E (code, message) { - module.exports[code] = class NodeError extends SystemError { - constructor (ctx) { - super(code, message, ctx) - } - } -} - -E('ERR_FS_CP_DIR_TO_NON_DIR', 'Cannot overwrite directory with non-directory') -E('ERR_FS_CP_EEXIST', 'Target already exists') -E('ERR_FS_CP_EINVAL', 'Invalid src or dest') -E('ERR_FS_CP_FIFO_PIPE', 'Cannot copy a FIFO pipe') -E('ERR_FS_CP_NON_DIR_TO_DIR', 'Cannot overwrite non-directory with directory') -E('ERR_FS_CP_SOCKET', 'Cannot copy a socket file') -E('ERR_FS_CP_SYMLINK_TO_SUBDIRECTORY', 'Cannot overwrite symlink in subdirectory of self') -E('ERR_FS_CP_UNKNOWN', 'Cannot copy an unknown file type') -E('ERR_FS_EISDIR', 'Path is a directory') - -module.exports.ERR_INVALID_ARG_TYPE = class ERR_INVALID_ARG_TYPE extends Error { - constructor (name, expected, actual) { - super() - this.code = 'ERR_INVALID_ARG_TYPE' - this.message = `The ${name} argument must be ${expected}. 
Received ${typeof actual}` - } -} diff --git a/deps/npm/node_modules/node-gyp/node_modules/@npmcli/fs/lib/cp/index.js b/deps/npm/node_modules/node-gyp/node_modules/@npmcli/fs/lib/cp/index.js deleted file mode 100644 index 972ce7aa12abef..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/@npmcli/fs/lib/cp/index.js +++ /dev/null @@ -1,22 +0,0 @@ -const fs = require('fs/promises') -const getOptions = require('../common/get-options.js') -const node = require('../common/node.js') -const polyfill = require('./polyfill.js') - -// node 16.7.0 added fs.cp -const useNative = node.satisfies('>=16.7.0') - -const cp = async (src, dest, opts) => { - const options = getOptions(opts, { - copy: ['dereference', 'errorOnExist', 'filter', 'force', 'preserveTimestamps', 'recursive'], - }) - - // the polyfill is tested separately from this module, no need to hack - // process.version to try to trigger it just for coverage - // istanbul ignore next - return useNative - ? fs.cp(src, dest, options) - : polyfill(src, dest, options) -} - -module.exports = cp diff --git a/deps/npm/node_modules/node-gyp/node_modules/@npmcli/fs/lib/cp/polyfill.js b/deps/npm/node_modules/node-gyp/node_modules/@npmcli/fs/lib/cp/polyfill.js deleted file mode 100644 index 80eb10de971918..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/@npmcli/fs/lib/cp/polyfill.js +++ /dev/null @@ -1,428 +0,0 @@ -// this file is a modified version of the code in node 17.2.0 -// which is, in turn, a modified version of the fs-extra module on npm -// node core changes: -// - Use of the assert module has been replaced with core's error system. -// - All code related to the glob dependency has been removed. -// - Bring your own custom fs module is not currently supported. -// - Some basic code cleanup. -// changes here: -// - remove all callback related code -// - drop sync support -// - change assertions back to non-internal methods (see options.js) -// - throws ENOTDIR when rmdir gets an ENOENT for a path that exists in Windows -'use strict' - -const { - ERR_FS_CP_DIR_TO_NON_DIR, - ERR_FS_CP_EEXIST, - ERR_FS_CP_EINVAL, - ERR_FS_CP_FIFO_PIPE, - ERR_FS_CP_NON_DIR_TO_DIR, - ERR_FS_CP_SOCKET, - ERR_FS_CP_SYMLINK_TO_SUBDIRECTORY, - ERR_FS_CP_UNKNOWN, - ERR_FS_EISDIR, - ERR_INVALID_ARG_TYPE, -} = require('./errors.js') -const { - constants: { - errno: { - EEXIST, - EISDIR, - EINVAL, - ENOTDIR, - }, - }, -} = require('os') -const { - chmod, - copyFile, - lstat, - mkdir, - readdir, - readlink, - stat, - symlink, - unlink, - utimes, -} = require('fs/promises') -const { - dirname, - isAbsolute, - join, - parse, - resolve, - sep, - toNamespacedPath, -} = require('path') -const { fileURLToPath } = require('url') - -const defaultOptions = { - dereference: false, - errorOnExist: false, - filter: undefined, - force: true, - preserveTimestamps: false, - recursive: false, -} - -async function cp (src, dest, opts) { - if (opts != null && typeof opts !== 'object') { - throw new ERR_INVALID_ARG_TYPE('options', ['Object'], opts) - } - return cpFn( - toNamespacedPath(getValidatedPath(src)), - toNamespacedPath(getValidatedPath(dest)), - { ...defaultOptions, ...opts }) -} - -function getValidatedPath (fileURLOrPath) { - const path = fileURLOrPath != null && fileURLOrPath.href - && fileURLOrPath.origin - ? 
fileURLToPath(fileURLOrPath) - : fileURLOrPath - return path -} - -async function cpFn (src, dest, opts) { - // Warn about using preserveTimestamps on 32-bit node - // istanbul ignore next - if (opts.preserveTimestamps && process.arch === 'ia32') { - const warning = 'Using the preserveTimestamps option in 32-bit ' + - 'node is not recommended' - process.emitWarning(warning, 'TimestampPrecisionWarning') - } - const stats = await checkPaths(src, dest, opts) - const { srcStat, destStat } = stats - await checkParentPaths(src, srcStat, dest) - if (opts.filter) { - return handleFilter(checkParentDir, destStat, src, dest, opts) - } - return checkParentDir(destStat, src, dest, opts) -} - -async function checkPaths (src, dest, opts) { - const { 0: srcStat, 1: destStat } = await getStats(src, dest, opts) - if (destStat) { - if (areIdentical(srcStat, destStat)) { - throw new ERR_FS_CP_EINVAL({ - message: 'src and dest cannot be the same', - path: dest, - syscall: 'cp', - errno: EINVAL, - }) - } - if (srcStat.isDirectory() && !destStat.isDirectory()) { - throw new ERR_FS_CP_DIR_TO_NON_DIR({ - message: `cannot overwrite directory ${src} ` + - `with non-directory ${dest}`, - path: dest, - syscall: 'cp', - errno: EISDIR, - }) - } - if (!srcStat.isDirectory() && destStat.isDirectory()) { - throw new ERR_FS_CP_NON_DIR_TO_DIR({ - message: `cannot overwrite non-directory ${src} ` + - `with directory ${dest}`, - path: dest, - syscall: 'cp', - errno: ENOTDIR, - }) - } - } - - if (srcStat.isDirectory() && isSrcSubdir(src, dest)) { - throw new ERR_FS_CP_EINVAL({ - message: `cannot copy ${src} to a subdirectory of self ${dest}`, - path: dest, - syscall: 'cp', - errno: EINVAL, - }) - } - return { srcStat, destStat } -} - -function areIdentical (srcStat, destStat) { - return destStat.ino && destStat.dev && destStat.ino === srcStat.ino && - destStat.dev === srcStat.dev -} - -function getStats (src, dest, opts) { - const statFunc = opts.dereference ? - (file) => stat(file, { bigint: true }) : - (file) => lstat(file, { bigint: true }) - return Promise.all([ - statFunc(src), - statFunc(dest).catch((err) => { - // istanbul ignore next: unsure how to cover. - if (err.code === 'ENOENT') { - return null - } - // istanbul ignore next: unsure how to cover. - throw err - }), - ]) -} - -async function checkParentDir (destStat, src, dest, opts) { - const destParent = dirname(dest) - const dirExists = await pathExists(destParent) - if (dirExists) { - return getStatsForCopy(destStat, src, dest, opts) - } - await mkdir(destParent, { recursive: true }) - return getStatsForCopy(destStat, src, dest, opts) -} - -function pathExists (dest) { - return stat(dest).then( - () => true, - // istanbul ignore next: not sure when this would occur - (err) => (err.code === 'ENOENT' ? false : Promise.reject(err))) -} - -// Recursively check if dest parent is a subdirectory of src. -// It works for all file types including symlinks since it -// checks the src and dest inodes. It starts from the deepest -// parent and stops once it reaches the src parent or the root path. 
-async function checkParentPaths (src, srcStat, dest) { - const srcParent = resolve(dirname(src)) - const destParent = resolve(dirname(dest)) - if (destParent === srcParent || destParent === parse(destParent).root) { - return - } - let destStat - try { - destStat = await stat(destParent, { bigint: true }) - } catch (err) { - // istanbul ignore else: not sure when this would occur - if (err.code === 'ENOENT') { - return - } - // istanbul ignore next: not sure when this would occur - throw err - } - if (areIdentical(srcStat, destStat)) { - throw new ERR_FS_CP_EINVAL({ - message: `cannot copy ${src} to a subdirectory of self ${dest}`, - path: dest, - syscall: 'cp', - errno: EINVAL, - }) - } - return checkParentPaths(src, srcStat, destParent) -} - -const normalizePathToArray = (path) => - resolve(path).split(sep).filter(Boolean) - -// Return true if dest is a subdir of src, otherwise false. -// It only checks the path strings. -function isSrcSubdir (src, dest) { - const srcArr = normalizePathToArray(src) - const destArr = normalizePathToArray(dest) - return srcArr.every((cur, i) => destArr[i] === cur) -} - -async function handleFilter (onInclude, destStat, src, dest, opts, cb) { - const include = await opts.filter(src, dest) - if (include) { - return onInclude(destStat, src, dest, opts, cb) - } -} - -function startCopy (destStat, src, dest, opts) { - if (opts.filter) { - return handleFilter(getStatsForCopy, destStat, src, dest, opts) - } - return getStatsForCopy(destStat, src, dest, opts) -} - -async function getStatsForCopy (destStat, src, dest, opts) { - const statFn = opts.dereference ? stat : lstat - const srcStat = await statFn(src) - // istanbul ignore else: can't portably test FIFO - if (srcStat.isDirectory() && opts.recursive) { - return onDir(srcStat, destStat, src, dest, opts) - } else if (srcStat.isDirectory()) { - throw new ERR_FS_EISDIR({ - message: `${src} is a directory (not copied)`, - path: src, - syscall: 'cp', - errno: EINVAL, - }) - } else if (srcStat.isFile() || - srcStat.isCharacterDevice() || - srcStat.isBlockDevice()) { - return onFile(srcStat, destStat, src, dest, opts) - } else if (srcStat.isSymbolicLink()) { - return onLink(destStat, src, dest) - } else if (srcStat.isSocket()) { - throw new ERR_FS_CP_SOCKET({ - message: `cannot copy a socket file: ${dest}`, - path: dest, - syscall: 'cp', - errno: EINVAL, - }) - } else if (srcStat.isFIFO()) { - throw new ERR_FS_CP_FIFO_PIPE({ - message: `cannot copy a FIFO pipe: ${dest}`, - path: dest, - syscall: 'cp', - errno: EINVAL, - }) - } - // istanbul ignore next: should be unreachable - throw new ERR_FS_CP_UNKNOWN({ - message: `cannot copy an unknown file type: ${dest}`, - path: dest, - syscall: 'cp', - errno: EINVAL, - }) -} - -function onFile (srcStat, destStat, src, dest, opts) { - if (!destStat) { - return _copyFile(srcStat, src, dest, opts) - } - return mayCopyFile(srcStat, src, dest, opts) -} - -async function mayCopyFile (srcStat, src, dest, opts) { - if (opts.force) { - await unlink(dest) - return _copyFile(srcStat, src, dest, opts) - } else if (opts.errorOnExist) { - throw new ERR_FS_CP_EEXIST({ - message: `${dest} already exists`, - path: dest, - syscall: 'cp', - errno: EEXIST, - }) - } -} - -async function _copyFile (srcStat, src, dest, opts) { - await copyFile(src, dest) - if (opts.preserveTimestamps) { - return handleTimestampsAndMode(srcStat.mode, src, dest) - } - return setDestMode(dest, srcStat.mode) -} - -async function handleTimestampsAndMode (srcMode, src, dest) { - // Make sure the file is writable before 
setting the timestamp - // otherwise open fails with EPERM when invoked with 'r+' - // (through utimes call) - if (fileIsNotWritable(srcMode)) { - await makeFileWritable(dest, srcMode) - return setDestTimestampsAndMode(srcMode, src, dest) - } - return setDestTimestampsAndMode(srcMode, src, dest) -} - -function fileIsNotWritable (srcMode) { - return (srcMode & 0o200) === 0 -} - -function makeFileWritable (dest, srcMode) { - return setDestMode(dest, srcMode | 0o200) -} - -async function setDestTimestampsAndMode (srcMode, src, dest) { - await setDestTimestamps(src, dest) - return setDestMode(dest, srcMode) -} - -function setDestMode (dest, srcMode) { - return chmod(dest, srcMode) -} - -async function setDestTimestamps (src, dest) { - // The initial srcStat.atime cannot be trusted - // because it is modified by the read(2) system call - // (See https://nodejs.org/api/fs.html#fs_stat_time_values) - const updatedSrcStat = await stat(src) - return utimes(dest, updatedSrcStat.atime, updatedSrcStat.mtime) -} - -function onDir (srcStat, destStat, src, dest, opts) { - if (!destStat) { - return mkDirAndCopy(srcStat.mode, src, dest, opts) - } - return copyDir(src, dest, opts) -} - -async function mkDirAndCopy (srcMode, src, dest, opts) { - await mkdir(dest) - await copyDir(src, dest, opts) - return setDestMode(dest, srcMode) -} - -async function copyDir (src, dest, opts) { - const dir = await readdir(src) - for (let i = 0; i < dir.length; i++) { - const item = dir[i] - const srcItem = join(src, item) - const destItem = join(dest, item) - const { destStat } = await checkPaths(srcItem, destItem, opts) - await startCopy(destStat, srcItem, destItem, opts) - } -} - -async function onLink (destStat, src, dest) { - let resolvedSrc = await readlink(src) - if (!isAbsolute(resolvedSrc)) { - resolvedSrc = resolve(dirname(src), resolvedSrc) - } - if (!destStat) { - return symlink(resolvedSrc, dest) - } - let resolvedDest - try { - resolvedDest = await readlink(dest) - } catch (err) { - // Dest exists and is a regular file or directory, - // Windows may throw UNKNOWN error. If dest already exists, - // fs throws error anyway, so no need to guard against it here. - // istanbul ignore next: can only test on windows - if (err.code === 'EINVAL' || err.code === 'UNKNOWN') { - return symlink(resolvedSrc, dest) - } - // istanbul ignore next: should not be possible - throw err - } - if (!isAbsolute(resolvedDest)) { - resolvedDest = resolve(dirname(dest), resolvedDest) - } - if (isSrcSubdir(resolvedSrc, resolvedDest)) { - throw new ERR_FS_CP_EINVAL({ - message: `cannot copy ${resolvedSrc} to a subdirectory of self ` + - `${resolvedDest}`, - path: dest, - syscall: 'cp', - errno: EINVAL, - }) - } - // Do not copy if src is a subdir of dest since unlinking - // dest in this case would result in removing src contents - // and therefore a broken symlink would be created. 
- const srcStat = await stat(src) - if (srcStat.isDirectory() && isSrcSubdir(resolvedDest, resolvedSrc)) { - throw new ERR_FS_CP_SYMLINK_TO_SUBDIRECTORY({ - message: `cannot overwrite ${resolvedDest} with ${resolvedSrc}`, - path: dest, - syscall: 'cp', - errno: EINVAL, - }) - } - return copyLink(resolvedSrc, dest) -} - -async function copyLink (resolvedSrc, dest) { - await unlink(dest) - return symlink(resolvedSrc, dest) -} - -module.exports = cp diff --git a/deps/npm/node_modules/node-gyp/node_modules/@npmcli/fs/lib/index.js b/deps/npm/node_modules/node-gyp/node_modules/@npmcli/fs/lib/index.js deleted file mode 100644 index 81c746304cc428..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/@npmcli/fs/lib/index.js +++ /dev/null @@ -1,13 +0,0 @@ -'use strict' - -const cp = require('./cp/index.js') -const withTempDir = require('./with-temp-dir.js') -const readdirScoped = require('./readdir-scoped.js') -const moveFile = require('./move-file.js') - -module.exports = { - cp, - withTempDir, - readdirScoped, - moveFile, -} diff --git a/deps/npm/node_modules/node-gyp/node_modules/@npmcli/fs/lib/move-file.js b/deps/npm/node_modules/node-gyp/node_modules/@npmcli/fs/lib/move-file.js deleted file mode 100644 index d56e06d384659a..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/@npmcli/fs/lib/move-file.js +++ /dev/null @@ -1,78 +0,0 @@ -const { dirname, join, resolve, relative, isAbsolute } = require('path') -const fs = require('fs/promises') - -const pathExists = async path => { - try { - await fs.access(path) - return true - } catch (er) { - return er.code !== 'ENOENT' - } -} - -const moveFile = async (source, destination, options = {}, root = true, symlinks = []) => { - if (!source || !destination) { - throw new TypeError('`source` and `destination` file required') - } - - options = { - overwrite: true, - ...options, - } - - if (!options.overwrite && await pathExists(destination)) { - throw new Error(`The destination file exists: ${destination}`) - } - - await fs.mkdir(dirname(destination), { recursive: true }) - - try { - await fs.rename(source, destination) - } catch (error) { - if (error.code === 'EXDEV' || error.code === 'EPERM') { - const sourceStat = await fs.lstat(source) - if (sourceStat.isDirectory()) { - const files = await fs.readdir(source) - await Promise.all(files.map((file) => - moveFile(join(source, file), join(destination, file), options, false, symlinks) - )) - } else if (sourceStat.isSymbolicLink()) { - symlinks.push({ source, destination }) - } else { - await fs.copyFile(source, destination) - } - } else { - throw error - } - } - - if (root) { - await Promise.all(symlinks.map(async ({ source: symSource, destination: symDestination }) => { - let target = await fs.readlink(symSource) - // junction symlinks in windows will be absolute paths, so we need to - // make sure they point to the symlink destination - if (isAbsolute(target)) { - target = resolve(symDestination, relative(symSource, target)) - } - // try to determine what the actual file is so we can create the correct - // type of symlink in windows - let targetStat = 'file' - try { - targetStat = await fs.stat(resolve(dirname(symSource), target)) - if (targetStat.isDirectory()) { - targetStat = 'junction' - } - } catch { - // targetStat remains 'file' - } - await fs.symlink( - target, - symDestination, - targetStat - ) - })) - await fs.rm(source, { recursive: true, force: true }) - } -} - -module.exports = moveFile diff --git 
a/deps/npm/node_modules/node-gyp/node_modules/@npmcli/fs/lib/readdir-scoped.js b/deps/npm/node_modules/node-gyp/node_modules/@npmcli/fs/lib/readdir-scoped.js deleted file mode 100644 index cd601dfbe7486b..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/@npmcli/fs/lib/readdir-scoped.js +++ /dev/null @@ -1,20 +0,0 @@ -const { readdir } = require('fs/promises') -const { join } = require('path') - -const readdirScoped = async (dir) => { - const results = [] - - for (const item of await readdir(dir)) { - if (item.startsWith('@')) { - for (const scopedItem of await readdir(join(dir, item))) { - results.push(join(item, scopedItem)) - } - } else { - results.push(item) - } - } - - return results -} - -module.exports = readdirScoped diff --git a/deps/npm/node_modules/node-gyp/node_modules/@npmcli/fs/lib/with-temp-dir.js b/deps/npm/node_modules/node-gyp/node_modules/@npmcli/fs/lib/with-temp-dir.js deleted file mode 100644 index 0738ac4f29e1be..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/@npmcli/fs/lib/with-temp-dir.js +++ /dev/null @@ -1,39 +0,0 @@ -const { join, sep } = require('path') - -const getOptions = require('./common/get-options.js') -const { mkdir, mkdtemp, rm } = require('fs/promises') - -// create a temp directory, ensure its permissions match its parent, then call -// the supplied function passing it the path to the directory. clean up after -// the function finishes, whether it throws or not -const withTempDir = async (root, fn, opts) => { - const options = getOptions(opts, { - copy: ['tmpPrefix'], - }) - // create the directory - await mkdir(root, { recursive: true }) - - const target = await mkdtemp(join(`${root}${sep}`, options.tmpPrefix || '')) - let err - let result - - try { - result = await fn(target) - } catch (_err) { - err = _err - } - - try { - await rm(target, { force: true, recursive: true }) - } catch { - // ignore errors - } - - if (err) { - throw err - } - - return result -} - -module.exports = withTempDir diff --git a/deps/npm/node_modules/node-gyp/node_modules/@npmcli/fs/package.json b/deps/npm/node_modules/node-gyp/node_modules/@npmcli/fs/package.json deleted file mode 100644 index 5261a11b78000e..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/@npmcli/fs/package.json +++ /dev/null @@ -1,52 +0,0 @@ -{ - "name": "@npmcli/fs", - "version": "3.1.1", - "description": "filesystem utilities for the npm cli", - "main": "lib/index.js", - "files": [ - "bin/", - "lib/" - ], - "scripts": { - "snap": "tap", - "test": "tap", - "npmclilint": "npmcli-lint", - "lint": "eslint \"**/*.{js,cjs,ts,mjs,jsx,tsx}\"", - "lintfix": "npm run lint -- --fix", - "posttest": "npm run lint", - "postsnap": "npm run lintfix --", - "postlint": "template-oss-check", - "template-oss-apply": "template-oss-apply --force" - }, - "repository": { - "type": "git", - "url": "git+https://github.com/npm/fs.git" - }, - "keywords": [ - "npm", - "oss" - ], - "author": "GitHub Inc.", - "license": "ISC", - "devDependencies": { - "@npmcli/eslint-config": "^4.0.0", - "@npmcli/template-oss": "4.22.0", - "tap": "^16.0.1" - }, - "dependencies": { - "semver": "^7.3.5" - }, - "engines": { - "node": "^14.17.0 || ^16.13.0 || >=18.0.0" - }, - "templateOSS": { - "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. 
Edits may be overwritten.", - "version": "4.22.0" - }, - "tap": { - "nyc-arg": [ - "--exclude", - "tap-snapshots/**" - ] - } -} diff --git a/deps/npm/node_modules/node-gyp/node_modules/abbrev/LICENSE b/deps/npm/node_modules/node-gyp/node_modules/abbrev/LICENSE deleted file mode 100644 index 9bcfa9d7d8d26e..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/abbrev/LICENSE +++ /dev/null @@ -1,46 +0,0 @@ -This software is dual-licensed under the ISC and MIT licenses. -You may use this software under EITHER of the following licenses. - ----------- - -The ISC License - -Copyright (c) Isaac Z. Schlueter and Contributors - -Permission to use, copy, modify, and/or distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR -IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - ----------- - -Copyright Isaac Z. Schlueter and Contributors -All rights reserved. - -Permission is hereby granted, free of charge, to any person -obtaining a copy of this software and associated documentation -files (the "Software"), to deal in the Software without -restriction, including without limitation the rights to use, -copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the -Software is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -OTHER DEALINGS IN THE SOFTWARE. diff --git a/deps/npm/node_modules/node-gyp/node_modules/abbrev/lib/index.js b/deps/npm/node_modules/node-gyp/node_modules/abbrev/lib/index.js deleted file mode 100644 index 9f48801f049c9e..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/abbrev/lib/index.js +++ /dev/null @@ -1,50 +0,0 @@ -module.exports = abbrev - -function abbrev (...args) { - let list = args.length === 1 || Array.isArray(args[0]) ? args[0] : args - - for (let i = 0, l = list.length; i < l; i++) { - list[i] = typeof list[i] === 'string' ? 
list[i] : String(list[i]) - } - - // sort them lexicographically, so that they're next to their nearest kin - list = list.sort(lexSort) - - // walk through each, seeing how much it has in common with the next and previous - const abbrevs = {} - let prev = '' - for (let ii = 0, ll = list.length; ii < ll; ii++) { - const current = list[ii] - const next = list[ii + 1] || '' - let nextMatches = true - let prevMatches = true - if (current === next) { - continue - } - let j = 0 - const cl = current.length - for (; j < cl; j++) { - const curChar = current.charAt(j) - nextMatches = nextMatches && curChar === next.charAt(j) - prevMatches = prevMatches && curChar === prev.charAt(j) - if (!nextMatches && !prevMatches) { - j++ - break - } - } - prev = current - if (j === cl) { - abbrevs[current] = current - continue - } - for (let a = current.slice(0, j); j <= cl; j++) { - abbrevs[a] = current - a += current.charAt(j) - } - } - return abbrevs -} - -function lexSort (a, b) { - return a === b ? 0 : a > b ? 1 : -1 -} diff --git a/deps/npm/node_modules/node-gyp/node_modules/abbrev/package.json b/deps/npm/node_modules/node-gyp/node_modules/abbrev/package.json deleted file mode 100644 index e26400445631ad..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/abbrev/package.json +++ /dev/null @@ -1,43 +0,0 @@ -{ - "name": "abbrev", - "version": "2.0.0", - "description": "Like ruby's abbrev module, but in js", - "author": "GitHub Inc.", - "main": "lib/index.js", - "scripts": { - "test": "tap", - "lint": "eslint \"**/*.js\"", - "postlint": "template-oss-check", - "template-oss-apply": "template-oss-apply --force", - "lintfix": "npm run lint -- --fix", - "snap": "tap", - "posttest": "npm run lint" - }, - "repository": { - "type": "git", - "url": "https://github.com/npm/abbrev-js.git" - }, - "license": "ISC", - "devDependencies": { - "@npmcli/eslint-config": "^4.0.0", - "@npmcli/template-oss": "4.8.0", - "tap": "^16.3.0" - }, - "tap": { - "nyc-arg": [ - "--exclude", - "tap-snapshots/**" - ] - }, - "files": [ - "bin/", - "lib/" - ], - "engines": { - "node": "^14.17.0 || ^16.13.0 || >=18.0.0" - }, - "templateOSS": { - "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. Edits may be overwritten.", - "version": "4.8.0" - } -} diff --git a/deps/npm/node_modules/node-gyp/node_modules/cacache/LICENSE.md b/deps/npm/node_modules/node-gyp/node_modules/cacache/LICENSE.md deleted file mode 100644 index 8d28acf866d932..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/cacache/LICENSE.md +++ /dev/null @@ -1,16 +0,0 @@ -ISC License - -Copyright (c) npm, Inc. - -Permission to use, copy, modify, and/or distribute this software for -any purpose with or without fee is hereby granted, provided that the -above copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE COPYRIGHT HOLDER DISCLAIMS -ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE -COPYRIGHT HOLDER BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR -CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS -OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE -OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE -USE OR PERFORMANCE OF THIS SOFTWARE. 
diff --git a/deps/npm/node_modules/node-gyp/node_modules/cacache/lib/content/path.js b/deps/npm/node_modules/node-gyp/node_modules/cacache/lib/content/path.js deleted file mode 100644 index ad5a76a4f73f26..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/cacache/lib/content/path.js +++ /dev/null @@ -1,29 +0,0 @@ -'use strict' - -const contentVer = require('../../package.json')['cache-version'].content -const hashToSegments = require('../util/hash-to-segments') -const path = require('path') -const ssri = require('ssri') - -// Current format of content file path: -// -// sha512-BaSE64Hex= -> -// ~/.my-cache/content-v2/sha512/ba/da/55deadbeefc0ffee -// -module.exports = contentPath - -function contentPath (cache, integrity) { - const sri = ssri.parse(integrity, { single: true }) - // contentPath is the *strongest* algo given - return path.join( - contentDir(cache), - sri.algorithm, - ...hashToSegments(sri.hexDigest()) - ) -} - -module.exports.contentDir = contentDir - -function contentDir (cache) { - return path.join(cache, `content-v${contentVer}`) -} diff --git a/deps/npm/node_modules/node-gyp/node_modules/cacache/lib/content/read.js b/deps/npm/node_modules/node-gyp/node_modules/cacache/lib/content/read.js deleted file mode 100644 index 5f6192c3cec566..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/cacache/lib/content/read.js +++ /dev/null @@ -1,165 +0,0 @@ -'use strict' - -const fs = require('fs/promises') -const fsm = require('fs-minipass') -const ssri = require('ssri') -const contentPath = require('./path') -const Pipeline = require('minipass-pipeline') - -module.exports = read - -const MAX_SINGLE_READ_SIZE = 64 * 1024 * 1024 -async function read (cache, integrity, opts = {}) { - const { size } = opts - const { stat, cpath, sri } = await withContentSri(cache, integrity, async (cpath, sri) => { - // get size - const stat = size ? { size } : await fs.stat(cpath) - return { stat, cpath, sri } - }) - - if (stat.size > MAX_SINGLE_READ_SIZE) { - return readPipeline(cpath, stat.size, sri, new Pipeline()).concat() - } - - const data = await fs.readFile(cpath, { encoding: null }) - - if (stat.size !== data.length) { - throw sizeError(stat.size, data.length) - } - - if (!ssri.checkData(data, sri)) { - throw integrityError(sri, cpath) - } - - return data -} - -const readPipeline = (cpath, size, sri, stream) => { - stream.push( - new fsm.ReadStream(cpath, { - size, - readSize: MAX_SINGLE_READ_SIZE, - }), - ssri.integrityStream({ - integrity: sri, - size, - }) - ) - return stream -} - -module.exports.stream = readStream -module.exports.readStream = readStream - -function readStream (cache, integrity, opts = {}) { - const { size } = opts - const stream = new Pipeline() - // Set all this up to run on the stream and then just return the stream - Promise.resolve().then(async () => { - const { stat, cpath, sri } = await withContentSri(cache, integrity, async (cpath, sri) => { - // get size - const stat = size ? 
{ size } : await fs.stat(cpath) - return { stat, cpath, sri } - }) - - return readPipeline(cpath, stat.size, sri, stream) - }).catch(err => stream.emit('error', err)) - - return stream -} - -module.exports.copy = copy - -function copy (cache, integrity, dest) { - return withContentSri(cache, integrity, (cpath) => { - return fs.copyFile(cpath, dest) - }) -} - -module.exports.hasContent = hasContent - -async function hasContent (cache, integrity) { - if (!integrity) { - return false - } - - try { - return await withContentSri(cache, integrity, async (cpath, sri) => { - const stat = await fs.stat(cpath) - return { size: stat.size, sri, stat } - }) - } catch (err) { - if (err.code === 'ENOENT') { - return false - } - - if (err.code === 'EPERM') { - /* istanbul ignore else */ - if (process.platform !== 'win32') { - throw err - } else { - return false - } - } - } -} - -async function withContentSri (cache, integrity, fn) { - const sri = ssri.parse(integrity) - // If `integrity` has multiple entries, pick the first digest - // with available local data. - const algo = sri.pickAlgorithm() - const digests = sri[algo] - - if (digests.length <= 1) { - const cpath = contentPath(cache, digests[0]) - return fn(cpath, digests[0]) - } else { - // Can't use race here because a generic error can happen before - // a ENOENT error, and can happen before a valid result - const results = await Promise.all(digests.map(async (meta) => { - try { - return await withContentSri(cache, meta, fn) - } catch (err) { - if (err.code === 'ENOENT') { - return Object.assign( - new Error('No matching content found for ' + sri.toString()), - { code: 'ENOENT' } - ) - } - return err - } - })) - // Return the first non error if it is found - const result = results.find((r) => !(r instanceof Error)) - if (result) { - return result - } - - // Throw the No matching content found error - const enoentError = results.find((r) => r.code === 'ENOENT') - if (enoentError) { - throw enoentError - } - - // Throw generic error - throw results.find((r) => r instanceof Error) - } -} - -function sizeError (expected, found) { - /* eslint-disable-next-line max-len */ - const err = new Error(`Bad data size: expected inserted data to be ${expected} bytes, but got ${found} instead`) - err.expected = expected - err.found = found - err.code = 'EBADSIZE' - return err -} - -function integrityError (sri, path) { - const err = new Error(`Integrity verification failed for ${sri} (${path})`) - err.code = 'EINTEGRITY' - err.sri = sri - err.path = path - return err -} diff --git a/deps/npm/node_modules/node-gyp/node_modules/cacache/lib/content/rm.js b/deps/npm/node_modules/node-gyp/node_modules/cacache/lib/content/rm.js deleted file mode 100644 index ce58d679e4cb25..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/cacache/lib/content/rm.js +++ /dev/null @@ -1,18 +0,0 @@ -'use strict' - -const fs = require('fs/promises') -const contentPath = require('./path') -const { hasContent } = require('./read') - -module.exports = rm - -async function rm (cache, integrity) { - const content = await hasContent(cache, integrity) - // ~pretty~ sure we can't end up with a content lacking sri, but be safe - if (content && content.sri) { - await fs.rm(contentPath(cache, content.sri), { recursive: true, force: true }) - return true - } else { - return false - } -} diff --git a/deps/npm/node_modules/node-gyp/node_modules/cacache/lib/content/write.js b/deps/npm/node_modules/node-gyp/node_modules/cacache/lib/content/write.js deleted file mode 100644 index 
e7187abca8788a..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/cacache/lib/content/write.js +++ /dev/null @@ -1,206 +0,0 @@ -'use strict' - -const events = require('events') - -const contentPath = require('./path') -const fs = require('fs/promises') -const { moveFile } = require('@npmcli/fs') -const { Minipass } = require('minipass') -const Pipeline = require('minipass-pipeline') -const Flush = require('minipass-flush') -const path = require('path') -const ssri = require('ssri') -const uniqueFilename = require('unique-filename') -const fsm = require('fs-minipass') - -module.exports = write - -// Cache of move operations in process so we don't duplicate -const moveOperations = new Map() - -async function write (cache, data, opts = {}) { - const { algorithms, size, integrity } = opts - - if (typeof size === 'number' && data.length !== size) { - throw sizeError(size, data.length) - } - - const sri = ssri.fromData(data, algorithms ? { algorithms } : {}) - if (integrity && !ssri.checkData(data, integrity, opts)) { - throw checksumError(integrity, sri) - } - - for (const algo in sri) { - const tmp = await makeTmp(cache, opts) - const hash = sri[algo].toString() - try { - await fs.writeFile(tmp.target, data, { flag: 'wx' }) - await moveToDestination(tmp, cache, hash, opts) - } finally { - if (!tmp.moved) { - await fs.rm(tmp.target, { recursive: true, force: true }) - } - } - } - return { integrity: sri, size: data.length } -} - -module.exports.stream = writeStream - -// writes proxied to the 'inputStream' that is passed to the Promise -// 'end' is deferred until content is handled. -class CacacheWriteStream extends Flush { - constructor (cache, opts) { - super() - this.opts = opts - this.cache = cache - this.inputStream = new Minipass() - this.inputStream.on('error', er => this.emit('error', er)) - this.inputStream.on('drain', () => this.emit('drain')) - this.handleContentP = null - } - - write (chunk, encoding, cb) { - if (!this.handleContentP) { - this.handleContentP = handleContent( - this.inputStream, - this.cache, - this.opts - ) - this.handleContentP.catch(error => this.emit('error', error)) - } - return this.inputStream.write(chunk, encoding, cb) - } - - flush (cb) { - this.inputStream.end(() => { - if (!this.handleContentP) { - const e = new Error('Cache input stream was empty') - e.code = 'ENODATA' - // empty streams are probably emitting end right away. - // defer this one tick by rejecting a promise on it. 
- return Promise.reject(e).catch(cb) - } - // eslint-disable-next-line promise/catch-or-return - this.handleContentP.then( - (res) => { - res.integrity && this.emit('integrity', res.integrity) - // eslint-disable-next-line promise/always-return - res.size !== null && this.emit('size', res.size) - cb() - }, - (er) => cb(er) - ) - }) - } -} - -function writeStream (cache, opts = {}) { - return new CacacheWriteStream(cache, opts) -} - -async function handleContent (inputStream, cache, opts) { - const tmp = await makeTmp(cache, opts) - try { - const res = await pipeToTmp(inputStream, cache, tmp.target, opts) - await moveToDestination( - tmp, - cache, - res.integrity, - opts - ) - return res - } finally { - if (!tmp.moved) { - await fs.rm(tmp.target, { recursive: true, force: true }) - } - } -} - -async function pipeToTmp (inputStream, cache, tmpTarget, opts) { - const outStream = new fsm.WriteStream(tmpTarget, { - flags: 'wx', - }) - - if (opts.integrityEmitter) { - // we need to create these all simultaneously since they can fire in any order - const [integrity, size] = await Promise.all([ - events.once(opts.integrityEmitter, 'integrity').then(res => res[0]), - events.once(opts.integrityEmitter, 'size').then(res => res[0]), - new Pipeline(inputStream, outStream).promise(), - ]) - return { integrity, size } - } - - let integrity - let size - const hashStream = ssri.integrityStream({ - integrity: opts.integrity, - algorithms: opts.algorithms, - size: opts.size, - }) - hashStream.on('integrity', i => { - integrity = i - }) - hashStream.on('size', s => { - size = s - }) - - const pipeline = new Pipeline(inputStream, hashStream, outStream) - await pipeline.promise() - return { integrity, size } -} - -async function makeTmp (cache, opts) { - const tmpTarget = uniqueFilename(path.join(cache, 'tmp'), opts.tmpPrefix) - await fs.mkdir(path.dirname(tmpTarget), { recursive: true }) - return { - target: tmpTarget, - moved: false, - } -} - -async function moveToDestination (tmp, cache, sri) { - const destination = contentPath(cache, sri) - const destDir = path.dirname(destination) - if (moveOperations.has(destination)) { - return moveOperations.get(destination) - } - moveOperations.set( - destination, - fs.mkdir(destDir, { recursive: true }) - .then(async () => { - await moveFile(tmp.target, destination, { overwrite: false }) - tmp.moved = true - return tmp.moved - }) - .catch(err => { - if (!err.message.startsWith('The destination file exists')) { - throw Object.assign(err, { code: 'EEXIST' }) - } - }).finally(() => { - moveOperations.delete(destination) - }) - - ) - return moveOperations.get(destination) -} - -function sizeError (expected, found) { - /* eslint-disable-next-line max-len */ - const err = new Error(`Bad data size: expected inserted data to be ${expected} bytes, but got ${found} instead`) - err.expected = expected - err.found = found - err.code = 'EBADSIZE' - return err -} - -function checksumError (expected, found) { - const err = new Error(`Integrity check failed: - Wanted: ${expected} - Found: ${found}`) - err.code = 'EINTEGRITY' - err.expected = expected - err.found = found - return err -} diff --git a/deps/npm/node_modules/node-gyp/node_modules/cacache/lib/entry-index.js b/deps/npm/node_modules/node-gyp/node_modules/cacache/lib/entry-index.js deleted file mode 100644 index 89c28f2f257d48..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/cacache/lib/entry-index.js +++ /dev/null @@ -1,336 +0,0 @@ -'use strict' - -const crypto = require('crypto') -const { - appendFile, - 
mkdir, - readFile, - readdir, - rm, - writeFile, -} = require('fs/promises') -const { Minipass } = require('minipass') -const path = require('path') -const ssri = require('ssri') -const uniqueFilename = require('unique-filename') - -const contentPath = require('./content/path') -const hashToSegments = require('./util/hash-to-segments') -const indexV = require('../package.json')['cache-version'].index -const { moveFile } = require('@npmcli/fs') - -const pMap = require('p-map') -const lsStreamConcurrency = 5 - -module.exports.NotFoundError = class NotFoundError extends Error { - constructor (cache, key) { - super(`No cache entry for ${key} found in ${cache}`) - this.code = 'ENOENT' - this.cache = cache - this.key = key - } -} - -module.exports.compact = compact - -async function compact (cache, key, matchFn, opts = {}) { - const bucket = bucketPath(cache, key) - const entries = await bucketEntries(bucket) - const newEntries = [] - // we loop backwards because the bottom-most result is the newest - // since we add new entries with appendFile - for (let i = entries.length - 1; i >= 0; --i) { - const entry = entries[i] - // a null integrity could mean either a delete was appended - // or the user has simply stored an index that does not map - // to any content. we determine if the user wants to keep the - // null integrity based on the validateEntry function passed in options. - // if the integrity is null and no validateEntry is provided, we break - // as we consider the null integrity to be a deletion of everything - // that came before it. - if (entry.integrity === null && !opts.validateEntry) { - break - } - - // if this entry is valid, and it is either the first entry or - // the newEntries array doesn't already include an entry that - // matches this one based on the provided matchFn, then we add - // it to the beginning of our list - if ((!opts.validateEntry || opts.validateEntry(entry) === true) && - (newEntries.length === 0 || - !newEntries.find((oldEntry) => matchFn(oldEntry, entry)))) { - newEntries.unshift(entry) - } - } - - const newIndex = '\n' + newEntries.map((entry) => { - const stringified = JSON.stringify(entry) - const hash = hashEntry(stringified) - return `${hash}\t${stringified}` - }).join('\n') - - const setup = async () => { - const target = uniqueFilename(path.join(cache, 'tmp'), opts.tmpPrefix) - await mkdir(path.dirname(target), { recursive: true }) - return { - target, - moved: false, - } - } - - const teardown = async (tmp) => { - if (!tmp.moved) { - return rm(tmp.target, { recursive: true, force: true }) - } - } - - const write = async (tmp) => { - await writeFile(tmp.target, newIndex, { flag: 'wx' }) - await mkdir(path.dirname(bucket), { recursive: true }) - // we use @npmcli/move-file directly here because we - // want to overwrite the existing file - await moveFile(tmp.target, bucket) - tmp.moved = true - } - - // write the file atomically - const tmp = await setup() - try { - await write(tmp) - } finally { - await teardown(tmp) - } - - // we reverse the list we generated such that the newest - // entries come first in order to make looping through them easier - // the true passed to formatEntry tells it to keep null - // integrity values, if they made it this far it's because - // validateEntry returned true, and as such we should return it - return newEntries.reverse().map((entry) => formatEntry(cache, entry, true)) -} - -module.exports.insert = insert - -async function insert (cache, key, integrity, opts = {}) { - const { metadata, size, time } = opts - 
const bucket = bucketPath(cache, key) - const entry = { - key, - integrity: integrity && ssri.stringify(integrity), - time: time || Date.now(), - size, - metadata, - } - try { - await mkdir(path.dirname(bucket), { recursive: true }) - const stringified = JSON.stringify(entry) - // NOTE - Cleverness ahoy! - // - // This works because it's tremendously unlikely for an entry to corrupt - // another while still preserving the string length of the JSON in - // question. So, we just slap the length in there and verify it on read. - // - // Thanks to @isaacs for the whiteboarding session that ended up with - // this. - await appendFile(bucket, `\n${hashEntry(stringified)}\t${stringified}`) - } catch (err) { - if (err.code === 'ENOENT') { - return undefined - } - - throw err - } - return formatEntry(cache, entry) -} - -module.exports.find = find - -async function find (cache, key) { - const bucket = bucketPath(cache, key) - try { - const entries = await bucketEntries(bucket) - return entries.reduce((latest, next) => { - if (next && next.key === key) { - return formatEntry(cache, next) - } else { - return latest - } - }, null) - } catch (err) { - if (err.code === 'ENOENT') { - return null - } else { - throw err - } - } -} - -module.exports.delete = del - -function del (cache, key, opts = {}) { - if (!opts.removeFully) { - return insert(cache, key, null, opts) - } - - const bucket = bucketPath(cache, key) - return rm(bucket, { recursive: true, force: true }) -} - -module.exports.lsStream = lsStream - -function lsStream (cache) { - const indexDir = bucketDir(cache) - const stream = new Minipass({ objectMode: true }) - - // Set all this up to run on the stream and then just return the stream - Promise.resolve().then(async () => { - const buckets = await readdirOrEmpty(indexDir) - await pMap(buckets, async (bucket) => { - const bucketPath = path.join(indexDir, bucket) - const subbuckets = await readdirOrEmpty(bucketPath) - await pMap(subbuckets, async (subbucket) => { - const subbucketPath = path.join(bucketPath, subbucket) - - // "/cachename//./*" - const subbucketEntries = await readdirOrEmpty(subbucketPath) - await pMap(subbucketEntries, async (entry) => { - const entryPath = path.join(subbucketPath, entry) - try { - const entries = await bucketEntries(entryPath) - // using a Map here prevents duplicate keys from showing up - // twice, I guess? 
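The "Cleverness ahoy!" note above is about torn writes: because each appended index line carries a sha1 of its own JSON, a reader can discard any line that a concurrent or interrupted append corrupted instead of losing the whole bucket. The write and read halves of that trick in isolation:

const crypto = require('node:crypto')

const hashEntry = (str) => crypto.createHash('sha1').update(str).digest('hex')

// On write: prefix the serialized entry with its own sha1.
function formatLine (entry) {
  const stringified = JSON.stringify(entry)
  return `\n${hashEntry(stringified)}\t${stringified}`
}

// On read: any line whose hash does not match its payload is dropped,
// which is how a torn or interleaved append is detected.
function parseLine (line) {
  const [hash, json] = line.split('\t')
  if (!json || hashEntry(json) !== hash) return null
  try {
    return JSON.parse(json)
  } catch {
    return null
  }
}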
- const reduced = entries.reduce((acc, entry) => { - acc.set(entry.key, entry) - return acc - }, new Map()) - // reduced is a map of key => entry - for (const entry of reduced.values()) { - const formatted = formatEntry(cache, entry) - if (formatted) { - stream.write(formatted) - } - } - } catch (err) { - if (err.code === 'ENOENT') { - return undefined - } - throw err - } - }, - { concurrency: lsStreamConcurrency }) - }, - { concurrency: lsStreamConcurrency }) - }, - { concurrency: lsStreamConcurrency }) - stream.end() - return stream - }).catch(err => stream.emit('error', err)) - - return stream -} - -module.exports.ls = ls - -async function ls (cache) { - const entries = await lsStream(cache).collect() - return entries.reduce((acc, xs) => { - acc[xs.key] = xs - return acc - }, {}) -} - -module.exports.bucketEntries = bucketEntries - -async function bucketEntries (bucket, filter) { - const data = await readFile(bucket, 'utf8') - return _bucketEntries(data, filter) -} - -function _bucketEntries (data) { - const entries = [] - data.split('\n').forEach((entry) => { - if (!entry) { - return - } - - const pieces = entry.split('\t') - if (!pieces[1] || hashEntry(pieces[1]) !== pieces[0]) { - // Hash is no good! Corruption or malice? Doesn't matter! - // EJECT EJECT - return - } - let obj - try { - obj = JSON.parse(pieces[1]) - } catch (_) { - // eslint-ignore-next-line no-empty-block - } - // coverage disabled here, no need to test with an entry that parses to something falsey - // istanbul ignore else - if (obj) { - entries.push(obj) - } - }) - return entries -} - -module.exports.bucketDir = bucketDir - -function bucketDir (cache) { - return path.join(cache, `index-v${indexV}`) -} - -module.exports.bucketPath = bucketPath - -function bucketPath (cache, key) { - const hashed = hashKey(key) - return path.join.apply( - path, - [bucketDir(cache)].concat(hashToSegments(hashed)) - ) -} - -module.exports.hashKey = hashKey - -function hashKey (key) { - return hash(key, 'sha256') -} - -module.exports.hashEntry = hashEntry - -function hashEntry (str) { - return hash(str, 'sha1') -} - -function hash (str, digest) { - return crypto - .createHash(digest) - .update(str) - .digest('hex') -} - -function formatEntry (cache, entry, keepAll) { - // Treat null digests as deletions. They'll shadow any previous entries. - if (!entry.integrity && !keepAll) { - return null - } - - return { - key: entry.key, - integrity: entry.integrity, - path: entry.integrity ? 
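lsStream() above bounds how many directory reads are in flight with p-map at each of the three levels of the bucket tree. The traversal pattern, with the ENOENT/ENOTDIR tolerance folded in (visit is a hypothetical callback standing in for the bucketEntries work):

const pMap = require('p-map') // same dependency the deleted lsStream uses
const { readdir } = require('node:fs/promises')
const path = require('node:path')

// Walk a two-level bucket tree with at most `concurrency` operations
// in flight per level, calling visit() for every leaf file.
async function walkBuckets (root, visit, concurrency = 5) {
  const safeReaddir = (dir) => readdir(dir).catch(err => {
    if (err.code === 'ENOENT' || err.code === 'ENOTDIR') return []
    throw err
  })
  const level1 = await safeReaddir(root)
  await pMap(level1, async (a) => {
    const level2 = await safeReaddir(path.join(root, a))
    await pMap(level2, async (b) => {
      const leaves = await safeReaddir(path.join(root, a, b))
      await pMap(leaves, (f) => visit(path.join(root, a, b, f)), { concurrency })
    }, { concurrency })
  }, { concurrency })
}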
contentPath(cache, entry.integrity) : undefined, - size: entry.size, - time: entry.time, - metadata: entry.metadata, - } -} - -function readdirOrEmpty (dir) { - return readdir(dir).catch((err) => { - if (err.code === 'ENOENT' || err.code === 'ENOTDIR') { - return [] - } - - throw err - }) -} diff --git a/deps/npm/node_modules/node-gyp/node_modules/cacache/lib/get.js b/deps/npm/node_modules/node-gyp/node_modules/cacache/lib/get.js deleted file mode 100644 index 80ec206c7ecaaa..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/cacache/lib/get.js +++ /dev/null @@ -1,170 +0,0 @@ -'use strict' - -const Collect = require('minipass-collect') -const { Minipass } = require('minipass') -const Pipeline = require('minipass-pipeline') - -const index = require('./entry-index') -const memo = require('./memoization') -const read = require('./content/read') - -async function getData (cache, key, opts = {}) { - const { integrity, memoize, size } = opts - const memoized = memo.get(cache, key, opts) - if (memoized && memoize !== false) { - return { - metadata: memoized.entry.metadata, - data: memoized.data, - integrity: memoized.entry.integrity, - size: memoized.entry.size, - } - } - - const entry = await index.find(cache, key, opts) - if (!entry) { - throw new index.NotFoundError(cache, key) - } - const data = await read(cache, entry.integrity, { integrity, size }) - if (memoize) { - memo.put(cache, entry, data, opts) - } - - return { - data, - metadata: entry.metadata, - size: entry.size, - integrity: entry.integrity, - } -} -module.exports = getData - -async function getDataByDigest (cache, key, opts = {}) { - const { integrity, memoize, size } = opts - const memoized = memo.get.byDigest(cache, key, opts) - if (memoized && memoize !== false) { - return memoized - } - - const res = await read(cache, key, { integrity, size }) - if (memoize) { - memo.put.byDigest(cache, key, res, opts) - } - return res -} -module.exports.byDigest = getDataByDigest - -const getMemoizedStream = (memoized) => { - const stream = new Minipass() - stream.on('newListener', function (ev, cb) { - ev === 'metadata' && cb(memoized.entry.metadata) - ev === 'integrity' && cb(memoized.entry.integrity) - ev === 'size' && cb(memoized.entry.size) - }) - stream.end(memoized.data) - return stream -} - -function getStream (cache, key, opts = {}) { - const { memoize, size } = opts - const memoized = memo.get(cache, key, opts) - if (memoized && memoize !== false) { - return getMemoizedStream(memoized) - } - - const stream = new Pipeline() - // Set all this up to run on the stream and then just return the stream - Promise.resolve().then(async () => { - const entry = await index.find(cache, key) - if (!entry) { - throw new index.NotFoundError(cache, key) - } - - stream.emit('metadata', entry.metadata) - stream.emit('integrity', entry.integrity) - stream.emit('size', entry.size) - stream.on('newListener', function (ev, cb) { - ev === 'metadata' && cb(entry.metadata) - ev === 'integrity' && cb(entry.integrity) - ev === 'size' && cb(entry.size) - }) - - const src = read.readStream( - cache, - entry.integrity, - { ...opts, size: typeof size !== 'number' ? 
entry.size : size } - ) - - if (memoize) { - const memoStream = new Collect.PassThrough() - memoStream.on('collect', data => memo.put(cache, entry, data, opts)) - stream.unshift(memoStream) - } - stream.unshift(src) - return stream - }).catch((err) => stream.emit('error', err)) - - return stream -} - -module.exports.stream = getStream - -function getStreamDigest (cache, integrity, opts = {}) { - const { memoize } = opts - const memoized = memo.get.byDigest(cache, integrity, opts) - if (memoized && memoize !== false) { - const stream = new Minipass() - stream.end(memoized) - return stream - } else { - const stream = read.readStream(cache, integrity, opts) - if (!memoize) { - return stream - } - - const memoStream = new Collect.PassThrough() - memoStream.on('collect', data => memo.put.byDigest( - cache, - integrity, - data, - opts - )) - return new Pipeline(stream, memoStream) - } -} - -module.exports.stream.byDigest = getStreamDigest - -function info (cache, key, opts = {}) { - const { memoize } = opts - const memoized = memo.get(cache, key, opts) - if (memoized && memoize !== false) { - return Promise.resolve(memoized.entry) - } else { - return index.find(cache, key) - } -} -module.exports.info = info - -async function copy (cache, key, dest, opts = {}) { - const entry = await index.find(cache, key, opts) - if (!entry) { - throw new index.NotFoundError(cache, key) - } - await read.copy(cache, entry.integrity, dest, opts) - return { - metadata: entry.metadata, - size: entry.size, - integrity: entry.integrity, - } -} - -module.exports.copy = copy - -async function copyByDigest (cache, key, dest, opts = {}) { - await read.copy(cache, key, dest, opts) - return key -} - -module.exports.copy.byDigest = copyByDigest - -module.exports.hasContent = read.hasContent diff --git a/deps/npm/node_modules/node-gyp/node_modules/cacache/lib/index.js b/deps/npm/node_modules/node-gyp/node_modules/cacache/lib/index.js deleted file mode 100644 index c9b0da5f3a271b..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/cacache/lib/index.js +++ /dev/null @@ -1,42 +0,0 @@ -'use strict' - -const get = require('./get.js') -const put = require('./put.js') -const rm = require('./rm.js') -const verify = require('./verify.js') -const { clearMemoized } = require('./memoization.js') -const tmp = require('./util/tmp.js') -const index = require('./entry-index.js') - -module.exports.index = {} -module.exports.index.compact = index.compact -module.exports.index.insert = index.insert - -module.exports.ls = index.ls -module.exports.ls.stream = index.lsStream - -module.exports.get = get -module.exports.get.byDigest = get.byDigest -module.exports.get.stream = get.stream -module.exports.get.stream.byDigest = get.stream.byDigest -module.exports.get.copy = get.copy -module.exports.get.copy.byDigest = get.copy.byDigest -module.exports.get.info = get.info -module.exports.get.hasContent = get.hasContent - -module.exports.put = put -module.exports.put.stream = put.stream - -module.exports.rm = rm.entry -module.exports.rm.all = rm.all -module.exports.rm.entry = module.exports.rm -module.exports.rm.content = rm.content - -module.exports.clearMemoized = clearMemoized - -module.exports.tmp = {} -module.exports.tmp.mkdir = tmp.mkdir -module.exports.tmp.withTmp = tmp.withTmp - -module.exports.verify = verify -module.exports.verify.lastRun = verify.lastRun diff --git a/deps/npm/node_modules/node-gyp/node_modules/cacache/lib/memoization.js b/deps/npm/node_modules/node-gyp/node_modules/cacache/lib/memoization.js deleted file mode 
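Both getMemoizedStream() and getStream() above re-deliver the one-shot 'metadata'/'integrity'/'size' events to consumers that subscribe after the events already fired, by hooking 'newListener'. A sketch of that trick on its own, using a core PassThrough instead of Minipass and deferring with nextTick so the listener is fully registered before it is called (the original invokes it synchronously):

const { PassThrough } = require('node:stream')

// Replay one-shot events to listeners that attach late.
function replayingStream (saved) {
  const stream = new PassThrough()
  stream.on('newListener', (ev, cb) => {
    if (ev in saved) process.nextTick(cb, saved[ev])
  })
  return stream
}

// usage: the listener still receives the value it "missed"
const s = replayingStream({ metadata: { foo: 'bar' }, size: 3 })
s.on('size', (n) => console.log('size', n)) // logs: size 3
s.end('abc')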
100644 index 2ecc60912e4563..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/cacache/lib/memoization.js +++ /dev/null @@ -1,72 +0,0 @@ -'use strict' - -const { LRUCache } = require('lru-cache') - -const MEMOIZED = new LRUCache({ - max: 500, - maxSize: 50 * 1024 * 1024, // 50MB - ttl: 3 * 60 * 1000, // 3 minutes - sizeCalculation: (entry, key) => key.startsWith('key:') ? entry.data.length : entry.length, -}) - -module.exports.clearMemoized = clearMemoized - -function clearMemoized () { - const old = {} - MEMOIZED.forEach((v, k) => { - old[k] = v - }) - MEMOIZED.clear() - return old -} - -module.exports.put = put - -function put (cache, entry, data, opts) { - pickMem(opts).set(`key:${cache}:${entry.key}`, { entry, data }) - putDigest(cache, entry.integrity, data, opts) -} - -module.exports.put.byDigest = putDigest - -function putDigest (cache, integrity, data, opts) { - pickMem(opts).set(`digest:${cache}:${integrity}`, data) -} - -module.exports.get = get - -function get (cache, key, opts) { - return pickMem(opts).get(`key:${cache}:${key}`) -} - -module.exports.get.byDigest = getDigest - -function getDigest (cache, integrity, opts) { - return pickMem(opts).get(`digest:${cache}:${integrity}`) -} - -class ObjProxy { - constructor (obj) { - this.obj = obj - } - - get (key) { - return this.obj[key] - } - - set (key, val) { - this.obj[key] = val - } -} - -function pickMem (opts) { - if (!opts || !opts.memoize) { - return MEMOIZED - } else if (opts.memoize.get && opts.memoize.set) { - return opts.memoize - } else if (typeof opts.memoize === 'object') { - return new ObjProxy(opts.memoize) - } else { - return MEMOIZED - } -} diff --git a/deps/npm/node_modules/node-gyp/node_modules/cacache/lib/put.js b/deps/npm/node_modules/node-gyp/node_modules/cacache/lib/put.js deleted file mode 100644 index 9fc932d5f6dec5..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/cacache/lib/put.js +++ /dev/null @@ -1,80 +0,0 @@ -'use strict' - -const index = require('./entry-index') -const memo = require('./memoization') -const write = require('./content/write') -const Flush = require('minipass-flush') -const { PassThrough } = require('minipass-collect') -const Pipeline = require('minipass-pipeline') - -const putOpts = (opts) => ({ - algorithms: ['sha512'], - ...opts, -}) - -module.exports = putData - -async function putData (cache, key, data, opts = {}) { - const { memoize } = opts - opts = putOpts(opts) - const res = await write(cache, data, opts) - const entry = await index.insert(cache, key, res.integrity, { ...opts, size: res.size }) - if (memoize) { - memo.put(cache, entry, data, opts) - } - - return res.integrity -} - -module.exports.stream = putStream - -function putStream (cache, key, opts = {}) { - const { memoize } = opts - opts = putOpts(opts) - let integrity - let size - let error - - let memoData - const pipeline = new Pipeline() - // first item in the pipeline is the memoizer, because we need - // that to end first and get the collected data. - if (memoize) { - const memoizer = new PassThrough().on('collect', data => { - memoData = data - }) - pipeline.push(memoizer) - } - - // contentStream is a write-only, not a passthrough - // no data comes out of it. 
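The memoization layer above is a plain LRUCache bounded three ways at once: entry count, total byte size, and age. Standalone, the same configuration behaves as below (lru-cache v10 API, as required by the deleted file; values here are raw buffers, so sizeCalculation is simpler than the two-shape version that distinguishes `key:` entries):

const { LRUCache } = require('lru-cache')

// Evict by count (500 entries), by weight (50MB total), and by age
// (3 minutes), whichever limit is hit first.
const memo = new LRUCache({
  max: 500,
  maxSize: 50 * 1024 * 1024,
  ttl: 3 * 60 * 1000,
  sizeCalculation: (value) => value.length, // bytes per entry
})

memo.set('digest:my-cache:sha512-abc', Buffer.from('cached bytes'))
memo.get('digest:my-cache:sha512-abc') // -> Buffer, until evicted/expired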
- const contentStream = write.stream(cache, opts) - .on('integrity', (int) => { - integrity = int - }) - .on('size', (s) => { - size = s - }) - .on('error', (err) => { - error = err - }) - - pipeline.push(contentStream) - - // last but not least, we write the index and emit hash and size, - // and memoize if we're doing that - pipeline.push(new Flush({ - async flush () { - if (!error) { - const entry = await index.insert(cache, key, integrity, { ...opts, size }) - if (memoize && memoData) { - memo.put(cache, entry, memoData, opts) - } - pipeline.emit('integrity', integrity) - pipeline.emit('size', size) - } - }, - })) - - return pipeline -} diff --git a/deps/npm/node_modules/node-gyp/node_modules/cacache/lib/rm.js b/deps/npm/node_modules/node-gyp/node_modules/cacache/lib/rm.js deleted file mode 100644 index a94760c7cf2430..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/cacache/lib/rm.js +++ /dev/null @@ -1,31 +0,0 @@ -'use strict' - -const { rm } = require('fs/promises') -const glob = require('./util/glob.js') -const index = require('./entry-index') -const memo = require('./memoization') -const path = require('path') -const rmContent = require('./content/rm') - -module.exports = entry -module.exports.entry = entry - -function entry (cache, key, opts) { - memo.clearMemoized() - return index.delete(cache, key, opts) -} - -module.exports.content = content - -function content (cache, integrity) { - memo.clearMemoized() - return rmContent(cache, integrity) -} - -module.exports.all = all - -async function all (cache) { - memo.clearMemoized() - const paths = await glob(path.join(cache, '*(content-*|index-*)'), { silent: true, nosort: true }) - return Promise.all(paths.map((p) => rm(p, { recursive: true, force: true }))) -} diff --git a/deps/npm/node_modules/node-gyp/node_modules/cacache/lib/util/glob.js b/deps/npm/node_modules/node-gyp/node_modules/cacache/lib/util/glob.js deleted file mode 100644 index 8500c1c16a429f..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/cacache/lib/util/glob.js +++ /dev/null @@ -1,7 +0,0 @@ -'use strict' - -const { glob } = require('glob') -const path = require('path') - -const globify = (pattern) => pattern.split(path.win32.sep).join(path.posix.sep) -module.exports = (path, options) => glob(globify(path), options) diff --git a/deps/npm/node_modules/node-gyp/node_modules/cacache/lib/util/hash-to-segments.js b/deps/npm/node_modules/node-gyp/node_modules/cacache/lib/util/hash-to-segments.js deleted file mode 100644 index 445599b5038088..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/cacache/lib/util/hash-to-segments.js +++ /dev/null @@ -1,7 +0,0 @@ -'use strict' - -module.exports = hashToSegments - -function hashToSegments (hash) { - return [hash.slice(0, 2), hash.slice(2, 4), hash.slice(4)] -} diff --git a/deps/npm/node_modules/node-gyp/node_modules/cacache/lib/util/tmp.js b/deps/npm/node_modules/node-gyp/node_modules/cacache/lib/util/tmp.js deleted file mode 100644 index 0bf5302136ebeb..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/cacache/lib/util/tmp.js +++ /dev/null @@ -1,26 +0,0 @@ -'use strict' - -const { withTempDir } = require('@npmcli/fs') -const fs = require('fs/promises') -const path = require('path') - -module.exports.mkdir = mktmpdir - -async function mktmpdir (cache, opts = {}) { - const { tmpPrefix } = opts - const tmpDir = path.join(cache, 'tmp') - await fs.mkdir(tmpDir, { recursive: true, owner: 'inherit' }) - // do not use path.join(), it drops the trailing / if tmpPrefix is 
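The Flush stage at the end of putStream() above is what guarantees ordering: the pipeline does not signal completion until the index entry has been written. Reduced to a skeleton (minipass-pipeline and minipass-flush are the same dependencies the deleted file uses; finalize is a hypothetical placeholder for the index.insert call):

const Pipeline = require('minipass-pipeline')
const Flush = require('minipass-flush')

// Append a flush stage so 'end' is withheld until the async
// finalizer resolves - the ordering putStream relies on.
function withFinalizer (source, finalize) {
  return new Pipeline(source, new Flush({
    async flush () {
      await finalize() // e.g. write the index entry for the content
    },
  }))
}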
unset - const target = `${tmpDir}${path.sep}${tmpPrefix || ''}` - return fs.mkdtemp(target, { owner: 'inherit' }) -} - -module.exports.withTmp = withTmp - -function withTmp (cache, opts, cb) { - if (!cb) { - cb = opts - opts = {} - } - return withTempDir(path.join(cache, 'tmp'), cb, opts) -} diff --git a/deps/npm/node_modules/node-gyp/node_modules/cacache/lib/verify.js b/deps/npm/node_modules/node-gyp/node_modules/cacache/lib/verify.js deleted file mode 100644 index d7423da1295b68..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/cacache/lib/verify.js +++ /dev/null @@ -1,257 +0,0 @@ -'use strict' - -const { - mkdir, - readFile, - rm, - stat, - truncate, - writeFile, -} = require('fs/promises') -const pMap = require('p-map') -const contentPath = require('./content/path') -const fsm = require('fs-minipass') -const glob = require('./util/glob.js') -const index = require('./entry-index') -const path = require('path') -const ssri = require('ssri') - -const hasOwnProperty = (obj, key) => - Object.prototype.hasOwnProperty.call(obj, key) - -const verifyOpts = (opts) => ({ - concurrency: 20, - log: { silly () {} }, - ...opts, -}) - -module.exports = verify - -async function verify (cache, opts) { - opts = verifyOpts(opts) - opts.log.silly('verify', 'verifying cache at', cache) - - const steps = [ - markStartTime, - fixPerms, - garbageCollect, - rebuildIndex, - cleanTmp, - writeVerifile, - markEndTime, - ] - - const stats = {} - for (const step of steps) { - const label = step.name - const start = new Date() - const s = await step(cache, opts) - if (s) { - Object.keys(s).forEach((k) => { - stats[k] = s[k] - }) - } - const end = new Date() - if (!stats.runTime) { - stats.runTime = {} - } - stats.runTime[label] = end - start - } - stats.runTime.total = stats.endTime - stats.startTime - opts.log.silly( - 'verify', - 'verification finished for', - cache, - 'in', - `${stats.runTime.total}ms` - ) - return stats -} - -async function markStartTime () { - return { startTime: new Date() } -} - -async function markEndTime () { - return { endTime: new Date() } -} - -async function fixPerms (cache, opts) { - opts.log.silly('verify', 'fixing cache permissions') - await mkdir(cache, { recursive: true }) - return null -} - -// Implements a naive mark-and-sweep tracing garbage collector. -// -// The algorithm is basically as follows: -// 1. Read (and filter) all index entries ("pointers") -// 2. Mark each integrity value as "live" -// 3. Read entire filesystem tree in `content-vX/` dir -// 4. If content is live, verify its checksum and delete it if it fails -// 5. If content is not marked as live, rm it. 
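verify() above is a fixed sequence of steps whose partial stats get merged and individually timed. The driver loop, minus logging, amounts to:

// Run named verification steps in order, merging each step's partial
// stats and recording per-step wall time under its function name.
async function runSteps (steps, ctx) {
  const stats = { runTime: {} }
  for (const step of steps) {
    const start = Date.now()
    Object.assign(stats, await step(ctx) || {})
    stats.runTime[step.name] = Date.now() - start
  }
  return stats
}

// usage (step functions as in the deleted verify.js):
// const stats = await runSteps([markStartTime, garbageCollect, markEndTime], cache)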
-// -async function garbageCollect (cache, opts) { - opts.log.silly('verify', 'garbage collecting content') - const indexStream = index.lsStream(cache) - const liveContent = new Set() - indexStream.on('data', (entry) => { - if (opts.filter && !opts.filter(entry)) { - return - } - - // integrity is stringified, re-parse it so we can get each hash - const integrity = ssri.parse(entry.integrity) - for (const algo in integrity) { - liveContent.add(integrity[algo].toString()) - } - }) - await new Promise((resolve, reject) => { - indexStream.on('end', resolve).on('error', reject) - }) - const contentDir = contentPath.contentDir(cache) - const files = await glob(path.join(contentDir, '**'), { - follow: false, - nodir: true, - nosort: true, - }) - const stats = { - verifiedContent: 0, - reclaimedCount: 0, - reclaimedSize: 0, - badContentCount: 0, - keptSize: 0, - } - await pMap( - files, - async (f) => { - const split = f.split(/[/\\]/) - const digest = split.slice(split.length - 3).join('') - const algo = split[split.length - 4] - const integrity = ssri.fromHex(digest, algo) - if (liveContent.has(integrity.toString())) { - const info = await verifyContent(f, integrity) - if (!info.valid) { - stats.reclaimedCount++ - stats.badContentCount++ - stats.reclaimedSize += info.size - } else { - stats.verifiedContent++ - stats.keptSize += info.size - } - } else { - // No entries refer to this content. We can delete. - stats.reclaimedCount++ - const s = await stat(f) - await rm(f, { recursive: true, force: true }) - stats.reclaimedSize += s.size - } - return stats - }, - { concurrency: opts.concurrency } - ) - return stats -} - -async function verifyContent (filepath, sri) { - const contentInfo = {} - try { - const { size } = await stat(filepath) - contentInfo.size = size - contentInfo.valid = true - await ssri.checkStream(new fsm.ReadStream(filepath), sri) - } catch (err) { - if (err.code === 'ENOENT') { - return { size: 0, valid: false } - } - if (err.code !== 'EINTEGRITY') { - throw err - } - - await rm(filepath, { recursive: true, force: true }) - contentInfo.valid = false - } - return contentInfo -} - -async function rebuildIndex (cache, opts) { - opts.log.silly('verify', 'rebuilding index') - const entries = await index.ls(cache) - const stats = { - missingContent: 0, - rejectedEntries: 0, - totalEntries: 0, - } - const buckets = {} - for (const k in entries) { - /* istanbul ignore else */ - if (hasOwnProperty(entries, k)) { - const hashed = index.hashKey(k) - const entry = entries[k] - const excluded = opts.filter && !opts.filter(entry) - excluded && stats.rejectedEntries++ - if (buckets[hashed] && !excluded) { - buckets[hashed].push(entry) - } else if (buckets[hashed] && excluded) { - // skip - } else if (excluded) { - buckets[hashed] = [] - buckets[hashed]._path = index.bucketPath(cache, k) - } else { - buckets[hashed] = [entry] - buckets[hashed]._path = index.bucketPath(cache, k) - } - } - } - await pMap( - Object.keys(buckets), - (key) => { - return rebuildBucket(cache, buckets[key], stats, opts) - }, - { concurrency: opts.concurrency } - ) - return stats -} - -async function rebuildBucket (cache, bucket, stats) { - await truncate(bucket._path) - // This needs to be serialized because cacache explicitly - // lets very racy bucket conflicts clobber each other. 
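Stripped of streaming and the detailed stats, the collector described in the comment above is a textbook mark-and-sweep: mark every digest the (optionally filtered) index still references, then sweep content files whose digest was never marked. A compressed sketch where the four collaborators are injected as hypothetical parameters (standing in for lsStream, the content glob, the path-to-digest split, and rm):

// Mark-and-sweep over a content-addressable store.
async function markAndSweep (listEntries, listContentFiles, digestOf, remove) {
  const live = new Set()
  for await (const entry of listEntries()) {
    live.add(entry.integrity) // mark: some index entry still points here
  }
  let reclaimed = 0
  for (const file of await listContentFiles()) {
    if (!live.has(digestOf(file))) {
      await remove(file) // sweep: nothing references this content
      reclaimed++
    }
  }
  return { reclaimed }
}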
- for (const entry of bucket) { - const content = contentPath(cache, entry.integrity) - try { - await stat(content) - await index.insert(cache, entry.key, entry.integrity, { - metadata: entry.metadata, - size: entry.size, - time: entry.time, - }) - stats.totalEntries++ - } catch (err) { - if (err.code === 'ENOENT') { - stats.rejectedEntries++ - stats.missingContent++ - } else { - throw err - } - } - } -} - -function cleanTmp (cache, opts) { - opts.log.silly('verify', 'cleaning tmp directory') - return rm(path.join(cache, 'tmp'), { recursive: true, force: true }) -} - -async function writeVerifile (cache, opts) { - const verifile = path.join(cache, '_lastverified') - opts.log.silly('verify', 'writing verifile to ' + verifile) - return writeFile(verifile, `${Date.now()}`) -} - -module.exports.lastRun = lastRun - -async function lastRun (cache) { - const data = await readFile(path.join(cache, '_lastverified'), { encoding: 'utf8' }) - return new Date(+data) -} diff --git a/deps/npm/node_modules/node-gyp/node_modules/cacache/package.json b/deps/npm/node_modules/node-gyp/node_modules/cacache/package.json deleted file mode 100644 index 6e6219158ed759..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/cacache/package.json +++ /dev/null @@ -1,82 +0,0 @@ -{ - "name": "cacache", - "version": "18.0.4", - "cache-version": { - "content": "2", - "index": "5" - }, - "description": "Fast, fault-tolerant, cross-platform, disk-based, data-agnostic, content-addressable cache.", - "main": "lib/index.js", - "files": [ - "bin/", - "lib/" - ], - "scripts": { - "test": "tap", - "snap": "tap", - "coverage": "tap", - "test-docker": "docker run -it --rm --name pacotest -v \"$PWD\":/tmp -w /tmp node:latest npm test", - "lint": "eslint \"**/*.{js,cjs,ts,mjs,jsx,tsx}\"", - "npmclilint": "npmcli-lint", - "lintfix": "npm run lint -- --fix", - "postsnap": "npm run lintfix --", - "postlint": "template-oss-check", - "posttest": "npm run lint", - "template-oss-apply": "template-oss-apply --force" - }, - "repository": { - "type": "git", - "url": "git+https://github.com/npm/cacache.git" - }, - "keywords": [ - "cache", - "caching", - "content-addressable", - "sri", - "sri hash", - "subresource integrity", - "cache", - "storage", - "store", - "file store", - "filesystem", - "disk cache", - "disk storage" - ], - "license": "ISC", - "dependencies": { - "@npmcli/fs": "^3.1.0", - "fs-minipass": "^3.0.0", - "glob": "^10.2.2", - "lru-cache": "^10.0.1", - "minipass": "^7.0.3", - "minipass-collect": "^2.0.1", - "minipass-flush": "^1.0.5", - "minipass-pipeline": "^1.2.4", - "p-map": "^4.0.0", - "ssri": "^10.0.0", - "tar": "^6.1.11", - "unique-filename": "^3.0.0" - }, - "devDependencies": { - "@npmcli/eslint-config": "^4.0.0", - "@npmcli/template-oss": "4.22.0", - "tap": "^16.0.0" - }, - "engines": { - "node": "^16.14.0 || >=18.0.0" - }, - "templateOSS": { - "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. 
Edits may be overwritten.", - "windowsCI": false, - "version": "4.22.0", - "publish": "true" - }, - "author": "GitHub Inc.", - "tap": { - "nyc-arg": [ - "--exclude", - "tap-snapshots/**" - ] - } -} diff --git a/deps/npm/node_modules/node-gyp/node_modules/chownr/LICENSE.md b/deps/npm/node_modules/node-gyp/node_modules/chownr/LICENSE.md new file mode 100644 index 00000000000000..881248b6d7f0ca --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/chownr/LICENSE.md @@ -0,0 +1,63 @@ +All packages under `src/` are licensed according to the terms in +their respective `LICENSE` or `LICENSE.md` files. + +The remainder of this project is licensed under the Blue Oak +Model License, as follows: + +----- + +# Blue Oak Model License + +Version 1.0.0 + +## Purpose + +This license gives everyone as much permission to work with +this software as possible, while protecting contributors +from liability. + +## Acceptance + +In order to receive this license, you must agree to its +rules. The rules of this license are both obligations +under that agreement and conditions to your license. +You must not do anything with this software that triggers +a rule that you cannot or will not follow. + +## Copyright + +Each contributor licenses you to do everything with this +software that would otherwise infringe that contributor's +copyright in it. + +## Notices + +You must ensure that everyone who gets a copy of +any part of this software from you, with or without +changes, also gets the text of this license or a link to +. + +## Excuse + +If anyone notifies you in writing that you have not +complied with [Notices](#notices), you can keep your +license by taking all practical steps to comply within 30 +days after the notice. If you do not do so, your license +ends immediately. + +## Patent + +Each contributor licenses you to do everything with this +software that would otherwise infringe any patent claims +they can license or become able to license. + +## Reliability + +No contributor can revoke this license. + +## No Liability + +***As far as the law allows, this software comes as is, +without any warranty or condition, and no contributor +will be liable to anyone for any damages related to this +software or this license, under any kind of legal claim.*** diff --git a/deps/npm/node_modules/node-gyp/node_modules/chownr/dist/commonjs/index.js b/deps/npm/node_modules/node-gyp/node_modules/chownr/dist/commonjs/index.js new file mode 100644 index 00000000000000..6a7b68d5eac26e --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/chownr/dist/commonjs/index.js @@ -0,0 +1,93 @@ +"use strict"; +var __importDefault = (this && this.__importDefault) || function (mod) { + return (mod && mod.__esModule) ? mod : { "default": mod }; +}; +Object.defineProperty(exports, "__esModule", { value: true }); +exports.chownrSync = exports.chownr = void 0; +const node_fs_1 = __importDefault(require("node:fs")); +const node_path_1 = __importDefault(require("node:path")); +const lchownSync = (path, uid, gid) => { + try { + return node_fs_1.default.lchownSync(path, uid, gid); + } + catch (er) { + if (er?.code !== 'ENOENT') + throw er; + } +}; +const chown = (cpath, uid, gid, cb) => { + node_fs_1.default.lchown(cpath, uid, gid, er => { + // Skip ENOENT error + cb(er && er?.code !== 'ENOENT' ? 
er : null); + }); +}; +const chownrKid = (p, child, uid, gid, cb) => { + if (child.isDirectory()) { + (0, exports.chownr)(node_path_1.default.resolve(p, child.name), uid, gid, (er) => { + if (er) + return cb(er); + const cpath = node_path_1.default.resolve(p, child.name); + chown(cpath, uid, gid, cb); + }); + } + else { + const cpath = node_path_1.default.resolve(p, child.name); + chown(cpath, uid, gid, cb); + } +}; +const chownr = (p, uid, gid, cb) => { + node_fs_1.default.readdir(p, { withFileTypes: true }, (er, children) => { + // any error other than ENOTDIR or ENOTSUP means it's not readable, + // or doesn't exist. give up. + if (er) { + if (er.code === 'ENOENT') + return cb(); + else if (er.code !== 'ENOTDIR' && er.code !== 'ENOTSUP') + return cb(er); + } + if (er || !children.length) + return chown(p, uid, gid, cb); + let len = children.length; + let errState = null; + const then = (er) => { + /* c8 ignore start */ + if (errState) + return; + /* c8 ignore stop */ + if (er) + return cb((errState = er)); + if (--len === 0) + return chown(p, uid, gid, cb); + }; + for (const child of children) { + chownrKid(p, child, uid, gid, then); + } + }); +}; +exports.chownr = chownr; +const chownrKidSync = (p, child, uid, gid) => { + if (child.isDirectory()) + (0, exports.chownrSync)(node_path_1.default.resolve(p, child.name), uid, gid); + lchownSync(node_path_1.default.resolve(p, child.name), uid, gid); +}; +const chownrSync = (p, uid, gid) => { + let children; + try { + children = node_fs_1.default.readdirSync(p, { withFileTypes: true }); + } + catch (er) { + const e = er; + if (e?.code === 'ENOENT') + return; + else if (e?.code === 'ENOTDIR' || e?.code === 'ENOTSUP') + return lchownSync(p, uid, gid); + else + throw e; + } + for (const child of children) { + chownrKidSync(p, child, uid, gid); + } + return lchownSync(p, uid, gid); +}; +exports.chownrSync = chownrSync; +//# sourceMappingURL=index.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/isexe/dist/cjs/package.json b/deps/npm/node_modules/node-gyp/node_modules/chownr/dist/commonjs/package.json similarity index 100% rename from deps/npm/node_modules/node-gyp/node_modules/isexe/dist/cjs/package.json rename to deps/npm/node_modules/node-gyp/node_modules/chownr/dist/commonjs/package.json diff --git a/deps/npm/node_modules/node-gyp/node_modules/chownr/dist/esm/index.js b/deps/npm/node_modules/node-gyp/node_modules/chownr/dist/esm/index.js new file mode 100644 index 00000000000000..5c2815297a67cb --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/chownr/dist/esm/index.js @@ -0,0 +1,85 @@ +import fs from 'node:fs'; +import path from 'node:path'; +const lchownSync = (path, uid, gid) => { + try { + return fs.lchownSync(path, uid, gid); + } + catch (er) { + if (er?.code !== 'ENOENT') + throw er; + } +}; +const chown = (cpath, uid, gid, cb) => { + fs.lchown(cpath, uid, gid, er => { + // Skip ENOENT error + cb(er && er?.code !== 'ENOENT' ? er : null); + }); +}; +const chownrKid = (p, child, uid, gid, cb) => { + if (child.isDirectory()) { + chownr(path.resolve(p, child.name), uid, gid, (er) => { + if (er) + return cb(er); + const cpath = path.resolve(p, child.name); + chown(cpath, uid, gid, cb); + }); + } + else { + const cpath = path.resolve(p, child.name); + chown(cpath, uid, gid, cb); + } +}; +export const chownr = (p, uid, gid, cb) => { + fs.readdir(p, { withFileTypes: true }, (er, children) => { + // any error other than ENOTDIR or ENOTSUP means it's not readable, + // or doesn't exist. 
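The callback-style chownr() above fans out one lchown per directory entry and coordinates them with a countdown plus a sticky error flag rather than promises. That pattern on its own:

// Run job(item, done) for every item in parallel; report the first
// error exactly once, or call cb() when the countdown reaches zero.
function forEachAsync (items, job, cb) {
  let remaining = items.length
  let errState = null
  if (remaining === 0) return cb()
  const then = (er) => {
    if (errState) return // a failure was already reported
    if (er) return cb(errState = er)
    if (--remaining === 0) cb()
  }
  for (const item of items) job(item, then)
}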
give up. + if (er) { + if (er.code === 'ENOENT') + return cb(); + else if (er.code !== 'ENOTDIR' && er.code !== 'ENOTSUP') + return cb(er); + } + if (er || !children.length) + return chown(p, uid, gid, cb); + let len = children.length; + let errState = null; + const then = (er) => { + /* c8 ignore start */ + if (errState) + return; + /* c8 ignore stop */ + if (er) + return cb((errState = er)); + if (--len === 0) + return chown(p, uid, gid, cb); + }; + for (const child of children) { + chownrKid(p, child, uid, gid, then); + } + }); +}; +const chownrKidSync = (p, child, uid, gid) => { + if (child.isDirectory()) + chownrSync(path.resolve(p, child.name), uid, gid); + lchownSync(path.resolve(p, child.name), uid, gid); +}; +export const chownrSync = (p, uid, gid) => { + let children; + try { + children = fs.readdirSync(p, { withFileTypes: true }); + } + catch (er) { + const e = er; + if (e?.code === 'ENOENT') + return; + else if (e?.code === 'ENOTDIR' || e?.code === 'ENOTSUP') + return lchownSync(p, uid, gid); + else + throw e; + } + for (const child of children) { + chownrKidSync(p, child, uid, gid); + } + return lchownSync(p, uid, gid); +}; +//# sourceMappingURL=index.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/isexe/dist/mjs/package.json b/deps/npm/node_modules/node-gyp/node_modules/chownr/dist/esm/package.json similarity index 100% rename from deps/npm/node_modules/node-gyp/node_modules/isexe/dist/mjs/package.json rename to deps/npm/node_modules/node-gyp/node_modules/chownr/dist/esm/package.json diff --git a/deps/npm/node_modules/node-gyp/node_modules/chownr/package.json b/deps/npm/node_modules/node-gyp/node_modules/chownr/package.json new file mode 100644 index 00000000000000..09aa6b2e2e576d --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/chownr/package.json @@ -0,0 +1,69 @@ +{ + "author": "Isaac Z. Schlueter (http://blog.izs.me/)", + "name": "chownr", + "description": "like `chown -R`", + "version": "3.0.0", + "repository": { + "type": "git", + "url": "git://github.com/isaacs/chownr.git" + }, + "files": [ + "dist" + ], + "devDependencies": { + "@types/node": "^20.12.5", + "mkdirp": "^3.0.1", + "prettier": "^3.2.5", + "rimraf": "^5.0.5", + "tap": "^18.7.2", + "tshy": "^1.13.1", + "typedoc": "^0.25.12" + }, + "scripts": { + "prepare": "tshy", + "pretest": "npm run prepare", + "test": "tap", + "preversion": "npm test", + "postversion": "npm publish", + "prepublishOnly": "git push origin --follow-tags", + "format": "prettier --write . 
--loglevel warn", + "typedoc": "typedoc --tsconfig .tshy/esm.json ./src/*.ts" + }, + "license": "BlueOak-1.0.0", + "engines": { + "node": ">=18" + }, + "tshy": { + "exports": { + "./package.json": "./package.json", + ".": "./src/index.ts" + } + }, + "exports": { + "./package.json": "./package.json", + ".": { + "import": { + "types": "./dist/esm/index.d.ts", + "default": "./dist/esm/index.js" + }, + "require": { + "types": "./dist/commonjs/index.d.ts", + "default": "./dist/commonjs/index.js" + } + } + }, + "main": "./dist/commonjs/index.js", + "types": "./dist/commonjs/index.d.ts", + "type": "module", + "prettier": { + "semi": false, + "printWidth": 75, + "tabWidth": 2, + "useTabs": false, + "singleQuote": true, + "jsxSingleQuote": false, + "bracketSameLine": true, + "arrowParens": "avoid", + "endOfLine": "lf" + } +} diff --git a/deps/npm/node_modules/node-gyp/node_modules/isexe/dist/cjs/index.js b/deps/npm/node_modules/node-gyp/node_modules/isexe/dist/cjs/index.js deleted file mode 100644 index cefcb66b5c5434..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/isexe/dist/cjs/index.js +++ /dev/null @@ -1,46 +0,0 @@ -"use strict"; -var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) { - if (k2 === undefined) k2 = k; - var desc = Object.getOwnPropertyDescriptor(m, k); - if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) { - desc = { enumerable: true, get: function() { return m[k]; } }; - } - Object.defineProperty(o, k2, desc); -}) : (function(o, m, k, k2) { - if (k2 === undefined) k2 = k; - o[k2] = m[k]; -})); -var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) { - Object.defineProperty(o, "default", { enumerable: true, value: v }); -}) : function(o, v) { - o["default"] = v; -}); -var __importStar = (this && this.__importStar) || function (mod) { - if (mod && mod.__esModule) return mod; - var result = {}; - if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k); - __setModuleDefault(result, mod); - return result; -}; -var __exportStar = (this && this.__exportStar) || function(m, exports) { - for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p); -}; -Object.defineProperty(exports, "__esModule", { value: true }); -exports.sync = exports.isexe = exports.posix = exports.win32 = void 0; -const posix = __importStar(require("./posix.js")); -exports.posix = posix; -const win32 = __importStar(require("./win32.js")); -exports.win32 = win32; -__exportStar(require("./options.js"), exports); -const platform = process.env._ISEXE_TEST_PLATFORM_ || process.platform; -const impl = platform === 'win32' ? win32 : posix; -/** - * Determine whether a path is executable on the current platform. - */ -exports.isexe = impl.isexe; -/** - * Synchronously determine whether a path is executable on the - * current platform. 
- */ -exports.sync = impl.sync; -//# sourceMappingURL=index.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/isexe/dist/cjs/options.js b/deps/npm/node_modules/node-gyp/node_modules/isexe/dist/cjs/options.js deleted file mode 100644 index 0dfad0762cc32c..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/isexe/dist/cjs/options.js +++ /dev/null @@ -1,3 +0,0 @@ -"use strict"; -Object.defineProperty(exports, "__esModule", { value: true }); -//# sourceMappingURL=options.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/isexe/dist/cjs/posix.js b/deps/npm/node_modules/node-gyp/node_modules/isexe/dist/cjs/posix.js deleted file mode 100644 index 3bc5e79d7007e9..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/isexe/dist/cjs/posix.js +++ /dev/null @@ -1,67 +0,0 @@ -"use strict"; -/** - * This is the Posix implementation of isexe, which uses the file - * mode and uid/gid values. - * - * @module - */ -Object.defineProperty(exports, "__esModule", { value: true }); -exports.sync = exports.isexe = void 0; -const fs_1 = require("fs"); -const promises_1 = require("fs/promises"); -/** - * Determine whether a path is executable according to the mode and - * current (or specified) user and group IDs. - */ -const isexe = async (path, options = {}) => { - const { ignoreErrors = false } = options; - try { - return checkStat(await (0, promises_1.stat)(path), options); - } - catch (e) { - const er = e; - if (ignoreErrors || er.code === 'EACCES') - return false; - throw er; - } -}; -exports.isexe = isexe; -/** - * Synchronously determine whether a path is executable according to - * the mode and current (or specified) user and group IDs. - */ -const sync = (path, options = {}) => { - const { ignoreErrors = false } = options; - try { - return checkStat((0, fs_1.statSync)(path), options); - } - catch (e) { - const er = e; - if (ignoreErrors || er.code === 'EACCES') - return false; - throw er; - } -}; -exports.sync = sync; -const checkStat = (stat, options) => stat.isFile() && checkMode(stat, options); -const checkMode = (stat, options) => { - const myUid = options.uid ?? process.getuid?.(); - const myGroups = options.groups ?? process.getgroups?.() ?? []; - const myGid = options.gid ?? process.getgid?.() ?? myGroups[0]; - if (myUid === undefined || myGid === undefined) { - throw new Error('cannot get uid or gid'); - } - const groups = new Set([myGid, ...myGroups]); - const mod = stat.mode; - const uid = stat.uid; - const gid = stat.gid; - const u = parseInt('100', 8); - const g = parseInt('010', 8); - const o = parseInt('001', 8); - const ug = u | g; - return !!(mod & o || - (mod & g && groups.has(gid)) || - (mod & u && uid === myUid) || - (mod & ug && myUid === 0)); -}; -//# sourceMappingURL=posix.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/isexe/dist/cjs/win32.js b/deps/npm/node_modules/node-gyp/node_modules/isexe/dist/cjs/win32.js deleted file mode 100644 index fa7a4d2f7d240d..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/isexe/dist/cjs/win32.js +++ /dev/null @@ -1,62 +0,0 @@ -"use strict"; -/** - * This is the Windows implementation of isexe, which uses the file - * extension and PATHEXT setting. 
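checkMode() in the Posix implementation above is pure bit arithmetic; parseInt('100', 8) and friends are just the execute bits for owner, group, and other. Rewritten with octal literals and plain arguments:

// A file is executable for us if the "other" x-bit is set, the group
// x-bit is set for one of our groups, the owner x-bit is set and we
// own the file, or we are root and any x-bit exists at all.
function isExecutable (mode, fileUid, fileGid, myUid, myGids) {
  const u = 0o100, g = 0o010, o = 0o001
  return !!(
    (mode & o) ||
    (mode & g && myGids.includes(fileGid)) ||
    (mode & u && fileUid === myUid) ||
    (mode & (u | g) && myUid === 0)
  )
}

// e.g. mode 0o755 owned by uid 1000, checked as uid 1000:
isExecutable(0o755, 1000, 1000, 1000, [1000]) // true (owner x-bit)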
- * - * @module - */ -Object.defineProperty(exports, "__esModule", { value: true }); -exports.sync = exports.isexe = void 0; -const fs_1 = require("fs"); -const promises_1 = require("fs/promises"); -/** - * Determine whether a path is executable based on the file extension - * and PATHEXT environment variable (or specified pathExt option) - */ -const isexe = async (path, options = {}) => { - const { ignoreErrors = false } = options; - try { - return checkStat(await (0, promises_1.stat)(path), path, options); - } - catch (e) { - const er = e; - if (ignoreErrors || er.code === 'EACCES') - return false; - throw er; - } -}; -exports.isexe = isexe; -/** - * Synchronously determine whether a path is executable based on the file - * extension and PATHEXT environment variable (or specified pathExt option) - */ -const sync = (path, options = {}) => { - const { ignoreErrors = false } = options; - try { - return checkStat((0, fs_1.statSync)(path), path, options); - } - catch (e) { - const er = e; - if (ignoreErrors || er.code === 'EACCES') - return false; - throw er; - } -}; -exports.sync = sync; -const checkPathExt = (path, options) => { - const { pathExt = process.env.PATHEXT || '' } = options; - const peSplit = pathExt.split(';'); - if (peSplit.indexOf('') !== -1) { - return true; - } - for (let i = 0; i < peSplit.length; i++) { - const p = peSplit[i].toLowerCase(); - const ext = path.substring(path.length - p.length).toLowerCase(); - if (p && ext === p) { - return true; - } - } - return false; -}; -const checkStat = (stat, path, options) => stat.isFile() && checkPathExt(path, options); -//# sourceMappingURL=win32.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/isexe/dist/mjs/index.js b/deps/npm/node_modules/node-gyp/node_modules/isexe/dist/mjs/index.js deleted file mode 100644 index 1e309acd7355ec..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/isexe/dist/mjs/index.js +++ /dev/null @@ -1,16 +0,0 @@ -import * as posix from './posix.js'; -import * as win32 from './win32.js'; -export * from './options.js'; -export { win32, posix }; -const platform = process.env._ISEXE_TEST_PLATFORM_ || process.platform; -const impl = platform === 'win32' ? win32 : posix; -/** - * Determine whether a path is executable on the current platform. - */ -export const isexe = impl.isexe; -/** - * Synchronously determine whether a path is executable on the - * current platform. - */ -export const sync = impl.sync; -//# sourceMappingURL=index.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/isexe/dist/mjs/options.js b/deps/npm/node_modules/node-gyp/node_modules/isexe/dist/mjs/options.js deleted file mode 100644 index e9ded40bd5b2cd..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/isexe/dist/mjs/options.js +++ /dev/null @@ -1,2 +0,0 @@ -export {}; -//# sourceMappingURL=options.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/isexe/dist/mjs/posix.js b/deps/npm/node_modules/node-gyp/node_modules/isexe/dist/mjs/posix.js deleted file mode 100644 index c453776c0452f7..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/isexe/dist/mjs/posix.js +++ /dev/null @@ -1,62 +0,0 @@ -/** - * This is the Posix implementation of isexe, which uses the file - * mode and uid/gid values. 
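The Windows implementation above ignores mode bits entirely: executability is a function of the file name and PATHEXT. Its comparison logic is equivalent to:

// A file is executable when its name ends with one of the
// (case-insensitive) extensions in PATHEXT; an empty entry in the
// list matches everything.
function hasExecutableExt (file, pathExt = process.env.PATHEXT || '') {
  const exts = pathExt.split(';')
  if (exts.includes('')) return true
  const lower = file.toLowerCase()
  return exts.some(e => e && lower.endsWith(e.toLowerCase()))
}

hasExecutableExt('node.exe', '.COM;.EXE;.BAT') // true
hasExecutableExt('README.md', '.COM;.EXE;.BAT') // false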
- * - * @module - */ -import { statSync } from 'fs'; -import { stat } from 'fs/promises'; -/** - * Determine whether a path is executable according to the mode and - * current (or specified) user and group IDs. - */ -export const isexe = async (path, options = {}) => { - const { ignoreErrors = false } = options; - try { - return checkStat(await stat(path), options); - } - catch (e) { - const er = e; - if (ignoreErrors || er.code === 'EACCES') - return false; - throw er; - } -}; -/** - * Synchronously determine whether a path is executable according to - * the mode and current (or specified) user and group IDs. - */ -export const sync = (path, options = {}) => { - const { ignoreErrors = false } = options; - try { - return checkStat(statSync(path), options); - } - catch (e) { - const er = e; - if (ignoreErrors || er.code === 'EACCES') - return false; - throw er; - } -}; -const checkStat = (stat, options) => stat.isFile() && checkMode(stat, options); -const checkMode = (stat, options) => { - const myUid = options.uid ?? process.getuid?.(); - const myGroups = options.groups ?? process.getgroups?.() ?? []; - const myGid = options.gid ?? process.getgid?.() ?? myGroups[0]; - if (myUid === undefined || myGid === undefined) { - throw new Error('cannot get uid or gid'); - } - const groups = new Set([myGid, ...myGroups]); - const mod = stat.mode; - const uid = stat.uid; - const gid = stat.gid; - const u = parseInt('100', 8); - const g = parseInt('010', 8); - const o = parseInt('001', 8); - const ug = u | g; - return !!(mod & o || - (mod & g && groups.has(gid)) || - (mod & u && uid === myUid) || - (mod & ug && myUid === 0)); -}; -//# sourceMappingURL=posix.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/isexe/dist/mjs/win32.js b/deps/npm/node_modules/node-gyp/node_modules/isexe/dist/mjs/win32.js deleted file mode 100644 index a354ee2a5115c7..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/isexe/dist/mjs/win32.js +++ /dev/null @@ -1,57 +0,0 @@ -/** - * This is the Windows implementation of isexe, which uses the file - * extension and PATHEXT setting. 
- * - * @module - */ -import { statSync } from 'fs'; -import { stat } from 'fs/promises'; -/** - * Determine whether a path is executable based on the file extension - * and PATHEXT environment variable (or specified pathExt option) - */ -export const isexe = async (path, options = {}) => { - const { ignoreErrors = false } = options; - try { - return checkStat(await stat(path), path, options); - } - catch (e) { - const er = e; - if (ignoreErrors || er.code === 'EACCES') - return false; - throw er; - } -}; -/** - * Synchronously determine whether a path is executable based on the file - * extension and PATHEXT environment variable (or specified pathExt option) - */ -export const sync = (path, options = {}) => { - const { ignoreErrors = false } = options; - try { - return checkStat(statSync(path), path, options); - } - catch (e) { - const er = e; - if (ignoreErrors || er.code === 'EACCES') - return false; - throw er; - } -}; -const checkPathExt = (path, options) => { - const { pathExt = process.env.PATHEXT || '' } = options; - const peSplit = pathExt.split(';'); - if (peSplit.indexOf('') !== -1) { - return true; - } - for (let i = 0; i < peSplit.length; i++) { - const p = peSplit[i].toLowerCase(); - const ext = path.substring(path.length - p.length).toLowerCase(); - if (p && ext === p) { - return true; - } - } - return false; -}; -const checkStat = (stat, path, options) => stat.isFile() && checkPathExt(path, options); -//# sourceMappingURL=win32.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/isexe/package.json b/deps/npm/node_modules/node-gyp/node_modules/isexe/package.json deleted file mode 100644 index a0e2cd04bfdbfe..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/isexe/package.json +++ /dev/null @@ -1,96 +0,0 @@ -{ - "name": "isexe", - "version": "3.1.1", - "description": "Minimal module to check if a file is executable.", - "main": "./dist/cjs/index.js", - "module": "./dist/mjs/index.js", - "types": "./dist/cjs/index.js", - "files": [ - "dist" - ], - "exports": { - ".": { - "import": { - "types": "./dist/mjs/index.d.ts", - "default": "./dist/mjs/index.js" - }, - "require": { - "types": "./dist/cjs/index.d.ts", - "default": "./dist/cjs/index.js" - } - }, - "./posix": { - "import": { - "types": "./dist/mjs/posix.d.ts", - "default": "./dist/mjs/posix.js" - }, - "require": { - "types": "./dist/cjs/posix.d.ts", - "default": "./dist/cjs/posix.js" - } - }, - "./win32": { - "import": { - "types": "./dist/mjs/win32.d.ts", - "default": "./dist/mjs/win32.js" - }, - "require": { - "types": "./dist/cjs/win32.d.ts", - "default": "./dist/cjs/win32.js" - } - }, - "./package.json": "./package.json" - }, - "devDependencies": { - "@types/node": "^20.4.5", - "@types/tap": "^15.0.8", - "c8": "^8.0.1", - "mkdirp": "^0.5.1", - "prettier": "^2.8.8", - "rimraf": "^2.5.0", - "sync-content": "^1.0.2", - "tap": "^16.3.8", - "ts-node": "^10.9.1", - "typedoc": "^0.24.8", - "typescript": "^5.1.6" - }, - "scripts": { - "preversion": "npm test", - "postversion": "npm publish", - "prepublishOnly": "git push origin --follow-tags", - "prepare": "tsc -p tsconfig/cjs.json && tsc -p tsconfig/esm.json && bash ./scripts/fixup.sh", - "pretest": "npm run prepare", - "presnap": "npm run prepare", - "test": "c8 tap", - "snap": "c8 tap", - "format": "prettier --write . --loglevel warn --ignore-path ../../.prettierignore --cache", - "typedoc": "typedoc --tsconfig tsconfig/esm.json ./src/*.ts" - }, - "author": "Isaac Z. 
Schlueter (http://blog.izs.me/)", - "license": "ISC", - "tap": { - "coverage": false, - "node-arg": [ - "--enable-source-maps", - "--no-warnings", - "--loader", - "ts-node/esm" - ], - "ts": false - }, - "prettier": { - "semi": false, - "printWidth": 75, - "tabWidth": 2, - "useTabs": false, - "singleQuote": true, - "jsxSingleQuote": false, - "bracketSameLine": true, - "arrowParens": "avoid", - "endOfLine": "lf" - }, - "repository": "https://github.com/isaacs/isexe", - "engines": { - "node": ">=16" - } -} diff --git a/deps/npm/node_modules/node-gyp/node_modules/make-fetch-happen/LICENSE b/deps/npm/node_modules/node-gyp/node_modules/make-fetch-happen/LICENSE deleted file mode 100644 index 1808eb2844231c..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/make-fetch-happen/LICENSE +++ /dev/null @@ -1,16 +0,0 @@ -ISC License - -Copyright 2017-2022 (c) npm, Inc. - -Permission to use, copy, modify, and/or distribute this software for -any purpose with or without fee is hereby granted, provided that the -above copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE COPYRIGHT HOLDER DISCLAIMS -ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE -COPYRIGHT HOLDER BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR -CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS -OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE -OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE -USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/deps/npm/node_modules/node-gyp/node_modules/make-fetch-happen/lib/cache/entry.js b/deps/npm/node_modules/node-gyp/node_modules/make-fetch-happen/lib/cache/entry.js deleted file mode 100644 index bfcfacbcc95e18..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/make-fetch-happen/lib/cache/entry.js +++ /dev/null @@ -1,471 +0,0 @@ -const { Request, Response } = require('minipass-fetch') -const { Minipass } = require('minipass') -const MinipassFlush = require('minipass-flush') -const cacache = require('cacache') -const url = require('url') - -const CachingMinipassPipeline = require('../pipeline.js') -const CachePolicy = require('./policy.js') -const cacheKey = require('./key.js') -const remote = require('../remote.js') - -const hasOwnProperty = (obj, prop) => Object.prototype.hasOwnProperty.call(obj, prop) - -// allow list for request headers that will be written to the cache index -// note: we will also store any request headers -// that are named in a response's vary header -const KEEP_REQUEST_HEADERS = [ - 'accept-charset', - 'accept-encoding', - 'accept-language', - 'accept', - 'cache-control', -] - -// allow list for response headers that will be written to the cache index -// note: we must not store the real response's age header, or when we load -// a cache policy based on the metadata it will think the cached response -// is always stale -const KEEP_RESPONSE_HEADERS = [ - 'cache-control', - 'content-encoding', - 'content-language', - 'content-type', - 'date', - 'etag', - 'expires', - 'last-modified', - 'link', - 'location', - 'pragma', - 'vary', -] - -// return an object containing all metadata to be written to the index -const getMetadata = (request, response, options) => { - const metadata = { - time: Date.now(), - url: request.url, - reqHeaders: {}, - resHeaders: {}, - - // options on which we must match the request and vary the response - options: { - compress: 
options.compress != null ? options.compress : request.compress, - }, - } - - // only save the status if it's not a 200 or 304 - if (response.status !== 200 && response.status !== 304) { - metadata.status = response.status - } - - for (const name of KEEP_REQUEST_HEADERS) { - if (request.headers.has(name)) { - metadata.reqHeaders[name] = request.headers.get(name) - } - } - - // if the request's host header differs from the host in the url - // we need to keep it, otherwise it's just noise and we ignore it - const host = request.headers.get('host') - const parsedUrl = new url.URL(request.url) - if (host && parsedUrl.host !== host) { - metadata.reqHeaders.host = host - } - - // if the response has a vary header, make sure - // we store the relevant request headers too - if (response.headers.has('vary')) { - const vary = response.headers.get('vary') - // a vary of "*" means every header causes a different response. - // in that scenario, we do not include any additional headers - // as the freshness check will always fail anyway and we don't - // want to bloat the cache indexes - if (vary !== '*') { - // copy any other request headers that will vary the response - const varyHeaders = vary.trim().toLowerCase().split(/\s*,\s*/) - for (const name of varyHeaders) { - if (request.headers.has(name)) { - metadata.reqHeaders[name] = request.headers.get(name) - } - } - } - } - - for (const name of KEEP_RESPONSE_HEADERS) { - if (response.headers.has(name)) { - metadata.resHeaders[name] = response.headers.get(name) - } - } - - for (const name of options.cacheAdditionalHeaders) { - if (response.headers.has(name)) { - metadata.resHeaders[name] = response.headers.get(name) - } - } - - return metadata -} - -// symbols used to hide objects that may be lazily evaluated in a getter -const _request = Symbol('request') -const _response = Symbol('response') -const _policy = Symbol('policy') - -class CacheEntry { - constructor ({ entry, request, response, options }) { - if (entry) { - this.key = entry.key - this.entry = entry - // previous versions of this module didn't write an explicit timestamp in - // the metadata, so fall back to the entry's timestamp. 
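The vary handling in getMetadata() above is the subtle part: a cached response may only be reused for requests that match on every header the origin said it varies on, so those request headers must be stored alongside the response. Isolated, and assuming fetch-style Headers objects as minipass-fetch provides:

// "*" means no request can ever match, so nothing extra is stored;
// otherwise capture each named request header for later comparison.
function varyHeadersToStore (request, response) {
  const vary = response.headers.get('vary')
  if (!vary || vary === '*') return {}
  const stored = {}
  for (const name of vary.trim().toLowerCase().split(/\s*,\s*/)) {
    if (request.headers.has(name)) {
      stored[name] = request.headers.get(name)
    }
  }
  return stored
}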
we can't use the - // entry timestamp to determine staleness because cacache will update it - // when it verifies its data - this.entry.metadata.time = this.entry.metadata.time || this.entry.time - } else { - this.key = cacheKey(request) - } - - this.options = options - - // these properties are behind getters that lazily evaluate - this[_request] = request - this[_response] = response - this[_policy] = null - } - - // returns a CacheEntry instance that satisfies the given request - // or undefined if no existing entry satisfies - static async find (request, options) { - try { - // compacts the index and returns an array of unique entries - var matches = await cacache.index.compact(options.cachePath, cacheKey(request), (A, B) => { - const entryA = new CacheEntry({ entry: A, options }) - const entryB = new CacheEntry({ entry: B, options }) - return entryA.policy.satisfies(entryB.request) - }, { - validateEntry: (entry) => { - // clean out entries with a buggy content-encoding value - if (entry.metadata && - entry.metadata.resHeaders && - entry.metadata.resHeaders['content-encoding'] === null) { - return false - } - - // if an integrity is null, it needs to have a status specified - if (entry.integrity === null) { - return !!(entry.metadata && entry.metadata.status) - } - - return true - }, - }) - } catch (err) { - // if the compact request fails, ignore the error and return - return - } - - // a cache mode of 'reload' means to behave as though we have no cache - // on the way to the network. return undefined to allow cacheFetch to - // create a brand new request no matter what. - if (options.cache === 'reload') { - return - } - - // find the specific entry that satisfies the request - let match - for (const entry of matches) { - const _entry = new CacheEntry({ - entry, - options, - }) - - if (_entry.policy.satisfies(request)) { - match = _entry - break - } - } - - return match - } - - // if the user made a PUT/POST/PATCH then we invalidate our - // cache for the same url by deleting the index entirely - static async invalidate (request, options) { - const key = cacheKey(request) - try { - await cacache.rm.entry(options.cachePath, key, { removeFully: true }) - } catch (err) { - // ignore errors - } - } - - get request () { - if (!this[_request]) { - this[_request] = new Request(this.entry.metadata.url, { - method: 'GET', - headers: this.entry.metadata.reqHeaders, - ...this.entry.metadata.options, - }) - } - - return this[_request] - } - - get response () { - if (!this[_response]) { - this[_response] = new Response(null, { - url: this.entry.metadata.url, - counter: this.options.counter, - status: this.entry.metadata.status || 200, - headers: { - ...this.entry.metadata.resHeaders, - 'content-length': this.entry.size, - }, - }) - } - - return this[_response] - } - - get policy () { - if (!this[_policy]) { - this[_policy] = new CachePolicy({ - entry: this.entry, - request: this.request, - response: this.response, - options: this.options, - }) - } - - return this[_policy] - } - - // wraps the response in a pipeline that stores the data - // in the cache while the user consumes it - async store (status) { - // if we got a status other than 200, 301, or 308, - // or the CachePolicy forbid storage, append the - // cache status header and return it untouched - if ( - this.request.method !== 'GET' || - ![200, 301, 308].includes(this.response.status) || - !this.policy.storable() - ) { - this.response.headers.set('x-local-cache-status', 'skip') - return this.response - } - - const size = 
this.response.headers.get('content-length') - const cacheOpts = { - algorithms: this.options.algorithms, - metadata: getMetadata(this.request, this.response, this.options), - size, - integrity: this.options.integrity, - integrityEmitter: this.response.body.hasIntegrityEmitter && this.response.body, - } - - let body = null - // we only set a body if the status is a 200, redirects are - // stored as metadata only - if (this.response.status === 200) { - let cacheWriteResolve, cacheWriteReject - const cacheWritePromise = new Promise((resolve, reject) => { - cacheWriteResolve = resolve - cacheWriteReject = reject - }).catch((err) => { - body.emit('error', err) - }) - - body = new CachingMinipassPipeline({ events: ['integrity', 'size'] }, new MinipassFlush({ - flush () { - return cacheWritePromise - }, - })) - // this is always true since if we aren't reusing the one from the remote fetch, we - // are using the one from cacache - body.hasIntegrityEmitter = true - - const onResume = () => { - const tee = new Minipass() - const cacheStream = cacache.put.stream(this.options.cachePath, this.key, cacheOpts) - // re-emit the integrity and size events on our new response body so they can be reused - cacheStream.on('integrity', i => body.emit('integrity', i)) - cacheStream.on('size', s => body.emit('size', s)) - // stick a flag on here so downstream users will know if they can expect integrity events - tee.pipe(cacheStream) - // TODO if the cache write fails, log a warning but return the response anyway - // eslint-disable-next-line promise/catch-or-return - cacheStream.promise().then(cacheWriteResolve, cacheWriteReject) - body.unshift(tee) - body.unshift(this.response.body) - } - - body.once('resume', onResume) - body.once('end', () => body.removeListener('resume', onResume)) - } else { - await cacache.index.insert(this.options.cachePath, this.key, null, cacheOpts) - } - - // note: we do not set the x-local-cache-hash header because we do not know - // the hash value until after the write to the cache completes, which doesn't - // happen until after the response has been sent and it's too late to write - // the header anyway - this.response.headers.set('x-local-cache', encodeURIComponent(this.options.cachePath)) - this.response.headers.set('x-local-cache-key', encodeURIComponent(this.key)) - this.response.headers.set('x-local-cache-mode', 'stream') - this.response.headers.set('x-local-cache-status', status) - this.response.headers.set('x-local-cache-time', new Date().toISOString()) - const newResponse = new Response(body, { - url: this.response.url, - status: this.response.status, - headers: this.response.headers, - counter: this.options.counter, - }) - return newResponse - } - - // use the cached data to create a response and return it - async respond (method, options, status) { - let response - if (method === 'HEAD' || [301, 308].includes(this.response.status)) { - // if the request is a HEAD, or the response is a redirect, - // then the metadata in the entry already includes everything - // we need to build a response - response = this.response - } else { - // we're responding with a full cached response, so create a body - // that reads from cacache and attach it to a new Response - const body = new Minipass() - const headers = { ...this.policy.responseHeaders() } - - const onResume = () => { - const cacheStream = cacache.get.stream.byDigest( - this.options.cachePath, this.entry.integrity, { memoize: this.options.memoize } - ) - cacheStream.on('error', async (err) => { - cacheStream.pause() - if 
(err.code === 'EINTEGRITY') { - await cacache.rm.content( - this.options.cachePath, this.entry.integrity, { memoize: this.options.memoize } - ) - } - if (err.code === 'ENOENT' || err.code === 'EINTEGRITY') { - await CacheEntry.invalidate(this.request, this.options) - } - body.emit('error', err) - cacheStream.resume() - }) - // emit the integrity and size events based on our metadata so we're consistent - body.emit('integrity', this.entry.integrity) - body.emit('size', Number(headers['content-length'])) - cacheStream.pipe(body) - } - - body.once('resume', onResume) - body.once('end', () => body.removeListener('resume', onResume)) - response = new Response(body, { - url: this.entry.metadata.url, - counter: options.counter, - status: 200, - headers, - }) - } - - response.headers.set('x-local-cache', encodeURIComponent(this.options.cachePath)) - response.headers.set('x-local-cache-hash', encodeURIComponent(this.entry.integrity)) - response.headers.set('x-local-cache-key', encodeURIComponent(this.key)) - response.headers.set('x-local-cache-mode', 'stream') - response.headers.set('x-local-cache-status', status) - response.headers.set('x-local-cache-time', new Date(this.entry.metadata.time).toUTCString()) - return response - } - - // use the provided request along with this cache entry to - // revalidate the stored response. returns a response, either - // from the cache or from the update - async revalidate (request, options) { - const revalidateRequest = new Request(request, { - headers: this.policy.revalidationHeaders(request), - }) - - try { - // NOTE: be sure to remove the headers property from the - // user supplied options, since we have already defined - // them on the new request object. if they're still in the - // options then those will overwrite the ones from the policy - var response = await remote(revalidateRequest, { - ...options, - headers: undefined, - }) - } catch (err) { - // if the network fetch fails, return the stale - // cached response unless it has a cache-control - // of 'must-revalidate' - if (!this.policy.mustRevalidate) { - return this.respond(request.method, options, 'stale') - } - - throw err - } - - if (this.policy.revalidated(revalidateRequest, response)) { - // we got a 304, write a new index to the cache and respond from cache - const metadata = getMetadata(request, response, options) - // 304 responses do not include headers that are specific to the response data - // since they do not include a body, so we copy values for headers that were - // in the old cache entry to the new one, if the new metadata does not already - // include that header - for (const name of KEEP_RESPONSE_HEADERS) { - if ( - !hasOwnProperty(metadata.resHeaders, name) && - hasOwnProperty(this.entry.metadata.resHeaders, name) - ) { - metadata.resHeaders[name] = this.entry.metadata.resHeaders[name] - } - } - - for (const name of options.cacheAdditionalHeaders) { - const inMeta = hasOwnProperty(metadata.resHeaders, name) - const inEntry = hasOwnProperty(this.entry.metadata.resHeaders, name) - const inPolicy = hasOwnProperty(this.policy.response.headers, name) - - // if the header is in the existing entry, but it is not in the metadata - // then we need to write it to the metadata as this will refresh the on-disk cache - if (!inMeta && inEntry) { - metadata.resHeaders[name] = this.entry.metadata.resHeaders[name] - } - // if the header is in the metadata, but not in the policy, then we need to set - // it in the policy so that it's included in the immediate response. 
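The 304 bookkeeping above is easier to see on plain objects: a revalidation response carries no body and few headers, so anything it omits is carried forward from the stored entry before the cache index is rewritten. A rough sketch under those assumptions (mergeRevalidatedHeaders is a hypothetical helper; the real code walks KEEP_RESPONSE_HEADERS plus any cacheAdditionalHeaders):

    const KEEP_RESPONSE_HEADERS = ['cache-control', 'content-type', 'etag', 'last-modified']

    // Layer the cached entry's response headers under a 304's sparse headers.
    const mergeRevalidatedHeaders = (freshHeaders, cachedHeaders) => {
      const merged = { ...freshHeaders }
      for (const name of KEEP_RESPONSE_HEADERS) {
        if (!(name in merged) && name in cachedHeaders) {
          merged[name] = cachedHeaders[name]
        }
      }
      return merged
    }

    console.log(mergeRevalidatedHeaders(
      { etag: '"abc"' },
      { etag: '"abc"', 'content-type': 'application/json' }
    ))
    // => { etag: '"abc"', 'content-type': 'application/json' }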
future - // responses will load a new cache entry, so we don't need to change that - if (!inPolicy && inMeta) { - this.policy.response.headers[name] = metadata.resHeaders[name] - } - } - - try { - await cacache.index.insert(options.cachePath, this.key, this.entry.integrity, { - size: this.entry.size, - metadata, - }) - } catch (err) { - // if updating the cache index fails, we ignore it and - // respond anyway - } - return this.respond(request.method, options, 'revalidated') - } - - // if we got a modified response, create a new entry based on it - const newEntry = new CacheEntry({ - request, - response, - options, - }) - - // respond with the new entry while writing it to the cache - return newEntry.store('updated') - } -} - -module.exports = CacheEntry diff --git a/deps/npm/node_modules/node-gyp/node_modules/make-fetch-happen/lib/cache/errors.js b/deps/npm/node_modules/node-gyp/node_modules/make-fetch-happen/lib/cache/errors.js deleted file mode 100644 index 67a66573bebe66..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/make-fetch-happen/lib/cache/errors.js +++ /dev/null @@ -1,11 +0,0 @@ -class NotCachedError extends Error { - constructor (url) { - /* eslint-disable-next-line max-len */ - super(`request to ${url} failed: cache mode is 'only-if-cached' but no cached response is available.`) - this.code = 'ENOTCACHED' - } -} - -module.exports = { - NotCachedError, -} diff --git a/deps/npm/node_modules/node-gyp/node_modules/make-fetch-happen/lib/cache/index.js b/deps/npm/node_modules/node-gyp/node_modules/make-fetch-happen/lib/cache/index.js deleted file mode 100644 index 0de49d23fb9336..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/make-fetch-happen/lib/cache/index.js +++ /dev/null @@ -1,49 +0,0 @@ -const { NotCachedError } = require('./errors.js') -const CacheEntry = require('./entry.js') -const remote = require('../remote.js') - -// do whatever is necessary to get a Response and return it -const cacheFetch = async (request, options) => { - // try to find a cached entry that satisfies this request - const entry = await CacheEntry.find(request, options) - if (!entry) { - // no cached result, if the cache mode is 'only-if-cached' that's a failure - if (options.cache === 'only-if-cached') { - throw new NotCachedError(request.url) - } - - // otherwise, we make a request, store it and return it - const response = await remote(request, options) - const newEntry = new CacheEntry({ request, response, options }) - return newEntry.store('miss') - } - - // we have a cached response that satisfies this request, however if the cache - // mode is 'no-cache' then we send the revalidation request no matter what - if (options.cache === 'no-cache') { - return entry.revalidate(request, options) - } - - // if the cached entry is not stale, or if the cache mode is 'force-cache' or - // 'only-if-cached' we can respond with the cached entry. set the status - // based on the result of needsRevalidation and respond - const _needsRevalidation = entry.policy.needsRevalidation(request) - if (options.cache === 'force-cache' || - options.cache === 'only-if-cached' || - !_needsRevalidation) { - return entry.respond(request.method, options, _needsRevalidation ? 
'stale' : 'hit') - } - - // if we got here, the cache entry is stale so revalidate it - return entry.revalidate(request, options) -} - -cacheFetch.invalidate = async (request, options) => { - if (!options.cachePath) { - return - } - - return CacheEntry.invalidate(request, options) -} - -module.exports = cacheFetch diff --git a/deps/npm/node_modules/node-gyp/node_modules/make-fetch-happen/lib/cache/key.js b/deps/npm/node_modules/node-gyp/node_modules/make-fetch-happen/lib/cache/key.js deleted file mode 100644 index f7684d562b7fae..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/make-fetch-happen/lib/cache/key.js +++ /dev/null @@ -1,17 +0,0 @@ -const { URL, format } = require('url') - -// options passed to url.format() when generating a key -const formatOptions = { - auth: false, - fragment: false, - search: true, - unicode: false, -} - -// returns a string to be used as the cache key for the Request -const cacheKey = (request) => { - const parsed = new URL(request.url) - return `make-fetch-happen:request-cache:${format(parsed, formatOptions)}` -} - -module.exports = cacheKey diff --git a/deps/npm/node_modules/node-gyp/node_modules/make-fetch-happen/lib/cache/policy.js b/deps/npm/node_modules/node-gyp/node_modules/make-fetch-happen/lib/cache/policy.js deleted file mode 100644 index ada3c8600dae92..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/make-fetch-happen/lib/cache/policy.js +++ /dev/null @@ -1,161 +0,0 @@ -const CacheSemantics = require('http-cache-semantics') -const Negotiator = require('negotiator') -const ssri = require('ssri') - -// options passed to http-cache-semantics constructor -const policyOptions = { - shared: false, - ignoreCargoCult: true, -} - -// a fake empty response, used when only testing the -// request for storability -const emptyResponse = { status: 200, headers: {} } - -// returns a plain object representation of the Request -const requestObject = (request) => { - const _obj = { - method: request.method, - url: request.url, - headers: {}, - compress: request.compress, - } - - request.headers.forEach((value, key) => { - _obj.headers[key] = value - }) - - return _obj -} - -// returns a plain object representation of the Response -const responseObject = (response) => { - const _obj = { - status: response.status, - headers: {}, - } - - response.headers.forEach((value, key) => { - _obj.headers[key] = value - }) - - return _obj -} - -class CachePolicy { - constructor ({ entry, request, response, options }) { - this.entry = entry - this.request = requestObject(request) - this.response = responseObject(response) - this.options = options - this.policy = new CacheSemantics(this.request, this.response, policyOptions) - - if (this.entry) { - // if we have an entry, copy the timestamp to the _responseTime - // this is necessary because the CacheSemantics constructor forces - // the value to Date.now() which means a policy created from a - // cache entry is likely to always identify itself as stale - this.policy._responseTime = this.entry.metadata.time - } - } - - // static method to quickly determine if a request alone is storable - static storable (request, options) { - // no cachePath means no caching - if (!options.cachePath) { - return false - } - - // user explicitly asked not to cache - if (options.cache === 'no-store') { - return false - } - - // we only cache GET and HEAD requests - if (!['GET', 'HEAD'].includes(request.method)) { - return false - } - - // otherwise, let http-cache-semantics make the decision - // based on the 
request's headers - const policy = new CacheSemantics(requestObject(request), emptyResponse, policyOptions) - return policy.storable() - } - - // returns true if the policy satisfies the request - satisfies (request) { - const _req = requestObject(request) - if (this.request.headers.host !== _req.headers.host) { - return false - } - - if (this.request.compress !== _req.compress) { - return false - } - - const negotiatorA = new Negotiator(this.request) - const negotiatorB = new Negotiator(_req) - - if (JSON.stringify(negotiatorA.mediaTypes()) !== JSON.stringify(negotiatorB.mediaTypes())) { - return false - } - - if (JSON.stringify(negotiatorA.languages()) !== JSON.stringify(negotiatorB.languages())) { - return false - } - - if (JSON.stringify(negotiatorA.encodings()) !== JSON.stringify(negotiatorB.encodings())) { - return false - } - - if (this.options.integrity) { - return ssri.parse(this.options.integrity).match(this.entry.integrity) - } - - return true - } - - // returns true if the request and response allow caching - storable () { - return this.policy.storable() - } - - // NOTE: this is a hack to avoid parsing the cache-control - // header ourselves, it returns true if the response's - // cache-control contains must-revalidate - get mustRevalidate () { - return !!this.policy._rescc['must-revalidate'] - } - - // returns true if the cached response requires revalidation - // for the given request - needsRevalidation (request) { - const _req = requestObject(request) - // force method to GET because we only cache GETs - // but can serve a HEAD from a cached GET - _req.method = 'GET' - return !this.policy.satisfiesWithoutRevalidation(_req) - } - - responseHeaders () { - return this.policy.responseHeaders() - } - - // returns a new object containing the appropriate headers - // to send a revalidation request - revalidationHeaders (request) { - const _req = requestObject(request) - return this.policy.revalidationHeaders(_req) - } - - // returns true if the request/response was revalidated - // successfully. returns false if a new response was received - revalidated (request, response) { - const _req = requestObject(request) - const _res = responseObject(response) - const policy = this.policy.revalidatedPolicy(_req, _res) - return !policy.modified - } -} - -module.exports = CachePolicy diff --git a/deps/npm/node_modules/node-gyp/node_modules/make-fetch-happen/lib/fetch.js b/deps/npm/node_modules/node-gyp/node_modules/make-fetch-happen/lib/fetch.js deleted file mode 100644 index 233ba67e165502..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/make-fetch-happen/lib/fetch.js +++ /dev/null @@ -1,118 +0,0 @@ -'use strict' - -const { FetchError, Request, isRedirect } = require('minipass-fetch') -const url = require('url') - -const CachePolicy = require('./cache/policy.js') -const cache = require('./cache/index.js') -const remote = require('./remote.js') - -// given a Request, a Response and user options -// return true if the response is a redirect that -// can be followed. 
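For the satisfies() comparisons just removed, the core idea is that two requests may share a cache entry only when their content negotiation agrees. A sketch of that check, assuming the negotiator package is available (sameNegotiation is hypothetical; the real method also compares host, compress, and integrity):

    const Negotiator = require('negotiator')

    // Requests whose Accept / Accept-Language / Accept-Encoding preferences
    // differ must not be served from the same cached response.
    const sameNegotiation = (reqA, reqB) => {
      const a = new Negotiator(reqA)
      const b = new Negotiator(reqB)
      return JSON.stringify(a.mediaTypes()) === JSON.stringify(b.mediaTypes()) &&
        JSON.stringify(a.languages()) === JSON.stringify(b.languages()) &&
        JSON.stringify(a.encodings()) === JSON.stringify(b.encodings())
    }

    console.log(sameNegotiation(
      { headers: { accept: 'application/json' } },
      { headers: { accept: 'text/html' } }
    )) // => false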
we throw errors that will result -// in the fetch being rejected if the redirect is -// possible but invalid for some reason -const canFollowRedirect = (request, response, options) => { - if (!isRedirect(response.status)) { - return false - } - - if (options.redirect === 'manual') { - return false - } - - if (options.redirect === 'error') { - throw new FetchError(`redirect mode is set to error: ${request.url}`, - 'no-redirect', { code: 'ENOREDIRECT' }) - } - - if (!response.headers.has('location')) { - throw new FetchError(`redirect location header missing for: ${request.url}`, - 'no-location', { code: 'EINVALIDREDIRECT' }) - } - - if (request.counter >= request.follow) { - throw new FetchError(`maximum redirect reached at: ${request.url}`, - 'max-redirect', { code: 'EMAXREDIRECT' }) - } - - return true -} - -// given a Request, a Response, and the user's options return an object -// with a new Request and a new options object that will be used for -// following the redirect -const getRedirect = (request, response, options) => { - const _opts = { ...options } - const location = response.headers.get('location') - const redirectUrl = new url.URL(location, /^https?:/.test(location) ? undefined : request.url) - // Comment below is used under the following license: - /** - * @license - * Copyright (c) 2010-2012 Mikeal Rogers - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * http://www.apache.org/licenses/LICENSE-2.0 - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an "AS - * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language - * governing permissions and limitations under the License. - */ - - // Remove authorization if changing hostnames (but not if just - // changing ports or protocols). This matches the behavior of request: - // https://github.com/request/request/blob/b12a6245/lib/redirect.js#L134-L138 - if (new url.URL(request.url).hostname !== redirectUrl.hostname) { - request.headers.delete('authorization') - request.headers.delete('cookie') - } - - // for POST request with 301/302 response, or any request with 303 response, - // use GET when following redirect - if ( - response.status === 303 || - (request.method === 'POST' && [301, 302].includes(response.status)) - ) { - _opts.method = 'GET' - _opts.body = null - request.headers.delete('content-length') - } - - _opts.headers = {} - request.headers.forEach((value, key) => { - _opts.headers[key] = value - }) - - _opts.counter = ++request.counter - const redirectReq = new Request(url.format(redirectUrl), _opts) - return { - request: redirectReq, - options: _opts, - } -} - -const fetch = async (request, options) => { - const response = CachePolicy.storable(request, options) - ? 
await cache(request, options) - : await remote(request, options) - - // if the request wasn't a GET or HEAD, and the response - // status is between 200 and 399 inclusive, invalidate the - // request url - if (!['GET', 'HEAD'].includes(request.method) && - response.status >= 200 && - response.status <= 399) { - await cache.invalidate(request, options) - } - - if (!canFollowRedirect(request, response, options)) { - return response - } - - const redirect = getRedirect(request, response, options) - return fetch(redirect.request, redirect.options) -} - -module.exports = fetch diff --git a/deps/npm/node_modules/node-gyp/node_modules/make-fetch-happen/lib/index.js b/deps/npm/node_modules/node-gyp/node_modules/make-fetch-happen/lib/index.js deleted file mode 100644 index 2f12e8e1b61131..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/make-fetch-happen/lib/index.js +++ /dev/null @@ -1,41 +0,0 @@ -const { FetchError, Headers, Request, Response } = require('minipass-fetch') - -const configureOptions = require('./options.js') -const fetch = require('./fetch.js') - -const makeFetchHappen = (url, opts) => { - const options = configureOptions(opts) - - const request = new Request(url, options) - return fetch(request, options) -} - -makeFetchHappen.defaults = (defaultUrl, defaultOptions = {}, wrappedFetch = makeFetchHappen) => { - if (typeof defaultUrl === 'object') { - defaultOptions = defaultUrl - defaultUrl = null - } - - const defaultedFetch = (url, options = {}) => { - const finalUrl = url || defaultUrl - const finalOptions = { - ...defaultOptions, - ...options, - headers: { - ...defaultOptions.headers, - ...options.headers, - }, - } - return wrappedFetch(finalUrl, finalOptions) - } - - defaultedFetch.defaults = (defaultUrl1, defaultOptions1 = {}) => - makeFetchHappen.defaults(defaultUrl1, defaultOptions1, defaultedFetch) - return defaultedFetch -} - -module.exports = makeFetchHappen -module.exports.FetchError = FetchError -module.exports.Headers = Headers -module.exports.Request = Request -module.exports.Response = Response diff --git a/deps/npm/node_modules/node-gyp/node_modules/make-fetch-happen/lib/options.js b/deps/npm/node_modules/node-gyp/node_modules/make-fetch-happen/lib/options.js deleted file mode 100644 index f77511279f831d..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/make-fetch-happen/lib/options.js +++ /dev/null @@ -1,54 +0,0 @@ -const dns = require('dns') - -const conditionalHeaders = [ - 'if-modified-since', - 'if-none-match', - 'if-unmodified-since', - 'if-match', - 'if-range', -] - -const configureOptions = (opts) => { - const { strictSSL, ...options } = { ...opts } - options.method = options.method ? 
options.method.toUpperCase() : 'GET' - options.rejectUnauthorized = strictSSL !== false - - if (!options.retry) { - options.retry = { retries: 0 } - } else if (typeof options.retry === 'string') { - const retries = parseInt(options.retry, 10) - if (isFinite(retries)) { - options.retry = { retries } - } else { - options.retry = { retries: 0 } - } - } else if (typeof options.retry === 'number') { - options.retry = { retries: options.retry } - } else { - options.retry = { retries: 0, ...options.retry } - } - - options.dns = { ttl: 5 * 60 * 1000, lookup: dns.lookup, ...options.dns } - - options.cache = options.cache || 'default' - if (options.cache === 'default') { - const hasConditionalHeader = Object.keys(options.headers || {}).some((name) => { - return conditionalHeaders.includes(name.toLowerCase()) - }) - if (hasConditionalHeader) { - options.cache = 'no-store' - } - } - - options.cacheAdditionalHeaders = options.cacheAdditionalHeaders || [] - - // cacheManager is deprecated, but if it's set and - // cachePath is not we should copy it to the new field - if (options.cacheManager && !options.cachePath) { - options.cachePath = options.cacheManager - } - - return options -} - -module.exports = configureOptions diff --git a/deps/npm/node_modules/node-gyp/node_modules/make-fetch-happen/lib/pipeline.js b/deps/npm/node_modules/node-gyp/node_modules/make-fetch-happen/lib/pipeline.js deleted file mode 100644 index b1d221b2d0ce31..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/make-fetch-happen/lib/pipeline.js +++ /dev/null @@ -1,41 +0,0 @@ -'use strict' - -const MinipassPipeline = require('minipass-pipeline') - -class CachingMinipassPipeline extends MinipassPipeline { - #events = [] - #data = new Map() - - constructor (opts, ...streams) { - // CRITICAL: do NOT pass the streams to the call to super(), this will start - // the flow of data and potentially cause the events we need to catch to emit - // before we've finished our own setup. 
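Stepping back to configureOptions above: the retry option accepts a string, a number, an object, or nothing, and all of them collapse to a { retries, ... } object for promise-retry. A standalone restatement of just that normalization (normalizeRetry is a hypothetical name):

    const normalizeRetry = (retry) => {
      if (!retry) {
        return { retries: 0 }
      }
      if (typeof retry === 'string') {
        const retries = parseInt(retry, 10)
        return isFinite(retries) ? { retries } : { retries: 0 }
      }
      if (typeof retry === 'number') {
        return { retries: retry }
      }
      return { retries: 0, ...retry }
    }

    console.log(normalizeRetry('3'))           // => { retries: 3 }
    console.log(normalizeRetry(2))             // => { retries: 2 }
    console.log(normalizeRetry({ factor: 2 })) // => { retries: 0, factor: 2 }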
instead we call super() with no args, - // finish our setup, and then push the streams into ourselves to start the - // data flow - super() - this.#events = opts.events - - /* istanbul ignore next - coverage disabled because this is pointless to test here */ - if (streams.length) { - this.push(...streams) - } - } - - on (event, handler) { - if (this.#events.includes(event) && this.#data.has(event)) { - return handler(...this.#data.get(event)) - } - - return super.on(event, handler) - } - - emit (event, ...data) { - if (this.#events.includes(event)) { - this.#data.set(event, data) - } - - return super.emit(event, ...data) - } -} - -module.exports = CachingMinipassPipeline diff --git a/deps/npm/node_modules/node-gyp/node_modules/make-fetch-happen/lib/remote.js b/deps/npm/node_modules/node-gyp/node_modules/make-fetch-happen/lib/remote.js deleted file mode 100644 index 8554564074de6e..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/make-fetch-happen/lib/remote.js +++ /dev/null @@ -1,131 +0,0 @@ -const { Minipass } = require('minipass') -const fetch = require('minipass-fetch') -const promiseRetry = require('promise-retry') -const ssri = require('ssri') -const { log } = require('proc-log') - -const CachingMinipassPipeline = require('./pipeline.js') -const { getAgent } = require('@npmcli/agent') -const pkg = require('../package.json') - -const USER_AGENT = `${pkg.name}/${pkg.version} (+https://npm.im/${pkg.name})` - -const RETRY_ERRORS = [ - 'ECONNRESET', // remote socket closed on us - 'ECONNREFUSED', // remote host refused to open connection - 'EADDRINUSE', // failed to bind to a local port (proxy?) - 'ETIMEDOUT', // someone in the transaction is WAY TOO SLOW - // from @npmcli/agent - 'ECONNECTIONTIMEOUT', - 'EIDLETIMEOUT', - 'ERESPONSETIMEOUT', - 'ETRANSFERTIMEOUT', - // Known codes we do NOT retry on: - // ENOTFOUND (getaddrinfo failure. Either bad hostname, or offline) - // EINVALIDPROXY // invalid protocol from @npmcli/agent - // EINVALIDRESPONSE // invalid status code from @npmcli/agent -] - -const RETRY_TYPES = [ - 'request-timeout', -] - -// make a request directly to the remote source, -// retrying certain classes of errors as well as -// following redirects (through the cache if necessary) -// and verifying response integrity -const remoteFetch = (request, options) => { - const agent = getAgent(request.url, options) - if (!request.headers.has('connection')) { - request.headers.set('connection', agent ? 
'keep-alive' : 'close') - } - - if (!request.headers.has('user-agent')) { - request.headers.set('user-agent', USER_AGENT) - } - - // keep our own options since we're overriding the agent - // and the redirect mode - const _opts = { - ...options, - agent, - redirect: 'manual', - } - - return promiseRetry(async (retryHandler, attemptNum) => { - const req = new fetch.Request(request, _opts) - try { - let res = await fetch(req, _opts) - if (_opts.integrity && res.status === 200) { - // we got a 200 response and the user has specified an expected - // integrity value, so wrap the response in an ssri stream to verify it - const integrityStream = ssri.integrityStream({ - algorithms: _opts.algorithms, - integrity: _opts.integrity, - size: _opts.size, - }) - const pipeline = new CachingMinipassPipeline({ - events: ['integrity', 'size'], - }, res.body, integrityStream) - // we also propagate the integrity and size events out to the pipeline so we can use - // this new response body as an integrityEmitter for cacache - integrityStream.on('integrity', i => pipeline.emit('integrity', i)) - integrityStream.on('size', s => pipeline.emit('size', s)) - res = new fetch.Response(pipeline, res) - // set an explicit flag so we know if our response body will emit integrity and size - res.body.hasIntegrityEmitter = true - } - - res.headers.set('x-fetch-attempts', attemptNum) - - // do not retry POST requests, or requests with a streaming body - // do retry requests with a 408, 420, 429 or 500+ status in the response - const isStream = Minipass.isStream(req.body) - const isRetriable = req.method !== 'POST' && - !isStream && - ([408, 420, 429].includes(res.status) || res.status >= 500) - - if (isRetriable) { - if (typeof options.onRetry === 'function') { - options.onRetry(res) - } - - /* eslint-disable-next-line max-len */ - log.http('fetch', `${req.method} ${req.url} attempt ${attemptNum} failed with ${res.status}`) - return retryHandler(res) - } - - return res - } catch (err) { - const code = (err.code === 'EPROMISERETRY') - ? 
err.retried.code - : err.code - - // err.retried will be the thing that was thrown from above - // if it's a response, we just got a bad status code and we - // can re-throw to allow the retry - const isRetryError = err.retried instanceof fetch.Response || - (RETRY_ERRORS.includes(code) && RETRY_TYPES.includes(err.type)) - - if (req.method === 'POST' || isRetryError) { - throw err - } - - if (typeof options.onRetry === 'function') { - options.onRetry(err) - } - - log.http('fetch', `${req.method} ${req.url} attempt ${attemptNum} failed with ${err.code}`) - return retryHandler(err) - } - }, options.retry).catch((err) => { - // don't reject for http errors, just return them - if (err.status >= 400 && err.type !== 'system') { - return err - } - - throw err - }) -} - -module.exports = remoteFetch diff --git a/deps/npm/node_modules/node-gyp/node_modules/make-fetch-happen/package.json b/deps/npm/node_modules/node-gyp/node_modules/make-fetch-happen/package.json deleted file mode 100644 index 7adb4d1e7f9719..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/make-fetch-happen/package.json +++ /dev/null @@ -1,75 +0,0 @@ -{ - "name": "make-fetch-happen", - "version": "13.0.1", - "description": "Opinionated, caching, retrying fetch client", - "main": "lib/index.js", - "files": [ - "bin/", - "lib/" - ], - "scripts": { - "test": "tap", - "posttest": "npm run lint", - "eslint": "eslint", - "lint": "eslint \"**/*.{js,cjs,ts,mjs,jsx,tsx}\"", - "lintfix": "npm run lint -- --fix", - "postlint": "template-oss-check", - "snap": "tap", - "template-oss-apply": "template-oss-apply --force" - }, - "repository": { - "type": "git", - "url": "https://github.com/npm/make-fetch-happen.git" - }, - "keywords": [ - "http", - "request", - "fetch", - "mean girls", - "caching", - "cache", - "subresource integrity" - ], - "author": "GitHub Inc.", - "license": "ISC", - "dependencies": { - "@npmcli/agent": "^2.0.0", - "cacache": "^18.0.0", - "http-cache-semantics": "^4.1.1", - "is-lambda": "^1.0.1", - "minipass": "^7.0.2", - "minipass-fetch": "^3.0.0", - "minipass-flush": "^1.0.5", - "minipass-pipeline": "^1.2.4", - "negotiator": "^0.6.3", - "proc-log": "^4.2.0", - "promise-retry": "^2.0.1", - "ssri": "^10.0.0" - }, - "devDependencies": { - "@npmcli/eslint-config": "^4.0.0", - "@npmcli/template-oss": "4.21.4", - "nock": "^13.2.4", - "safe-buffer": "^5.2.1", - "standard-version": "^9.3.2", - "tap": "^16.0.0" - }, - "engines": { - "node": "^16.14.0 || >=18.0.0" - }, - "tap": { - "color": 1, - "files": "test/*.js", - "check-coverage": true, - "timeout": 60, - "nyc-arg": [ - "--exclude", - "tap-snapshots/**" - ] - }, - "templateOSS": { - "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. Edits may be overwritten.", - "version": "4.21.4", - "publish": "true" - } -} diff --git a/deps/npm/node_modules/node-gyp/node_modules/minipass-fetch/LICENSE b/deps/npm/node_modules/node-gyp/node_modules/minipass-fetch/LICENSE deleted file mode 100644 index 3c3410cdc12ee3..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/minipass-fetch/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -The MIT License (MIT) - -Copyright (c) Isaac Z. 
Schlueter and Contributors -Copyright (c) 2016 David Frank - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ---- - -Note: This is a derivative work based on "node-fetch" by David Frank, -modified and distributed under the terms of the MIT license above. -https://github.com/bitinn/node-fetch diff --git a/deps/npm/node_modules/node-gyp/node_modules/minipass-fetch/lib/abort-error.js b/deps/npm/node_modules/node-gyp/node_modules/minipass-fetch/lib/abort-error.js deleted file mode 100644 index b18f643269e375..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/minipass-fetch/lib/abort-error.js +++ /dev/null @@ -1,17 +0,0 @@ -'use strict' -class AbortError extends Error { - constructor (message) { - super(message) - this.code = 'FETCH_ABORTED' - this.type = 'aborted' - Error.captureStackTrace(this, this.constructor) - } - - get name () { - return 'AbortError' - } - - // don't allow name to be overridden, but don't throw either - set name (s) {} -} -module.exports = AbortError diff --git a/deps/npm/node_modules/node-gyp/node_modules/minipass-fetch/lib/blob.js b/deps/npm/node_modules/node-gyp/node_modules/minipass-fetch/lib/blob.js deleted file mode 100644 index 121b1730102e72..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/minipass-fetch/lib/blob.js +++ /dev/null @@ -1,97 +0,0 @@ -'use strict' -const { Minipass } = require('minipass') -const TYPE = Symbol('type') -const BUFFER = Symbol('buffer') - -class Blob { - constructor (blobParts, options) { - this[TYPE] = '' - - const buffers = [] - let size = 0 - - if (blobParts) { - const a = blobParts - const length = Number(a.length) - for (let i = 0; i < length; i++) { - const element = a[i] - const buffer = element instanceof Buffer ? element - : ArrayBuffer.isView(element) - ? Buffer.from(element.buffer, element.byteOffset, element.byteLength) - : element instanceof ArrayBuffer ? Buffer.from(element) - : element instanceof Blob ? element[BUFFER] - : typeof element === 'string' ? 
Buffer.from(element) - : Buffer.from(String(element)) - size += buffer.length - buffers.push(buffer) - } - } - - this[BUFFER] = Buffer.concat(buffers, size) - - const type = options && options.type !== undefined - && String(options.type).toLowerCase() - if (type && !/[^\u0020-\u007E]/.test(type)) { - this[TYPE] = type - } - } - - get size () { - return this[BUFFER].length - } - - get type () { - return this[TYPE] - } - - text () { - return Promise.resolve(this[BUFFER].toString()) - } - - arrayBuffer () { - const buf = this[BUFFER] - const off = buf.byteOffset - const len = buf.byteLength - const ab = buf.buffer.slice(off, off + len) - return Promise.resolve(ab) - } - - stream () { - return new Minipass().end(this[BUFFER]) - } - - slice (start, end, type) { - const size = this.size - const relativeStart = start === undefined ? 0 - : start < 0 ? Math.max(size + start, 0) - : Math.min(start, size) - const relativeEnd = end === undefined ? size - : end < 0 ? Math.max(size + end, 0) - : Math.min(end, size) - const span = Math.max(relativeEnd - relativeStart, 0) - - const buffer = this[BUFFER] - const slicedBuffer = buffer.slice( - relativeStart, - relativeStart + span - ) - const blob = new Blob([], { type }) - blob[BUFFER] = slicedBuffer - return blob - } - - get [Symbol.toStringTag] () { - return 'Blob' - } - - static get BUFFER () { - return BUFFER - } -} - -Object.defineProperties(Blob.prototype, { - size: { enumerable: true }, - type: { enumerable: true }, -}) - -module.exports = Blob diff --git a/deps/npm/node_modules/node-gyp/node_modules/minipass-fetch/lib/body.js b/deps/npm/node_modules/node-gyp/node_modules/minipass-fetch/lib/body.js deleted file mode 100644 index 62286bd1de0d91..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/minipass-fetch/lib/body.js +++ /dev/null @@ -1,350 +0,0 @@ -'use strict' -const { Minipass } = require('minipass') -const MinipassSized = require('minipass-sized') - -const Blob = require('./blob.js') -const { BUFFER } = Blob -const FetchError = require('./fetch-error.js') - -// optional dependency on 'encoding' -let convert -try { - convert = require('encoding').convert -} catch (e) { - // defer error until textConverted is called -} - -const INTERNALS = Symbol('Body internals') -const CONSUME_BODY = Symbol('consumeBody') - -class Body { - constructor (bodyArg, options = {}) { - const { size = 0, timeout = 0 } = options - const body = bodyArg === undefined || bodyArg === null ? null - : isURLSearchParams(bodyArg) ? Buffer.from(bodyArg.toString()) - : isBlob(bodyArg) ? bodyArg - : Buffer.isBuffer(bodyArg) ? bodyArg - : Object.prototype.toString.call(bodyArg) === '[object ArrayBuffer]' - ? Buffer.from(bodyArg) - : ArrayBuffer.isView(bodyArg) - ? Buffer.from(bodyArg.buffer, bodyArg.byteOffset, bodyArg.byteLength) - : Minipass.isStream(bodyArg) ? bodyArg - : Buffer.from(String(bodyArg)) - - this[INTERNALS] = { - body, - disturbed: false, - error: null, - } - - this.size = size - this.timeout = timeout - - if (Minipass.isStream(body)) { - body.on('error', er => { - const error = er.name === 'AbortError' ? 
er - : new FetchError(`Invalid response while trying to fetch ${ - this.url}: ${er.message}`, 'system', er) - this[INTERNALS].error = error - }) - } - } - - get body () { - return this[INTERNALS].body - } - - get bodyUsed () { - return this[INTERNALS].disturbed - } - - arrayBuffer () { - return this[CONSUME_BODY]().then(buf => - buf.buffer.slice(buf.byteOffset, buf.byteOffset + buf.byteLength)) - } - - blob () { - const ct = this.headers && this.headers.get('content-type') || '' - return this[CONSUME_BODY]().then(buf => Object.assign( - new Blob([], { type: ct.toLowerCase() }), - { [BUFFER]: buf } - )) - } - - async json () { - const buf = await this[CONSUME_BODY]() - try { - return JSON.parse(buf.toString()) - } catch (er) { - throw new FetchError( - `invalid json response body at ${this.url} reason: ${er.message}`, - 'invalid-json' - ) - } - } - - text () { - return this[CONSUME_BODY]().then(buf => buf.toString()) - } - - buffer () { - return this[CONSUME_BODY]() - } - - textConverted () { - return this[CONSUME_BODY]().then(buf => convertBody(buf, this.headers)) - } - - [CONSUME_BODY] () { - if (this[INTERNALS].disturbed) { - return Promise.reject(new TypeError(`body used already for: ${ - this.url}`)) - } - - this[INTERNALS].disturbed = true - - if (this[INTERNALS].error) { - return Promise.reject(this[INTERNALS].error) - } - - // body is null - if (this.body === null) { - return Promise.resolve(Buffer.alloc(0)) - } - - if (Buffer.isBuffer(this.body)) { - return Promise.resolve(this.body) - } - - const upstream = isBlob(this.body) ? this.body.stream() : this.body - - /* istanbul ignore if: should never happen */ - if (!Minipass.isStream(upstream)) { - return Promise.resolve(Buffer.alloc(0)) - } - - const stream = this.size && upstream instanceof MinipassSized ? upstream - : !this.size && upstream instanceof Minipass && - !(upstream instanceof MinipassSized) ? upstream - : this.size ? new MinipassSized({ size: this.size }) - : new Minipass() - - // allow timeout on slow response body, but only if the stream is still writable. this - // makes the timeout center on the socket stream from lib/index.js rather than the - // intermediary minipass stream we create to receive the data - const resTimeout = this.timeout && stream.writable ? setTimeout(() => { - stream.emit('error', new FetchError( - `Response timeout while trying to fetch ${ - this.url} (over ${this.timeout}ms)`, 'body-timeout')) - }, this.timeout) : null - - // do not keep the process open just for this timeout, even - // though we expect it'll get cleared eventually. - if (resTimeout && resTimeout.unref) { - resTimeout.unref() - } - - // do the pipe in the promise, because the pipe() can send too much - // data through right away and upset the MP Sized object - return new Promise((resolve) => { - // if the stream is some other kind of stream, then pipe through a MP - // so we can collect it more easily. 
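The size handling in this consumer is the part worth pausing on: when the caller declared an expected size, the body is routed through a size-checking stream rather than buffered blindly. A loose sketch of that guard with a bare collector in place of MinipassSized (collect is illustrative, not the module's API):

    const { Readable } = require('stream')

    // Buffer a stream, but refuse to grow past the declared size,
    // in the spirit of MinipassSized's EBADSIZE error.
    const collect = (stream, size) => new Promise((resolve, reject) => {
      const chunks = []
      let total = 0
      stream.on('data', chunk => {
        total += chunk.length
        if (size && total > size) {
          stream.destroy()
          return reject(new Error(`body exceeded ${size} bytes`))
        }
        chunks.push(chunk)
      })
      stream.once('error', reject)
      stream.once('end', () => resolve(Buffer.concat(chunks)))
    })

    collect(Readable.from([Buffer.from('hello')]), 3)
      .catch(er => console.log(er.message)) // => body exceeded 3 bytes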
- if (stream !== upstream) { - upstream.on('error', er => stream.emit('error', er)) - upstream.pipe(stream) - } - resolve() - }).then(() => stream.concat()).then(buf => { - clearTimeout(resTimeout) - return buf - }).catch(er => { - clearTimeout(resTimeout) - // request was aborted, reject with this Error - if (er.name === 'AbortError' || er.name === 'FetchError') { - throw er - } else if (er.name === 'RangeError') { - throw new FetchError(`Could not create Buffer from response body for ${ - this.url}: ${er.message}`, 'system', er) - } else { - // other errors, such as incorrect content-encoding or content-length - throw new FetchError(`Invalid response body while trying to fetch ${ - this.url}: ${er.message}`, 'system', er) - } - }) - } - - static clone (instance) { - if (instance.bodyUsed) { - throw new Error('cannot clone body after it is used') - } - - const body = instance.body - - // check that body is a stream and not form-data object - // NB: can't clone the form-data object without having it as a dependency - if (Minipass.isStream(body) && typeof body.getBoundary !== 'function') { - // create a dedicated tee stream so that we don't lose data - // potentially sitting in the body stream's buffer by writing it - // immediately to p1 and not having it for p2. - const tee = new Minipass() - const p1 = new Minipass() - const p2 = new Minipass() - tee.on('error', er => { - p1.emit('error', er) - p2.emit('error', er) - }) - body.on('error', er => tee.emit('error', er)) - tee.pipe(p1) - tee.pipe(p2) - body.pipe(tee) - // set instance body to one fork, return the other - instance[INTERNALS].body = p1 - return p2 - } else { - return instance.body - } - } - - static extractContentType (body) { - return body === null || body === undefined ? null - : typeof body === 'string' ? 'text/plain;charset=UTF-8' - : isURLSearchParams(body) - ? 'application/x-www-form-urlencoded;charset=UTF-8' - : isBlob(body) ? body.type || null - : Buffer.isBuffer(body) ? null - : Object.prototype.toString.call(body) === '[object ArrayBuffer]' ? null - : ArrayBuffer.isView(body) ? null - : typeof body.getBoundary === 'function' - ? `multipart/form-data;boundary=${body.getBoundary()}` - : Minipass.isStream(body) ? null - : 'text/plain;charset=UTF-8' - } - - static getTotalBytes (instance) { - const { body } = instance - return (body === null || body === undefined) ? 0 - : isBlob(body) ? body.size - : Buffer.isBuffer(body) ? body.length - : body && typeof body.getLengthSync === 'function' && ( - // detect form data input from form-data module - body._lengthRetrievers && - /* istanbul ignore next */ body._lengthRetrievers.length === 0 || // 1.x - body.hasKnownLength && body.hasKnownLength()) // 2.x - ? body.getLengthSync() - : null - } - - static writeToStream (dest, instance) { - const { body } = instance - - if (body === null || body === undefined) { - dest.end() - } else if (Buffer.isBuffer(body) || typeof body === 'string') { - dest.end(body) - } else { - // body is stream or blob - const stream = isBlob(body) ? body.stream() : body - stream.on('error', er => dest.emit('error', er)).pipe(dest) - } - - return dest - } -} - -Object.defineProperties(Body.prototype, { - body: { enumerable: true }, - bodyUsed: { enumerable: true }, - arrayBuffer: { enumerable: true }, - blob: { enumerable: true }, - json: { enumerable: true }, - text: { enumerable: true }, -}) - -const isURLSearchParams = obj => - // Duck-typing as a necessary condition. 
(typeof obj !== 'object' ||
-    typeof obj.append !== 'function' ||
-    typeof obj.delete !== 'function' ||
-    typeof obj.get !== 'function' ||
-    typeof obj.getAll !== 'function' ||
-    typeof obj.has !== 'function' ||
-    typeof obj.set !== 'function') ? false
-    // Brand-checking and more duck-typing as optional condition.
-    : obj.constructor.name === 'URLSearchParams' ||
-    Object.prototype.toString.call(obj) === '[object URLSearchParams]' ||
-    typeof obj.sort === 'function'
-
-const isBlob = obj =>
-  typeof obj === 'object' &&
-  typeof obj.arrayBuffer === 'function' &&
-  typeof obj.type === 'string' &&
-  typeof obj.stream === 'function' &&
-  typeof obj.constructor === 'function' &&
-  typeof obj.constructor.name === 'string' &&
-  /^(Blob|File)$/.test(obj.constructor.name) &&
-  /^(Blob|File)$/.test(obj[Symbol.toStringTag])
-
-const convertBody = (buffer, headers) => {
-  /* istanbul ignore if */
-  if (typeof convert !== 'function') {
-    throw new Error('The package `encoding` must be installed to use the textConverted() function')
-  }
-
-  const ct = headers && headers.get('content-type')
-  let charset = 'utf-8'
-  let res
-
-  // header
-  if (ct) {
-    res = /charset=([^;]*)/i.exec(ct)
-  }
-
-  // no charset in content type, peek at response body for at most 1024 bytes
-  const str = buffer.slice(0, 1024).toString()
-
-  // html5
-  if (!res && str) {
-    res = /<meta.+?charset=(['"])(.+?)\1/i.exec(str)
-  }
-
-  // html4
-  if (!res && str) {
-    res = /<meta[\s]+?http-equiv=(['"])content-type\1[\s]+?content=(['"])(.+?)\2/i.exec(str)
-    if (res) {
-      res = /charset=(.*)/i.exec(res.pop())
-    }
-  }
-
-  // xml
-  if (!res && str) {
-    res = /<\?xml.+?encoding=(['"])(.+?)\1/i.exec(str)
-  }
-
-  // found charset
-  if (res) {
-    charset = res.pop()
-
-    // prevent decode issues when sites use incorrect encoding
-    // ref: https://hsivonen.fi/encoding-menu/
-    if (charset === 'gb2312' || charset === 'gbk') {
-      charset = 'gb18030'
-    }
-  }
-
-  // turn raw buffers into a single utf-8 buffer
-  return convert(
-    buffer,
-    'UTF-8',
-    charset
-  ).toString()
-}
-
-module.exports = Body
diff --git a/deps/npm/node_modules/node-gyp/node_modules/minipass-fetch/lib/fetch-error.js b/deps/npm/node_modules/node-gyp/node_modules/minipass-fetch/lib/fetch-error.js
deleted file mode 100644
--- a/deps/npm/node_modules/node-gyp/node_modules/minipass-fetch/lib/fetch-error.js
+++ /dev/null
-'use strict'
-class FetchError extends Error {
-  constructor (message, type, systemError) {
-    super(message)
-    this.code = 'FETCH_ERROR'
-
-    // pick up code, expected, path, and other fields from the system error
-    if (systemError) {
-      Object.assign(this, systemError)
-    }
-
-    this.errno = this.code
-
-    this.type = this.code === 'EBADSIZE' && this.found > this.expect
-      ? 'max-size' : type
-    this.message = message
-    Error.captureStackTrace(this, this.constructor)
-  }
-
-  get name () {
-    return 'FetchError'
-  }
-
-  // don't allow name to be overwritten
-  set name (n) {}
-
-  get [Symbol.toStringTag] () {
-    return 'FetchError'
-  }
-}
-module.exports = FetchError
diff --git a/deps/npm/node_modules/node-gyp/node_modules/minipass-fetch/lib/headers.js b/deps/npm/node_modules/node-gyp/node_modules/minipass-fetch/lib/headers.js
deleted file mode 100644
index dd6e854d5ba399..00000000000000
--- a/deps/npm/node_modules/node-gyp/node_modules/minipass-fetch/lib/headers.js
+++ /dev/null
@@ -1,267 +0,0 @@
-'use strict'
-const invalidTokenRegex = /[^^_`a-zA-Z\-0-9!#$%&'*+.|~]/
-const invalidHeaderCharRegex = /[^\t\x20-\x7e\x80-\xff]/
-
-const validateName = name => {
-  name = `${name}`
-  if (invalidTokenRegex.test(name) || name === '') {
-    throw new TypeError(`${name} is not a legal HTTP header name`)
-  }
-}
-
-const validateValue = value => {
-  value = `${value}`
-  if (invalidHeaderCharRegex.test(value)) {
-    throw new TypeError(`${value} is not a legal HTTP header value`)
-  }
-}
-
-const find = (map, name) => {
-  name = name.toLowerCase()
-  for (const key in map) {
-    if (key.toLowerCase() === name) {
-      return key
-    }
-  }
-  return undefined
-}
-
-const MAP = Symbol('map')
-class Headers {
-  constructor (init = undefined) {
-    this[MAP] = Object.create(null)
-    if (init instanceof Headers) {
-      const rawHeaders = init.raw()
-      const headerNames = Object.keys(rawHeaders)
-      for (const headerName of headerNames) {
-        for (const value of rawHeaders[headerName]) {
-          this.append(headerName, value)
-        }
-      }
-      return
-    }
-
-    // no-op
-    if (init === undefined || init === null) {
-      return
-    }
-
-    if (typeof init === 'object') {
-      const method = init[Symbol.iterator]
-      if (method !== null && method !== undefined) {
-        if (typeof method !== 'function') {
-          throw new TypeError('Header pairs must be iterable')
-        }
-
-        // sequence<sequence<ByteString>>
-        // Note: per spec we have to first exhaust the lists then process them
-        const pairs = []
-        for (const pair of init) {
-          if (typeof pair !== 'object' ||
-              typeof pair[Symbol.iterator] !== 'function') {
-            throw
new TypeError('Each header pair must be iterable')
-          }
-          const arrPair = Array.from(pair)
-          if (arrPair.length !== 2) {
-            throw new TypeError('Each header pair must be a name/value tuple')
-          }
-          pairs.push(arrPair)
-        }
-
-        for (const pair of pairs) {
-          this.append(pair[0], pair[1])
-        }
-      } else {
-        // record<ByteString, ByteString>
-        for (const key of Object.keys(init)) {
-          this.append(key, init[key])
-        }
-      }
-    } else {
-      throw new TypeError('Provided initializer must be an object')
-    }
-  }
-
-  get (name) {
-    name = `${name}`
-    validateName(name)
-    const key = find(this[MAP], name)
-    if (key === undefined) {
-      return null
-    }
-
-    return this[MAP][key].join(', ')
-  }
-
-  forEach (callback, thisArg = undefined) {
-    let pairs = getHeaders(this)
-    for (let i = 0; i < pairs.length; i++) {
-      const [name, value] = pairs[i]
-      callback.call(thisArg, value, name, this)
-      // refresh in case the callback added more headers
-      pairs = getHeaders(this)
-    }
-  }
-
-  set (name, value) {
-    name = `${name}`
-    value = `${value}`
-    validateName(name)
-    validateValue(value)
-    const key = find(this[MAP], name)
-    this[MAP][key !== undefined ? key : name] = [value]
-  }
-
-  append (name, value) {
-    name = `${name}`
-    value = `${value}`
-    validateName(name)
-    validateValue(value)
-    const key = find(this[MAP], name)
-    if (key !== undefined) {
-      this[MAP][key].push(value)
-    } else {
-      this[MAP][name] = [value]
-    }
-  }
-
-  has (name) {
-    name = `${name}`
-    validateName(name)
-    return find(this[MAP], name) !== undefined
-  }
-
-  delete (name) {
-    name = `${name}`
-    validateName(name)
-    const key = find(this[MAP], name)
-    if (key !== undefined) {
-      delete this[MAP][key]
-    }
-  }
-
-  raw () {
-    return this[MAP]
-  }
-
-  keys () {
-    return new HeadersIterator(this, 'key')
-  }
-
-  values () {
-    return new HeadersIterator(this, 'value')
-  }
-
-  [Symbol.iterator] () {
-    return new HeadersIterator(this, 'key+value')
-  }
-
-  entries () {
-    return new HeadersIterator(this, 'key+value')
-  }
-
-  get [Symbol.toStringTag] () {
-    return 'Headers'
-  }
-
-  static exportNodeCompatibleHeaders (headers) {
-    const obj = Object.assign(Object.create(null), headers[MAP])
-
-    // http.request() only supports string as Host header. This hack makes
-    // specifying custom Host header possible.
-    const hostHeaderKey = find(headers[MAP], 'Host')
-    if (hostHeaderKey !== undefined) {
-      obj[hostHeaderKey] = obj[hostHeaderKey][0]
-    }
-
-    return obj
-  }
-
-  static createHeadersLenient (obj) {
-    const headers = new Headers()
-    for (const name of Object.keys(obj)) {
-      if (invalidTokenRegex.test(name)) {
-        continue
-      }
-
-      if (Array.isArray(obj[name])) {
-        for (const val of obj[name]) {
-          if (invalidHeaderCharRegex.test(val)) {
-            continue
-          }
-
-          if (headers[MAP][name] === undefined) {
-            headers[MAP][name] = [val]
-          } else {
-            headers[MAP][name].push(val)
-          }
-        }
-      } else if (!invalidHeaderCharRegex.test(obj[name])) {
-        headers[MAP][name] = [obj[name]]
-      }
-    }
-    return headers
-  }
-}
-
-Object.defineProperties(Headers.prototype, {
-  get: { enumerable: true },
-  forEach: { enumerable: true },
-  set: { enumerable: true },
-  append: { enumerable: true },
-  has: { enumerable: true },
-  delete: { enumerable: true },
-  keys: { enumerable: true },
-  values: { enumerable: true },
-  entries: { enumerable: true },
-})
-
-const getHeaders = (headers, kind = 'key+value') =>
-  Object.keys(headers[MAP]).sort().map(
-    kind === 'key' ? k => k.toLowerCase()
-    : kind === 'value' ?
k => headers[MAP][k].join(', ') - : k => [k.toLowerCase(), headers[MAP][k].join(', ')] - ) - -const INTERNAL = Symbol('internal') - -class HeadersIterator { - constructor (target, kind) { - this[INTERNAL] = { - target, - kind, - index: 0, - } - } - - get [Symbol.toStringTag] () { - return 'HeadersIterator' - } - - next () { - /* istanbul ignore if: should be impossible */ - if (!this || Object.getPrototypeOf(this) !== HeadersIterator.prototype) { - throw new TypeError('Value of `this` is not a HeadersIterator') - } - - const { target, kind, index } = this[INTERNAL] - const values = getHeaders(target, kind) - const len = values.length - if (index >= len) { - return { - value: undefined, - done: true, - } - } - - this[INTERNAL].index++ - - return { value: values[index], done: false } - } -} - -// manually extend because 'extends' requires a ctor -Object.setPrototypeOf(HeadersIterator.prototype, - Object.getPrototypeOf(Object.getPrototypeOf([][Symbol.iterator]()))) - -module.exports = Headers diff --git a/deps/npm/node_modules/node-gyp/node_modules/minipass-fetch/lib/index.js b/deps/npm/node_modules/node-gyp/node_modules/minipass-fetch/lib/index.js deleted file mode 100644 index da402161670e65..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/minipass-fetch/lib/index.js +++ /dev/null @@ -1,377 +0,0 @@ -'use strict' -const { URL } = require('url') -const http = require('http') -const https = require('https') -const zlib = require('minizlib') -const { Minipass } = require('minipass') - -const Body = require('./body.js') -const { writeToStream, getTotalBytes } = Body -const Response = require('./response.js') -const Headers = require('./headers.js') -const { createHeadersLenient } = Headers -const Request = require('./request.js') -const { getNodeRequestOptions } = Request -const FetchError = require('./fetch-error.js') -const AbortError = require('./abort-error.js') - -// XXX this should really be split up and unit-ized for easier testing -// and better DRY implementation of data/http request aborting -const fetch = async (url, opts) => { - if (/^data:/.test(url)) { - const request = new Request(url, opts) - // delay 1 promise tick so that the consumer can abort right away - return Promise.resolve().then(() => new Promise((resolve, reject) => { - let type, data - try { - const { pathname, search } = new URL(url) - const split = pathname.split(',') - if (split.length < 2) { - throw new Error('invalid data: URI') - } - const mime = split.shift() - const base64 = /;base64$/.test(mime) - type = base64 ? mime.slice(0, -1 * ';base64'.length) : mime - const rawData = decodeURIComponent(split.join(',') + search) - data = base64 ? Buffer.from(rawData, 'base64') : Buffer.from(rawData) - } catch (er) { - return reject(new FetchError(`[${request.method}] ${ - request.url} invalid URL, ${er.message}`, 'system', er)) - } - - const { signal } = request - if (signal && signal.aborted) { - return reject(new AbortError('The user aborted a request.')) - } - - const headers = { 'Content-Length': data.length } - if (type) { - headers['Content-Type'] = type - } - return resolve(new Response(data, { headers })) - })) - } - - return new Promise((resolve, reject) => { - // build request object - const request = new Request(url, opts) - let options - try { - options = getNodeRequestOptions(request) - } catch (er) { - return reject(er) - } - - const send = (options.protocol === 'https:' ? 
https : http).request - const { signal } = request - let response = null - const abort = () => { - const error = new AbortError('The user aborted a request.') - reject(error) - if (Minipass.isStream(request.body) && - typeof request.body.destroy === 'function') { - request.body.destroy(error) - } - if (response && response.body) { - response.body.emit('error', error) - } - } - - if (signal && signal.aborted) { - return abort() - } - - const abortAndFinalize = () => { - abort() - finalize() - } - - const finalize = () => { - req.abort() - if (signal) { - signal.removeEventListener('abort', abortAndFinalize) - } - clearTimeout(reqTimeout) - } - - // send request - const req = send(options) - - if (signal) { - signal.addEventListener('abort', abortAndFinalize) - } - - let reqTimeout = null - if (request.timeout) { - req.once('socket', () => { - reqTimeout = setTimeout(() => { - reject(new FetchError(`network timeout at: ${ - request.url}`, 'request-timeout')) - finalize() - }, request.timeout) - }) - } - - req.on('error', er => { - // if a 'response' event is emitted before the 'error' event, then by the - // time this handler is run it's too late to reject the Promise for the - // response. instead, we forward the error event to the response stream - // so that the error will surface to the user when they try to consume - // the body. this is done as a side effect of aborting the request except - // for in windows, where we must forward the event manually, otherwise - // there is no longer a ref'd socket attached to the request and the - // stream never ends so the event loop runs out of work and the process - // exits without warning. - // coverage skipped here due to the difficulty in testing - // istanbul ignore next - if (req.res) { - req.res.emit('error', er) - } - reject(new FetchError(`request to ${request.url} failed, reason: ${ - er.message}`, 'system', er)) - finalize() - }) - - req.on('response', res => { - clearTimeout(reqTimeout) - - const headers = createHeadersLenient(res.headers) - - // HTTP fetch step 5 - if (fetch.isRedirect(res.statusCode)) { - // HTTP fetch step 5.2 - const location = headers.get('Location') - - // HTTP fetch step 5.3 - let locationURL = null - try { - locationURL = location === null ? null : new URL(location, request.url).toString() - } catch { - // error here can only be invalid URL in Location: header - // do not throw when options.redirect == manual - // let the user extract the errorneous redirect URL - if (request.redirect !== 'manual') { - /* eslint-disable-next-line max-len */ - reject(new FetchError(`uri requested responds with an invalid redirect URL: ${location}`, 'invalid-redirect')) - finalize() - return - } - } - - // HTTP fetch step 5.5 - if (request.redirect === 'error') { - reject(new FetchError('uri requested responds with a redirect, ' + - `redirect mode is set to error: ${request.url}`, 'no-redirect')) - finalize() - return - } else if (request.redirect === 'manual') { - // node-fetch-specific step: make manual redirect a bit easier to - // use by setting the Location header value to the resolved URL. 
- if (locationURL !== null) { - // handle corrupted header - try { - headers.set('Location', locationURL) - } catch (err) { - /* istanbul ignore next: nodejs server prevent invalid - response headers, we can't test this through normal - request */ - reject(err) - } - } - } else if (request.redirect === 'follow' && locationURL !== null) { - // HTTP-redirect fetch step 5 - if (request.counter >= request.follow) { - reject(new FetchError(`maximum redirect reached at: ${ - request.url}`, 'max-redirect')) - finalize() - return - } - - // HTTP-redirect fetch step 9 - if (res.statusCode !== 303 && - request.body && - getTotalBytes(request) === null) { - reject(new FetchError( - 'Cannot follow redirect with body being a readable stream', - 'unsupported-redirect' - )) - finalize() - return - } - - // Update host due to redirection - request.headers.set('host', (new URL(locationURL)).host) - - // HTTP-redirect fetch step 6 (counter increment) - // Create a new Request object. - const requestOpts = { - headers: new Headers(request.headers), - follow: request.follow, - counter: request.counter + 1, - agent: request.agent, - compress: request.compress, - method: request.method, - body: request.body, - signal: request.signal, - timeout: request.timeout, - } - - // if the redirect is to a new hostname, strip the authorization and cookie headers - const parsedOriginal = new URL(request.url) - const parsedRedirect = new URL(locationURL) - if (parsedOriginal.hostname !== parsedRedirect.hostname) { - requestOpts.headers.delete('authorization') - requestOpts.headers.delete('cookie') - } - - // HTTP-redirect fetch step 11 - if (res.statusCode === 303 || ( - (res.statusCode === 301 || res.statusCode === 302) && - request.method === 'POST' - )) { - requestOpts.method = 'GET' - requestOpts.body = undefined - requestOpts.headers.delete('content-length') - } - - // HTTP-redirect fetch step 15 - resolve(fetch(new Request(locationURL, requestOpts))) - finalize() - return - } - } // end if(isRedirect) - - // prepare response - res.once('end', () => - signal && signal.removeEventListener('abort', abortAndFinalize)) - - const body = new Minipass() - // if an error occurs, either on the response stream itself, on one of the - // decoder streams, or a response length timeout from the Body class, we - // forward the error through to our internal body stream. If we see an - // error event on that, we call finalize to abort the request and ensure - // we don't leave a socket believing a request is in flight. - // this is difficult to test, so lacks specific coverage. - body.on('error', finalize) - // exceedingly rare that the stream would have an error, - // but just in case we proxy it to the stream in use. - res.on('error', /* istanbul ignore next */ er => body.emit('error', er)) - res.on('data', (chunk) => body.write(chunk)) - res.on('end', () => body.end()) - - const responseOptions = { - url: request.url, - status: res.statusCode, - statusText: res.statusMessage, - headers: headers, - size: request.size, - timeout: request.timeout, - counter: request.counter, - trailer: new Promise(resolveTrailer => - res.on('end', () => resolveTrailer(createHeadersLenient(res.trailers)))), - } - - // HTTP-network fetch step 12.1.1.3 - const codings = headers.get('Content-Encoding') - - // HTTP-network fetch step 12.1.1.4: handle content codings - - // in following scenarios we ignore compression support - // 1. compression support is disabled - // 2. HEAD request - // 3. no Content-Encoding header - // 4. 
no content response (204) - // 5. content not modified response (304) - if (!request.compress || - request.method === 'HEAD' || - codings === null || - res.statusCode === 204 || - res.statusCode === 304) { - response = new Response(body, responseOptions) - resolve(response) - return - } - - // Be less strict when decoding compressed responses, since sometimes - // servers send slightly invalid responses that are still accepted - // by common browsers. - // Always using Z_SYNC_FLUSH is what cURL does. - const zlibOptions = { - flush: zlib.constants.Z_SYNC_FLUSH, - finishFlush: zlib.constants.Z_SYNC_FLUSH, - } - - // for gzip - if (codings === 'gzip' || codings === 'x-gzip') { - const unzip = new zlib.Gunzip(zlibOptions) - response = new Response( - // exceedingly rare that the stream would have an error, - // but just in case we proxy it to the stream in use. - body.on('error', /* istanbul ignore next */ er => unzip.emit('error', er)).pipe(unzip), - responseOptions - ) - resolve(response) - return - } - - // for deflate - if (codings === 'deflate' || codings === 'x-deflate') { - // handle the infamous raw deflate response from old servers - // a hack for old IIS and Apache servers - const raw = res.pipe(new Minipass()) - raw.once('data', chunk => { - // see http://stackoverflow.com/questions/37519828 - const decoder = (chunk[0] & 0x0F) === 0x08 - ? new zlib.Inflate() - : new zlib.InflateRaw() - // exceedingly rare that the stream would have an error, - // but just in case we proxy it to the stream in use. - body.on('error', /* istanbul ignore next */ er => decoder.emit('error', er)).pipe(decoder) - response = new Response(decoder, responseOptions) - resolve(response) - }) - return - } - - // for br - if (codings === 'br') { - // ignoring coverage so tests don't have to fake support (or lack of) for brotli - // istanbul ignore next - try { - var decoder = new zlib.BrotliDecompress() - } catch (err) { - reject(err) - finalize() - return - } - // exceedingly rare that the stream would have an error, - // but just in case we proxy it to the stream in use. 
- body.on('error', /* istanbul ignore next */ er => decoder.emit('error', er)).pipe(decoder) - response = new Response(decoder, responseOptions) - resolve(response) - return - } - - // otherwise, use response as-is - response = new Response(body, responseOptions) - resolve(response) - }) - - writeToStream(req, request) - }) -} - -module.exports = fetch - -fetch.isRedirect = code => - code === 301 || - code === 302 || - code === 303 || - code === 307 || - code === 308 - -fetch.Headers = Headers -fetch.Request = Request -fetch.Response = Response -fetch.FetchError = FetchError -fetch.AbortError = AbortError diff --git a/deps/npm/node_modules/node-gyp/node_modules/minipass-fetch/lib/request.js b/deps/npm/node_modules/node-gyp/node_modules/minipass-fetch/lib/request.js deleted file mode 100644 index 054439e6699107..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/minipass-fetch/lib/request.js +++ /dev/null @@ -1,282 +0,0 @@ -'use strict' -const { URL } = require('url') -const { Minipass } = require('minipass') -const Headers = require('./headers.js') -const { exportNodeCompatibleHeaders } = Headers -const Body = require('./body.js') -const { clone, extractContentType, getTotalBytes } = Body - -const version = require('../package.json').version -const defaultUserAgent = - `minipass-fetch/${version} (+https://github.com/isaacs/minipass-fetch)` - -const INTERNALS = Symbol('Request internals') - -const isRequest = input => - typeof input === 'object' && typeof input[INTERNALS] === 'object' - -const isAbortSignal = signal => { - const proto = ( - signal - && typeof signal === 'object' - && Object.getPrototypeOf(signal) - ) - return !!(proto && proto.constructor.name === 'AbortSignal') -} - -class Request extends Body { - constructor (input, init = {}) { - const parsedURL = isRequest(input) ? new URL(input.url) - : input && input.href ? new URL(input.href) - : new URL(`${input}`) - - if (isRequest(input)) { - init = { ...input[INTERNALS], ...init } - } else if (!input || typeof input === 'string') { - input = {} - } - - const method = (init.method || input.method || 'GET').toUpperCase() - const isGETHEAD = method === 'GET' || method === 'HEAD' - - if ((init.body !== null && init.body !== undefined || - isRequest(input) && input.body !== null) && isGETHEAD) { - throw new TypeError('Request with GET/HEAD method cannot have body') - } - - const inputBody = init.body !== null && init.body !== undefined ? init.body - : isRequest(input) && input.body !== null ? clone(input) - : null - - super(inputBody, { - timeout: init.timeout || input.timeout || 0, - size: init.size || input.size || 0, - }) - - const headers = new Headers(init.headers || input.headers || {}) - - if (inputBody !== null && inputBody !== undefined && - !headers.has('Content-Type')) { - const contentType = extractContentType(inputBody) - if (contentType) { - headers.append('Content-Type', contentType) - } - } - - const signal = 'signal' in init ? 
init.signal - : null - - if (signal !== null && signal !== undefined && !isAbortSignal(signal)) { - throw new TypeError('Expected signal must be an instanceof AbortSignal') - } - - // TLS specific options that are handled by node - const { - ca, - cert, - ciphers, - clientCertEngine, - crl, - dhparam, - ecdhCurve, - family, - honorCipherOrder, - key, - passphrase, - pfx, - rejectUnauthorized = process.env.NODE_TLS_REJECT_UNAUTHORIZED !== '0', - secureOptions, - secureProtocol, - servername, - sessionIdContext, - } = init - - this[INTERNALS] = { - method, - redirect: init.redirect || input.redirect || 'follow', - headers, - parsedURL, - signal, - ca, - cert, - ciphers, - clientCertEngine, - crl, - dhparam, - ecdhCurve, - family, - honorCipherOrder, - key, - passphrase, - pfx, - rejectUnauthorized, - secureOptions, - secureProtocol, - servername, - sessionIdContext, - } - - // node-fetch-only options - this.follow = init.follow !== undefined ? init.follow - : input.follow !== undefined ? input.follow - : 20 - this.compress = init.compress !== undefined ? init.compress - : input.compress !== undefined ? input.compress - : true - this.counter = init.counter || input.counter || 0 - this.agent = init.agent || input.agent - } - - get method () { - return this[INTERNALS].method - } - - get url () { - return this[INTERNALS].parsedURL.toString() - } - - get headers () { - return this[INTERNALS].headers - } - - get redirect () { - return this[INTERNALS].redirect - } - - get signal () { - return this[INTERNALS].signal - } - - clone () { - return new Request(this) - } - - get [Symbol.toStringTag] () { - return 'Request' - } - - static getNodeRequestOptions (request) { - const parsedURL = request[INTERNALS].parsedURL - const headers = new Headers(request[INTERNALS].headers) - - // fetch step 1.3 - if (!headers.has('Accept')) { - headers.set('Accept', '*/*') - } - - // Basic fetch - if (!/^https?:$/.test(parsedURL.protocol)) { - throw new TypeError('Only HTTP(S) protocols are supported') - } - - if (request.signal && - Minipass.isStream(request.body) && - typeof request.body.destroy !== 'function') { - throw new Error( - 'Cancellation of streamed requests with AbortSignal is not supported') - } - - // HTTP-network-or-cache fetch steps 2.4-2.7 - const contentLengthValue = - (request.body === null || request.body === undefined) && - /^(POST|PUT)$/i.test(request.method) ? '0' - : request.body !== null && request.body !== undefined - ? getTotalBytes(request) - : null - - if (contentLengthValue) { - headers.set('Content-Length', contentLengthValue + '') - } - - // HTTP-network-or-cache fetch step 2.11 - if (!headers.has('User-Agent')) { - headers.set('User-Agent', defaultUserAgent) - } - - // HTTP-network-or-cache fetch step 2.15 - if (request.compress && !headers.has('Accept-Encoding')) { - headers.set('Accept-Encoding', 'gzip,deflate') - } - - const agent = typeof request.agent === 'function' - ? 
request.agent(parsedURL) - : request.agent - - if (!headers.has('Connection') && !agent) { - headers.set('Connection', 'close') - } - - // TLS specific options that are handled by node - const { - ca, - cert, - ciphers, - clientCertEngine, - crl, - dhparam, - ecdhCurve, - family, - honorCipherOrder, - key, - passphrase, - pfx, - rejectUnauthorized, - secureOptions, - secureProtocol, - servername, - sessionIdContext, - } = request[INTERNALS] - - // HTTP-network fetch step 4.2 - // chunked encoding is handled by Node.js - - // we cannot spread parsedURL directly, so we have to read each property one-by-one - // and map them to the equivalent https?.request() method options - const urlProps = { - auth: parsedURL.username || parsedURL.password - ? `${parsedURL.username}:${parsedURL.password}` - : '', - host: parsedURL.host, - hostname: parsedURL.hostname, - path: `${parsedURL.pathname}${parsedURL.search}`, - port: parsedURL.port, - protocol: parsedURL.protocol, - } - - return { - ...urlProps, - method: request.method, - headers: exportNodeCompatibleHeaders(headers), - agent, - ca, - cert, - ciphers, - clientCertEngine, - crl, - dhparam, - ecdhCurve, - family, - honorCipherOrder, - key, - passphrase, - pfx, - rejectUnauthorized, - secureOptions, - secureProtocol, - servername, - sessionIdContext, - timeout: request.timeout, - } - } -} - -module.exports = Request - -Object.defineProperties(Request.prototype, { - method: { enumerable: true }, - url: { enumerable: true }, - headers: { enumerable: true }, - redirect: { enumerable: true }, - clone: { enumerable: true }, - signal: { enumerable: true }, -}) diff --git a/deps/npm/node_modules/node-gyp/node_modules/minipass-fetch/lib/response.js b/deps/npm/node_modules/node-gyp/node_modules/minipass-fetch/lib/response.js deleted file mode 100644 index 54cb52db3594a7..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/minipass-fetch/lib/response.js +++ /dev/null @@ -1,90 +0,0 @@ -'use strict' -const http = require('http') -const { STATUS_CODES } = http - -const Headers = require('./headers.js') -const Body = require('./body.js') -const { clone, extractContentType } = Body - -const INTERNALS = Symbol('Response internals') - -class Response extends Body { - constructor (body = null, opts = {}) { - super(body, opts) - - const status = opts.status || 200 - const headers = new Headers(opts.headers) - - if (body !== null && body !== undefined && !headers.has('Content-Type')) { - const contentType = extractContentType(body) - if (contentType) { - headers.append('Content-Type', contentType) - } - } - - this[INTERNALS] = { - url: opts.url, - status, - statusText: opts.statusText || STATUS_CODES[status], - headers, - counter: opts.counter, - trailer: Promise.resolve(opts.trailer || new Headers()), - } - } - - get trailer () { - return this[INTERNALS].trailer - } - - get url () { - return this[INTERNALS].url || '' - } - - get status () { - return this[INTERNALS].status - } - - get ok () { - return this[INTERNALS].status >= 200 && this[INTERNALS].status < 300 - } - - get redirected () { - return this[INTERNALS].counter > 0 - } - - get statusText () { - return this[INTERNALS].statusText - } - - get headers () { - return this[INTERNALS].headers - } - - clone () { - return new Response(clone(this), { - url: this.url, - status: this.status, - statusText: this.statusText, - headers: this.headers, - ok: this.ok, - redirected: this.redirected, - trailer: this.trailer, - }) - } - - get [Symbol.toStringTag] () { - return 'Response' - } -} - -module.exports = 
Response - -Object.defineProperties(Response.prototype, { - url: { enumerable: true }, - status: { enumerable: true }, - ok: { enumerable: true }, - redirected: { enumerable: true }, - statusText: { enumerable: true }, - headers: { enumerable: true }, - clone: { enumerable: true }, -}) diff --git a/deps/npm/node_modules/node-gyp/node_modules/minipass-fetch/package.json b/deps/npm/node_modules/node-gyp/node_modules/minipass-fetch/package.json deleted file mode 100644 index d491a7fba126d0..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/minipass-fetch/package.json +++ /dev/null @@ -1,69 +0,0 @@ -{ - "name": "minipass-fetch", - "version": "3.0.5", - "description": "An implementation of window.fetch in Node.js using Minipass streams", - "license": "MIT", - "main": "lib/index.js", - "scripts": { - "test:tls-fixtures": "./test/fixtures/tls/setup.sh", - "test": "tap", - "snap": "tap", - "lint": "eslint \"**/*.{js,cjs,ts,mjs,jsx,tsx}\"", - "postlint": "template-oss-check", - "lintfix": "npm run lint -- --fix", - "posttest": "npm run lint", - "template-oss-apply": "template-oss-apply --force" - }, - "tap": { - "coverage-map": "map.js", - "check-coverage": true, - "nyc-arg": [ - "--exclude", - "tap-snapshots/**" - ] - }, - "devDependencies": { - "@npmcli/eslint-config": "^4.0.0", - "@npmcli/template-oss": "4.22.0", - "@ungap/url-search-params": "^0.2.2", - "abort-controller": "^3.0.0", - "abortcontroller-polyfill": "~1.7.3", - "encoding": "^0.1.13", - "form-data": "^4.0.0", - "nock": "^13.2.4", - "parted": "^0.1.1", - "string-to-arraybuffer": "^1.0.2", - "tap": "^16.0.0" - }, - "dependencies": { - "minipass": "^7.0.3", - "minipass-sized": "^1.0.3", - "minizlib": "^2.1.2" - }, - "optionalDependencies": { - "encoding": "^0.1.13" - }, - "repository": { - "type": "git", - "url": "git+https://github.com/npm/minipass-fetch.git" - }, - "keywords": [ - "fetch", - "minipass", - "node-fetch", - "window.fetch" - ], - "files": [ - "bin/", - "lib/" - ], - "engines": { - "node": "^14.17.0 || ^16.13.0 || >=18.0.0" - }, - "author": "GitHub Inc.", - "templateOSS": { - "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. Edits may be overwritten.", - "version": "4.22.0", - "publish": "true" - } -} diff --git a/deps/npm/node_modules/node-gyp/node_modules/minizlib/LICENSE b/deps/npm/node_modules/node-gyp/node_modules/minizlib/LICENSE new file mode 100644 index 00000000000000..49f7efe431c9ea --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/minizlib/LICENSE @@ -0,0 +1,26 @@ +Minizlib was created by Isaac Z. Schlueter. +It is a derivative work of the Node.js project. + +""" +Copyright (c) 2017-2023 Isaac Z. Schlueter and Contributors +Copyright (c) 2017-2023 Node.js contributors. All rights reserved. +Copyright (c) 2017-2023 Joyent, Inc. and other Node contributors. All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the "Software"), +to deal in the Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, sublicense, +and/or sell copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +""" diff --git a/deps/npm/node_modules/node-gyp/node_modules/minizlib/dist/commonjs/constants.js b/deps/npm/node_modules/node-gyp/node_modules/minizlib/dist/commonjs/constants.js new file mode 100644 index 00000000000000..dfc2c1957bfc99 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/minizlib/dist/commonjs/constants.js @@ -0,0 +1,123 @@ +"use strict"; +var __importDefault = (this && this.__importDefault) || function (mod) { + return (mod && mod.__esModule) ? mod : { "default": mod }; +}; +Object.defineProperty(exports, "__esModule", { value: true }); +exports.constants = void 0; +// Update with any zlib constants that are added or changed in the future. +// Node v6 didn't export this, so we just hard code the version and rely +// on all the other hard-coded values from zlib v4736. When node v6 +// support drops, we can just export the realZlibConstants object. +const zlib_1 = __importDefault(require("zlib")); +/* c8 ignore start */ +const realZlibConstants = zlib_1.default.constants || { ZLIB_VERNUM: 4736 }; +/* c8 ignore stop */ +exports.constants = Object.freeze(Object.assign(Object.create(null), { + Z_NO_FLUSH: 0, + Z_PARTIAL_FLUSH: 1, + Z_SYNC_FLUSH: 2, + Z_FULL_FLUSH: 3, + Z_FINISH: 4, + Z_BLOCK: 5, + Z_OK: 0, + Z_STREAM_END: 1, + Z_NEED_DICT: 2, + Z_ERRNO: -1, + Z_STREAM_ERROR: -2, + Z_DATA_ERROR: -3, + Z_MEM_ERROR: -4, + Z_BUF_ERROR: -5, + Z_VERSION_ERROR: -6, + Z_NO_COMPRESSION: 0, + Z_BEST_SPEED: 1, + Z_BEST_COMPRESSION: 9, + Z_DEFAULT_COMPRESSION: -1, + Z_FILTERED: 1, + Z_HUFFMAN_ONLY: 2, + Z_RLE: 3, + Z_FIXED: 4, + Z_DEFAULT_STRATEGY: 0, + DEFLATE: 1, + INFLATE: 2, + GZIP: 3, + GUNZIP: 4, + DEFLATERAW: 5, + INFLATERAW: 6, + UNZIP: 7, + BROTLI_DECODE: 8, + BROTLI_ENCODE: 9, + Z_MIN_WINDOWBITS: 8, + Z_MAX_WINDOWBITS: 15, + Z_DEFAULT_WINDOWBITS: 15, + Z_MIN_CHUNK: 64, + Z_MAX_CHUNK: Infinity, + Z_DEFAULT_CHUNK: 16384, + Z_MIN_MEMLEVEL: 1, + Z_MAX_MEMLEVEL: 9, + Z_DEFAULT_MEMLEVEL: 8, + Z_MIN_LEVEL: -1, + Z_MAX_LEVEL: 9, + Z_DEFAULT_LEVEL: -1, + BROTLI_OPERATION_PROCESS: 0, + BROTLI_OPERATION_FLUSH: 1, + BROTLI_OPERATION_FINISH: 2, + BROTLI_OPERATION_EMIT_METADATA: 3, + BROTLI_MODE_GENERIC: 0, + BROTLI_MODE_TEXT: 1, + BROTLI_MODE_FONT: 2, + BROTLI_DEFAULT_MODE: 0, + BROTLI_MIN_QUALITY: 0, + BROTLI_MAX_QUALITY: 11, + BROTLI_DEFAULT_QUALITY: 11, + BROTLI_MIN_WINDOW_BITS: 10, + BROTLI_MAX_WINDOW_BITS: 24, + BROTLI_LARGE_MAX_WINDOW_BITS: 30, + BROTLI_DEFAULT_WINDOW: 22, + BROTLI_MIN_INPUT_BLOCK_BITS: 16, + BROTLI_MAX_INPUT_BLOCK_BITS: 24, + BROTLI_PARAM_MODE: 0, + BROTLI_PARAM_QUALITY: 1, + BROTLI_PARAM_LGWIN: 2, + BROTLI_PARAM_LGBLOCK: 3, + BROTLI_PARAM_DISABLE_LITERAL_CONTEXT_MODELING: 4, + BROTLI_PARAM_SIZE_HINT: 5, + BROTLI_PARAM_LARGE_WINDOW: 6, + BROTLI_PARAM_NPOSTFIX: 7, + BROTLI_PARAM_NDIRECT: 8, + BROTLI_DECODER_RESULT_ERROR: 0, + BROTLI_DECODER_RESULT_SUCCESS: 1, + BROTLI_DECODER_RESULT_NEEDS_MORE_INPUT: 2, + BROTLI_DECODER_RESULT_NEEDS_MORE_OUTPUT: 3, + BROTLI_DECODER_PARAM_DISABLE_RING_BUFFER_REALLOCATION: 0, + BROTLI_DECODER_PARAM_LARGE_WINDOW: 1, + BROTLI_DECODER_NO_ERROR: 0, + 
BROTLI_DECODER_SUCCESS: 1, + BROTLI_DECODER_NEEDS_MORE_INPUT: 2, + BROTLI_DECODER_NEEDS_MORE_OUTPUT: 3, + BROTLI_DECODER_ERROR_FORMAT_EXUBERANT_NIBBLE: -1, + BROTLI_DECODER_ERROR_FORMAT_RESERVED: -2, + BROTLI_DECODER_ERROR_FORMAT_EXUBERANT_META_NIBBLE: -3, + BROTLI_DECODER_ERROR_FORMAT_SIMPLE_HUFFMAN_ALPHABET: -4, + BROTLI_DECODER_ERROR_FORMAT_SIMPLE_HUFFMAN_SAME: -5, + BROTLI_DECODER_ERROR_FORMAT_CL_SPACE: -6, + BROTLI_DECODER_ERROR_FORMAT_HUFFMAN_SPACE: -7, + BROTLI_DECODER_ERROR_FORMAT_CONTEXT_MAP_REPEAT: -8, + BROTLI_DECODER_ERROR_FORMAT_BLOCK_LENGTH_1: -9, + BROTLI_DECODER_ERROR_FORMAT_BLOCK_LENGTH_2: -10, + BROTLI_DECODER_ERROR_FORMAT_TRANSFORM: -11, + BROTLI_DECODER_ERROR_FORMAT_DICTIONARY: -12, + BROTLI_DECODER_ERROR_FORMAT_WINDOW_BITS: -13, + BROTLI_DECODER_ERROR_FORMAT_PADDING_1: -14, + BROTLI_DECODER_ERROR_FORMAT_PADDING_2: -15, + BROTLI_DECODER_ERROR_FORMAT_DISTANCE: -16, + BROTLI_DECODER_ERROR_DICTIONARY_NOT_SET: -19, + BROTLI_DECODER_ERROR_INVALID_ARGUMENTS: -20, + BROTLI_DECODER_ERROR_ALLOC_CONTEXT_MODES: -21, + BROTLI_DECODER_ERROR_ALLOC_TREE_GROUPS: -22, + BROTLI_DECODER_ERROR_ALLOC_CONTEXT_MAP: -25, + BROTLI_DECODER_ERROR_ALLOC_RING_BUFFER_1: -26, + BROTLI_DECODER_ERROR_ALLOC_RING_BUFFER_2: -27, + BROTLI_DECODER_ERROR_ALLOC_BLOCK_TYPE_TREES: -30, + BROTLI_DECODER_ERROR_UNREACHABLE: -31, +}, realZlibConstants)); +//# sourceMappingURL=constants.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/minizlib/dist/commonjs/index.js b/deps/npm/node_modules/node-gyp/node_modules/minizlib/dist/commonjs/index.js new file mode 100644 index 00000000000000..ad65eef0495076 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/minizlib/dist/commonjs/index.js @@ -0,0 +1,352 @@ +"use strict"; +var __importDefault = (this && this.__importDefault) || function (mod) { + return (mod && mod.__esModule) ? mod : { "default": mod }; +}; +Object.defineProperty(exports, "__esModule", { value: true }); +exports.BrotliDecompress = exports.BrotliCompress = exports.Brotli = exports.Unzip = exports.InflateRaw = exports.DeflateRaw = exports.Gunzip = exports.Gzip = exports.Inflate = exports.Deflate = exports.Zlib = exports.ZlibError = exports.constants = void 0; +const assert_1 = __importDefault(require("assert")); +const buffer_1 = require("buffer"); +const minipass_1 = require("minipass"); +const zlib_1 = __importDefault(require("zlib")); +const constants_js_1 = require("./constants.js"); +var constants_js_2 = require("./constants.js"); +Object.defineProperty(exports, "constants", { enumerable: true, get: function () { return constants_js_2.constants; } }); +const OriginalBufferConcat = buffer_1.Buffer.concat; +const _superWrite = Symbol('_superWrite'); +class ZlibError extends Error { + code; + errno; + constructor(err) { + super('zlib: ' + err.message); + this.code = err.code; + this.errno = err.errno; + /* c8 ignore next */ + if (!this.code) + this.code = 'ZLIB_ERROR'; + this.message = 'zlib: ' + err.message; + Error.captureStackTrace(this, this.constructor); + } + get name() { + return 'ZlibError'; + } +} +exports.ZlibError = ZlibError; +// the Zlib class they all inherit from +// This thing manages the queue of requests, and returns +// true or false if there is anything in the queue when +// you call the .write() method. 
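+// The flush flag rides along on the chunk itself: flush() writes a
+// zero-length Buffer tagged with this Symbol, and write() reads it back,
+// so no ordinary property name on user data can collide with it.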
+const _flushFlag = Symbol('flushFlag'); +class ZlibBase extends minipass_1.Minipass { + #sawError = false; + #ended = false; + #flushFlag; + #finishFlushFlag; + #fullFlushFlag; + #handle; + #onError; + get sawError() { + return this.#sawError; + } + get handle() { + return this.#handle; + } + /* c8 ignore start */ + get flushFlag() { + return this.#flushFlag; + } + /* c8 ignore stop */ + constructor(opts, mode) { + if (!opts || typeof opts !== 'object') + throw new TypeError('invalid options for ZlibBase constructor'); + //@ts-ignore + super(opts); + /* c8 ignore start */ + this.#flushFlag = opts.flush ?? 0; + this.#finishFlushFlag = opts.finishFlush ?? 0; + this.#fullFlushFlag = opts.fullFlushFlag ?? 0; + /* c8 ignore stop */ + // this will throw if any options are invalid for the class selected + try { + // @types/node doesn't know that it exports the classes, but they're there + //@ts-ignore + this.#handle = new zlib_1.default[mode](opts); + } + catch (er) { + // make sure that all errors get decorated properly + throw new ZlibError(er); + } + this.#onError = err => { + // no sense raising multiple errors, since we abort on the first one. + if (this.#sawError) + return; + this.#sawError = true; + // there is no way to cleanly recover. + // continuing only obscures problems. + this.close(); + this.emit('error', err); + }; + this.#handle?.on('error', er => this.#onError(new ZlibError(er))); + this.once('end', () => this.close); + } + close() { + if (this.#handle) { + this.#handle.close(); + this.#handle = undefined; + this.emit('close'); + } + } + reset() { + if (!this.#sawError) { + (0, assert_1.default)(this.#handle, 'zlib binding closed'); + //@ts-ignore + return this.#handle.reset?.(); + } + } + flush(flushFlag) { + if (this.ended) + return; + if (typeof flushFlag !== 'number') + flushFlag = this.#fullFlushFlag; + this.write(Object.assign(buffer_1.Buffer.alloc(0), { [_flushFlag]: flushFlag })); + } + end(chunk, encoding, cb) { + /* c8 ignore start */ + if (typeof chunk === 'function') { + cb = chunk; + encoding = undefined; + chunk = undefined; + } + if (typeof encoding === 'function') { + cb = encoding; + encoding = undefined; + } + /* c8 ignore stop */ + if (chunk) { + if (encoding) + this.write(chunk, encoding); + else + this.write(chunk); + } + this.flush(this.#finishFlushFlag); + this.#ended = true; + return super.end(cb); + } + get ended() { + return this.#ended; + } + // overridden in the gzip classes to do portable writes + [_superWrite](data) { + return super.write(data); + } + write(chunk, encoding, cb) { + // process the chunk using the sync process + // then super.write() all the outputted chunks + if (typeof encoding === 'function') + (cb = encoding), (encoding = 'utf8'); + if (typeof chunk === 'string') + chunk = buffer_1.Buffer.from(chunk, encoding); + if (this.#sawError) + return; + (0, assert_1.default)(this.#handle, 'zlib binding closed'); + // _processChunk tries to .close() the native handle after it's done, so we + // intercept that by temporarily making it a no-op. + // diving into the node:zlib internals a bit here + const nativeHandle = this.#handle + ._handle; + const originalNativeClose = nativeHandle.close; + nativeHandle.close = () => { }; + const originalClose = this.#handle.close; + this.#handle.close = () => { }; + // It also calls `Buffer.concat()` at the end, which may be convenient + // for some, but which we are not interested in as it slows us down. 
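+ // Substituting the identity function makes _processChunk() hand back
+ // the raw array of output buffers instead of concatenating them; the
+ // real Buffer.concat is restored on both the success and error paths
+ // of the try/catch below.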
+ buffer_1.Buffer.concat = args => args; + let result = undefined; + try { + const flushFlag = typeof chunk[_flushFlag] === 'number' + ? chunk[_flushFlag] + : this.#flushFlag; + result = this.#handle._processChunk(chunk, flushFlag); + // if we don't throw, reset it back how it was + buffer_1.Buffer.concat = OriginalBufferConcat; + } + catch (err) { + // or if we do, put Buffer.concat() back before we emit error + // Error events call into user code, which may call Buffer.concat() + buffer_1.Buffer.concat = OriginalBufferConcat; + this.#onError(new ZlibError(err)); + } + finally { + if (this.#handle) { + // Core zlib resets `_handle` to null after attempting to close the + // native handle. Our no-op handler prevented actual closure, but we + // need to restore the `._handle` property. + ; + this.#handle._handle = + nativeHandle; + nativeHandle.close = originalNativeClose; + this.#handle.close = originalClose; + // `_processChunk()` adds an 'error' listener. If we don't remove it + // after each call, these handlers start piling up. + this.#handle.removeAllListeners('error'); + // make sure OUR error listener is still attached tho + } + } + if (this.#handle) + this.#handle.on('error', er => this.#onError(new ZlibError(er))); + let writeReturn; + if (result) { + if (Array.isArray(result) && result.length > 0) { + const r = result[0]; + // The first buffer is always `handle._outBuffer`, which would be + // re-used for later invocations; so, we always have to copy that one. + writeReturn = this[_superWrite](buffer_1.Buffer.from(r)); + for (let i = 1; i < result.length; i++) { + writeReturn = this[_superWrite](result[i]); + } + } + else { + // either a single Buffer or an empty array + writeReturn = this[_superWrite](buffer_1.Buffer.from(result)); + } + } + if (cb) + cb(); + return writeReturn; + } +} +class Zlib extends ZlibBase { + #level; + #strategy; + constructor(opts, mode) { + opts = opts || {}; + opts.flush = opts.flush || constants_js_1.constants.Z_NO_FLUSH; + opts.finishFlush = opts.finishFlush || constants_js_1.constants.Z_FINISH; + opts.fullFlushFlag = constants_js_1.constants.Z_FULL_FLUSH; + super(opts, mode); + this.#level = opts.level; + this.#strategy = opts.strategy; + } + params(level, strategy) { + if (this.sawError) + return; + if (!this.handle) + throw new Error('cannot switch params when binding is closed'); + // no way to test this without also not supporting params at all + /* c8 ignore start */ + if (!this.handle.params) + throw new Error('not supported in this implementation'); + /* c8 ignore stop */ + if (this.#level !== level || this.#strategy !== strategy) { + this.flush(constants_js_1.constants.Z_SYNC_FLUSH); + (0, assert_1.default)(this.handle, 'zlib binding closed'); + // .params() calls .flush(), but the latter is always async in the + // core zlib. We override .flush() temporarily to intercept that and + // flush synchronously. 
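+ // Save the real flush, substitute a synchronous stand-in for the
+ // duration of the params() call, and restore it in the finally below.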
+ const origFlush = this.handle.flush; + this.handle.flush = (flushFlag, cb) => { + /* c8 ignore start */ + if (typeof flushFlag === 'function') { + cb = flushFlag; + flushFlag = this.flushFlag; + } + /* c8 ignore stop */ + this.flush(flushFlag); + cb?.(); + }; + try { + ; + this.handle.params(level, strategy); + } + finally { + this.handle.flush = origFlush; + } + /* c8 ignore start */ + if (this.handle) { + this.#level = level; + this.#strategy = strategy; + } + /* c8 ignore stop */ + } + } +} +exports.Zlib = Zlib; +// minimal 2-byte header +class Deflate extends Zlib { + constructor(opts) { + super(opts, 'Deflate'); + } +} +exports.Deflate = Deflate; +class Inflate extends Zlib { + constructor(opts) { + super(opts, 'Inflate'); + } +} +exports.Inflate = Inflate; +class Gzip extends Zlib { + #portable; + constructor(opts) { + super(opts, 'Gzip'); + this.#portable = opts && !!opts.portable; + } + [_superWrite](data) { + if (!this.#portable) + return super[_superWrite](data); + // we'll always get the header emitted in one first chunk + // overwrite the OS indicator byte with 0xFF + this.#portable = false; + data[9] = 255; + return super[_superWrite](data); + } +} +exports.Gzip = Gzip; +class Gunzip extends Zlib { + constructor(opts) { + super(opts, 'Gunzip'); + } +} +exports.Gunzip = Gunzip; +// raw - no header +class DeflateRaw extends Zlib { + constructor(opts) { + super(opts, 'DeflateRaw'); + } +} +exports.DeflateRaw = DeflateRaw; +class InflateRaw extends Zlib { + constructor(opts) { + super(opts, 'InflateRaw'); + } +} +exports.InflateRaw = InflateRaw; +// auto-detect header. +class Unzip extends Zlib { + constructor(opts) { + super(opts, 'Unzip'); + } +} +exports.Unzip = Unzip; +class Brotli extends ZlibBase { + constructor(opts, mode) { + opts = opts || {}; + opts.flush = opts.flush || constants_js_1.constants.BROTLI_OPERATION_PROCESS; + opts.finishFlush = + opts.finishFlush || constants_js_1.constants.BROTLI_OPERATION_FINISH; + opts.fullFlushFlag = constants_js_1.constants.BROTLI_OPERATION_FLUSH; + super(opts, mode); + } +} +exports.Brotli = Brotli; +class BrotliCompress extends Brotli { + constructor(opts) { + super(opts, 'BrotliCompress'); + } +} +exports.BrotliCompress = BrotliCompress; +class BrotliDecompress extends Brotli { + constructor(opts) { + super(opts, 'BrotliDecompress'); + } +} +exports.BrotliDecompress = BrotliDecompress; +//# sourceMappingURL=index.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/minizlib/dist/commonjs/package.json b/deps/npm/node_modules/node-gyp/node_modules/minizlib/dist/commonjs/package.json new file mode 100644 index 00000000000000..5bbefffbabee39 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/minizlib/dist/commonjs/package.json @@ -0,0 +1,3 @@ +{ + "type": "commonjs" +} diff --git a/deps/npm/node_modules/node-gyp/node_modules/minizlib/dist/esm/constants.js b/deps/npm/node_modules/node-gyp/node_modules/minizlib/dist/esm/constants.js new file mode 100644 index 00000000000000..7faf40be5068d0 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/minizlib/dist/esm/constants.js @@ -0,0 +1,117 @@ +// Update with any zlib constants that are added or changed in the future. +// Node v6 didn't export this, so we just hard code the version and rely +// on all the other hard-coded values from zlib v4736. When node v6 +// support drops, we can just export the realZlibConstants object. 
+import realZlib from 'zlib'; +/* c8 ignore start */ +const realZlibConstants = realZlib.constants || { ZLIB_VERNUM: 4736 }; +/* c8 ignore stop */ +export const constants = Object.freeze(Object.assign(Object.create(null), { + Z_NO_FLUSH: 0, + Z_PARTIAL_FLUSH: 1, + Z_SYNC_FLUSH: 2, + Z_FULL_FLUSH: 3, + Z_FINISH: 4, + Z_BLOCK: 5, + Z_OK: 0, + Z_STREAM_END: 1, + Z_NEED_DICT: 2, + Z_ERRNO: -1, + Z_STREAM_ERROR: -2, + Z_DATA_ERROR: -3, + Z_MEM_ERROR: -4, + Z_BUF_ERROR: -5, + Z_VERSION_ERROR: -6, + Z_NO_COMPRESSION: 0, + Z_BEST_SPEED: 1, + Z_BEST_COMPRESSION: 9, + Z_DEFAULT_COMPRESSION: -1, + Z_FILTERED: 1, + Z_HUFFMAN_ONLY: 2, + Z_RLE: 3, + Z_FIXED: 4, + Z_DEFAULT_STRATEGY: 0, + DEFLATE: 1, + INFLATE: 2, + GZIP: 3, + GUNZIP: 4, + DEFLATERAW: 5, + INFLATERAW: 6, + UNZIP: 7, + BROTLI_DECODE: 8, + BROTLI_ENCODE: 9, + Z_MIN_WINDOWBITS: 8, + Z_MAX_WINDOWBITS: 15, + Z_DEFAULT_WINDOWBITS: 15, + Z_MIN_CHUNK: 64, + Z_MAX_CHUNK: Infinity, + Z_DEFAULT_CHUNK: 16384, + Z_MIN_MEMLEVEL: 1, + Z_MAX_MEMLEVEL: 9, + Z_DEFAULT_MEMLEVEL: 8, + Z_MIN_LEVEL: -1, + Z_MAX_LEVEL: 9, + Z_DEFAULT_LEVEL: -1, + BROTLI_OPERATION_PROCESS: 0, + BROTLI_OPERATION_FLUSH: 1, + BROTLI_OPERATION_FINISH: 2, + BROTLI_OPERATION_EMIT_METADATA: 3, + BROTLI_MODE_GENERIC: 0, + BROTLI_MODE_TEXT: 1, + BROTLI_MODE_FONT: 2, + BROTLI_DEFAULT_MODE: 0, + BROTLI_MIN_QUALITY: 0, + BROTLI_MAX_QUALITY: 11, + BROTLI_DEFAULT_QUALITY: 11, + BROTLI_MIN_WINDOW_BITS: 10, + BROTLI_MAX_WINDOW_BITS: 24, + BROTLI_LARGE_MAX_WINDOW_BITS: 30, + BROTLI_DEFAULT_WINDOW: 22, + BROTLI_MIN_INPUT_BLOCK_BITS: 16, + BROTLI_MAX_INPUT_BLOCK_BITS: 24, + BROTLI_PARAM_MODE: 0, + BROTLI_PARAM_QUALITY: 1, + BROTLI_PARAM_LGWIN: 2, + BROTLI_PARAM_LGBLOCK: 3, + BROTLI_PARAM_DISABLE_LITERAL_CONTEXT_MODELING: 4, + BROTLI_PARAM_SIZE_HINT: 5, + BROTLI_PARAM_LARGE_WINDOW: 6, + BROTLI_PARAM_NPOSTFIX: 7, + BROTLI_PARAM_NDIRECT: 8, + BROTLI_DECODER_RESULT_ERROR: 0, + BROTLI_DECODER_RESULT_SUCCESS: 1, + BROTLI_DECODER_RESULT_NEEDS_MORE_INPUT: 2, + BROTLI_DECODER_RESULT_NEEDS_MORE_OUTPUT: 3, + BROTLI_DECODER_PARAM_DISABLE_RING_BUFFER_REALLOCATION: 0, + BROTLI_DECODER_PARAM_LARGE_WINDOW: 1, + BROTLI_DECODER_NO_ERROR: 0, + BROTLI_DECODER_SUCCESS: 1, + BROTLI_DECODER_NEEDS_MORE_INPUT: 2, + BROTLI_DECODER_NEEDS_MORE_OUTPUT: 3, + BROTLI_DECODER_ERROR_FORMAT_EXUBERANT_NIBBLE: -1, + BROTLI_DECODER_ERROR_FORMAT_RESERVED: -2, + BROTLI_DECODER_ERROR_FORMAT_EXUBERANT_META_NIBBLE: -3, + BROTLI_DECODER_ERROR_FORMAT_SIMPLE_HUFFMAN_ALPHABET: -4, + BROTLI_DECODER_ERROR_FORMAT_SIMPLE_HUFFMAN_SAME: -5, + BROTLI_DECODER_ERROR_FORMAT_CL_SPACE: -6, + BROTLI_DECODER_ERROR_FORMAT_HUFFMAN_SPACE: -7, + BROTLI_DECODER_ERROR_FORMAT_CONTEXT_MAP_REPEAT: -8, + BROTLI_DECODER_ERROR_FORMAT_BLOCK_LENGTH_1: -9, + BROTLI_DECODER_ERROR_FORMAT_BLOCK_LENGTH_2: -10, + BROTLI_DECODER_ERROR_FORMAT_TRANSFORM: -11, + BROTLI_DECODER_ERROR_FORMAT_DICTIONARY: -12, + BROTLI_DECODER_ERROR_FORMAT_WINDOW_BITS: -13, + BROTLI_DECODER_ERROR_FORMAT_PADDING_1: -14, + BROTLI_DECODER_ERROR_FORMAT_PADDING_2: -15, + BROTLI_DECODER_ERROR_FORMAT_DISTANCE: -16, + BROTLI_DECODER_ERROR_DICTIONARY_NOT_SET: -19, + BROTLI_DECODER_ERROR_INVALID_ARGUMENTS: -20, + BROTLI_DECODER_ERROR_ALLOC_CONTEXT_MODES: -21, + BROTLI_DECODER_ERROR_ALLOC_TREE_GROUPS: -22, + BROTLI_DECODER_ERROR_ALLOC_CONTEXT_MAP: -25, + BROTLI_DECODER_ERROR_ALLOC_RING_BUFFER_1: -26, + BROTLI_DECODER_ERROR_ALLOC_RING_BUFFER_2: -27, + BROTLI_DECODER_ERROR_ALLOC_BLOCK_TYPE_TREES: -30, + BROTLI_DECODER_ERROR_UNREACHABLE: -31, +}, realZlibConstants)); +//# sourceMappingURL=constants.js.map \ No 
newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/minizlib/dist/esm/index.js b/deps/npm/node_modules/node-gyp/node_modules/minizlib/dist/esm/index.js new file mode 100644 index 00000000000000..a6269b505f47cc --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/minizlib/dist/esm/index.js @@ -0,0 +1,333 @@ +import assert from 'assert'; +import { Buffer } from 'buffer'; +import { Minipass } from 'minipass'; +import realZlib from 'zlib'; +import { constants } from './constants.js'; +export { constants } from './constants.js'; +const OriginalBufferConcat = Buffer.concat; +const _superWrite = Symbol('_superWrite'); +export class ZlibError extends Error { + code; + errno; + constructor(err) { + super('zlib: ' + err.message); + this.code = err.code; + this.errno = err.errno; + /* c8 ignore next */ + if (!this.code) + this.code = 'ZLIB_ERROR'; + this.message = 'zlib: ' + err.message; + Error.captureStackTrace(this, this.constructor); + } + get name() { + return 'ZlibError'; + } +} +// the Zlib class they all inherit from +// This thing manages the queue of requests, and returns +// true or false if there is anything in the queue when +// you call the .write() method. +const _flushFlag = Symbol('flushFlag'); +class ZlibBase extends Minipass { + #sawError = false; + #ended = false; + #flushFlag; + #finishFlushFlag; + #fullFlushFlag; + #handle; + #onError; + get sawError() { + return this.#sawError; + } + get handle() { + return this.#handle; + } + /* c8 ignore start */ + get flushFlag() { + return this.#flushFlag; + } + /* c8 ignore stop */ + constructor(opts, mode) { + if (!opts || typeof opts !== 'object') + throw new TypeError('invalid options for ZlibBase constructor'); + //@ts-ignore + super(opts); + /* c8 ignore start */ + this.#flushFlag = opts.flush ?? 0; + this.#finishFlushFlag = opts.finishFlush ?? 0; + this.#fullFlushFlag = opts.fullFlushFlag ?? 0; + /* c8 ignore stop */ + // this will throw if any options are invalid for the class selected + try { + // @types/node doesn't know that it exports the classes, but they're there + //@ts-ignore + this.#handle = new realZlib[mode](opts); + } + catch (er) { + // make sure that all errors get decorated properly + throw new ZlibError(er); + } + this.#onError = err => { + // no sense raising multiple errors, since we abort on the first one. + if (this.#sawError) + return; + this.#sawError = true; + // there is no way to cleanly recover. + // continuing only obscures problems. 
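+ // Close first so the handle processes no further chunks, then
+ // surface the error to consumers of the stream.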
+ this.close(); + this.emit('error', err); + }; + this.#handle?.on('error', er => this.#onError(new ZlibError(er))); + this.once('end', () => this.close); + } + close() { + if (this.#handle) { + this.#handle.close(); + this.#handle = undefined; + this.emit('close'); + } + } + reset() { + if (!this.#sawError) { + assert(this.#handle, 'zlib binding closed'); + //@ts-ignore + return this.#handle.reset?.(); + } + } + flush(flushFlag) { + if (this.ended) + return; + if (typeof flushFlag !== 'number') + flushFlag = this.#fullFlushFlag; + this.write(Object.assign(Buffer.alloc(0), { [_flushFlag]: flushFlag })); + } + end(chunk, encoding, cb) { + /* c8 ignore start */ + if (typeof chunk === 'function') { + cb = chunk; + encoding = undefined; + chunk = undefined; + } + if (typeof encoding === 'function') { + cb = encoding; + encoding = undefined; + } + /* c8 ignore stop */ + if (chunk) { + if (encoding) + this.write(chunk, encoding); + else + this.write(chunk); + } + this.flush(this.#finishFlushFlag); + this.#ended = true; + return super.end(cb); + } + get ended() { + return this.#ended; + } + // overridden in the gzip classes to do portable writes + [_superWrite](data) { + return super.write(data); + } + write(chunk, encoding, cb) { + // process the chunk using the sync process + // then super.write() all the outputted chunks + if (typeof encoding === 'function') + (cb = encoding), (encoding = 'utf8'); + if (typeof chunk === 'string') + chunk = Buffer.from(chunk, encoding); + if (this.#sawError) + return; + assert(this.#handle, 'zlib binding closed'); + // _processChunk tries to .close() the native handle after it's done, so we + // intercept that by temporarily making it a no-op. + // diving into the node:zlib internals a bit here + const nativeHandle = this.#handle + ._handle; + const originalNativeClose = nativeHandle.close; + nativeHandle.close = () => { }; + const originalClose = this.#handle.close; + this.#handle.close = () => { }; + // It also calls `Buffer.concat()` at the end, which may be convenient + // for some, but which we are not interested in as it slows us down. + Buffer.concat = args => args; + let result = undefined; + try { + const flushFlag = typeof chunk[_flushFlag] === 'number' + ? chunk[_flushFlag] + : this.#flushFlag; + result = this.#handle._processChunk(chunk, flushFlag); + // if we don't throw, reset it back how it was + Buffer.concat = OriginalBufferConcat; + } + catch (err) { + // or if we do, put Buffer.concat() back before we emit error + // Error events call into user code, which may call Buffer.concat() + Buffer.concat = OriginalBufferConcat; + this.#onError(new ZlibError(err)); + } + finally { + if (this.#handle) { + // Core zlib resets `_handle` to null after attempting to close the + // native handle. Our no-op handler prevented actual closure, but we + // need to restore the `._handle` property. + ; + this.#handle._handle = + nativeHandle; + nativeHandle.close = originalNativeClose; + this.#handle.close = originalClose; + // `_processChunk()` adds an 'error' listener. If we don't remove it + // after each call, these handlers start piling up. 
+ this.#handle.removeAllListeners('error'); + // make sure OUR error listener is still attached tho + } + } + if (this.#handle) + this.#handle.on('error', er => this.#onError(new ZlibError(er))); + let writeReturn; + if (result) { + if (Array.isArray(result) && result.length > 0) { + const r = result[0]; + // The first buffer is always `handle._outBuffer`, which would be + // re-used for later invocations; so, we always have to copy that one. + writeReturn = this[_superWrite](Buffer.from(r)); + for (let i = 1; i < result.length; i++) { + writeReturn = this[_superWrite](result[i]); + } + } + else { + // either a single Buffer or an empty array + writeReturn = this[_superWrite](Buffer.from(result)); + } + } + if (cb) + cb(); + return writeReturn; + } +} +export class Zlib extends ZlibBase { + #level; + #strategy; + constructor(opts, mode) { + opts = opts || {}; + opts.flush = opts.flush || constants.Z_NO_FLUSH; + opts.finishFlush = opts.finishFlush || constants.Z_FINISH; + opts.fullFlushFlag = constants.Z_FULL_FLUSH; + super(opts, mode); + this.#level = opts.level; + this.#strategy = opts.strategy; + } + params(level, strategy) { + if (this.sawError) + return; + if (!this.handle) + throw new Error('cannot switch params when binding is closed'); + // no way to test this without also not supporting params at all + /* c8 ignore start */ + if (!this.handle.params) + throw new Error('not supported in this implementation'); + /* c8 ignore stop */ + if (this.#level !== level || this.#strategy !== strategy) { + this.flush(constants.Z_SYNC_FLUSH); + assert(this.handle, 'zlib binding closed'); + // .params() calls .flush(), but the latter is always async in the + // core zlib. We override .flush() temporarily to intercept that and + // flush synchronously. + const origFlush = this.handle.flush; + this.handle.flush = (flushFlag, cb) => { + /* c8 ignore start */ + if (typeof flushFlag === 'function') { + cb = flushFlag; + flushFlag = this.flushFlag; + } + /* c8 ignore stop */ + this.flush(flushFlag); + cb?.(); + }; + try { + ; + this.handle.params(level, strategy); + } + finally { + this.handle.flush = origFlush; + } + /* c8 ignore start */ + if (this.handle) { + this.#level = level; + this.#strategy = strategy; + } + /* c8 ignore stop */ + } + } +} +// minimal 2-byte header +export class Deflate extends Zlib { + constructor(opts) { + super(opts, 'Deflate'); + } +} +export class Inflate extends Zlib { + constructor(opts) { + super(opts, 'Inflate'); + } +} +export class Gzip extends Zlib { + #portable; + constructor(opts) { + super(opts, 'Gzip'); + this.#portable = opts && !!opts.portable; + } + [_superWrite](data) { + if (!this.#portable) + return super[_superWrite](data); + // we'll always get the header emitted in one first chunk + // overwrite the OS indicator byte with 0xFF + this.#portable = false; + data[9] = 255; + return super[_superWrite](data); + } +} +export class Gunzip extends Zlib { + constructor(opts) { + super(opts, 'Gunzip'); + } +} +// raw - no header +export class DeflateRaw extends Zlib { + constructor(opts) { + super(opts, 'DeflateRaw'); + } +} +export class InflateRaw extends Zlib { + constructor(opts) { + super(opts, 'InflateRaw'); + } +} +// auto-detect header. 
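+// (Node's zlib Unzip mode accepts either gzip or zlib-wrapped deflate
+// input by sniffing the header bytes.)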
+export class Unzip extends Zlib { + constructor(opts) { + super(opts, 'Unzip'); + } +} +export class Brotli extends ZlibBase { + constructor(opts, mode) { + opts = opts || {}; + opts.flush = opts.flush || constants.BROTLI_OPERATION_PROCESS; + opts.finishFlush = + opts.finishFlush || constants.BROTLI_OPERATION_FINISH; + opts.fullFlushFlag = constants.BROTLI_OPERATION_FLUSH; + super(opts, mode); + } +} +export class BrotliCompress extends Brotli { + constructor(opts) { + super(opts, 'BrotliCompress'); + } +} +export class BrotliDecompress extends Brotli { + constructor(opts) { + super(opts, 'BrotliDecompress'); + } +} +//# sourceMappingURL=index.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/minizlib/dist/esm/package.json b/deps/npm/node_modules/node-gyp/node_modules/minizlib/dist/esm/package.json new file mode 100644 index 00000000000000..3dbc1ca591c055 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/minizlib/dist/esm/package.json @@ -0,0 +1,3 @@ +{ + "type": "module" +} diff --git a/deps/npm/node_modules/node-gyp/node_modules/minizlib/package.json b/deps/npm/node_modules/node-gyp/node_modules/minizlib/package.json new file mode 100644 index 00000000000000..e94623ff43d353 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/minizlib/package.json @@ -0,0 +1,81 @@ +{ + "name": "minizlib", + "version": "3.0.1", + "description": "A small fast zlib stream built on [minipass](http://npm.im/minipass) and Node.js's zlib binding.", + "main": "./dist/commonjs/index.js", + "dependencies": { + "minipass": "^7.0.4", + "rimraf": "^5.0.5" + }, + "scripts": { + "prepare": "tshy", + "pretest": "npm run prepare", + "test": "tap", + "preversion": "npm test", + "postversion": "npm publish", + "prepublishOnly": "git push origin --follow-tags", + "format": "prettier --write . --loglevel warn", + "typedoc": "typedoc --tsconfig .tshy/esm.json ./src/*.ts" + }, + "repository": { + "type": "git", + "url": "git+https://github.com/isaacs/minizlib.git" + }, + "keywords": [ + "zlib", + "gzip", + "gunzip", + "deflate", + "inflate", + "compression", + "zip", + "unzip" + ], + "author": "Isaac Z. 
Schlueter (http://blog.izs.me/)", + "license": "MIT", + "devDependencies": { + "@types/node": "^20.11.29", + "mkdirp": "^3.0.1", + "tap": "^18.7.1", + "tshy": "^1.12.0", + "typedoc": "^0.25.12" + }, + "files": [ + "dist" + ], + "engines": { + "node": ">= 18" + }, + "tshy": { + "exports": { + "./package.json": "./package.json", + ".": "./src/index.ts" + } + }, + "exports": { + "./package.json": "./package.json", + ".": { + "import": { + "types": "./dist/esm/index.d.ts", + "default": "./dist/esm/index.js" + }, + "require": { + "types": "./dist/commonjs/index.d.ts", + "default": "./dist/commonjs/index.js" + } + } + }, + "types": "./dist/commonjs/index.d.ts", + "type": "module", + "prettier": { + "semi": false, + "printWidth": 75, + "tabWidth": 2, + "useTabs": false, + "singleQuote": true, + "jsxSingleQuote": false, + "bracketSameLine": true, + "arrowParens": "avoid", + "endOfLine": "lf" + } +} diff --git a/deps/npm/node_modules/is-lambda/LICENSE b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/LICENSE similarity index 79% rename from deps/npm/node_modules/is-lambda/LICENSE rename to deps/npm/node_modules/node-gyp/node_modules/mkdirp/LICENSE index 4a59c94175c2a3..0a034db7a73b5d 100644 --- a/deps/npm/node_modules/is-lambda/LICENSE +++ b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/LICENSE @@ -1,6 +1,6 @@ -The MIT License (MIT) +Copyright (c) 2011-2023 James Halliday (mail@substack.net) and Isaac Z. Schlueter (i@izs.me) -Copyright (c) 2016-2017 Thomas Watson Steen +This project is free software released under the MIT license: Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -9,13 +9,13 @@ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/package.json b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/package.json new file mode 100644 index 00000000000000..9d04a66e16cd93 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/package.json @@ -0,0 +1,91 @@ +{ + "name": "mkdirp", + "description": "Recursively mkdir, like `mkdir -p`", + "version": "3.0.1", + "keywords": [ + "mkdir", + "directory", + "make dir", + "make", + "dir", + "recursive", + "native" + ], + "bin": "./dist/cjs/src/bin.js", + "main": "./dist/cjs/src/index.js", + "module": "./dist/mjs/index.js", + "types": "./dist/mjs/index.d.ts", + "exports": { + ".": { + "import": { + "types": "./dist/mjs/index.d.ts", + "default": "./dist/mjs/index.js" + }, + "require": { + "types": "./dist/cjs/src/index.d.ts", + "default": "./dist/cjs/src/index.js" + } + } + }, + "files": [ + "dist" + ], + "scripts": { + "preversion": "npm test", + "postversion": "npm publish", + "prepublishOnly": "git push origin --follow-tags", + "preprepare": "rm -rf dist", + "prepare": "tsc -p tsconfig.json && tsc -p tsconfig-esm.json", + "postprepare": "bash fixup.sh", + "pretest": "npm run prepare", + "presnap": "npm run prepare", + "test": "c8 tap", + "snap": "c8 tap", + "format": "prettier --write . --loglevel warn", + "benchmark": "node benchmark/index.js", + "typedoc": "typedoc --tsconfig tsconfig-esm.json ./src/*.ts" + }, + "prettier": { + "semi": false, + "printWidth": 80, + "tabWidth": 2, + "useTabs": false, + "singleQuote": true, + "jsxSingleQuote": false, + "bracketSameLine": true, + "arrowParens": "avoid", + "endOfLine": "lf" + }, + "devDependencies": { + "@types/brace-expansion": "^1.1.0", + "@types/node": "^18.11.9", + "@types/tap": "^15.0.7", + "c8": "^7.12.0", + "eslint-config-prettier": "^8.6.0", + "prettier": "^2.8.2", + "tap": "^16.3.3", + "ts-node": "^10.9.1", + "typedoc": "^0.23.21", + "typescript": "^4.9.3" + }, + "tap": { + "coverage": false, + "node-arg": [ + "--no-warnings", + "--loader", + "ts-node/esm" + ], + "ts": false + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + }, + "repository": { + "type": "git", + "url": "https://github.com/isaacs/node-mkdirp.git" + }, + "license": "MIT", + "engines": { + "node": ">=10" + } +} diff --git a/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/bin.d.ts b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/bin.d.ts new file mode 100644 index 00000000000000..34e005228653c8 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/bin.d.ts @@ -0,0 +1,3 @@ +#!/usr/bin/env node +export {}; +//# sourceMappingURL=bin.d.ts.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/bin.d.ts.map b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/bin.d.ts.map new file mode 100644 index 00000000000000..c10c656ec75109 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/bin.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"bin.d.ts","sourceRoot":"","sources":["../../../src/bin.ts"],"names":[],"mappings":""} \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/bin.js b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/bin.js new file mode 100755 index 00000000000000..757aae1fd96cb2 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/bin.js @@ -0,0 +1,80 @@ +#!/usr/bin/env node +"use 
strict"; +Object.defineProperty(exports, "__esModule", { value: true }); +const package_json_1 = require("../package.json"); +const usage = () => ` +usage: mkdirp [DIR1,DIR2..] {OPTIONS} + + Create each supplied directory including any necessary parent directories + that don't yet exist. + + If the directory already exists, do nothing. + +OPTIONS are: + + -m If a directory needs to be created, set the mode as an octal + --mode= permission string. + + -v --version Print the mkdirp version number + + -h --help Print this helpful banner + + -p --print Print the first directories created for each path provided + + --manual Use manual implementation, even if native is available +`; +const dirs = []; +const opts = {}; +let doPrint = false; +let dashdash = false; +let manual = false; +for (const arg of process.argv.slice(2)) { + if (dashdash) + dirs.push(arg); + else if (arg === '--') + dashdash = true; + else if (arg === '--manual') + manual = true; + else if (/^-h/.test(arg) || /^--help/.test(arg)) { + console.log(usage()); + process.exit(0); + } + else if (arg === '-v' || arg === '--version') { + console.log(package_json_1.version); + process.exit(0); + } + else if (arg === '-p' || arg === '--print') { + doPrint = true; + } + else if (/^-m/.test(arg) || /^--mode=/.test(arg)) { + // these don't get covered in CI, but work locally + // weird because the tests below show as passing in the output. + /* c8 ignore start */ + const mode = parseInt(arg.replace(/^(-m|--mode=)/, ''), 8); + if (isNaN(mode)) { + console.error(`invalid mode argument: ${arg}\nMust be an octal number.`); + process.exit(1); + } + /* c8 ignore stop */ + opts.mode = mode; + } + else + dirs.push(arg); +} +const index_js_1 = require("./index.js"); +const impl = manual ? index_js_1.mkdirp.manual : index_js_1.mkdirp; +if (dirs.length === 0) { + console.error(usage()); +} +// these don't get covered in CI, but work locally +/* c8 ignore start */ +Promise.all(dirs.map(dir => impl(dir, opts))) + .then(made => (doPrint ? 
made.forEach(m => m && console.log(m)) : null)) + .catch(er => { + console.error(er.message); + if (er.code) + console.error(' code: ' + er.code); + process.exit(1); +}); +/* c8 ignore stop */ +//# sourceMappingURL=bin.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/bin.js.map b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/bin.js.map new file mode 100644 index 00000000000000..d99295301b5fa7 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/bin.js.map @@ -0,0 +1 @@ +{"version":3,"file":"bin.js","sourceRoot":"","sources":["../../../src/bin.ts"],"names":[],"mappings":";;;AAEA,kDAAyC;AAGzC,MAAM,KAAK,GAAG,GAAG,EAAE,CAAC;;;;;;;;;;;;;;;;;;;;CAoBnB,CAAA;AAED,MAAM,IAAI,GAAa,EAAE,CAAA;AACzB,MAAM,IAAI,GAAkB,EAAE,CAAA;AAC9B,IAAI,OAAO,GAAY,KAAK,CAAA;AAC5B,IAAI,QAAQ,GAAG,KAAK,CAAA;AACpB,IAAI,MAAM,GAAG,KAAK,CAAA;AAClB,KAAK,MAAM,GAAG,IAAI,OAAO,CAAC,IAAI,CAAC,KAAK,CAAC,CAAC,CAAC,EAAE;IACvC,IAAI,QAAQ;QAAE,IAAI,CAAC,IAAI,CAAC,GAAG,CAAC,CAAA;SACvB,IAAI,GAAG,KAAK,IAAI;QAAE,QAAQ,GAAG,IAAI,CAAA;SACjC,IAAI,GAAG,KAAK,UAAU;QAAE,MAAM,GAAG,IAAI,CAAA;SACrC,IAAI,KAAK,CAAC,IAAI,CAAC,GAAG,CAAC,IAAI,SAAS,CAAC,IAAI,CAAC,GAAG,CAAC,EAAE;QAC/C,OAAO,CAAC,GAAG,CAAC,KAAK,EAAE,CAAC,CAAA;QACpB,OAAO,CAAC,IAAI,CAAC,CAAC,CAAC,CAAA;KAChB;SAAM,IAAI,GAAG,KAAK,IAAI,IAAI,GAAG,KAAK,WAAW,EAAE;QAC9C,OAAO,CAAC,GAAG,CAAC,sBAAO,CAAC,CAAA;QACpB,OAAO,CAAC,IAAI,CAAC,CAAC,CAAC,CAAA;KAChB;SAAM,IAAI,GAAG,KAAK,IAAI,IAAI,GAAG,KAAK,SAAS,EAAE;QAC5C,OAAO,GAAG,IAAI,CAAA;KACf;SAAM,IAAI,KAAK,CAAC,IAAI,CAAC,GAAG,CAAC,IAAI,UAAU,CAAC,IAAI,CAAC,GAAG,CAAC,EAAE;QAClD,kDAAkD;QAClD,+DAA+D;QAC/D,qBAAqB;QACrB,MAAM,IAAI,GAAG,QAAQ,CAAC,GAAG,CAAC,OAAO,CAAC,eAAe,EAAE,EAAE,CAAC,EAAE,CAAC,CAAC,CAAA;QAC1D,IAAI,KAAK,CAAC,IAAI,CAAC,EAAE;YACf,OAAO,CAAC,KAAK,CAAC,0BAA0B,GAAG,4BAA4B,CAAC,CAAA;YACxE,OAAO,CAAC,IAAI,CAAC,CAAC,CAAC,CAAA;SAChB;QACD,oBAAoB;QACpB,IAAI,CAAC,IAAI,GAAG,IAAI,CAAA;KACjB;;QAAM,IAAI,CAAC,IAAI,CAAC,GAAG,CAAC,CAAA;CACtB;AAED,yCAAmC;AACnC,MAAM,IAAI,GAAG,MAAM,CAAC,CAAC,CAAC,iBAAM,CAAC,MAAM,CAAC,CAAC,CAAC,iBAAM,CAAA;AAC5C,IAAI,IAAI,CAAC,MAAM,KAAK,CAAC,EAAE;IACrB,OAAO,CAAC,KAAK,CAAC,KAAK,EAAE,CAAC,CAAA;CACvB;AAED,kDAAkD;AAClD,qBAAqB;AACrB,OAAO,CAAC,GAAG,CAAC,IAAI,CAAC,GAAG,CAAC,GAAG,CAAC,EAAE,CAAC,IAAI,CAAC,GAAG,EAAE,IAAI,CAAC,CAAC,CAAC;KAC1C,IAAI,CAAC,IAAI,CAAC,EAAE,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC,IAAI,CAAC,OAAO,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,IAAI,OAAO,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC;KACvE,KAAK,CAAC,EAAE,CAAC,EAAE;IACV,OAAO,CAAC,KAAK,CAAC,EAAE,CAAC,OAAO,CAAC,CAAA;IACzB,IAAI,EAAE,CAAC,IAAI;QAAE,OAAO,CAAC,KAAK,CAAC,UAAU,GAAG,EAAE,CAAC,IAAI,CAAC,CAAA;IAChD,OAAO,CAAC,IAAI,CAAC,CAAC,CAAC,CAAA;AACjB,CAAC,CAAC,CAAA;AACJ,oBAAoB","sourcesContent":["#!/usr/bin/env node\n\nimport { version } from '../package.json'\nimport { MkdirpOptions } from './opts-arg.js'\n\nconst usage = () => `\nusage: mkdirp [DIR1,DIR2..] 
{OPTIONS}\n\n Create each supplied directory including any necessary parent directories\n that don't yet exist.\n\n If the directory already exists, do nothing.\n\nOPTIONS are:\n\n -m If a directory needs to be created, set the mode as an octal\n --mode= permission string.\n\n -v --version Print the mkdirp version number\n\n -h --help Print this helpful banner\n\n -p --print Print the first directories created for each path provided\n\n --manual Use manual implementation, even if native is available\n`\n\nconst dirs: string[] = []\nconst opts: MkdirpOptions = {}\nlet doPrint: boolean = false\nlet dashdash = false\nlet manual = false\nfor (const arg of process.argv.slice(2)) {\n if (dashdash) dirs.push(arg)\n else if (arg === '--') dashdash = true\n else if (arg === '--manual') manual = true\n else if (/^-h/.test(arg) || /^--help/.test(arg)) {\n console.log(usage())\n process.exit(0)\n } else if (arg === '-v' || arg === '--version') {\n console.log(version)\n process.exit(0)\n } else if (arg === '-p' || arg === '--print') {\n doPrint = true\n } else if (/^-m/.test(arg) || /^--mode=/.test(arg)) {\n // these don't get covered in CI, but work locally\n // weird because the tests below show as passing in the output.\n /* c8 ignore start */\n const mode = parseInt(arg.replace(/^(-m|--mode=)/, ''), 8)\n if (isNaN(mode)) {\n console.error(`invalid mode argument: ${arg}\\nMust be an octal number.`)\n process.exit(1)\n }\n /* c8 ignore stop */\n opts.mode = mode\n } else dirs.push(arg)\n}\n\nimport { mkdirp } from './index.js'\nconst impl = manual ? mkdirp.manual : mkdirp\nif (dirs.length === 0) {\n console.error(usage())\n}\n\n// these don't get covered in CI, but work locally\n/* c8 ignore start */\nPromise.all(dirs.map(dir => impl(dir, opts)))\n .then(made => (doPrint ? 
made.forEach(m => m && console.log(m)) : null))\n .catch(er => {\n console.error(er.message)\n if (er.code) console.error(' code: ' + er.code)\n process.exit(1)\n })\n/* c8 ignore stop */\n"]} \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/find-made.d.ts b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/find-made.d.ts new file mode 100644 index 00000000000000..e47794b3bb72a3 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/find-made.d.ts @@ -0,0 +1,4 @@ +import { MkdirpOptionsResolved } from './opts-arg.js'; +export declare const findMade: (opts: MkdirpOptionsResolved, parent: string, path?: string) => Promise; +export declare const findMadeSync: (opts: MkdirpOptionsResolved, parent: string, path?: string) => undefined | string; +//# sourceMappingURL=find-made.d.ts.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/find-made.d.ts.map b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/find-made.d.ts.map new file mode 100644 index 00000000000000..00d5d1a4dbefdf --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/find-made.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"find-made.d.ts","sourceRoot":"","sources":["../../../src/find-made.ts"],"names":[],"mappings":"AACA,OAAO,EAAE,qBAAqB,EAAE,MAAM,eAAe,CAAA;AAErD,eAAO,MAAM,QAAQ,SACb,qBAAqB,UACnB,MAAM,SACP,MAAM,KACZ,QAAQ,SAAS,GAAG,MAAM,CAe5B,CAAA;AAED,eAAO,MAAM,YAAY,SACjB,qBAAqB,UACnB,MAAM,SACP,MAAM,KACZ,SAAS,GAAG,MAad,CAAA"} \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/find-made.js b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/find-made.js new file mode 100644 index 00000000000000..e831ef27cadc1d --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/find-made.js @@ -0,0 +1,35 @@ +"use strict"; +Object.defineProperty(exports, "__esModule", { value: true }); +exports.findMadeSync = exports.findMade = void 0; +const path_1 = require("path"); +const findMade = async (opts, parent, path) => { + // we never want the 'made' return value to be a root directory + if (path === parent) { + return; + } + return opts.statAsync(parent).then(st => (st.isDirectory() ? path : undefined), // will fail later + // will fail later + er => { + const fer = er; + return fer && fer.code === 'ENOENT' + ? (0, exports.findMade)(opts, (0, path_1.dirname)(parent), parent) + : undefined; + }); +}; +exports.findMade = findMade; +const findMadeSync = (opts, parent, path) => { + if (path === parent) { + return undefined; + } + try { + return opts.statSync(parent).isDirectory() ? path : undefined; + } + catch (er) { + const fer = er; + return fer && fer.code === 'ENOENT' + ? 
(0, exports.findMadeSync)(opts, (0, path_1.dirname)(parent), parent) + : undefined; + } +}; +exports.findMadeSync = findMadeSync; +//# sourceMappingURL=find-made.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/find-made.js.map b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/find-made.js.map new file mode 100644 index 00000000000000..30a0d66398878d --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/find-made.js.map @@ -0,0 +1 @@ +{"version":3,"file":"find-made.js","sourceRoot":"","sources":["../../../src/find-made.ts"],"names":[],"mappings":";;;AAAA,+BAA8B;AAGvB,MAAM,QAAQ,GAAG,KAAK,EAC3B,IAA2B,EAC3B,MAAc,EACd,IAAa,EACgB,EAAE;IAC/B,+DAA+D;IAC/D,IAAI,IAAI,KAAK,MAAM,EAAE;QACnB,OAAM;KACP;IAED,OAAO,IAAI,CAAC,SAAS,CAAC,MAAM,CAAC,CAAC,IAAI,CAChC,EAAE,CAAC,EAAE,CAAC,CAAC,EAAE,CAAC,WAAW,EAAE,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,SAAS,CAAC,EAAE,kBAAkB;IAC/D,AAD6C,kBAAkB;IAC/D,EAAE,CAAC,EAAE;QACH,MAAM,GAAG,GAAG,EAA2B,CAAA;QACvC,OAAO,GAAG,IAAI,GAAG,CAAC,IAAI,KAAK,QAAQ;YACjC,CAAC,CAAC,IAAA,gBAAQ,EAAC,IAAI,EAAE,IAAA,cAAO,EAAC,MAAM,CAAC,EAAE,MAAM,CAAC;YACzC,CAAC,CAAC,SAAS,CAAA;IACf,CAAC,CACF,CAAA;AACH,CAAC,CAAA;AAnBY,QAAA,QAAQ,YAmBpB;AAEM,MAAM,YAAY,GAAG,CAC1B,IAA2B,EAC3B,MAAc,EACd,IAAa,EACO,EAAE;IACtB,IAAI,IAAI,KAAK,MAAM,EAAE;QACnB,OAAO,SAAS,CAAA;KACjB;IAED,IAAI;QACF,OAAO,IAAI,CAAC,QAAQ,CAAC,MAAM,CAAC,CAAC,WAAW,EAAE,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,SAAS,CAAA;KAC9D;IAAC,OAAO,EAAE,EAAE;QACX,MAAM,GAAG,GAAG,EAA2B,CAAA;QACvC,OAAO,GAAG,IAAI,GAAG,CAAC,IAAI,KAAK,QAAQ;YACjC,CAAC,CAAC,IAAA,oBAAY,EAAC,IAAI,EAAE,IAAA,cAAO,EAAC,MAAM,CAAC,EAAE,MAAM,CAAC;YAC7C,CAAC,CAAC,SAAS,CAAA;KACd;AACH,CAAC,CAAA;AAjBY,QAAA,YAAY,gBAiBxB","sourcesContent":["import { dirname } from 'path'\nimport { MkdirpOptionsResolved } from './opts-arg.js'\n\nexport const findMade = async (\n opts: MkdirpOptionsResolved,\n parent: string,\n path?: string\n): Promise => {\n // we never want the 'made' return value to be a root directory\n if (path === parent) {\n return\n }\n\n return opts.statAsync(parent).then(\n st => (st.isDirectory() ? path : undefined), // will fail later\n er => {\n const fer = er as NodeJS.ErrnoException\n return fer && fer.code === 'ENOENT'\n ? findMade(opts, dirname(parent), parent)\n : undefined\n }\n )\n}\n\nexport const findMadeSync = (\n opts: MkdirpOptionsResolved,\n parent: string,\n path?: string\n): undefined | string => {\n if (path === parent) {\n return undefined\n }\n\n try {\n return opts.statSync(parent).isDirectory() ? path : undefined\n } catch (er) {\n const fer = er as NodeJS.ErrnoException\n return fer && fer.code === 'ENOENT'\n ? 
findMadeSync(opts, dirname(parent), parent)\n : undefined\n }\n}\n"]} \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/index.d.ts b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/index.d.ts new file mode 100644 index 00000000000000..fc9e43b3a45de1 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/index.d.ts @@ -0,0 +1,39 @@ +import { MkdirpOptions } from './opts-arg.js'; +export { mkdirpManual, mkdirpManualSync } from './mkdirp-manual.js'; +export { mkdirpNative, mkdirpNativeSync } from './mkdirp-native.js'; +export { useNative, useNativeSync } from './use-native.js'; +export declare const mkdirpSync: (path: string, opts?: MkdirpOptions) => string | void; +export declare const sync: (path: string, opts?: MkdirpOptions) => string | void; +export declare const manual: ((path: string, options?: MkdirpOptions | undefined, made?: string | void | undefined) => Promise) & { + sync: (path: string, options?: MkdirpOptions | undefined, made?: string | void | undefined) => string | void | undefined; +}; +export declare const manualSync: (path: string, options?: MkdirpOptions | undefined, made?: string | void | undefined) => string | void | undefined; +export declare const native: ((path: string, options?: MkdirpOptions | undefined) => Promise) & { + sync: (path: string, options?: MkdirpOptions | undefined) => string | void | undefined; +}; +export declare const nativeSync: (path: string, options?: MkdirpOptions | undefined) => string | void | undefined; +export declare const mkdirp: ((path: string, opts?: MkdirpOptions) => Promise) & { + mkdirpSync: (path: string, opts?: MkdirpOptions) => string | void; + mkdirpNative: ((path: string, options?: MkdirpOptions | undefined) => Promise) & { + sync: (path: string, options?: MkdirpOptions | undefined) => string | void | undefined; + }; + mkdirpNativeSync: (path: string, options?: MkdirpOptions | undefined) => string | void | undefined; + mkdirpManual: ((path: string, options?: MkdirpOptions | undefined, made?: string | void | undefined) => Promise) & { + sync: (path: string, options?: MkdirpOptions | undefined, made?: string | void | undefined) => string | void | undefined; + }; + mkdirpManualSync: (path: string, options?: MkdirpOptions | undefined, made?: string | void | undefined) => string | void | undefined; + sync: (path: string, opts?: MkdirpOptions) => string | void; + native: ((path: string, options?: MkdirpOptions | undefined) => Promise) & { + sync: (path: string, options?: MkdirpOptions | undefined) => string | void | undefined; + }; + nativeSync: (path: string, options?: MkdirpOptions | undefined) => string | void | undefined; + manual: ((path: string, options?: MkdirpOptions | undefined, made?: string | void | undefined) => Promise) & { + sync: (path: string, options?: MkdirpOptions | undefined, made?: string | void | undefined) => string | void | undefined; + }; + manualSync: (path: string, options?: MkdirpOptions | undefined, made?: string | void | undefined) => string | void | undefined; + useNative: ((opts?: MkdirpOptions | undefined) => boolean) & { + sync: (opts?: MkdirpOptions | undefined) => boolean; + }; + useNativeSync: (opts?: MkdirpOptions | undefined) => boolean; +}; +//# sourceMappingURL=index.d.ts.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/index.d.ts.map b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/index.d.ts.map new file mode 100644 
index 00000000000000..0e915bbc9a0c7a --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/index.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../../src/index.ts"],"names":[],"mappings":"AAEA,OAAO,EAAE,aAAa,EAAW,MAAM,eAAe,CAAA;AAItD,OAAO,EAAE,YAAY,EAAE,gBAAgB,EAAE,MAAM,oBAAoB,CAAA;AACnE,OAAO,EAAE,YAAY,EAAE,gBAAgB,EAAE,MAAM,oBAAoB,CAAA;AACnE,OAAO,EAAE,SAAS,EAAE,aAAa,EAAE,MAAM,iBAAiB,CAAA;AAG1D,eAAO,MAAM,UAAU,SAAU,MAAM,SAAS,aAAa,kBAM5D,CAAA;AAED,eAAO,MAAM,IAAI,SARgB,MAAM,SAAS,aAAa,kBAQ/B,CAAA;AAC9B,eAAO,MAAM,MAAM;;CAAe,CAAA;AAClC,eAAO,MAAM,UAAU,oHAAmB,CAAA;AAC1C,eAAO,MAAM,MAAM;;CAAe,CAAA;AAClC,eAAO,MAAM,UAAU,kFAAmB,CAAA;AAC1C,eAAO,MAAM,MAAM,UACJ,MAAM,SAAS,aAAa;uBAdV,MAAM,SAAS,aAAa;;;;;;;;;iBAA5B,MAAM,SAAS,aAAa;;;;;;;;;;;;;CAoC5D,CAAA"} \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/index.js b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/index.js new file mode 100644 index 00000000000000..ab9dc62cddda36 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/index.js @@ -0,0 +1,53 @@ +"use strict"; +Object.defineProperty(exports, "__esModule", { value: true }); +exports.mkdirp = exports.nativeSync = exports.native = exports.manualSync = exports.manual = exports.sync = exports.mkdirpSync = exports.useNativeSync = exports.useNative = exports.mkdirpNativeSync = exports.mkdirpNative = exports.mkdirpManualSync = exports.mkdirpManual = void 0; +const mkdirp_manual_js_1 = require("./mkdirp-manual.js"); +const mkdirp_native_js_1 = require("./mkdirp-native.js"); +const opts_arg_js_1 = require("./opts-arg.js"); +const path_arg_js_1 = require("./path-arg.js"); +const use_native_js_1 = require("./use-native.js"); +/* c8 ignore start */ +var mkdirp_manual_js_2 = require("./mkdirp-manual.js"); +Object.defineProperty(exports, "mkdirpManual", { enumerable: true, get: function () { return mkdirp_manual_js_2.mkdirpManual; } }); +Object.defineProperty(exports, "mkdirpManualSync", { enumerable: true, get: function () { return mkdirp_manual_js_2.mkdirpManualSync; } }); +var mkdirp_native_js_2 = require("./mkdirp-native.js"); +Object.defineProperty(exports, "mkdirpNative", { enumerable: true, get: function () { return mkdirp_native_js_2.mkdirpNative; } }); +Object.defineProperty(exports, "mkdirpNativeSync", { enumerable: true, get: function () { return mkdirp_native_js_2.mkdirpNativeSync; } }); +var use_native_js_2 = require("./use-native.js"); +Object.defineProperty(exports, "useNative", { enumerable: true, get: function () { return use_native_js_2.useNative; } }); +Object.defineProperty(exports, "useNativeSync", { enumerable: true, get: function () { return use_native_js_2.useNativeSync; } }); +/* c8 ignore stop */ +const mkdirpSync = (path, opts) => { + path = (0, path_arg_js_1.pathArg)(path); + const resolved = (0, opts_arg_js_1.optsArg)(opts); + return (0, use_native_js_1.useNativeSync)(resolved) + ? 
(0, mkdirp_native_js_1.mkdirpNativeSync)(path, resolved) + : (0, mkdirp_manual_js_1.mkdirpManualSync)(path, resolved); +}; +exports.mkdirpSync = mkdirpSync; +exports.sync = exports.mkdirpSync; +exports.manual = mkdirp_manual_js_1.mkdirpManual; +exports.manualSync = mkdirp_manual_js_1.mkdirpManualSync; +exports.native = mkdirp_native_js_1.mkdirpNative; +exports.nativeSync = mkdirp_native_js_1.mkdirpNativeSync; +exports.mkdirp = Object.assign(async (path, opts) => { + path = (0, path_arg_js_1.pathArg)(path); + const resolved = (0, opts_arg_js_1.optsArg)(opts); + return (0, use_native_js_1.useNative)(resolved) + ? (0, mkdirp_native_js_1.mkdirpNative)(path, resolved) + : (0, mkdirp_manual_js_1.mkdirpManual)(path, resolved); +}, { + mkdirpSync: exports.mkdirpSync, + mkdirpNative: mkdirp_native_js_1.mkdirpNative, + mkdirpNativeSync: mkdirp_native_js_1.mkdirpNativeSync, + mkdirpManual: mkdirp_manual_js_1.mkdirpManual, + mkdirpManualSync: mkdirp_manual_js_1.mkdirpManualSync, + sync: exports.mkdirpSync, + native: mkdirp_native_js_1.mkdirpNative, + nativeSync: mkdirp_native_js_1.mkdirpNativeSync, + manual: mkdirp_manual_js_1.mkdirpManual, + manualSync: mkdirp_manual_js_1.mkdirpManualSync, + useNative: use_native_js_1.useNative, + useNativeSync: use_native_js_1.useNativeSync, +}); +//# sourceMappingURL=index.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/index.js.map b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/index.js.map new file mode 100644 index 00000000000000..fdb572677a98ef --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/index.js.map @@ -0,0 +1 @@ +{"version":3,"file":"index.js","sourceRoot":"","sources":["../../../src/index.ts"],"names":[],"mappings":";;;AAAA,yDAAmE;AACnE,yDAAmE;AACnE,+CAAsD;AACtD,+CAAuC;AACvC,mDAA0D;AAC1D,qBAAqB;AACrB,uDAAmE;AAA1D,gHAAA,YAAY,OAAA;AAAE,oHAAA,gBAAgB,OAAA;AACvC,uDAAmE;AAA1D,gHAAA,YAAY,OAAA;AAAE,oHAAA,gBAAgB,OAAA;AACvC,iDAA0D;AAAjD,0GAAA,SAAS,OAAA;AAAE,8GAAA,aAAa,OAAA;AACjC,oBAAoB;AAEb,MAAM,UAAU,GAAG,CAAC,IAAY,EAAE,IAAoB,EAAE,EAAE;IAC/D,IAAI,GAAG,IAAA,qBAAO,EAAC,IAAI,CAAC,CAAA;IACpB,MAAM,QAAQ,GAAG,IAAA,qBAAO,EAAC,IAAI,CAAC,CAAA;IAC9B,OAAO,IAAA,6BAAa,EAAC,QAAQ,CAAC;QAC5B,CAAC,CAAC,IAAA,mCAAgB,EAAC,IAAI,EAAE,QAAQ,CAAC;QAClC,CAAC,CAAC,IAAA,mCAAgB,EAAC,IAAI,EAAE,QAAQ,CAAC,CAAA;AACtC,CAAC,CAAA;AANY,QAAA,UAAU,cAMtB;AAEY,QAAA,IAAI,GAAG,kBAAU,CAAA;AACjB,QAAA,MAAM,GAAG,+BAAY,CAAA;AACrB,QAAA,UAAU,GAAG,mCAAgB,CAAA;AAC7B,QAAA,MAAM,GAAG,+BAAY,CAAA;AACrB,QAAA,UAAU,GAAG,mCAAgB,CAAA;AAC7B,QAAA,MAAM,GAAG,MAAM,CAAC,MAAM,CACjC,KAAK,EAAE,IAAY,EAAE,IAAoB,EAAE,EAAE;IAC3C,IAAI,GAAG,IAAA,qBAAO,EAAC,IAAI,CAAC,CAAA;IACpB,MAAM,QAAQ,GAAG,IAAA,qBAAO,EAAC,IAAI,CAAC,CAAA;IAC9B,OAAO,IAAA,yBAAS,EAAC,QAAQ,CAAC;QACxB,CAAC,CAAC,IAAA,+BAAY,EAAC,IAAI,EAAE,QAAQ,CAAC;QAC9B,CAAC,CAAC,IAAA,+BAAY,EAAC,IAAI,EAAE,QAAQ,CAAC,CAAA;AAClC,CAAC,EACD;IACE,UAAU,EAAV,kBAAU;IACV,YAAY,EAAZ,+BAAY;IACZ,gBAAgB,EAAhB,mCAAgB;IAChB,YAAY,EAAZ,+BAAY;IACZ,gBAAgB,EAAhB,mCAAgB;IAEhB,IAAI,EAAE,kBAAU;IAChB,MAAM,EAAE,+BAAY;IACpB,UAAU,EAAE,mCAAgB;IAC5B,MAAM,EAAE,+BAAY;IACpB,UAAU,EAAE,mCAAgB;IAC5B,SAAS,EAAT,yBAAS;IACT,aAAa,EAAb,6BAAa;CACd,CACF,CAAA","sourcesContent":["import { mkdirpManual, mkdirpManualSync } from './mkdirp-manual.js'\nimport { mkdirpNative, mkdirpNativeSync } from './mkdirp-native.js'\nimport { MkdirpOptions, optsArg } from './opts-arg.js'\nimport { pathArg } from './path-arg.js'\nimport { useNative, useNativeSync } from './use-native.js'\n/* c8 ignore start */\nexport { mkdirpManual, 
mkdirpManualSync } from './mkdirp-manual.js'\nexport { mkdirpNative, mkdirpNativeSync } from './mkdirp-native.js'\nexport { useNative, useNativeSync } from './use-native.js'\n/* c8 ignore stop */\n\nexport const mkdirpSync = (path: string, opts?: MkdirpOptions) => {\n path = pathArg(path)\n const resolved = optsArg(opts)\n return useNativeSync(resolved)\n ? mkdirpNativeSync(path, resolved)\n : mkdirpManualSync(path, resolved)\n}\n\nexport const sync = mkdirpSync\nexport const manual = mkdirpManual\nexport const manualSync = mkdirpManualSync\nexport const native = mkdirpNative\nexport const nativeSync = mkdirpNativeSync\nexport const mkdirp = Object.assign(\n async (path: string, opts?: MkdirpOptions) => {\n path = pathArg(path)\n const resolved = optsArg(opts)\n return useNative(resolved)\n ? mkdirpNative(path, resolved)\n : mkdirpManual(path, resolved)\n },\n {\n mkdirpSync,\n mkdirpNative,\n mkdirpNativeSync,\n mkdirpManual,\n mkdirpManualSync,\n\n sync: mkdirpSync,\n native: mkdirpNative,\n nativeSync: mkdirpNativeSync,\n manual: mkdirpManual,\n manualSync: mkdirpManualSync,\n useNative,\n useNativeSync,\n }\n)\n"]} \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/mkdirp-manual.d.ts b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/mkdirp-manual.d.ts new file mode 100644 index 00000000000000..e49cdf9f1bd122 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/mkdirp-manual.d.ts @@ -0,0 +1,6 @@ +import { MkdirpOptions } from './opts-arg.js'; +export declare const mkdirpManualSync: (path: string, options?: MkdirpOptions, made?: string | undefined | void) => string | undefined | void; +export declare const mkdirpManual: ((path: string, options?: MkdirpOptions, made?: string | undefined | void) => Promise) & { + sync: (path: string, options?: MkdirpOptions, made?: string | undefined | void) => string | undefined | void; +}; +//# sourceMappingURL=mkdirp-manual.d.ts.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/mkdirp-manual.d.ts.map b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/mkdirp-manual.d.ts.map new file mode 100644 index 00000000000000..9301bab1ffb35b --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/mkdirp-manual.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"mkdirp-manual.d.ts","sourceRoot":"","sources":["../../../src/mkdirp-manual.ts"],"names":[],"mappings":"AACA,OAAO,EAAE,aAAa,EAAW,MAAM,eAAe,CAAA;AAEtD,eAAO,MAAM,gBAAgB,SACrB,MAAM,YACF,aAAa,SAChB,MAAM,GAAG,SAAS,GAAG,IAAI,KAC/B,MAAM,GAAG,SAAS,GAAG,IAmCvB,CAAA;AAED,eAAO,MAAM,YAAY,UAEf,MAAM,YACF,aAAa,SAChB,MAAM,GAAG,SAAS,GAAG,IAAI,KAC/B,QAAQ,MAAM,GAAG,SAAS,GAAG,IAAI,CAAC;iBA7C/B,MAAM,YACF,aAAa,SAChB,MAAM,GAAG,SAAS,GAAG,IAAI,KAC/B,MAAM,GAAG,SAAS,GAAG,IAAI;CAqF3B,CAAA"} \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/mkdirp-manual.js b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/mkdirp-manual.js new file mode 100644 index 00000000000000..d9bd1d8bb5a49b --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/mkdirp-manual.js @@ -0,0 +1,79 @@ +"use strict"; +Object.defineProperty(exports, "__esModule", { value: true }); +exports.mkdirpManual = exports.mkdirpManualSync = void 0; +const path_1 = require("path"); +const opts_arg_js_1 = require("./opts-arg.js"); +const mkdirpManualSync = (path, options, made) => { + const 
parent = (0, path_1.dirname)(path); + const opts = { ...(0, opts_arg_js_1.optsArg)(options), recursive: false }; + if (parent === path) { + try { + return opts.mkdirSync(path, opts); + } + catch (er) { + // swallowed by recursive implementation on posix systems + // any other error is a failure + const fer = er; + if (fer && fer.code !== 'EISDIR') { + throw er; + } + return; + } + } + try { + opts.mkdirSync(path, opts); + return made || path; + } + catch (er) { + const fer = er; + if (fer && fer.code === 'ENOENT') { + return (0, exports.mkdirpManualSync)(path, opts, (0, exports.mkdirpManualSync)(parent, opts, made)); + } + if (fer && fer.code !== 'EEXIST' && fer && fer.code !== 'EROFS') { + throw er; + } + try { + if (!opts.statSync(path).isDirectory()) + throw er; + } + catch (_) { + throw er; + } + } +}; +exports.mkdirpManualSync = mkdirpManualSync; +exports.mkdirpManual = Object.assign(async (path, options, made) => { + const opts = (0, opts_arg_js_1.optsArg)(options); + opts.recursive = false; + const parent = (0, path_1.dirname)(path); + if (parent === path) { + return opts.mkdirAsync(path, opts).catch(er => { + // swallowed by recursive implementation on posix systems + // any other error is a failure + const fer = er; + if (fer && fer.code !== 'EISDIR') { + throw er; + } + }); + } + return opts.mkdirAsync(path, opts).then(() => made || path, async (er) => { + const fer = er; + if (fer && fer.code === 'ENOENT') { + return (0, exports.mkdirpManual)(parent, opts).then((made) => (0, exports.mkdirpManual)(path, opts, made)); + } + if (fer && fer.code !== 'EEXIST' && fer.code !== 'EROFS') { + throw er; + } + return opts.statAsync(path).then(st => { + if (st.isDirectory()) { + return made; + } + else { + throw er; + } + }, () => { + throw er; + }); + }); +}, { sync: exports.mkdirpManualSync }); +//# sourceMappingURL=mkdirp-manual.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/mkdirp-manual.js.map b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/mkdirp-manual.js.map new file mode 100644 index 00000000000000..ff7ba24dca32ad --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/mkdirp-manual.js.map @@ -0,0 +1 @@ 
+{"version":3,"file":"mkdirp-manual.js","sourceRoot":"","sources":["../../../src/mkdirp-manual.ts"],"names":[],"mappings":";;;AAAA,+BAA8B;AAC9B,+CAAsD;AAE/C,MAAM,gBAAgB,GAAG,CAC9B,IAAY,EACZ,OAAuB,EACvB,IAAgC,EACL,EAAE;IAC7B,MAAM,MAAM,GAAG,IAAA,cAAO,EAAC,IAAI,CAAC,CAAA;IAC5B,MAAM,IAAI,GAAG,EAAE,GAAG,IAAA,qBAAO,EAAC,OAAO,CAAC,EAAE,SAAS,EAAE,KAAK,EAAE,CAAA;IAEtD,IAAI,MAAM,KAAK,IAAI,EAAE;QACnB,IAAI;YACF,OAAO,IAAI,CAAC,SAAS,CAAC,IAAI,EAAE,IAAI,CAAC,CAAA;SAClC;QAAC,OAAO,EAAE,EAAE;YACX,yDAAyD;YACzD,+BAA+B;YAC/B,MAAM,GAAG,GAAG,EAA2B,CAAA;YACvC,IAAI,GAAG,IAAI,GAAG,CAAC,IAAI,KAAK,QAAQ,EAAE;gBAChC,MAAM,EAAE,CAAA;aACT;YACD,OAAM;SACP;KACF;IAED,IAAI;QACF,IAAI,CAAC,SAAS,CAAC,IAAI,EAAE,IAAI,CAAC,CAAA;QAC1B,OAAO,IAAI,IAAI,IAAI,CAAA;KACpB;IAAC,OAAO,EAAE,EAAE;QACX,MAAM,GAAG,GAAG,EAA2B,CAAA;QACvC,IAAI,GAAG,IAAI,GAAG,CAAC,IAAI,KAAK,QAAQ,EAAE;YAChC,OAAO,IAAA,wBAAgB,EAAC,IAAI,EAAE,IAAI,EAAE,IAAA,wBAAgB,EAAC,MAAM,EAAE,IAAI,EAAE,IAAI,CAAC,CAAC,CAAA;SAC1E;QACD,IAAI,GAAG,IAAI,GAAG,CAAC,IAAI,KAAK,QAAQ,IAAI,GAAG,IAAI,GAAG,CAAC,IAAI,KAAK,OAAO,EAAE;YAC/D,MAAM,EAAE,CAAA;SACT;QACD,IAAI;YACF,IAAI,CAAC,IAAI,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,WAAW,EAAE;gBAAE,MAAM,EAAE,CAAA;SACjD;QAAC,OAAO,CAAC,EAAE;YACV,MAAM,EAAE,CAAA;SACT;KACF;AACH,CAAC,CAAA;AAvCY,QAAA,gBAAgB,oBAuC5B;AAEY,QAAA,YAAY,GAAG,MAAM,CAAC,MAAM,CACvC,KAAK,EACH,IAAY,EACZ,OAAuB,EACvB,IAAgC,EACI,EAAE;IACtC,MAAM,IAAI,GAAG,IAAA,qBAAO,EAAC,OAAO,CAAC,CAAA;IAC7B,IAAI,CAAC,SAAS,GAAG,KAAK,CAAA;IACtB,MAAM,MAAM,GAAG,IAAA,cAAO,EAAC,IAAI,CAAC,CAAA;IAC5B,IAAI,MAAM,KAAK,IAAI,EAAE;QACnB,OAAO,IAAI,CAAC,UAAU,CAAC,IAAI,EAAE,IAAI,CAAC,CAAC,KAAK,CAAC,EAAE,CAAC,EAAE;YAC5C,yDAAyD;YACzD,+BAA+B;YAC/B,MAAM,GAAG,GAAG,EAA2B,CAAA;YACvC,IAAI,GAAG,IAAI,GAAG,CAAC,IAAI,KAAK,QAAQ,EAAE;gBAChC,MAAM,EAAE,CAAA;aACT;QACH,CAAC,CAAC,CAAA;KACH;IAED,OAAO,IAAI,CAAC,UAAU,CAAC,IAAI,EAAE,IAAI,CAAC,CAAC,IAAI,CACrC,GAAG,EAAE,CAAC,IAAI,IAAI,IAAI,EAClB,KAAK,EAAC,EAAE,EAAC,EAAE;QACT,MAAM,GAAG,GAAG,EAA2B,CAAA;QACvC,IAAI,GAAG,IAAI,GAAG,CAAC,IAAI,KAAK,QAAQ,EAAE;YAChC,OAAO,IAAA,oBAAY,EAAC,MAAM,EAAE,IAAI,CAAC,CAAC,IAAI,CACpC,CAAC,IAAgC,EAAE,EAAE,CAAC,IAAA,oBAAY,EAAC,IAAI,EAAE,IAAI,EAAE,IAAI,CAAC,CACrE,CAAA;SACF;QACD,IAAI,GAAG,IAAI,GAAG,CAAC,IAAI,KAAK,QAAQ,IAAI,GAAG,CAAC,IAAI,KAAK,OAAO,EAAE;YACxD,MAAM,EAAE,CAAA;SACT;QACD,OAAO,IAAI,CAAC,SAAS,CAAC,IAAI,CAAC,CAAC,IAAI,CAC9B,EAAE,CAAC,EAAE;YACH,IAAI,EAAE,CAAC,WAAW,EAAE,EAAE;gBACpB,OAAO,IAAI,CAAA;aACZ;iBAAM;gBACL,MAAM,EAAE,CAAA;aACT;QACH,CAAC,EACD,GAAG,EAAE;YACH,MAAM,EAAE,CAAA;QACV,CAAC,CACF,CAAA;IACH,CAAC,CACF,CAAA;AACH,CAAC,EACD,EAAE,IAAI,EAAE,wBAAgB,EAAE,CAC3B,CAAA","sourcesContent":["import { dirname } from 'path'\nimport { MkdirpOptions, optsArg } from './opts-arg.js'\n\nexport const mkdirpManualSync = (\n path: string,\n options?: MkdirpOptions,\n made?: string | undefined | void\n): string | undefined | void => {\n const parent = dirname(path)\n const opts = { ...optsArg(options), recursive: false }\n\n if (parent === path) {\n try {\n return opts.mkdirSync(path, opts)\n } catch (er) {\n // swallowed by recursive implementation on posix systems\n // any other error is a failure\n const fer = er as NodeJS.ErrnoException\n if (fer && fer.code !== 'EISDIR') {\n throw er\n }\n return\n }\n }\n\n try {\n opts.mkdirSync(path, opts)\n return made || path\n } catch (er) {\n const fer = er as NodeJS.ErrnoException\n if (fer && fer.code === 'ENOENT') {\n return mkdirpManualSync(path, opts, mkdirpManualSync(parent, opts, made))\n }\n if (fer && fer.code !== 'EEXIST' && fer && fer.code !== 'EROFS') {\n throw er\n }\n try {\n if (!opts.statSync(path).isDirectory()) throw er\n } catch (_) 
{\n throw er\n }\n }\n}\n\nexport const mkdirpManual = Object.assign(\n async (\n path: string,\n options?: MkdirpOptions,\n made?: string | undefined | void\n ): Promise => {\n const opts = optsArg(options)\n opts.recursive = false\n const parent = dirname(path)\n if (parent === path) {\n return opts.mkdirAsync(path, opts).catch(er => {\n // swallowed by recursive implementation on posix systems\n // any other error is a failure\n const fer = er as NodeJS.ErrnoException\n if (fer && fer.code !== 'EISDIR') {\n throw er\n }\n })\n }\n\n return opts.mkdirAsync(path, opts).then(\n () => made || path,\n async er => {\n const fer = er as NodeJS.ErrnoException\n if (fer && fer.code === 'ENOENT') {\n return mkdirpManual(parent, opts).then(\n (made?: string | undefined | void) => mkdirpManual(path, opts, made)\n )\n }\n if (fer && fer.code !== 'EEXIST' && fer.code !== 'EROFS') {\n throw er\n }\n return opts.statAsync(path).then(\n st => {\n if (st.isDirectory()) {\n return made\n } else {\n throw er\n }\n },\n () => {\n throw er\n }\n )\n }\n )\n },\n { sync: mkdirpManualSync }\n)\n"]} \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/mkdirp-native.d.ts b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/mkdirp-native.d.ts new file mode 100644 index 00000000000000..28b64814b2545a --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/mkdirp-native.d.ts @@ -0,0 +1,6 @@ +import { MkdirpOptions } from './opts-arg.js'; +export declare const mkdirpNativeSync: (path: string, options?: MkdirpOptions) => string | void | undefined; +export declare const mkdirpNative: ((path: string, options?: MkdirpOptions) => Promise) & { + sync: (path: string, options?: MkdirpOptions) => string | void | undefined; +}; +//# sourceMappingURL=mkdirp-native.d.ts.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/mkdirp-native.d.ts.map b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/mkdirp-native.d.ts.map new file mode 100644 index 00000000000000..379c0f6591c686 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/mkdirp-native.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"mkdirp-native.d.ts","sourceRoot":"","sources":["../../../src/mkdirp-native.ts"],"names":[],"mappings":"AAGA,OAAO,EAAE,aAAa,EAAW,MAAM,eAAe,CAAA;AAEtD,eAAO,MAAM,gBAAgB,SACrB,MAAM,YACF,aAAa,KACtB,MAAM,GAAG,IAAI,GAAG,SAoBlB,CAAA;AAED,eAAO,MAAM,YAAY,UAEf,MAAM,YACF,aAAa,KACtB,QAAQ,MAAM,GAAG,IAAI,GAAG,SAAS,CAAC;iBA5B/B,MAAM,YACF,aAAa,KACtB,MAAM,GAAG,IAAI,GAAG,SAAS;CAgD3B,CAAA"} \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/mkdirp-native.js b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/mkdirp-native.js new file mode 100644 index 00000000000000..9f00567d7cc200 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/mkdirp-native.js @@ -0,0 +1,50 @@ +"use strict"; +Object.defineProperty(exports, "__esModule", { value: true }); +exports.mkdirpNative = exports.mkdirpNativeSync = void 0; +const path_1 = require("path"); +const find_made_js_1 = require("./find-made.js"); +const mkdirp_manual_js_1 = require("./mkdirp-manual.js"); +const opts_arg_js_1 = require("./opts-arg.js"); +const mkdirpNativeSync = (path, options) => { + const opts = (0, opts_arg_js_1.optsArg)(options); + opts.recursive = true; + const parent = (0, path_1.dirname)(path); + if (parent === path) { 
+ return opts.mkdirSync(path, opts); + } + const made = (0, find_made_js_1.findMadeSync)(opts, path); + try { + opts.mkdirSync(path, opts); + return made; + } + catch (er) { + const fer = er; + if (fer && fer.code === 'ENOENT') { + return (0, mkdirp_manual_js_1.mkdirpManualSync)(path, opts); + } + else { + throw er; + } + } +}; +exports.mkdirpNativeSync = mkdirpNativeSync; +exports.mkdirpNative = Object.assign(async (path, options) => { + const opts = { ...(0, opts_arg_js_1.optsArg)(options), recursive: true }; + const parent = (0, path_1.dirname)(path); + if (parent === path) { + return await opts.mkdirAsync(path, opts); + } + return (0, find_made_js_1.findMade)(opts, path).then((made) => opts + .mkdirAsync(path, opts) + .then(m => made || m) + .catch(er => { + const fer = er; + if (fer && fer.code === 'ENOENT') { + return (0, mkdirp_manual_js_1.mkdirpManual)(path, opts); + } + else { + throw er; + } + })); +}, { sync: exports.mkdirpNativeSync }); +//# sourceMappingURL=mkdirp-native.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/mkdirp-native.js.map b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/mkdirp-native.js.map new file mode 100644 index 00000000000000..1f889ee98876cc --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/mkdirp-native.js.map @@ -0,0 +1 @@ +{"version":3,"file":"mkdirp-native.js","sourceRoot":"","sources":["../../../src/mkdirp-native.ts"],"names":[],"mappings":";;;AAAA,+BAA8B;AAC9B,iDAAuD;AACvD,yDAAmE;AACnE,+CAAsD;AAE/C,MAAM,gBAAgB,GAAG,CAC9B,IAAY,EACZ,OAAuB,EACI,EAAE;IAC7B,MAAM,IAAI,GAAG,IAAA,qBAAO,EAAC,OAAO,CAAC,CAAA;IAC7B,IAAI,CAAC,SAAS,GAAG,IAAI,CAAA;IACrB,MAAM,MAAM,GAAG,IAAA,cAAO,EAAC,IAAI,CAAC,CAAA;IAC5B,IAAI,MAAM,KAAK,IAAI,EAAE;QACnB,OAAO,IAAI,CAAC,SAAS,CAAC,IAAI,EAAE,IAAI,CAAC,CAAA;KAClC;IAED,MAAM,IAAI,GAAG,IAAA,2BAAY,EAAC,IAAI,EAAE,IAAI,CAAC,CAAA;IACrC,IAAI;QACF,IAAI,CAAC,SAAS,CAAC,IAAI,EAAE,IAAI,CAAC,CAAA;QAC1B,OAAO,IAAI,CAAA;KACZ;IAAC,OAAO,EAAE,EAAE;QACX,MAAM,GAAG,GAAG,EAA2B,CAAA;QACvC,IAAI,GAAG,IAAI,GAAG,CAAC,IAAI,KAAK,QAAQ,EAAE;YAChC,OAAO,IAAA,mCAAgB,EAAC,IAAI,EAAE,IAAI,CAAC,CAAA;SACpC;aAAM;YACL,MAAM,EAAE,CAAA;SACT;KACF;AACH,CAAC,CAAA;AAvBY,QAAA,gBAAgB,oBAuB5B;AAEY,QAAA,YAAY,GAAG,MAAM,CAAC,MAAM,CACvC,KAAK,EACH,IAAY,EACZ,OAAuB,EACa,EAAE;IACtC,MAAM,IAAI,GAAG,EAAE,GAAG,IAAA,qBAAO,EAAC,OAAO,CAAC,EAAE,SAAS,EAAE,IAAI,EAAE,CAAA;IACrD,MAAM,MAAM,GAAG,IAAA,cAAO,EAAC,IAAI,CAAC,CAAA;IAC5B,IAAI,MAAM,KAAK,IAAI,EAAE;QACnB,OAAO,MAAM,IAAI,CAAC,UAAU,CAAC,IAAI,EAAE,IAAI,CAAC,CAAA;KACzC;IAED,OAAO,IAAA,uBAAQ,EAAC,IAAI,EAAE,IAAI,CAAC,CAAC,IAAI,CAAC,CAAC,IAAyB,EAAE,EAAE,CAC7D,IAAI;SACD,UAAU,CAAC,IAAI,EAAE,IAAI,CAAC;SACtB,IAAI,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI,IAAI,CAAC,CAAC;SACpB,KAAK,CAAC,EAAE,CAAC,EAAE;QACV,MAAM,GAAG,GAAG,EAA2B,CAAA;QACvC,IAAI,GAAG,IAAI,GAAG,CAAC,IAAI,KAAK,QAAQ,EAAE;YAChC,OAAO,IAAA,+BAAY,EAAC,IAAI,EAAE,IAAI,CAAC,CAAA;SAChC;aAAM;YACL,MAAM,EAAE,CAAA;SACT;IACH,CAAC,CAAC,CACL,CAAA;AACH,CAAC,EACD,EAAE,IAAI,EAAE,wBAAgB,EAAE,CAC3B,CAAA","sourcesContent":["import { dirname } from 'path'\nimport { findMade, findMadeSync } from './find-made.js'\nimport { mkdirpManual, mkdirpManualSync } from './mkdirp-manual.js'\nimport { MkdirpOptions, optsArg } from './opts-arg.js'\n\nexport const mkdirpNativeSync = (\n path: string,\n options?: MkdirpOptions\n): string | void | undefined => {\n const opts = optsArg(options)\n opts.recursive = true\n const parent = dirname(path)\n if (parent === path) {\n return opts.mkdirSync(path, opts)\n }\n\n const made = 
findMadeSync(opts, path)\n try {\n opts.mkdirSync(path, opts)\n return made\n } catch (er) {\n const fer = er as NodeJS.ErrnoException\n if (fer && fer.code === 'ENOENT') {\n return mkdirpManualSync(path, opts)\n } else {\n throw er\n }\n }\n}\n\nexport const mkdirpNative = Object.assign(\n async (\n path: string,\n options?: MkdirpOptions\n ): Promise => {\n const opts = { ...optsArg(options), recursive: true }\n const parent = dirname(path)\n if (parent === path) {\n return await opts.mkdirAsync(path, opts)\n }\n\n return findMade(opts, path).then((made?: string | undefined) =>\n opts\n .mkdirAsync(path, opts)\n .then(m => made || m)\n .catch(er => {\n const fer = er as NodeJS.ErrnoException\n if (fer && fer.code === 'ENOENT') {\n return mkdirpManual(path, opts)\n } else {\n throw er\n }\n })\n )\n },\n { sync: mkdirpNativeSync }\n)\n"]} \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/opts-arg.d.ts b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/opts-arg.d.ts new file mode 100644 index 00000000000000..73d076b3b6923c --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/opts-arg.d.ts @@ -0,0 +1,42 @@ +/// +/// +import { MakeDirectoryOptions, Stats } from 'fs'; +export interface FsProvider { + stat?: (path: string, callback: (err: NodeJS.ErrnoException | null, stats: Stats) => any) => any; + mkdir?: (path: string, opts: MakeDirectoryOptions & { + recursive?: boolean; + }, callback: (err: NodeJS.ErrnoException | null, made?: string) => any) => any; + statSync?: (path: string) => Stats; + mkdirSync?: (path: string, opts: MakeDirectoryOptions & { + recursive?: boolean; + }) => string | undefined; +} +interface Options extends FsProvider { + mode?: number | string; + fs?: FsProvider; + mkdirAsync?: (path: string, opts: MakeDirectoryOptions & { + recursive?: boolean; + }) => Promise; + statAsync?: (path: string) => Promise; +} +export type MkdirpOptions = Options | number | string; +export interface MkdirpOptionsResolved { + mode: number; + fs: FsProvider; + mkdirAsync: (path: string, opts: MakeDirectoryOptions & { + recursive?: boolean; + }) => Promise; + statAsync: (path: string) => Promise; + stat: (path: string, callback: (err: NodeJS.ErrnoException | null, stats: Stats) => any) => any; + mkdir: (path: string, opts: MakeDirectoryOptions & { + recursive?: boolean; + }, callback: (err: NodeJS.ErrnoException | null, made?: string) => any) => any; + statSync: (path: string) => Stats; + mkdirSync: (path: string, opts: MakeDirectoryOptions & { + recursive?: boolean; + }) => string | undefined; + recursive?: boolean; +} +export declare const optsArg: (opts?: MkdirpOptions) => MkdirpOptionsResolved; +export {}; +//# sourceMappingURL=opts-arg.d.ts.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/opts-arg.d.ts.map b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/opts-arg.d.ts.map new file mode 100644 index 00000000000000..e575161714f651 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/opts-arg.d.ts.map @@ -0,0 +1 @@ 
+{"version":3,"file":"opts-arg.d.ts","sourceRoot":"","sources":["../../../src/opts-arg.ts"],"names":[],"mappings":";;AAAA,OAAO,EACL,oBAAoB,EAIpB,KAAK,EAEN,MAAM,IAAI,CAAA;AAEX,MAAM,WAAW,UAAU;IACzB,IAAI,CAAC,EAAE,CACL,IAAI,EAAE,MAAM,EACZ,QAAQ,EAAE,CAAC,GAAG,EAAE,MAAM,CAAC,cAAc,GAAG,IAAI,EAAE,KAAK,EAAE,KAAK,KAAK,GAAG,KAC/D,GAAG,CAAA;IACR,KAAK,CAAC,EAAE,CACN,IAAI,EAAE,MAAM,EACZ,IAAI,EAAE,oBAAoB,GAAG;QAAE,SAAS,CAAC,EAAE,OAAO,CAAA;KAAE,EACpD,QAAQ,EAAE,CAAC,GAAG,EAAE,MAAM,CAAC,cAAc,GAAG,IAAI,EAAE,IAAI,CAAC,EAAE,MAAM,KAAK,GAAG,KAChE,GAAG,CAAA;IACR,QAAQ,CAAC,EAAE,CAAC,IAAI,EAAE,MAAM,KAAK,KAAK,CAAA;IAClC,SAAS,CAAC,EAAE,CACV,IAAI,EAAE,MAAM,EACZ,IAAI,EAAE,oBAAoB,GAAG;QAAE,SAAS,CAAC,EAAE,OAAO,CAAA;KAAE,KACjD,MAAM,GAAG,SAAS,CAAA;CACxB;AAED,UAAU,OAAQ,SAAQ,UAAU;IAClC,IAAI,CAAC,EAAE,MAAM,GAAG,MAAM,CAAA;IACtB,EAAE,CAAC,EAAE,UAAU,CAAA;IACf,UAAU,CAAC,EAAE,CACX,IAAI,EAAE,MAAM,EACZ,IAAI,EAAE,oBAAoB,GAAG;QAAE,SAAS,CAAC,EAAE,OAAO,CAAA;KAAE,KACjD,OAAO,CAAC,MAAM,GAAG,SAAS,CAAC,CAAA;IAChC,SAAS,CAAC,EAAE,CAAC,IAAI,EAAE,MAAM,KAAK,OAAO,CAAC,KAAK,CAAC,CAAA;CAC7C;AAED,MAAM,MAAM,aAAa,GAAG,OAAO,GAAG,MAAM,GAAG,MAAM,CAAA;AAErD,MAAM,WAAW,qBAAqB;IACpC,IAAI,EAAE,MAAM,CAAA;IACZ,EAAE,EAAE,UAAU,CAAA;IACd,UAAU,EAAE,CACV,IAAI,EAAE,MAAM,EACZ,IAAI,EAAE,oBAAoB,GAAG;QAAE,SAAS,CAAC,EAAE,OAAO,CAAA;KAAE,KACjD,OAAO,CAAC,MAAM,GAAG,SAAS,CAAC,CAAA;IAChC,SAAS,EAAE,CAAC,IAAI,EAAE,MAAM,KAAK,OAAO,CAAC,KAAK,CAAC,CAAA;IAC3C,IAAI,EAAE,CACJ,IAAI,EAAE,MAAM,EACZ,QAAQ,EAAE,CAAC,GAAG,EAAE,MAAM,CAAC,cAAc,GAAG,IAAI,EAAE,KAAK,EAAE,KAAK,KAAK,GAAG,KAC/D,GAAG,CAAA;IACR,KAAK,EAAE,CACL,IAAI,EAAE,MAAM,EACZ,IAAI,EAAE,oBAAoB,GAAG;QAAE,SAAS,CAAC,EAAE,OAAO,CAAA;KAAE,EACpD,QAAQ,EAAE,CAAC,GAAG,EAAE,MAAM,CAAC,cAAc,GAAG,IAAI,EAAE,IAAI,CAAC,EAAE,MAAM,KAAK,GAAG,KAChE,GAAG,CAAA;IACR,QAAQ,EAAE,CAAC,IAAI,EAAE,MAAM,KAAK,KAAK,CAAA;IACjC,SAAS,EAAE,CACT,IAAI,EAAE,MAAM,EACZ,IAAI,EAAE,oBAAoB,GAAG;QAAE,SAAS,CAAC,EAAE,OAAO,CAAA;KAAE,KACjD,MAAM,GAAG,SAAS,CAAA;IACvB,SAAS,CAAC,EAAE,OAAO,CAAA;CACpB;AAED,eAAO,MAAM,OAAO,UAAW,aAAa,KAAG,qBA2C9C,CAAA"} \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/opts-arg.js b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/opts-arg.js new file mode 100644 index 00000000000000..e8f486c0905957 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/opts-arg.js @@ -0,0 +1,38 @@ +"use strict"; +Object.defineProperty(exports, "__esModule", { value: true }); +exports.optsArg = void 0; +const fs_1 = require("fs"); +const optsArg = (opts) => { + if (!opts) { + opts = { mode: 0o777 }; + } + else if (typeof opts === 'object') { + opts = { mode: 0o777, ...opts }; + } + else if (typeof opts === 'number') { + opts = { mode: opts }; + } + else if (typeof opts === 'string') { + opts = { mode: parseInt(opts, 8) }; + } + else { + throw new TypeError('invalid options argument'); + } + const resolved = opts; + const optsFs = opts.fs || {}; + opts.mkdir = opts.mkdir || optsFs.mkdir || fs_1.mkdir; + opts.mkdirAsync = opts.mkdirAsync + ? opts.mkdirAsync + : async (path, options) => { + return new Promise((res, rej) => resolved.mkdir(path, options, (er, made) => er ? rej(er) : res(made))); + }; + opts.stat = opts.stat || optsFs.stat || fs_1.stat; + opts.statAsync = opts.statAsync + ? opts.statAsync + : async (path) => new Promise((res, rej) => resolved.stat(path, (err, stats) => (err ? 
rej(err) : res(stats)))); + opts.statSync = opts.statSync || optsFs.statSync || fs_1.statSync; + opts.mkdirSync = opts.mkdirSync || optsFs.mkdirSync || fs_1.mkdirSync; + return resolved; +}; +exports.optsArg = optsArg; +//# sourceMappingURL=opts-arg.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/opts-arg.js.map b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/opts-arg.js.map new file mode 100644 index 00000000000000..fd5590f40f54cd --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/opts-arg.js.map @@ -0,0 +1 @@ +{"version":3,"file":"opts-arg.js","sourceRoot":"","sources":["../../../src/opts-arg.ts"],"names":[],"mappings":";;;AAAA,2BAOW;AAwDJ,MAAM,OAAO,GAAG,CAAC,IAAoB,EAAyB,EAAE;IACrE,IAAI,CAAC,IAAI,EAAE;QACT,IAAI,GAAG,EAAE,IAAI,EAAE,KAAK,EAAE,CAAA;KACvB;SAAM,IAAI,OAAO,IAAI,KAAK,QAAQ,EAAE;QACnC,IAAI,GAAG,EAAE,IAAI,EAAE,KAAK,EAAE,GAAG,IAAI,EAAE,CAAA;KAChC;SAAM,IAAI,OAAO,IAAI,KAAK,QAAQ,EAAE;QACnC,IAAI,GAAG,EAAE,IAAI,EAAE,IAAI,EAAE,CAAA;KACtB;SAAM,IAAI,OAAO,IAAI,KAAK,QAAQ,EAAE;QACnC,IAAI,GAAG,EAAE,IAAI,EAAE,QAAQ,CAAC,IAAI,EAAE,CAAC,CAAC,EAAE,CAAA;KACnC;SAAM;QACL,MAAM,IAAI,SAAS,CAAC,0BAA0B,CAAC,CAAA;KAChD;IAED,MAAM,QAAQ,GAAG,IAA6B,CAAA;IAC9C,MAAM,MAAM,GAAG,IAAI,CAAC,EAAE,IAAI,EAAE,CAAA;IAE5B,IAAI,CAAC,KAAK,GAAG,IAAI,CAAC,KAAK,IAAI,MAAM,CAAC,KAAK,IAAI,UAAK,CAAA;IAEhD,IAAI,CAAC,UAAU,GAAG,IAAI,CAAC,UAAU;QAC/B,CAAC,CAAC,IAAI,CAAC,UAAU;QACjB,CAAC,CAAC,KAAK,EACH,IAAY,EACZ,OAAuD,EAC1B,EAAE;YAC/B,OAAO,IAAI,OAAO,CAAqB,CAAC,GAAG,EAAE,GAAG,EAAE,EAAE,CAClD,QAAQ,CAAC,KAAK,CAAC,IAAI,EAAE,OAAO,EAAE,CAAC,EAAE,EAAE,IAAI,EAAE,EAAE,CACzC,EAAE,CAAC,CAAC,CAAC,GAAG,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,IAAI,CAAC,CACzB,CACF,CAAA;QACH,CAAC,CAAA;IAEL,IAAI,CAAC,IAAI,GAAG,IAAI,CAAC,IAAI,IAAI,MAAM,CAAC,IAAI,IAAI,SAAI,CAAA;IAC5C,IAAI,CAAC,SAAS,GAAG,IAAI,CAAC,SAAS;QAC7B,CAAC,CAAC,IAAI,CAAC,SAAS;QAChB,CAAC,CAAC,KAAK,EAAE,IAAY,EAAE,EAAE,CACrB,IAAI,OAAO,CAAC,CAAC,GAAG,EAAE,GAAG,EAAE,EAAE,CACvB,QAAQ,CAAC,IAAI,CAAC,IAAI,EAAE,CAAC,GAAG,EAAE,KAAK,EAAE,EAAE,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,KAAK,CAAC,CAAC,CAAC,CACnE,CAAA;IAEP,IAAI,CAAC,QAAQ,GAAG,IAAI,CAAC,QAAQ,IAAI,MAAM,CAAC,QAAQ,IAAI,aAAQ,CAAA;IAC5D,IAAI,CAAC,SAAS,GAAG,IAAI,CAAC,SAAS,IAAI,MAAM,CAAC,SAAS,IAAI,cAAS,CAAA;IAEhE,OAAO,QAAQ,CAAA;AACjB,CAAC,CAAA;AA3CY,QAAA,OAAO,WA2CnB","sourcesContent":["import {\n MakeDirectoryOptions,\n mkdir,\n mkdirSync,\n stat,\n Stats,\n statSync,\n} from 'fs'\n\nexport interface FsProvider {\n stat?: (\n path: string,\n callback: (err: NodeJS.ErrnoException | null, stats: Stats) => any\n ) => any\n mkdir?: (\n path: string,\n opts: MakeDirectoryOptions & { recursive?: boolean },\n callback: (err: NodeJS.ErrnoException | null, made?: string) => any\n ) => any\n statSync?: (path: string) => Stats\n mkdirSync?: (\n path: string,\n opts: MakeDirectoryOptions & { recursive?: boolean }\n ) => string | undefined\n}\n\ninterface Options extends FsProvider {\n mode?: number | string\n fs?: FsProvider\n mkdirAsync?: (\n path: string,\n opts: MakeDirectoryOptions & { recursive?: boolean }\n ) => Promise\n statAsync?: (path: string) => Promise\n}\n\nexport type MkdirpOptions = Options | number | string\n\nexport interface MkdirpOptionsResolved {\n mode: number\n fs: FsProvider\n mkdirAsync: (\n path: string,\n opts: MakeDirectoryOptions & { recursive?: boolean }\n ) => Promise\n statAsync: (path: string) => Promise\n stat: (\n path: string,\n callback: (err: NodeJS.ErrnoException | null, stats: Stats) => 
any\n ) => any\n mkdir: (\n path: string,\n opts: MakeDirectoryOptions & { recursive?: boolean },\n callback: (err: NodeJS.ErrnoException | null, made?: string) => any\n ) => any\n statSync: (path: string) => Stats\n mkdirSync: (\n path: string,\n opts: MakeDirectoryOptions & { recursive?: boolean }\n ) => string | undefined\n recursive?: boolean\n}\n\nexport const optsArg = (opts?: MkdirpOptions): MkdirpOptionsResolved => {\n if (!opts) {\n opts = { mode: 0o777 }\n } else if (typeof opts === 'object') {\n opts = { mode: 0o777, ...opts }\n } else if (typeof opts === 'number') {\n opts = { mode: opts }\n } else if (typeof opts === 'string') {\n opts = { mode: parseInt(opts, 8) }\n } else {\n throw new TypeError('invalid options argument')\n }\n\n const resolved = opts as MkdirpOptionsResolved\n const optsFs = opts.fs || {}\n\n opts.mkdir = opts.mkdir || optsFs.mkdir || mkdir\n\n opts.mkdirAsync = opts.mkdirAsync\n ? opts.mkdirAsync\n : async (\n path: string,\n options: MakeDirectoryOptions & { recursive?: boolean }\n ): Promise => {\n return new Promise((res, rej) =>\n resolved.mkdir(path, options, (er, made) =>\n er ? rej(er) : res(made)\n )\n )\n }\n\n opts.stat = opts.stat || optsFs.stat || stat\n opts.statAsync = opts.statAsync\n ? opts.statAsync\n : async (path: string) =>\n new Promise((res, rej) =>\n resolved.stat(path, (err, stats) => (err ? rej(err) : res(stats)))\n )\n\n opts.statSync = opts.statSync || optsFs.statSync || statSync\n opts.mkdirSync = opts.mkdirSync || optsFs.mkdirSync || mkdirSync\n\n return resolved\n}\n"]} \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/path-arg.d.ts b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/path-arg.d.ts new file mode 100644 index 00000000000000..ad0ccfc482a485 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/path-arg.d.ts @@ -0,0 +1,2 @@ +export declare const pathArg: (path: string) => string; +//# sourceMappingURL=path-arg.d.ts.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/path-arg.d.ts.map b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/path-arg.d.ts.map new file mode 100644 index 00000000000000..3b52b077c6c05c --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/path-arg.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"path-arg.d.ts","sourceRoot":"","sources":["../../../src/path-arg.ts"],"names":[],"mappings":"AAEA,eAAO,MAAM,OAAO,SAAU,MAAM,WAyBnC,CAAA"} \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/path-arg.js b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/path-arg.js new file mode 100644 index 00000000000000..a6b457f6e23d58 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/path-arg.js @@ -0,0 +1,28 @@ +"use strict"; +Object.defineProperty(exports, "__esModule", { value: true }); +exports.pathArg = void 0; +const platform = process.env.__TESTING_MKDIRP_PLATFORM__ || process.platform; +const path_1 = require("path"); +const pathArg = (path) => { + if (/\0/.test(path)) { + // simulate same failure that node raises + throw Object.assign(new TypeError('path must be a string without null bytes'), { + path, + code: 'ERR_INVALID_ARG_VALUE', + }); + } + path = (0, path_1.resolve)(path); + if (platform === 'win32') { + const badWinChars = /[*|"<>?:]/; + const { root } = (0, path_1.parse)(path); + if 
(badWinChars.test(path.substring(root.length))) { + throw Object.assign(new Error('Illegal characters in path.'), { + path, + code: 'EINVAL', + }); + } + } + return path; +}; +exports.pathArg = pathArg; +//# sourceMappingURL=path-arg.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/path-arg.js.map b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/path-arg.js.map new file mode 100644 index 00000000000000..ad3b5d38cad3cd --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/path-arg.js.map @@ -0,0 +1 @@ +{"version":3,"file":"path-arg.js","sourceRoot":"","sources":["../../../src/path-arg.ts"],"names":[],"mappings":";;;AAAA,MAAM,QAAQ,GAAG,OAAO,CAAC,GAAG,CAAC,2BAA2B,IAAI,OAAO,CAAC,QAAQ,CAAA;AAC5E,+BAAqC;AAC9B,MAAM,OAAO,GAAG,CAAC,IAAY,EAAE,EAAE;IACtC,IAAI,IAAI,CAAC,IAAI,CAAC,IAAI,CAAC,EAAE;QACnB,yCAAyC;QACzC,MAAM,MAAM,CAAC,MAAM,CACjB,IAAI,SAAS,CAAC,0CAA0C,CAAC,EACzD;YACE,IAAI;YACJ,IAAI,EAAE,uBAAuB;SAC9B,CACF,CAAA;KACF;IAED,IAAI,GAAG,IAAA,cAAO,EAAC,IAAI,CAAC,CAAA;IACpB,IAAI,QAAQ,KAAK,OAAO,EAAE;QACxB,MAAM,WAAW,GAAG,WAAW,CAAA;QAC/B,MAAM,EAAE,IAAI,EAAE,GAAG,IAAA,YAAK,EAAC,IAAI,CAAC,CAAA;QAC5B,IAAI,WAAW,CAAC,IAAI,CAAC,IAAI,CAAC,SAAS,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC,EAAE;YACjD,MAAM,MAAM,CAAC,MAAM,CAAC,IAAI,KAAK,CAAC,6BAA6B,CAAC,EAAE;gBAC5D,IAAI;gBACJ,IAAI,EAAE,QAAQ;aACf,CAAC,CAAA;SACH;KACF;IAED,OAAO,IAAI,CAAA;AACb,CAAC,CAAA;AAzBY,QAAA,OAAO,WAyBnB","sourcesContent":["const platform = process.env.__TESTING_MKDIRP_PLATFORM__ || process.platform\nimport { parse, resolve } from 'path'\nexport const pathArg = (path: string) => {\n if (/\\0/.test(path)) {\n // simulate same failure that node raises\n throw Object.assign(\n new TypeError('path must be a string without null bytes'),\n {\n path,\n code: 'ERR_INVALID_ARG_VALUE',\n }\n )\n }\n\n path = resolve(path)\n if (platform === 'win32') {\n const badWinChars = /[*|\"<>?:]/\n const { root } = parse(path)\n if (badWinChars.test(path.substring(root.length))) {\n throw Object.assign(new Error('Illegal characters in path.'), {\n path,\n code: 'EINVAL',\n })\n }\n }\n\n return path\n}\n"]} \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/use-native.d.ts b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/use-native.d.ts new file mode 100644 index 00000000000000..1c6cb619e30405 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/use-native.d.ts @@ -0,0 +1,6 @@ +import { MkdirpOptions } from './opts-arg.js'; +export declare const useNativeSync: (opts?: MkdirpOptions) => boolean; +export declare const useNative: ((opts?: MkdirpOptions) => boolean) & { + sync: (opts?: MkdirpOptions) => boolean; +}; +//# sourceMappingURL=use-native.d.ts.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/use-native.d.ts.map b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/use-native.d.ts.map new file mode 100644 index 00000000000000..7dc275e322ea3b --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/use-native.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"use-native.d.ts","sourceRoot":"","sources":["../../../src/use-native.ts"],"names":[],"mappings":"AACA,OAAO,EAAE,aAAa,EAAW,MAAM,eAAe,CAAA;AAMtD,eAAO,MAAM,aAAa,UAEd,aAAa,YAA0C,CAAA;AAEnE,eAAO,MAAM,SAAS,WAGR,aAAa;kBALf,aAAa;CASxB,CAAA"} \ No newline at end of file diff --git 
a/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/use-native.js b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/use-native.js new file mode 100644 index 00000000000000..550b3452688ee5 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/use-native.js @@ -0,0 +1,17 @@ +"use strict"; +Object.defineProperty(exports, "__esModule", { value: true }); +exports.useNative = exports.useNativeSync = void 0; +const fs_1 = require("fs"); +const opts_arg_js_1 = require("./opts-arg.js"); +const version = process.env.__TESTING_MKDIRP_NODE_VERSION__ || process.version; +const versArr = version.replace(/^v/, '').split('.'); +const hasNative = +versArr[0] > 10 || (+versArr[0] === 10 && +versArr[1] >= 12); +exports.useNativeSync = !hasNative + ? () => false + : (opts) => (0, opts_arg_js_1.optsArg)(opts).mkdirSync === fs_1.mkdirSync; +exports.useNative = Object.assign(!hasNative + ? () => false + : (opts) => (0, opts_arg_js_1.optsArg)(opts).mkdir === fs_1.mkdir, { + sync: exports.useNativeSync, +}); +//# sourceMappingURL=use-native.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/use-native.js.map b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/use-native.js.map new file mode 100644 index 00000000000000..9a15efebb9ec28 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/cjs/src/use-native.js.map @@ -0,0 +1 @@ +{"version":3,"file":"use-native.js","sourceRoot":"","sources":["../../../src/use-native.ts"],"names":[],"mappings":";;;AAAA,2BAAqC;AACrC,+CAAsD;AAEtD,MAAM,OAAO,GAAG,OAAO,CAAC,GAAG,CAAC,+BAA+B,IAAI,OAAO,CAAC,OAAO,CAAA;AAC9E,MAAM,OAAO,GAAG,OAAO,CAAC,OAAO,CAAC,IAAI,EAAE,EAAE,CAAC,CAAC,KAAK,CAAC,GAAG,CAAC,CAAA;AACpD,MAAM,SAAS,GAAG,CAAC,OAAO,CAAC,CAAC,CAAC,GAAG,EAAE,IAAI,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC,KAAK,EAAE,IAAI,CAAC,OAAO,CAAC,CAAC,CAAC,IAAI,EAAE,CAAC,CAAA;AAElE,QAAA,aAAa,GAAG,CAAC,SAAS;IACrC,CAAC,CAAC,GAAG,EAAE,CAAC,KAAK;IACb,CAAC,CAAC,CAAC,IAAoB,EAAE,EAAE,CAAC,IAAA,qBAAO,EAAC,IAAI,CAAC,CAAC,SAAS,KAAK,cAAS,CAAA;AAEtD,QAAA,SAAS,GAAG,MAAM,CAAC,MAAM,CACpC,CAAC,SAAS;IACR,CAAC,CAAC,GAAG,EAAE,CAAC,KAAK;IACb,CAAC,CAAC,CAAC,IAAoB,EAAE,EAAE,CAAC,IAAA,qBAAO,EAAC,IAAI,CAAC,CAAC,KAAK,KAAK,UAAK,EAC3D;IACE,IAAI,EAAE,qBAAa;CACpB,CACF,CAAA","sourcesContent":["import { mkdir, mkdirSync } from 'fs'\nimport { MkdirpOptions, optsArg } from './opts-arg.js'\n\nconst version = process.env.__TESTING_MKDIRP_NODE_VERSION__ || process.version\nconst versArr = version.replace(/^v/, '').split('.')\nconst hasNative = +versArr[0] > 10 || (+versArr[0] === 10 && +versArr[1] >= 12)\n\nexport const useNativeSync = !hasNative\n ? () => false\n : (opts?: MkdirpOptions) => optsArg(opts).mkdirSync === mkdirSync\n\nexport const useNative = Object.assign(\n !hasNative\n ? 
() => false\n : (opts?: MkdirpOptions) => optsArg(opts).mkdir === mkdir,\n {\n sync: useNativeSync,\n }\n)\n"]} \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/find-made.d.ts b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/find-made.d.ts new file mode 100644 index 00000000000000..e47794b3bb72a3 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/find-made.d.ts @@ -0,0 +1,4 @@ +import { MkdirpOptionsResolved } from './opts-arg.js'; +export declare const findMade: (opts: MkdirpOptionsResolved, parent: string, path?: string) => Promise; +export declare const findMadeSync: (opts: MkdirpOptionsResolved, parent: string, path?: string) => undefined | string; +//# sourceMappingURL=find-made.d.ts.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/find-made.d.ts.map b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/find-made.d.ts.map new file mode 100644 index 00000000000000..411aad1410eb7a --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/find-made.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"find-made.d.ts","sourceRoot":"","sources":["../../src/find-made.ts"],"names":[],"mappings":"AACA,OAAO,EAAE,qBAAqB,EAAE,MAAM,eAAe,CAAA;AAErD,eAAO,MAAM,QAAQ,SACb,qBAAqB,UACnB,MAAM,SACP,MAAM,KACZ,QAAQ,SAAS,GAAG,MAAM,CAe5B,CAAA;AAED,eAAO,MAAM,YAAY,SACjB,qBAAqB,UACnB,MAAM,SACP,MAAM,KACZ,SAAS,GAAG,MAad,CAAA"} \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/find-made.js b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/find-made.js new file mode 100644 index 00000000000000..3e72fd59a2c1fb --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/find-made.js @@ -0,0 +1,30 @@ +import { dirname } from 'path'; +export const findMade = async (opts, parent, path) => { + // we never want the 'made' return value to be a root directory + if (path === parent) { + return; + } + return opts.statAsync(parent).then(st => (st.isDirectory() ? path : undefined), // will fail later + // will fail later + er => { + const fer = er; + return fer && fer.code === 'ENOENT' + ? findMade(opts, dirname(parent), parent) + : undefined; + }); +}; +export const findMadeSync = (opts, parent, path) => { + if (path === parent) { + return undefined; + } + try { + return opts.statSync(parent).isDirectory() ? path : undefined; + } + catch (er) { + const fer = er; + return fer && fer.code === 'ENOENT' + ? 
findMadeSync(opts, dirname(parent), parent) + : undefined; + } +}; +//# sourceMappingURL=find-made.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/find-made.js.map b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/find-made.js.map new file mode 100644 index 00000000000000..7b58089c6266c1 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/find-made.js.map @@ -0,0 +1 @@ +{"version":3,"file":"find-made.js","sourceRoot":"","sources":["../../src/find-made.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,OAAO,EAAE,MAAM,MAAM,CAAA;AAG9B,MAAM,CAAC,MAAM,QAAQ,GAAG,KAAK,EAC3B,IAA2B,EAC3B,MAAc,EACd,IAAa,EACgB,EAAE;IAC/B,+DAA+D;IAC/D,IAAI,IAAI,KAAK,MAAM,EAAE;QACnB,OAAM;KACP;IAED,OAAO,IAAI,CAAC,SAAS,CAAC,MAAM,CAAC,CAAC,IAAI,CAChC,EAAE,CAAC,EAAE,CAAC,CAAC,EAAE,CAAC,WAAW,EAAE,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,SAAS,CAAC,EAAE,kBAAkB;IAC/D,AAD6C,kBAAkB;IAC/D,EAAE,CAAC,EAAE;QACH,MAAM,GAAG,GAAG,EAA2B,CAAA;QACvC,OAAO,GAAG,IAAI,GAAG,CAAC,IAAI,KAAK,QAAQ;YACjC,CAAC,CAAC,QAAQ,CAAC,IAAI,EAAE,OAAO,CAAC,MAAM,CAAC,EAAE,MAAM,CAAC;YACzC,CAAC,CAAC,SAAS,CAAA;IACf,CAAC,CACF,CAAA;AACH,CAAC,CAAA;AAED,MAAM,CAAC,MAAM,YAAY,GAAG,CAC1B,IAA2B,EAC3B,MAAc,EACd,IAAa,EACO,EAAE;IACtB,IAAI,IAAI,KAAK,MAAM,EAAE;QACnB,OAAO,SAAS,CAAA;KACjB;IAED,IAAI;QACF,OAAO,IAAI,CAAC,QAAQ,CAAC,MAAM,CAAC,CAAC,WAAW,EAAE,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,SAAS,CAAA;KAC9D;IAAC,OAAO,EAAE,EAAE;QACX,MAAM,GAAG,GAAG,EAA2B,CAAA;QACvC,OAAO,GAAG,IAAI,GAAG,CAAC,IAAI,KAAK,QAAQ;YACjC,CAAC,CAAC,YAAY,CAAC,IAAI,EAAE,OAAO,CAAC,MAAM,CAAC,EAAE,MAAM,CAAC;YAC7C,CAAC,CAAC,SAAS,CAAA;KACd;AACH,CAAC,CAAA","sourcesContent":["import { dirname } from 'path'\nimport { MkdirpOptionsResolved } from './opts-arg.js'\n\nexport const findMade = async (\n opts: MkdirpOptionsResolved,\n parent: string,\n path?: string\n): Promise => {\n // we never want the 'made' return value to be a root directory\n if (path === parent) {\n return\n }\n\n return opts.statAsync(parent).then(\n st => (st.isDirectory() ? path : undefined), // will fail later\n er => {\n const fer = er as NodeJS.ErrnoException\n return fer && fer.code === 'ENOENT'\n ? findMade(opts, dirname(parent), parent)\n : undefined\n }\n )\n}\n\nexport const findMadeSync = (\n opts: MkdirpOptionsResolved,\n parent: string,\n path?: string\n): undefined | string => {\n if (path === parent) {\n return undefined\n }\n\n try {\n return opts.statSync(parent).isDirectory() ? path : undefined\n } catch (er) {\n const fer = er as NodeJS.ErrnoException\n return fer && fer.code === 'ENOENT'\n ? 
findMadeSync(opts, dirname(parent), parent)\n : undefined\n }\n}\n"]} \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/index.d.ts b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/index.d.ts new file mode 100644 index 00000000000000..fc9e43b3a45de1 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/index.d.ts @@ -0,0 +1,39 @@ +import { MkdirpOptions } from './opts-arg.js'; +export { mkdirpManual, mkdirpManualSync } from './mkdirp-manual.js'; +export { mkdirpNative, mkdirpNativeSync } from './mkdirp-native.js'; +export { useNative, useNativeSync } from './use-native.js'; +export declare const mkdirpSync: (path: string, opts?: MkdirpOptions) => string | void; +export declare const sync: (path: string, opts?: MkdirpOptions) => string | void; +export declare const manual: ((path: string, options?: MkdirpOptions | undefined, made?: string | void | undefined) => Promise) & { + sync: (path: string, options?: MkdirpOptions | undefined, made?: string | void | undefined) => string | void | undefined; +}; +export declare const manualSync: (path: string, options?: MkdirpOptions | undefined, made?: string | void | undefined) => string | void | undefined; +export declare const native: ((path: string, options?: MkdirpOptions | undefined) => Promise) & { + sync: (path: string, options?: MkdirpOptions | undefined) => string | void | undefined; +}; +export declare const nativeSync: (path: string, options?: MkdirpOptions | undefined) => string | void | undefined; +export declare const mkdirp: ((path: string, opts?: MkdirpOptions) => Promise) & { + mkdirpSync: (path: string, opts?: MkdirpOptions) => string | void; + mkdirpNative: ((path: string, options?: MkdirpOptions | undefined) => Promise) & { + sync: (path: string, options?: MkdirpOptions | undefined) => string | void | undefined; + }; + mkdirpNativeSync: (path: string, options?: MkdirpOptions | undefined) => string | void | undefined; + mkdirpManual: ((path: string, options?: MkdirpOptions | undefined, made?: string | void | undefined) => Promise) & { + sync: (path: string, options?: MkdirpOptions | undefined, made?: string | void | undefined) => string | void | undefined; + }; + mkdirpManualSync: (path: string, options?: MkdirpOptions | undefined, made?: string | void | undefined) => string | void | undefined; + sync: (path: string, opts?: MkdirpOptions) => string | void; + native: ((path: string, options?: MkdirpOptions | undefined) => Promise) & { + sync: (path: string, options?: MkdirpOptions | undefined) => string | void | undefined; + }; + nativeSync: (path: string, options?: MkdirpOptions | undefined) => string | void | undefined; + manual: ((path: string, options?: MkdirpOptions | undefined, made?: string | void | undefined) => Promise) & { + sync: (path: string, options?: MkdirpOptions | undefined, made?: string | void | undefined) => string | void | undefined; + }; + manualSync: (path: string, options?: MkdirpOptions | undefined, made?: string | void | undefined) => string | void | undefined; + useNative: ((opts?: MkdirpOptions | undefined) => boolean) & { + sync: (opts?: MkdirpOptions | undefined) => boolean; + }; + useNativeSync: (opts?: MkdirpOptions | undefined) => boolean; +}; +//# sourceMappingURL=index.d.ts.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/index.d.ts.map b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/index.d.ts.map new file mode 100644 index 
00000000000000..cfcc78083857b1 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/index.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../src/index.ts"],"names":[],"mappings":"AAEA,OAAO,EAAE,aAAa,EAAW,MAAM,eAAe,CAAA;AAItD,OAAO,EAAE,YAAY,EAAE,gBAAgB,EAAE,MAAM,oBAAoB,CAAA;AACnE,OAAO,EAAE,YAAY,EAAE,gBAAgB,EAAE,MAAM,oBAAoB,CAAA;AACnE,OAAO,EAAE,SAAS,EAAE,aAAa,EAAE,MAAM,iBAAiB,CAAA;AAG1D,eAAO,MAAM,UAAU,SAAU,MAAM,SAAS,aAAa,kBAM5D,CAAA;AAED,eAAO,MAAM,IAAI,SARgB,MAAM,SAAS,aAAa,kBAQ/B,CAAA;AAC9B,eAAO,MAAM,MAAM;;CAAe,CAAA;AAClC,eAAO,MAAM,UAAU,oHAAmB,CAAA;AAC1C,eAAO,MAAM,MAAM;;CAAe,CAAA;AAClC,eAAO,MAAM,UAAU,kFAAmB,CAAA;AAC1C,eAAO,MAAM,MAAM,UACJ,MAAM,SAAS,aAAa;uBAdV,MAAM,SAAS,aAAa;;;;;;;;;iBAA5B,MAAM,SAAS,aAAa;;;;;;;;;;;;;CAoC5D,CAAA"} \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/index.js b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/index.js new file mode 100644 index 00000000000000..0217ecc8cdd83d --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/index.js @@ -0,0 +1,43 @@ +import { mkdirpManual, mkdirpManualSync } from './mkdirp-manual.js'; +import { mkdirpNative, mkdirpNativeSync } from './mkdirp-native.js'; +import { optsArg } from './opts-arg.js'; +import { pathArg } from './path-arg.js'; +import { useNative, useNativeSync } from './use-native.js'; +/* c8 ignore start */ +export { mkdirpManual, mkdirpManualSync } from './mkdirp-manual.js'; +export { mkdirpNative, mkdirpNativeSync } from './mkdirp-native.js'; +export { useNative, useNativeSync } from './use-native.js'; +/* c8 ignore stop */ +export const mkdirpSync = (path, opts) => { + path = pathArg(path); + const resolved = optsArg(opts); + return useNativeSync(resolved) + ? mkdirpNativeSync(path, resolved) + : mkdirpManualSync(path, resolved); +}; +export const sync = mkdirpSync; +export const manual = mkdirpManual; +export const manualSync = mkdirpManualSync; +export const native = mkdirpNative; +export const nativeSync = mkdirpNativeSync; +export const mkdirp = Object.assign(async (path, opts) => { + path = pathArg(path); + const resolved = optsArg(opts); + return useNative(resolved) + ? 
mkdirpNative(path, resolved) + : mkdirpManual(path, resolved); +}, { + mkdirpSync, + mkdirpNative, + mkdirpNativeSync, + mkdirpManual, + mkdirpManualSync, + sync: mkdirpSync, + native: mkdirpNative, + nativeSync: mkdirpNativeSync, + manual: mkdirpManual, + manualSync: mkdirpManualSync, + useNative, + useNativeSync, +}); +//# sourceMappingURL=index.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/index.js.map b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/index.js.map new file mode 100644 index 00000000000000..47a8133a070c8f --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/index.js.map @@ -0,0 +1 @@ +{"version":3,"file":"index.js","sourceRoot":"","sources":["../../src/index.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,YAAY,EAAE,gBAAgB,EAAE,MAAM,oBAAoB,CAAA;AACnE,OAAO,EAAE,YAAY,EAAE,gBAAgB,EAAE,MAAM,oBAAoB,CAAA;AACnE,OAAO,EAAiB,OAAO,EAAE,MAAM,eAAe,CAAA;AACtD,OAAO,EAAE,OAAO,EAAE,MAAM,eAAe,CAAA;AACvC,OAAO,EAAE,SAAS,EAAE,aAAa,EAAE,MAAM,iBAAiB,CAAA;AAC1D,qBAAqB;AACrB,OAAO,EAAE,YAAY,EAAE,gBAAgB,EAAE,MAAM,oBAAoB,CAAA;AACnE,OAAO,EAAE,YAAY,EAAE,gBAAgB,EAAE,MAAM,oBAAoB,CAAA;AACnE,OAAO,EAAE,SAAS,EAAE,aAAa,EAAE,MAAM,iBAAiB,CAAA;AAC1D,oBAAoB;AAEpB,MAAM,CAAC,MAAM,UAAU,GAAG,CAAC,IAAY,EAAE,IAAoB,EAAE,EAAE;IAC/D,IAAI,GAAG,OAAO,CAAC,IAAI,CAAC,CAAA;IACpB,MAAM,QAAQ,GAAG,OAAO,CAAC,IAAI,CAAC,CAAA;IAC9B,OAAO,aAAa,CAAC,QAAQ,CAAC;QAC5B,CAAC,CAAC,gBAAgB,CAAC,IAAI,EAAE,QAAQ,CAAC;QAClC,CAAC,CAAC,gBAAgB,CAAC,IAAI,EAAE,QAAQ,CAAC,CAAA;AACtC,CAAC,CAAA;AAED,MAAM,CAAC,MAAM,IAAI,GAAG,UAAU,CAAA;AAC9B,MAAM,CAAC,MAAM,MAAM,GAAG,YAAY,CAAA;AAClC,MAAM,CAAC,MAAM,UAAU,GAAG,gBAAgB,CAAA;AAC1C,MAAM,CAAC,MAAM,MAAM,GAAG,YAAY,CAAA;AAClC,MAAM,CAAC,MAAM,UAAU,GAAG,gBAAgB,CAAA;AAC1C,MAAM,CAAC,MAAM,MAAM,GAAG,MAAM,CAAC,MAAM,CACjC,KAAK,EAAE,IAAY,EAAE,IAAoB,EAAE,EAAE;IAC3C,IAAI,GAAG,OAAO,CAAC,IAAI,CAAC,CAAA;IACpB,MAAM,QAAQ,GAAG,OAAO,CAAC,IAAI,CAAC,CAAA;IAC9B,OAAO,SAAS,CAAC,QAAQ,CAAC;QACxB,CAAC,CAAC,YAAY,CAAC,IAAI,EAAE,QAAQ,CAAC;QAC9B,CAAC,CAAC,YAAY,CAAC,IAAI,EAAE,QAAQ,CAAC,CAAA;AAClC,CAAC,EACD;IACE,UAAU;IACV,YAAY;IACZ,gBAAgB;IAChB,YAAY;IACZ,gBAAgB;IAEhB,IAAI,EAAE,UAAU;IAChB,MAAM,EAAE,YAAY;IACpB,UAAU,EAAE,gBAAgB;IAC5B,MAAM,EAAE,YAAY;IACpB,UAAU,EAAE,gBAAgB;IAC5B,SAAS;IACT,aAAa;CACd,CACF,CAAA","sourcesContent":["import { mkdirpManual, mkdirpManualSync } from './mkdirp-manual.js'\nimport { mkdirpNative, mkdirpNativeSync } from './mkdirp-native.js'\nimport { MkdirpOptions, optsArg } from './opts-arg.js'\nimport { pathArg } from './path-arg.js'\nimport { useNative, useNativeSync } from './use-native.js'\n/* c8 ignore start */\nexport { mkdirpManual, mkdirpManualSync } from './mkdirp-manual.js'\nexport { mkdirpNative, mkdirpNativeSync } from './mkdirp-native.js'\nexport { useNative, useNativeSync } from './use-native.js'\n/* c8 ignore stop */\n\nexport const mkdirpSync = (path: string, opts?: MkdirpOptions) => {\n path = pathArg(path)\n const resolved = optsArg(opts)\n return useNativeSync(resolved)\n ? mkdirpNativeSync(path, resolved)\n : mkdirpManualSync(path, resolved)\n}\n\nexport const sync = mkdirpSync\nexport const manual = mkdirpManual\nexport const manualSync = mkdirpManualSync\nexport const native = mkdirpNative\nexport const nativeSync = mkdirpNativeSync\nexport const mkdirp = Object.assign(\n async (path: string, opts?: MkdirpOptions) => {\n path = pathArg(path)\n const resolved = optsArg(opts)\n return useNative(resolved)\n ? 
mkdirpNative(path, resolved)\n : mkdirpManual(path, resolved)\n },\n {\n mkdirpSync,\n mkdirpNative,\n mkdirpNativeSync,\n mkdirpManual,\n mkdirpManualSync,\n\n sync: mkdirpSync,\n native: mkdirpNative,\n nativeSync: mkdirpNativeSync,\n manual: mkdirpManual,\n manualSync: mkdirpManualSync,\n useNative,\n useNativeSync,\n }\n)\n"]} \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/mkdirp-manual.d.ts b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/mkdirp-manual.d.ts new file mode 100644 index 00000000000000..e49cdf9f1bd122 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/mkdirp-manual.d.ts @@ -0,0 +1,6 @@ +import { MkdirpOptions } from './opts-arg.js'; +export declare const mkdirpManualSync: (path: string, options?: MkdirpOptions, made?: string | undefined | void) => string | undefined | void; +export declare const mkdirpManual: ((path: string, options?: MkdirpOptions, made?: string | undefined | void) => Promise) & { + sync: (path: string, options?: MkdirpOptions, made?: string | undefined | void) => string | undefined | void; +}; +//# sourceMappingURL=mkdirp-manual.d.ts.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/mkdirp-manual.d.ts.map b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/mkdirp-manual.d.ts.map new file mode 100644 index 00000000000000..ae7f243d3ca78b --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/mkdirp-manual.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"mkdirp-manual.d.ts","sourceRoot":"","sources":["../../src/mkdirp-manual.ts"],"names":[],"mappings":"AACA,OAAO,EAAE,aAAa,EAAW,MAAM,eAAe,CAAA;AAEtD,eAAO,MAAM,gBAAgB,SACrB,MAAM,YACF,aAAa,SAChB,MAAM,GAAG,SAAS,GAAG,IAAI,KAC/B,MAAM,GAAG,SAAS,GAAG,IAmCvB,CAAA;AAED,eAAO,MAAM,YAAY,UAEf,MAAM,YACF,aAAa,SAChB,MAAM,GAAG,SAAS,GAAG,IAAI,KAC/B,QAAQ,MAAM,GAAG,SAAS,GAAG,IAAI,CAAC;iBA7C/B,MAAM,YACF,aAAa,SAChB,MAAM,GAAG,SAAS,GAAG,IAAI,KAC/B,MAAM,GAAG,SAAS,GAAG,IAAI;CAqF3B,CAAA"} \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/mkdirp-manual.js b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/mkdirp-manual.js new file mode 100644 index 00000000000000..a4d044e02d3bfc --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/mkdirp-manual.js @@ -0,0 +1,75 @@ +import { dirname } from 'path'; +import { optsArg } from './opts-arg.js'; +export const mkdirpManualSync = (path, options, made) => { + const parent = dirname(path); + const opts = { ...optsArg(options), recursive: false }; + if (parent === path) { + try { + return opts.mkdirSync(path, opts); + } + catch (er) { + // swallowed by recursive implementation on posix systems + // any other error is a failure + const fer = er; + if (fer && fer.code !== 'EISDIR') { + throw er; + } + return; + } + } + try { + opts.mkdirSync(path, opts); + return made || path; + } + catch (er) { + const fer = er; + if (fer && fer.code === 'ENOENT') { + return mkdirpManualSync(path, opts, mkdirpManualSync(parent, opts, made)); + } + if (fer && fer.code !== 'EEXIST' && fer && fer.code !== 'EROFS') { + throw er; + } + try { + if (!opts.statSync(path).isDirectory()) + throw er; + } + catch (_) { + throw er; + } + } +}; +export const mkdirpManual = Object.assign(async (path, options, made) => { + const opts = optsArg(options); + opts.recursive = false; + const parent = dirname(path); + if (parent === path) { + return opts.mkdirAsync(path, 
opts).catch(er => { + // swallowed by recursive implementation on posix systems + // any other error is a failure + const fer = er; + if (fer && fer.code !== 'EISDIR') { + throw er; + } + }); + } + return opts.mkdirAsync(path, opts).then(() => made || path, async (er) => { + const fer = er; + if (fer && fer.code === 'ENOENT') { + return mkdirpManual(parent, opts).then((made) => mkdirpManual(path, opts, made)); + } + if (fer && fer.code !== 'EEXIST' && fer.code !== 'EROFS') { + throw er; + } + return opts.statAsync(path).then(st => { + if (st.isDirectory()) { + return made; + } + else { + throw er; + } + }, () => { + throw er; + }); + }); +}, { sync: mkdirpManualSync }); +//# sourceMappingURL=mkdirp-manual.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/mkdirp-manual.js.map b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/mkdirp-manual.js.map new file mode 100644 index 00000000000000..29eab250e126c8 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/mkdirp-manual.js.map @@ -0,0 +1 @@ +{"version":3,"file":"mkdirp-manual.js","sourceRoot":"","sources":["../../src/mkdirp-manual.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,OAAO,EAAE,MAAM,MAAM,CAAA;AAC9B,OAAO,EAAiB,OAAO,EAAE,MAAM,eAAe,CAAA;AAEtD,MAAM,CAAC,MAAM,gBAAgB,GAAG,CAC9B,IAAY,EACZ,OAAuB,EACvB,IAAgC,EACL,EAAE;IAC7B,MAAM,MAAM,GAAG,OAAO,CAAC,IAAI,CAAC,CAAA;IAC5B,MAAM,IAAI,GAAG,EAAE,GAAG,OAAO,CAAC,OAAO,CAAC,EAAE,SAAS,EAAE,KAAK,EAAE,CAAA;IAEtD,IAAI,MAAM,KAAK,IAAI,EAAE;QACnB,IAAI;YACF,OAAO,IAAI,CAAC,SAAS,CAAC,IAAI,EAAE,IAAI,CAAC,CAAA;SAClC;QAAC,OAAO,EAAE,EAAE;YACX,yDAAyD;YACzD,+BAA+B;YAC/B,MAAM,GAAG,GAAG,EAA2B,CAAA;YACvC,IAAI,GAAG,IAAI,GAAG,CAAC,IAAI,KAAK,QAAQ,EAAE;gBAChC,MAAM,EAAE,CAAA;aACT;YACD,OAAM;SACP;KACF;IAED,IAAI;QACF,IAAI,CAAC,SAAS,CAAC,IAAI,EAAE,IAAI,CAAC,CAAA;QAC1B,OAAO,IAAI,IAAI,IAAI,CAAA;KACpB;IAAC,OAAO,EAAE,EAAE;QACX,MAAM,GAAG,GAAG,EAA2B,CAAA;QACvC,IAAI,GAAG,IAAI,GAAG,CAAC,IAAI,KAAK,QAAQ,EAAE;YAChC,OAAO,gBAAgB,CAAC,IAAI,EAAE,IAAI,EAAE,gBAAgB,CAAC,MAAM,EAAE,IAAI,EAAE,IAAI,CAAC,CAAC,CAAA;SAC1E;QACD,IAAI,GAAG,IAAI,GAAG,CAAC,IAAI,KAAK,QAAQ,IAAI,GAAG,IAAI,GAAG,CAAC,IAAI,KAAK,OAAO,EAAE;YAC/D,MAAM,EAAE,CAAA;SACT;QACD,IAAI;YACF,IAAI,CAAC,IAAI,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,WAAW,EAAE;gBAAE,MAAM,EAAE,CAAA;SACjD;QAAC,OAAO,CAAC,EAAE;YACV,MAAM,EAAE,CAAA;SACT;KACF;AACH,CAAC,CAAA;AAED,MAAM,CAAC,MAAM,YAAY,GAAG,MAAM,CAAC,MAAM,CACvC,KAAK,EACH,IAAY,EACZ,OAAuB,EACvB,IAAgC,EACI,EAAE;IACtC,MAAM,IAAI,GAAG,OAAO,CAAC,OAAO,CAAC,CAAA;IAC7B,IAAI,CAAC,SAAS,GAAG,KAAK,CAAA;IACtB,MAAM,MAAM,GAAG,OAAO,CAAC,IAAI,CAAC,CAAA;IAC5B,IAAI,MAAM,KAAK,IAAI,EAAE;QACnB,OAAO,IAAI,CAAC,UAAU,CAAC,IAAI,EAAE,IAAI,CAAC,CAAC,KAAK,CAAC,EAAE,CAAC,EAAE;YAC5C,yDAAyD;YACzD,+BAA+B;YAC/B,MAAM,GAAG,GAAG,EAA2B,CAAA;YACvC,IAAI,GAAG,IAAI,GAAG,CAAC,IAAI,KAAK,QAAQ,EAAE;gBAChC,MAAM,EAAE,CAAA;aACT;QACH,CAAC,CAAC,CAAA;KACH;IAED,OAAO,IAAI,CAAC,UAAU,CAAC,IAAI,EAAE,IAAI,CAAC,CAAC,IAAI,CACrC,GAAG,EAAE,CAAC,IAAI,IAAI,IAAI,EAClB,KAAK,EAAC,EAAE,EAAC,EAAE;QACT,MAAM,GAAG,GAAG,EAA2B,CAAA;QACvC,IAAI,GAAG,IAAI,GAAG,CAAC,IAAI,KAAK,QAAQ,EAAE;YAChC,OAAO,YAAY,CAAC,MAAM,EAAE,IAAI,CAAC,CAAC,IAAI,CACpC,CAAC,IAAgC,EAAE,EAAE,CAAC,YAAY,CAAC,IAAI,EAAE,IAAI,EAAE,IAAI,CAAC,CACrE,CAAA;SACF;QACD,IAAI,GAAG,IAAI,GAAG,CAAC,IAAI,KAAK,QAAQ,IAAI,GAAG,CAAC,IAAI,KAAK,OAAO,EAAE;YACxD,MAAM,EAAE,CAAA;SACT;QACD,OAAO,IAAI,CAAC,SAAS,CAAC,IAAI,CAAC,CAAC,IAAI,CAC9B,EAAE,CAAC,EAAE;YACH,IAAI,EAAE,CAAC,WAAW,EAAE,EAAE;gBACpB,OAAO,IAAI,CAAA;aACZ;iBAAM;gBACL,MAAM,EAAE,CAAA;aACT;QACH,CAAC,EACD,GAAG,EAAE;YACH,MAAM,EAAE,CAAA;QACV,CAAC,CACF,CAAA;IACH,CAAC
,CACF,CAAA;AACH,CAAC,EACD,EAAE,IAAI,EAAE,gBAAgB,EAAE,CAC3B,CAAA","sourcesContent":["import { dirname } from 'path'\nimport { MkdirpOptions, optsArg } from './opts-arg.js'\n\nexport const mkdirpManualSync = (\n path: string,\n options?: MkdirpOptions,\n made?: string | undefined | void\n): string | undefined | void => {\n const parent = dirname(path)\n const opts = { ...optsArg(options), recursive: false }\n\n if (parent === path) {\n try {\n return opts.mkdirSync(path, opts)\n } catch (er) {\n // swallowed by recursive implementation on posix systems\n // any other error is a failure\n const fer = er as NodeJS.ErrnoException\n if (fer && fer.code !== 'EISDIR') {\n throw er\n }\n return\n }\n }\n\n try {\n opts.mkdirSync(path, opts)\n return made || path\n } catch (er) {\n const fer = er as NodeJS.ErrnoException\n if (fer && fer.code === 'ENOENT') {\n return mkdirpManualSync(path, opts, mkdirpManualSync(parent, opts, made))\n }\n if (fer && fer.code !== 'EEXIST' && fer && fer.code !== 'EROFS') {\n throw er\n }\n try {\n if (!opts.statSync(path).isDirectory()) throw er\n } catch (_) {\n throw er\n }\n }\n}\n\nexport const mkdirpManual = Object.assign(\n async (\n path: string,\n options?: MkdirpOptions,\n made?: string | undefined | void\n ): Promise => {\n const opts = optsArg(options)\n opts.recursive = false\n const parent = dirname(path)\n if (parent === path) {\n return opts.mkdirAsync(path, opts).catch(er => {\n // swallowed by recursive implementation on posix systems\n // any other error is a failure\n const fer = er as NodeJS.ErrnoException\n if (fer && fer.code !== 'EISDIR') {\n throw er\n }\n })\n }\n\n return opts.mkdirAsync(path, opts).then(\n () => made || path,\n async er => {\n const fer = er as NodeJS.ErrnoException\n if (fer && fer.code === 'ENOENT') {\n return mkdirpManual(parent, opts).then(\n (made?: string | undefined | void) => mkdirpManual(path, opts, made)\n )\n }\n if (fer && fer.code !== 'EEXIST' && fer.code !== 'EROFS') {\n throw er\n }\n return opts.statAsync(path).then(\n st => {\n if (st.isDirectory()) {\n return made\n } else {\n throw er\n }\n },\n () => {\n throw er\n }\n )\n }\n )\n },\n { sync: mkdirpManualSync }\n)\n"]} \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/mkdirp-native.d.ts b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/mkdirp-native.d.ts new file mode 100644 index 00000000000000..28b64814b2545a --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/mkdirp-native.d.ts @@ -0,0 +1,6 @@ +import { MkdirpOptions } from './opts-arg.js'; +export declare const mkdirpNativeSync: (path: string, options?: MkdirpOptions) => string | void | undefined; +export declare const mkdirpNative: ((path: string, options?: MkdirpOptions) => Promise) & { + sync: (path: string, options?: MkdirpOptions) => string | void | undefined; +}; +//# sourceMappingURL=mkdirp-native.d.ts.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/mkdirp-native.d.ts.map b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/mkdirp-native.d.ts.map new file mode 100644 index 00000000000000..517dfabe7d1213 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/mkdirp-native.d.ts.map @@ -0,0 +1 @@ 
+{"version":3,"file":"mkdirp-native.d.ts","sourceRoot":"","sources":["../../src/mkdirp-native.ts"],"names":[],"mappings":"AAGA,OAAO,EAAE,aAAa,EAAW,MAAM,eAAe,CAAA;AAEtD,eAAO,MAAM,gBAAgB,SACrB,MAAM,YACF,aAAa,KACtB,MAAM,GAAG,IAAI,GAAG,SAoBlB,CAAA;AAED,eAAO,MAAM,YAAY,UAEf,MAAM,YACF,aAAa,KACtB,QAAQ,MAAM,GAAG,IAAI,GAAG,SAAS,CAAC;iBA5B/B,MAAM,YACF,aAAa,KACtB,MAAM,GAAG,IAAI,GAAG,SAAS;CAgD3B,CAAA"} \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/mkdirp-native.js b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/mkdirp-native.js new file mode 100644 index 00000000000000..99d10a5425dade --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/mkdirp-native.js @@ -0,0 +1,46 @@ +import { dirname } from 'path'; +import { findMade, findMadeSync } from './find-made.js'; +import { mkdirpManual, mkdirpManualSync } from './mkdirp-manual.js'; +import { optsArg } from './opts-arg.js'; +export const mkdirpNativeSync = (path, options) => { + const opts = optsArg(options); + opts.recursive = true; + const parent = dirname(path); + if (parent === path) { + return opts.mkdirSync(path, opts); + } + const made = findMadeSync(opts, path); + try { + opts.mkdirSync(path, opts); + return made; + } + catch (er) { + const fer = er; + if (fer && fer.code === 'ENOENT') { + return mkdirpManualSync(path, opts); + } + else { + throw er; + } + } +}; +export const mkdirpNative = Object.assign(async (path, options) => { + const opts = { ...optsArg(options), recursive: true }; + const parent = dirname(path); + if (parent === path) { + return await opts.mkdirAsync(path, opts); + } + return findMade(opts, path).then((made) => opts + .mkdirAsync(path, opts) + .then(m => made || m) + .catch(er => { + const fer = er; + if (fer && fer.code === 'ENOENT') { + return mkdirpManual(path, opts); + } + else { + throw er; + } + })); +}, { sync: mkdirpNativeSync }); +//# sourceMappingURL=mkdirp-native.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/mkdirp-native.js.map b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/mkdirp-native.js.map new file mode 100644 index 00000000000000..27de32d9436d67 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/mkdirp-native.js.map @@ -0,0 +1 @@ 
+{"version":3,"file":"mkdirp-native.js","sourceRoot":"","sources":["../../src/mkdirp-native.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,OAAO,EAAE,MAAM,MAAM,CAAA;AAC9B,OAAO,EAAE,QAAQ,EAAE,YAAY,EAAE,MAAM,gBAAgB,CAAA;AACvD,OAAO,EAAE,YAAY,EAAE,gBAAgB,EAAE,MAAM,oBAAoB,CAAA;AACnE,OAAO,EAAiB,OAAO,EAAE,MAAM,eAAe,CAAA;AAEtD,MAAM,CAAC,MAAM,gBAAgB,GAAG,CAC9B,IAAY,EACZ,OAAuB,EACI,EAAE;IAC7B,MAAM,IAAI,GAAG,OAAO,CAAC,OAAO,CAAC,CAAA;IAC7B,IAAI,CAAC,SAAS,GAAG,IAAI,CAAA;IACrB,MAAM,MAAM,GAAG,OAAO,CAAC,IAAI,CAAC,CAAA;IAC5B,IAAI,MAAM,KAAK,IAAI,EAAE;QACnB,OAAO,IAAI,CAAC,SAAS,CAAC,IAAI,EAAE,IAAI,CAAC,CAAA;KAClC;IAED,MAAM,IAAI,GAAG,YAAY,CAAC,IAAI,EAAE,IAAI,CAAC,CAAA;IACrC,IAAI;QACF,IAAI,CAAC,SAAS,CAAC,IAAI,EAAE,IAAI,CAAC,CAAA;QAC1B,OAAO,IAAI,CAAA;KACZ;IAAC,OAAO,EAAE,EAAE;QACX,MAAM,GAAG,GAAG,EAA2B,CAAA;QACvC,IAAI,GAAG,IAAI,GAAG,CAAC,IAAI,KAAK,QAAQ,EAAE;YAChC,OAAO,gBAAgB,CAAC,IAAI,EAAE,IAAI,CAAC,CAAA;SACpC;aAAM;YACL,MAAM,EAAE,CAAA;SACT;KACF;AACH,CAAC,CAAA;AAED,MAAM,CAAC,MAAM,YAAY,GAAG,MAAM,CAAC,MAAM,CACvC,KAAK,EACH,IAAY,EACZ,OAAuB,EACa,EAAE;IACtC,MAAM,IAAI,GAAG,EAAE,GAAG,OAAO,CAAC,OAAO,CAAC,EAAE,SAAS,EAAE,IAAI,EAAE,CAAA;IACrD,MAAM,MAAM,GAAG,OAAO,CAAC,IAAI,CAAC,CAAA;IAC5B,IAAI,MAAM,KAAK,IAAI,EAAE;QACnB,OAAO,MAAM,IAAI,CAAC,UAAU,CAAC,IAAI,EAAE,IAAI,CAAC,CAAA;KACzC;IAED,OAAO,QAAQ,CAAC,IAAI,EAAE,IAAI,CAAC,CAAC,IAAI,CAAC,CAAC,IAAyB,EAAE,EAAE,CAC7D,IAAI;SACD,UAAU,CAAC,IAAI,EAAE,IAAI,CAAC;SACtB,IAAI,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI,IAAI,CAAC,CAAC;SACpB,KAAK,CAAC,EAAE,CAAC,EAAE;QACV,MAAM,GAAG,GAAG,EAA2B,CAAA;QACvC,IAAI,GAAG,IAAI,GAAG,CAAC,IAAI,KAAK,QAAQ,EAAE;YAChC,OAAO,YAAY,CAAC,IAAI,EAAE,IAAI,CAAC,CAAA;SAChC;aAAM;YACL,MAAM,EAAE,CAAA;SACT;IACH,CAAC,CAAC,CACL,CAAA;AACH,CAAC,EACD,EAAE,IAAI,EAAE,gBAAgB,EAAE,CAC3B,CAAA","sourcesContent":["import { dirname } from 'path'\nimport { findMade, findMadeSync } from './find-made.js'\nimport { mkdirpManual, mkdirpManualSync } from './mkdirp-manual.js'\nimport { MkdirpOptions, optsArg } from './opts-arg.js'\n\nexport const mkdirpNativeSync = (\n path: string,\n options?: MkdirpOptions\n): string | void | undefined => {\n const opts = optsArg(options)\n opts.recursive = true\n const parent = dirname(path)\n if (parent === path) {\n return opts.mkdirSync(path, opts)\n }\n\n const made = findMadeSync(opts, path)\n try {\n opts.mkdirSync(path, opts)\n return made\n } catch (er) {\n const fer = er as NodeJS.ErrnoException\n if (fer && fer.code === 'ENOENT') {\n return mkdirpManualSync(path, opts)\n } else {\n throw er\n }\n }\n}\n\nexport const mkdirpNative = Object.assign(\n async (\n path: string,\n options?: MkdirpOptions\n ): Promise => {\n const opts = { ...optsArg(options), recursive: true }\n const parent = dirname(path)\n if (parent === path) {\n return await opts.mkdirAsync(path, opts)\n }\n\n return findMade(opts, path).then((made?: string | undefined) =>\n opts\n .mkdirAsync(path, opts)\n .then(m => made || m)\n .catch(er => {\n const fer = er as NodeJS.ErrnoException\n if (fer && fer.code === 'ENOENT') {\n return mkdirpManual(path, opts)\n } else {\n throw er\n }\n })\n )\n },\n { sync: mkdirpNativeSync }\n)\n"]} \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/opts-arg.d.ts b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/opts-arg.d.ts new file mode 100644 index 00000000000000..73d076b3b6923c --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/opts-arg.d.ts @@ -0,0 +1,42 @@ +/// +/// +import { MakeDirectoryOptions, Stats } from 'fs'; +export interface FsProvider { + stat?: (path: string, 
callback: (err: NodeJS.ErrnoException | null, stats: Stats) => any) => any; + mkdir?: (path: string, opts: MakeDirectoryOptions & { + recursive?: boolean; + }, callback: (err: NodeJS.ErrnoException | null, made?: string) => any) => any; + statSync?: (path: string) => Stats; + mkdirSync?: (path: string, opts: MakeDirectoryOptions & { + recursive?: boolean; + }) => string | undefined; +} +interface Options extends FsProvider { + mode?: number | string; + fs?: FsProvider; + mkdirAsync?: (path: string, opts: MakeDirectoryOptions & { + recursive?: boolean; + }) => Promise; + statAsync?: (path: string) => Promise; +} +export type MkdirpOptions = Options | number | string; +export interface MkdirpOptionsResolved { + mode: number; + fs: FsProvider; + mkdirAsync: (path: string, opts: MakeDirectoryOptions & { + recursive?: boolean; + }) => Promise; + statAsync: (path: string) => Promise; + stat: (path: string, callback: (err: NodeJS.ErrnoException | null, stats: Stats) => any) => any; + mkdir: (path: string, opts: MakeDirectoryOptions & { + recursive?: boolean; + }, callback: (err: NodeJS.ErrnoException | null, made?: string) => any) => any; + statSync: (path: string) => Stats; + mkdirSync: (path: string, opts: MakeDirectoryOptions & { + recursive?: boolean; + }) => string | undefined; + recursive?: boolean; +} +export declare const optsArg: (opts?: MkdirpOptions) => MkdirpOptionsResolved; +export {}; +//# sourceMappingURL=opts-arg.d.ts.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/opts-arg.d.ts.map b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/opts-arg.d.ts.map new file mode 100644 index 00000000000000..717deb5f9cb0c6 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/opts-arg.d.ts.map @@ -0,0 +1 @@ 
+{"version":3,"file":"opts-arg.d.ts","sourceRoot":"","sources":["../../src/opts-arg.ts"],"names":[],"mappings":";;AAAA,OAAO,EACL,oBAAoB,EAIpB,KAAK,EAEN,MAAM,IAAI,CAAA;AAEX,MAAM,WAAW,UAAU;IACzB,IAAI,CAAC,EAAE,CACL,IAAI,EAAE,MAAM,EACZ,QAAQ,EAAE,CAAC,GAAG,EAAE,MAAM,CAAC,cAAc,GAAG,IAAI,EAAE,KAAK,EAAE,KAAK,KAAK,GAAG,KAC/D,GAAG,CAAA;IACR,KAAK,CAAC,EAAE,CACN,IAAI,EAAE,MAAM,EACZ,IAAI,EAAE,oBAAoB,GAAG;QAAE,SAAS,CAAC,EAAE,OAAO,CAAA;KAAE,EACpD,QAAQ,EAAE,CAAC,GAAG,EAAE,MAAM,CAAC,cAAc,GAAG,IAAI,EAAE,IAAI,CAAC,EAAE,MAAM,KAAK,GAAG,KAChE,GAAG,CAAA;IACR,QAAQ,CAAC,EAAE,CAAC,IAAI,EAAE,MAAM,KAAK,KAAK,CAAA;IAClC,SAAS,CAAC,EAAE,CACV,IAAI,EAAE,MAAM,EACZ,IAAI,EAAE,oBAAoB,GAAG;QAAE,SAAS,CAAC,EAAE,OAAO,CAAA;KAAE,KACjD,MAAM,GAAG,SAAS,CAAA;CACxB;AAED,UAAU,OAAQ,SAAQ,UAAU;IAClC,IAAI,CAAC,EAAE,MAAM,GAAG,MAAM,CAAA;IACtB,EAAE,CAAC,EAAE,UAAU,CAAA;IACf,UAAU,CAAC,EAAE,CACX,IAAI,EAAE,MAAM,EACZ,IAAI,EAAE,oBAAoB,GAAG;QAAE,SAAS,CAAC,EAAE,OAAO,CAAA;KAAE,KACjD,OAAO,CAAC,MAAM,GAAG,SAAS,CAAC,CAAA;IAChC,SAAS,CAAC,EAAE,CAAC,IAAI,EAAE,MAAM,KAAK,OAAO,CAAC,KAAK,CAAC,CAAA;CAC7C;AAED,MAAM,MAAM,aAAa,GAAG,OAAO,GAAG,MAAM,GAAG,MAAM,CAAA;AAErD,MAAM,WAAW,qBAAqB;IACpC,IAAI,EAAE,MAAM,CAAA;IACZ,EAAE,EAAE,UAAU,CAAA;IACd,UAAU,EAAE,CACV,IAAI,EAAE,MAAM,EACZ,IAAI,EAAE,oBAAoB,GAAG;QAAE,SAAS,CAAC,EAAE,OAAO,CAAA;KAAE,KACjD,OAAO,CAAC,MAAM,GAAG,SAAS,CAAC,CAAA;IAChC,SAAS,EAAE,CAAC,IAAI,EAAE,MAAM,KAAK,OAAO,CAAC,KAAK,CAAC,CAAA;IAC3C,IAAI,EAAE,CACJ,IAAI,EAAE,MAAM,EACZ,QAAQ,EAAE,CAAC,GAAG,EAAE,MAAM,CAAC,cAAc,GAAG,IAAI,EAAE,KAAK,EAAE,KAAK,KAAK,GAAG,KAC/D,GAAG,CAAA;IACR,KAAK,EAAE,CACL,IAAI,EAAE,MAAM,EACZ,IAAI,EAAE,oBAAoB,GAAG;QAAE,SAAS,CAAC,EAAE,OAAO,CAAA;KAAE,EACpD,QAAQ,EAAE,CAAC,GAAG,EAAE,MAAM,CAAC,cAAc,GAAG,IAAI,EAAE,IAAI,CAAC,EAAE,MAAM,KAAK,GAAG,KAChE,GAAG,CAAA;IACR,QAAQ,EAAE,CAAC,IAAI,EAAE,MAAM,KAAK,KAAK,CAAA;IACjC,SAAS,EAAE,CACT,IAAI,EAAE,MAAM,EACZ,IAAI,EAAE,oBAAoB,GAAG;QAAE,SAAS,CAAC,EAAE,OAAO,CAAA;KAAE,KACjD,MAAM,GAAG,SAAS,CAAA;IACvB,SAAS,CAAC,EAAE,OAAO,CAAA;CACpB;AAED,eAAO,MAAM,OAAO,UAAW,aAAa,KAAG,qBA2C9C,CAAA"} \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/opts-arg.js b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/opts-arg.js new file mode 100644 index 00000000000000..d47e2927fee4c0 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/opts-arg.js @@ -0,0 +1,34 @@ +import { mkdir, mkdirSync, stat, statSync, } from 'fs'; +export const optsArg = (opts) => { + if (!opts) { + opts = { mode: 0o777 }; + } + else if (typeof opts === 'object') { + opts = { mode: 0o777, ...opts }; + } + else if (typeof opts === 'number') { + opts = { mode: opts }; + } + else if (typeof opts === 'string') { + opts = { mode: parseInt(opts, 8) }; + } + else { + throw new TypeError('invalid options argument'); + } + const resolved = opts; + const optsFs = opts.fs || {}; + opts.mkdir = opts.mkdir || optsFs.mkdir || mkdir; + opts.mkdirAsync = opts.mkdirAsync + ? opts.mkdirAsync + : async (path, options) => { + return new Promise((res, rej) => resolved.mkdir(path, options, (er, made) => er ? rej(er) : res(made))); + }; + opts.stat = opts.stat || optsFs.stat || stat; + opts.statAsync = opts.statAsync + ? opts.statAsync + : async (path) => new Promise((res, rej) => resolved.stat(path, (err, stats) => (err ? 
rej(err) : res(stats)))); + opts.statSync = opts.statSync || optsFs.statSync || statSync; + opts.mkdirSync = opts.mkdirSync || optsFs.mkdirSync || mkdirSync; + return resolved; +}; +//# sourceMappingURL=opts-arg.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/opts-arg.js.map b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/opts-arg.js.map new file mode 100644 index 00000000000000..663286dc7212ed --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/opts-arg.js.map @@ -0,0 +1 @@ +{"version":3,"file":"opts-arg.js","sourceRoot":"","sources":["../../src/opts-arg.ts"],"names":[],"mappings":"AAAA,OAAO,EAEL,KAAK,EACL,SAAS,EACT,IAAI,EAEJ,QAAQ,GACT,MAAM,IAAI,CAAA;AAwDX,MAAM,CAAC,MAAM,OAAO,GAAG,CAAC,IAAoB,EAAyB,EAAE;IACrE,IAAI,CAAC,IAAI,EAAE;QACT,IAAI,GAAG,EAAE,IAAI,EAAE,KAAK,EAAE,CAAA;KACvB;SAAM,IAAI,OAAO,IAAI,KAAK,QAAQ,EAAE;QACnC,IAAI,GAAG,EAAE,IAAI,EAAE,KAAK,EAAE,GAAG,IAAI,EAAE,CAAA;KAChC;SAAM,IAAI,OAAO,IAAI,KAAK,QAAQ,EAAE;QACnC,IAAI,GAAG,EAAE,IAAI,EAAE,IAAI,EAAE,CAAA;KACtB;SAAM,IAAI,OAAO,IAAI,KAAK,QAAQ,EAAE;QACnC,IAAI,GAAG,EAAE,IAAI,EAAE,QAAQ,CAAC,IAAI,EAAE,CAAC,CAAC,EAAE,CAAA;KACnC;SAAM;QACL,MAAM,IAAI,SAAS,CAAC,0BAA0B,CAAC,CAAA;KAChD;IAED,MAAM,QAAQ,GAAG,IAA6B,CAAA;IAC9C,MAAM,MAAM,GAAG,IAAI,CAAC,EAAE,IAAI,EAAE,CAAA;IAE5B,IAAI,CAAC,KAAK,GAAG,IAAI,CAAC,KAAK,IAAI,MAAM,CAAC,KAAK,IAAI,KAAK,CAAA;IAEhD,IAAI,CAAC,UAAU,GAAG,IAAI,CAAC,UAAU;QAC/B,CAAC,CAAC,IAAI,CAAC,UAAU;QACjB,CAAC,CAAC,KAAK,EACH,IAAY,EACZ,OAAuD,EAC1B,EAAE;YAC/B,OAAO,IAAI,OAAO,CAAqB,CAAC,GAAG,EAAE,GAAG,EAAE,EAAE,CAClD,QAAQ,CAAC,KAAK,CAAC,IAAI,EAAE,OAAO,EAAE,CAAC,EAAE,EAAE,IAAI,EAAE,EAAE,CACzC,EAAE,CAAC,CAAC,CAAC,GAAG,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,IAAI,CAAC,CACzB,CACF,CAAA;QACH,CAAC,CAAA;IAEL,IAAI,CAAC,IAAI,GAAG,IAAI,CAAC,IAAI,IAAI,MAAM,CAAC,IAAI,IAAI,IAAI,CAAA;IAC5C,IAAI,CAAC,SAAS,GAAG,IAAI,CAAC,SAAS;QAC7B,CAAC,CAAC,IAAI,CAAC,SAAS;QAChB,CAAC,CAAC,KAAK,EAAE,IAAY,EAAE,EAAE,CACrB,IAAI,OAAO,CAAC,CAAC,GAAG,EAAE,GAAG,EAAE,EAAE,CACvB,QAAQ,CAAC,IAAI,CAAC,IAAI,EAAE,CAAC,GAAG,EAAE,KAAK,EAAE,EAAE,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,KAAK,CAAC,CAAC,CAAC,CACnE,CAAA;IAEP,IAAI,CAAC,QAAQ,GAAG,IAAI,CAAC,QAAQ,IAAI,MAAM,CAAC,QAAQ,IAAI,QAAQ,CAAA;IAC5D,IAAI,CAAC,SAAS,GAAG,IAAI,CAAC,SAAS,IAAI,MAAM,CAAC,SAAS,IAAI,SAAS,CAAA;IAEhE,OAAO,QAAQ,CAAA;AACjB,CAAC,CAAA","sourcesContent":["import {\n MakeDirectoryOptions,\n mkdir,\n mkdirSync,\n stat,\n Stats,\n statSync,\n} from 'fs'\n\nexport interface FsProvider {\n stat?: (\n path: string,\n callback: (err: NodeJS.ErrnoException | null, stats: Stats) => any\n ) => any\n mkdir?: (\n path: string,\n opts: MakeDirectoryOptions & { recursive?: boolean },\n callback: (err: NodeJS.ErrnoException | null, made?: string) => any\n ) => any\n statSync?: (path: string) => Stats\n mkdirSync?: (\n path: string,\n opts: MakeDirectoryOptions & { recursive?: boolean }\n ) => string | undefined\n}\n\ninterface Options extends FsProvider {\n mode?: number | string\n fs?: FsProvider\n mkdirAsync?: (\n path: string,\n opts: MakeDirectoryOptions & { recursive?: boolean }\n ) => Promise\n statAsync?: (path: string) => Promise\n}\n\nexport type MkdirpOptions = Options | number | string\n\nexport interface MkdirpOptionsResolved {\n mode: number\n fs: FsProvider\n mkdirAsync: (\n path: string,\n opts: MakeDirectoryOptions & { recursive?: boolean }\n ) => Promise\n statAsync: (path: string) => Promise\n stat: (\n path: string,\n callback: (err: NodeJS.ErrnoException | null, stats: Stats) => any\n ) => 
any\n mkdir: (\n path: string,\n opts: MakeDirectoryOptions & { recursive?: boolean },\n callback: (err: NodeJS.ErrnoException | null, made?: string) => any\n ) => any\n statSync: (path: string) => Stats\n mkdirSync: (\n path: string,\n opts: MakeDirectoryOptions & { recursive?: boolean }\n ) => string | undefined\n recursive?: boolean\n}\n\nexport const optsArg = (opts?: MkdirpOptions): MkdirpOptionsResolved => {\n if (!opts) {\n opts = { mode: 0o777 }\n } else if (typeof opts === 'object') {\n opts = { mode: 0o777, ...opts }\n } else if (typeof opts === 'number') {\n opts = { mode: opts }\n } else if (typeof opts === 'string') {\n opts = { mode: parseInt(opts, 8) }\n } else {\n throw new TypeError('invalid options argument')\n }\n\n const resolved = opts as MkdirpOptionsResolved\n const optsFs = opts.fs || {}\n\n opts.mkdir = opts.mkdir || optsFs.mkdir || mkdir\n\n opts.mkdirAsync = opts.mkdirAsync\n ? opts.mkdirAsync\n : async (\n path: string,\n options: MakeDirectoryOptions & { recursive?: boolean }\n ): Promise => {\n return new Promise((res, rej) =>\n resolved.mkdir(path, options, (er, made) =>\n er ? rej(er) : res(made)\n )\n )\n }\n\n opts.stat = opts.stat || optsFs.stat || stat\n opts.statAsync = opts.statAsync\n ? opts.statAsync\n : async (path: string) =>\n new Promise((res, rej) =>\n resolved.stat(path, (err, stats) => (err ? rej(err) : res(stats)))\n )\n\n opts.statSync = opts.statSync || optsFs.statSync || statSync\n opts.mkdirSync = opts.mkdirSync || optsFs.mkdirSync || mkdirSync\n\n return resolved\n}\n"]} \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/package.json b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/package.json new file mode 100644 index 00000000000000..3dbc1ca591c055 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/package.json @@ -0,0 +1,3 @@ +{ + "type": "module" +} diff --git a/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/path-arg.d.ts b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/path-arg.d.ts new file mode 100644 index 00000000000000..ad0ccfc482a485 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/path-arg.d.ts @@ -0,0 +1,2 @@ +export declare const pathArg: (path: string) => string; +//# sourceMappingURL=path-arg.d.ts.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/path-arg.d.ts.map b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/path-arg.d.ts.map new file mode 100644 index 00000000000000..801799e766fabc --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/path-arg.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"path-arg.d.ts","sourceRoot":"","sources":["../../src/path-arg.ts"],"names":[],"mappings":"AAEA,eAAO,MAAM,OAAO,SAAU,MAAM,WAyBnC,CAAA"} \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/path-arg.js b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/path-arg.js new file mode 100644 index 00000000000000..03539cc5a94f98 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/path-arg.js @@ -0,0 +1,24 @@ +const platform = process.env.__TESTING_MKDIRP_PLATFORM__ || process.platform; +import { parse, resolve } from 'path'; +export const pathArg = (path) => { + if (/\0/.test(path)) { + // simulate same failure that node raises + throw Object.assign(new TypeError('path must be a string without null bytes'), { + path, + 
code: 'ERR_INVALID_ARG_VALUE', + }); + } + path = resolve(path); + if (platform === 'win32') { + const badWinChars = /[*|"<>?:]/; + const { root } = parse(path); + if (badWinChars.test(path.substring(root.length))) { + throw Object.assign(new Error('Illegal characters in path.'), { + path, + code: 'EINVAL', + }); + } + } + return path; +}; +//# sourceMappingURL=path-arg.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/path-arg.js.map b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/path-arg.js.map new file mode 100644 index 00000000000000..43efe1e3a9976f --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/path-arg.js.map @@ -0,0 +1 @@ +{"version":3,"file":"path-arg.js","sourceRoot":"","sources":["../../src/path-arg.ts"],"names":[],"mappings":"AAAA,MAAM,QAAQ,GAAG,OAAO,CAAC,GAAG,CAAC,2BAA2B,IAAI,OAAO,CAAC,QAAQ,CAAA;AAC5E,OAAO,EAAE,KAAK,EAAE,OAAO,EAAE,MAAM,MAAM,CAAA;AACrC,MAAM,CAAC,MAAM,OAAO,GAAG,CAAC,IAAY,EAAE,EAAE;IACtC,IAAI,IAAI,CAAC,IAAI,CAAC,IAAI,CAAC,EAAE;QACnB,yCAAyC;QACzC,MAAM,MAAM,CAAC,MAAM,CACjB,IAAI,SAAS,CAAC,0CAA0C,CAAC,EACzD;YACE,IAAI;YACJ,IAAI,EAAE,uBAAuB;SAC9B,CACF,CAAA;KACF;IAED,IAAI,GAAG,OAAO,CAAC,IAAI,CAAC,CAAA;IACpB,IAAI,QAAQ,KAAK,OAAO,EAAE;QACxB,MAAM,WAAW,GAAG,WAAW,CAAA;QAC/B,MAAM,EAAE,IAAI,EAAE,GAAG,KAAK,CAAC,IAAI,CAAC,CAAA;QAC5B,IAAI,WAAW,CAAC,IAAI,CAAC,IAAI,CAAC,SAAS,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC,EAAE;YACjD,MAAM,MAAM,CAAC,MAAM,CAAC,IAAI,KAAK,CAAC,6BAA6B,CAAC,EAAE;gBAC5D,IAAI;gBACJ,IAAI,EAAE,QAAQ;aACf,CAAC,CAAA;SACH;KACF;IAED,OAAO,IAAI,CAAA;AACb,CAAC,CAAA","sourcesContent":["const platform = process.env.__TESTING_MKDIRP_PLATFORM__ || process.platform\nimport { parse, resolve } from 'path'\nexport const pathArg = (path: string) => {\n if (/\\0/.test(path)) {\n // simulate same failure that node raises\n throw Object.assign(\n new TypeError('path must be a string without null bytes'),\n {\n path,\n code: 'ERR_INVALID_ARG_VALUE',\n }\n )\n }\n\n path = resolve(path)\n if (platform === 'win32') {\n const badWinChars = /[*|\"<>?:]/\n const { root } = parse(path)\n if (badWinChars.test(path.substring(root.length))) {\n throw Object.assign(new Error('Illegal characters in path.'), {\n path,\n code: 'EINVAL',\n })\n }\n }\n\n return path\n}\n"]} \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/use-native.d.ts b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/use-native.d.ts new file mode 100644 index 00000000000000..1c6cb619e30405 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/use-native.d.ts @@ -0,0 +1,6 @@ +import { MkdirpOptions } from './opts-arg.js'; +export declare const useNativeSync: (opts?: MkdirpOptions) => boolean; +export declare const useNative: ((opts?: MkdirpOptions) => boolean) & { + sync: (opts?: MkdirpOptions) => boolean; +}; +//# sourceMappingURL=use-native.d.ts.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/use-native.d.ts.map b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/use-native.d.ts.map new file mode 100644 index 00000000000000..e2484228a04472 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/use-native.d.ts.map @@ -0,0 +1 @@ 
+{"version":3,"file":"use-native.d.ts","sourceRoot":"","sources":["../../src/use-native.ts"],"names":[],"mappings":"AACA,OAAO,EAAE,aAAa,EAAW,MAAM,eAAe,CAAA;AAMtD,eAAO,MAAM,aAAa,UAEd,aAAa,YAA0C,CAAA;AAEnE,eAAO,MAAM,SAAS,WAGR,aAAa;kBALf,aAAa;CASxB,CAAA"} \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/use-native.js b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/use-native.js new file mode 100644 index 00000000000000..ad2093867eb74e --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/use-native.js @@ -0,0 +1,14 @@ +import { mkdir, mkdirSync } from 'fs'; +import { optsArg } from './opts-arg.js'; +const version = process.env.__TESTING_MKDIRP_NODE_VERSION__ || process.version; +const versArr = version.replace(/^v/, '').split('.'); +const hasNative = +versArr[0] > 10 || (+versArr[0] === 10 && +versArr[1] >= 12); +export const useNativeSync = !hasNative + ? () => false + : (opts) => optsArg(opts).mkdirSync === mkdirSync; +export const useNative = Object.assign(!hasNative + ? () => false + : (opts) => optsArg(opts).mkdir === mkdir, { + sync: useNativeSync, +}); +//# sourceMappingURL=use-native.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/use-native.js.map b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/use-native.js.map new file mode 100644 index 00000000000000..08c616d365510f --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/dist/mjs/use-native.js.map @@ -0,0 +1 @@ +{"version":3,"file":"use-native.js","sourceRoot":"","sources":["../../src/use-native.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,KAAK,EAAE,SAAS,EAAE,MAAM,IAAI,CAAA;AACrC,OAAO,EAAiB,OAAO,EAAE,MAAM,eAAe,CAAA;AAEtD,MAAM,OAAO,GAAG,OAAO,CAAC,GAAG,CAAC,+BAA+B,IAAI,OAAO,CAAC,OAAO,CAAA;AAC9E,MAAM,OAAO,GAAG,OAAO,CAAC,OAAO,CAAC,IAAI,EAAE,EAAE,CAAC,CAAC,KAAK,CAAC,GAAG,CAAC,CAAA;AACpD,MAAM,SAAS,GAAG,CAAC,OAAO,CAAC,CAAC,CAAC,GAAG,EAAE,IAAI,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC,KAAK,EAAE,IAAI,CAAC,OAAO,CAAC,CAAC,CAAC,IAAI,EAAE,CAAC,CAAA;AAE/E,MAAM,CAAC,MAAM,aAAa,GAAG,CAAC,SAAS;IACrC,CAAC,CAAC,GAAG,EAAE,CAAC,KAAK;IACb,CAAC,CAAC,CAAC,IAAoB,EAAE,EAAE,CAAC,OAAO,CAAC,IAAI,CAAC,CAAC,SAAS,KAAK,SAAS,CAAA;AAEnE,MAAM,CAAC,MAAM,SAAS,GAAG,MAAM,CAAC,MAAM,CACpC,CAAC,SAAS;IACR,CAAC,CAAC,GAAG,EAAE,CAAC,KAAK;IACb,CAAC,CAAC,CAAC,IAAoB,EAAE,EAAE,CAAC,OAAO,CAAC,IAAI,CAAC,CAAC,KAAK,KAAK,KAAK,EAC3D;IACE,IAAI,EAAE,aAAa;CACpB,CACF,CAAA","sourcesContent":["import { mkdir, mkdirSync } from 'fs'\nimport { MkdirpOptions, optsArg } from './opts-arg.js'\n\nconst version = process.env.__TESTING_MKDIRP_NODE_VERSION__ || process.version\nconst versArr = version.replace(/^v/, '').split('.')\nconst hasNative = +versArr[0] > 10 || (+versArr[0] === 10 && +versArr[1] >= 12)\n\nexport const useNativeSync = !hasNative\n ? () => false\n : (opts?: MkdirpOptions) => optsArg(opts).mkdirSync === mkdirSync\n\nexport const useNative = Object.assign(\n !hasNative\n ? 
() => false\n : (opts?: MkdirpOptions) => optsArg(opts).mkdir === mkdir,\n {\n sync: useNativeSync,\n }\n)\n"]} \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/mkdirp/package.json b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/package.json new file mode 100644 index 00000000000000..f31ac3314d6f6a --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/package.json @@ -0,0 +1,91 @@ +{ + "name": "mkdirp", + "description": "Recursively mkdir, like `mkdir -p`", + "version": "3.0.1", + "keywords": [ + "mkdir", + "directory", + "make dir", + "make", + "dir", + "recursive", + "native" + ], + "bin": "./dist/cjs/src/bin.js", + "main": "./dist/cjs/src/index.js", + "module": "./dist/mjs/index.js", + "types": "./dist/mjs/index.d.ts", + "exports": { + ".": { + "import": { + "types": "./dist/mjs/index.d.ts", + "default": "./dist/mjs/index.js" + }, + "require": { + "types": "./dist/cjs/src/index.d.ts", + "default": "./dist/cjs/src/index.js" + } + } + }, + "files": [ + "dist" + ], + "scripts": { + "preversion": "npm test", + "postversion": "npm publish", + "prepublishOnly": "git push origin --follow-tags", + "preprepare": "rm -rf dist", + "prepare": "tsc -p tsconfig.json && tsc -p tsconfig-esm.json", + "postprepare": "bash fixup.sh", + "pretest": "npm run prepare", + "presnap": "npm run prepare", + "test": "c8 tap", + "snap": "c8 tap", + "format": "prettier --write . --loglevel warn", + "benchmark": "node benchmark/index.js", + "typedoc": "typedoc --tsconfig tsconfig-esm.json ./src/*.ts" + }, + "prettier": { + "semi": false, + "printWidth": 80, + "tabWidth": 2, + "useTabs": false, + "singleQuote": true, + "jsxSingleQuote": false, + "bracketSameLine": true, + "arrowParens": "avoid", + "endOfLine": "lf" + }, + "devDependencies": { + "@types/brace-expansion": "^1.1.0", + "@types/node": "^18.11.9", + "@types/tap": "^15.0.7", + "c8": "^7.12.0", + "eslint-config-prettier": "^8.6.0", + "prettier": "^2.8.2", + "tap": "^16.3.3", + "ts-node": "^10.9.1", + "typedoc": "^0.23.21", + "typescript": "^4.9.3" + }, + "tap": { + "coverage": false, + "node-arg": [ + "--no-warnings", + "--loader", + "ts-node/esm" + ], + "ts": false + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + }, + "repository": { + "type": "git", + "url": "https://github.com/isaacs/node-mkdirp.git" + }, + "license": "MIT", + "engines": { + "node": ">=10" + } +} diff --git a/deps/npm/node_modules/node-gyp/node_modules/mkdirp/readme.markdown b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/readme.markdown new file mode 100644 index 00000000000000..df654b808755f5 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/mkdirp/readme.markdown @@ -0,0 +1,281 @@ +# mkdirp + +Like `mkdir -p`, but in Node.js! + +Now with a modern API and no\* bugs! 
+ +\* may contain some bugs + +# example + +## pow.js + +```js +// hybrid module, import or require() both work +import { mkdirp } from 'mkdirp' +// or: +const { mkdirp } = require('mkdirp') + +// return value is a Promise resolving to the first directory created +mkdirp('/tmp/foo/bar/baz').then(made => + console.log(`made directories, starting with ${made}`) +) +``` + +Output (where `/tmp/foo` already exists) + +``` +made directories, starting with /tmp/foo/bar +``` + +Or, if you don't have time to wait around for promises: + +```js +import { mkdirp } from 'mkdirp' + +// return value is the first directory created +const made = mkdirp.sync('/tmp/foo/bar/baz') +console.log(`made directories, starting with ${made}`) +``` + +And now /tmp/foo/bar/baz exists, huzzah! + +# methods + +```js +import { mkdirp } from 'mkdirp' +``` + +## `mkdirp(dir: string, opts?: MkdirpOptions) => Promise` + +Create a new directory and any necessary subdirectories at `dir` +with octal permission string `opts.mode`. If `opts` is a string +or number, it will be treated as the `opts.mode`. + +If `opts.mode` isn't specified, it defaults to `0o777`. + +Promise resolves to first directory `made` that had to be +created, or `undefined` if everything already exists. Promise +rejects if any errors are encountered. Note that, in the case of +promise rejection, some directories _may_ have been created, as +recursive directory creation is not an atomic operation. + +You can optionally pass in an alternate `fs` implementation by +passing in `opts.fs`. Your implementation should have +`opts.fs.mkdir(path, opts, cb)` and `opts.fs.stat(path, cb)`. + +You can also override just one or the other of `mkdir` and `stat` +by passing in `opts.stat` or `opts.mkdir`, or providing an `fs` +option that only overrides one of these. + +## `mkdirp.sync(dir: string, opts: MkdirpOptions) => string|undefined` + +Synchronously create a new directory and any necessary +subdirectories at `dir` with octal permission string `opts.mode`. +If `opts` is a string or number, it will be treated as the +`opts.mode`. + +If `opts.mode` isn't specified, it defaults to `0o777`. + +Returns the first directory that had to be created, or undefined +if everything already exists. + +You can optionally pass in an alternate `fs` implementation by +passing in `opts.fs`. Your implementation should have +`opts.fs.mkdirSync(path, mode)` and `opts.fs.statSync(path)`. + +You can also override just one or the other of `mkdirSync` and +`statSync` by passing in `opts.statSync` or `opts.mkdirSync`, or +providing an `fs` option that only overrides one of these. + +## `mkdirp.manual`, `mkdirp.manualSync` + +Use the manual implementation (not the native one). This is the +default when the native implementation is not available or the +stat/mkdir implementation is overridden. + +## `mkdirp.native`, `mkdirp.nativeSync` + +Use the native implementation (not the manual one). This is the +default when the native implementation is available and +stat/mkdir are not overridden. + +# implementation + +On Node.js v10.12.0 and above, use the native `fs.mkdir(p, +{recursive:true})` option, unless `fs.mkdir`/`fs.mkdirSync` has +been overridden by an option. + +## native implementation + +- If the path is a root directory, then pass it to the underlying + implementation and return the result/error. (In this case, + it'll either succeed or fail, but we aren't actually creating + any dirs.) +- Walk up the path statting each directory, to find the first + path that will be created, `made`. 
+- Call `fs.mkdir(path, { recursive: true })` (or `fs.mkdirSync`) +- If error, raise it to the caller. +- Return `made`. + +## manual implementation + +- Call underlying `fs.mkdir` implementation, with `recursive: +false` +- If error: + - If path is a root directory, raise to the caller and do not + handle it + - If ENOENT, mkdirp parent dir, store result as `made` + - stat(path) + - If error, raise original `mkdir` error + - If directory, return `made` + - Else, raise original `mkdir` error +- else + - return `undefined` if a root dir, or `made` if set, or `path` + +## windows vs unix caveat + +On Windows file systems, attempts to create a root directory (ie, +a drive letter or root UNC path) will fail. If the root +directory exists, then it will fail with `EPERM`. If the root +directory does not exist, then it will fail with `ENOENT`. + +On posix file systems, attempts to create a root directory (in +recursive mode) will succeed silently, as it is treated like just +another directory that already exists. (In non-recursive mode, +of course, it fails with `EEXIST`.) + +In order to preserve this system-specific behavior (and because +it's not as if we can create the parent of a root directory +anyway), attempts to create a root directory are passed directly +to the `fs` implementation, and any errors encountered are not +handled. + +## native error caveat + +The native implementation (as of at least Node.js v13.4.0) does +not provide appropriate errors in some cases (see +[nodejs/node#31481](https://github.com/nodejs/node/issues/31481) +and +[nodejs/node#28015](https://github.com/nodejs/node/issues/28015)). + +In order to work around this issue, the native implementation +will fall back to the manual implementation if an `ENOENT` error +is encountered. + +# choosing a recursive mkdir implementation + +There are a few to choose from! Use the one that suits your +needs best :D + +## use `fs.mkdir(path, {recursive: true}, cb)` if: + +- You wish to optimize performance even at the expense of other + factors. +- You don't need to know the first dir created. +- You are ok with getting `ENOENT` as the error when some other + problem is the actual cause. +- You can limit your platforms to Node.js v10.12 and above. +- You're ok with using callbacks instead of promises. +- You don't need/want a CLI. +- You don't need to override the `fs` methods in use. + +## use this module (mkdirp 1.x or 2.x) if: + +- You need to know the first directory that was created. +- You wish to use the native implementation if available, but + fall back when it's not. +- You prefer promise-returning APIs to callback-taking APIs. +- You want more useful error messages than the native recursive + mkdir provides (at least as of Node.js v13.4), and are ok with + re-trying on `ENOENT` to achieve this. +- You need (or at least, are ok with) a CLI. +- You need to override the `fs` methods in use. + +## use [`make-dir`](http://npm.im/make-dir) if: + +- You do not need to know the first dir created (and wish to save + a few `stat` calls when using the native implementation for + this reason). +- You wish to use the native implementation if available, but + fall back when it's not. +- You prefer promise-returning APIs to callback-taking APIs. +- You are ok with occasionally getting `ENOENT` errors for + failures that are actually related to something other than a + missing file system entry. +- You don't need/want a CLI. +- You need to override the `fs` methods in use. 
+ +## use mkdirp 0.x if: + +- You need to know the first directory that was created. +- You need (or at least, are ok with) a CLI. +- You need to override the `fs` methods in use. +- You're ok with using callbacks instead of promises. +- You are not running on Windows, where the root-level ENOENT + errors can lead to infinite regress. +- You think vinyl just sounds warmer and richer for some weird + reason. +- You are supporting truly ancient Node.js versions, before even + the advent of a `Promise` language primitive. (Please don't. + You deserve better.) + +# cli + +This package also ships with a `mkdirp` command. + +``` +$ mkdirp -h + +usage: mkdirp [DIR1,DIR2..] {OPTIONS} + + Create each supplied directory including any necessary parent directories + that don't yet exist. + + If the directory already exists, do nothing. + +OPTIONS are: + + -m If a directory needs to be created, set the mode as an octal + --mode= permission string. + + -v --version Print the mkdirp version number + + -h --help Print this helpful banner + + -p --print Print the first directories created for each path provided + + --manual Use manual implementation, even if native is available +``` + +# install + +With [npm](http://npmjs.org) do: + +``` +npm install mkdirp +``` + +to get the library locally, or + +``` +npm install -g mkdirp +``` + +to get the command everywhere, or + +``` +npx mkdirp ... +``` + +to run the command without installing it globally. + +# platform support + +This module works on node v8, but only v10 and above are officially +supported, as Node v8 reached its LTS end of life 2020-01-01, which is in +the past, as of this writing. + +# license + +MIT diff --git a/deps/npm/node_modules/node-gyp/node_modules/nopt/README.md b/deps/npm/node_modules/node-gyp/node_modules/nopt/README.md deleted file mode 100644 index a99531c04655fe..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/nopt/README.md +++ /dev/null @@ -1,213 +0,0 @@ -If you want to write an option parser, and have it be good, there are -two ways to do it. The Right Way, and the Wrong Way. - -The Wrong Way is to sit down and write an option parser. We've all done -that. - -The Right Way is to write some complex configurable program with so many -options that you hit the limit of your frustration just trying to -manage them all, and defer it with duct-tape solutions until you see -exactly to the core of the problem, and finally snap and write an -awesome option parser. - -If you want to write an option parser, don't write an option parser. -Write a package manager, or a source control system, or a service -restarter, or an operating system. You probably won't end up with a -good one of those, but if you don't give up, and you are relentless and -diligent enough in your procrastination, you may just end up with a very -nice option parser. - -## USAGE - -```javascript -// my-program.js -var nopt = require("nopt") - , Stream = require("stream").Stream - , path = require("path") - , knownOpts = { "foo" : [String, null] - , "bar" : [Stream, Number] - , "baz" : path - , "bloo" : [ "big", "medium", "small" ] - , "flag" : Boolean - , "pick" : Boolean - , "many1" : [String, Array] - , "many2" : [path, Array] - } - , shortHands = { "foofoo" : ["--foo", "Mr. Foo"] - , "b7" : ["--bar", "7"] - , "m" : ["--bloo", "medium"] - , "p" : ["--pick"] - , "f" : ["--flag"] - } - // everything is optional. 
- // knownOpts and shorthands default to {} - // arg list defaults to process.argv - // slice defaults to 2 - , parsed = nopt(knownOpts, shortHands, process.argv, 2) -console.log(parsed) -``` - -This would give you support for any of the following: - -```console -$ node my-program.js --foo "blerp" --no-flag -{ "foo" : "blerp", "flag" : false } - -$ node my-program.js ---bar 7 --foo "Mr. Hand" --flag -{ bar: 7, foo: "Mr. Hand", flag: true } - -$ node my-program.js --foo "blerp" -f -----p -{ foo: "blerp", flag: true, pick: true } - -$ node my-program.js -fp --foofoo -{ foo: "Mr. Foo", flag: true, pick: true } - -$ node my-program.js --foofoo -- -fp # -- stops the flag parsing. -{ foo: "Mr. Foo", argv: { remain: ["-fp"] } } - -$ node my-program.js --blatzk -fp # unknown opts are ok. -{ blatzk: true, flag: true, pick: true } - -$ node my-program.js --blatzk=1000 -fp # but you need to use = if they have a value -{ blatzk: 1000, flag: true, pick: true } - -$ node my-program.js --no-blatzk -fp # unless they start with "no-" -{ blatzk: false, flag: true, pick: true } - -$ node my-program.js --baz b/a/z # known paths are resolved. -{ baz: "/Users/isaacs/b/a/z" } - -# if Array is one of the types, then it can take many -# values, and will always be an array. The other types provided -# specify what types are allowed in the list. - -$ node my-program.js --many1 5 --many1 null --many1 foo -{ many1: ["5", "null", "foo"] } - -$ node my-program.js --many2 foo --many2 bar -{ many2: ["/path/to/foo", "path/to/bar"] } -``` - -Read the tests at the bottom of `lib/nopt.js` for more examples of -what this puppy can do. - -## Types - -The following types are supported, and defined on `nopt.typeDefs` - -* String: A normal string. No parsing is done. -* path: A file system path. Gets resolved against cwd if not absolute. -* url: A url. If it doesn't parse, it isn't accepted. -* Number: Must be numeric. -* Date: Must parse as a date. If it does, and `Date` is one of the options, - then it will return a Date object, not a string. -* Boolean: Must be either `true` or `false`. If an option is a boolean, - then it does not need a value, and its presence will imply `true` as - the value. To negate boolean flags, do `--no-whatever` or `--whatever - false` -* NaN: Means that the option is strictly not allowed. Any value will - fail. -* Stream: An object matching the "Stream" class in node. Valuable - for use when validating programmatically. (npm uses this to let you - supply any WriteStream on the `outfd` and `logfd` config options.) -* Array: If `Array` is specified as one of the types, then the value - will be parsed as a list of options. This means that multiple values - can be specified, and that the value will always be an array. - -If a type is an array of values not on this list, then those are -considered valid values. For instance, in the example above, the -`--bloo` option can only be one of `"big"`, `"medium"`, or `"small"`, -and any other value will be rejected. - -When parsing unknown fields, `"true"`, `"false"`, and `"null"` will be -interpreted as their JavaScript equivalents. - -You can also mix types and values, or multiple types, in a list. For -instance `{ blah: [Number, null] }` would allow a value to be set to -either a Number or null. When types are ordered, this implies a -preference, and the first type that can be used to properly interpret -the value will be used. - -To define a new type, add it to `nopt.typeDefs`. Each item in that -hash is an object with a `type` member and a `validate` method. 
The -`type` member is an object that matches what goes in the type list. The -`validate` method is a function that gets called with `validate(data, -key, val)`. Validate methods should assign `data[key]` to the valid -value of `val` if it can be handled properly, or return boolean -`false` if it cannot. - -You can also call `nopt.clean(data, types, typeDefs)` to clean up a -config object and remove its invalid properties. - -## Error Handling - -By default, nopt outputs a warning to standard error when invalid values for -known options are found. You can change this behavior by assigning a method -to `nopt.invalidHandler`. This method will be called with -the offending `nopt.invalidHandler(key, val, types)`. - -If no `nopt.invalidHandler` is assigned, then it will console.error -its whining. If it is assigned to boolean `false` then the warning is -suppressed. - -## Abbreviations - -Yes, they are supported. If you define options like this: - -```javascript -{ "foolhardyelephants" : Boolean -, "pileofmonkeys" : Boolean } -``` - -Then this will work: - -```bash -node program.js --foolhar --pil -node program.js --no-f --pileofmon -# etc. -``` - -## Shorthands - -Shorthands are a hash of shorter option names to a snippet of args that -they expand to. - -If multiple one-character shorthands are all combined, and the -combination does not unambiguously match any other option or shorthand, -then they will be broken up into their constituent parts. For example: - -```json -{ "s" : ["--loglevel", "silent"] -, "g" : "--global" -, "f" : "--force" -, "p" : "--parseable" -, "l" : "--long" -} -``` - -```bash -npm ls -sgflp -# just like doing this: -npm ls --loglevel silent --global --force --long --parseable -``` - -## The Rest of the args - -The config object returned by nopt is given a special member called -`argv`, which is an object with the following fields: - -* `remain`: The remaining args after all the parsing has occurred. -* `original`: The args as they originally appeared. -* `cooked`: The args after flags and shorthands are expanded. - -## Slicing - -Node programs are called with more or less the exact argv as it appears -in C land, after the v8 and node-specific options have been plucked off. -As such, `argv[0]` is always `node` and `argv[1]` is always the -JavaScript program being run. - -That's usually not very useful to you. So they're sliced off by -default. If you want them, then you can pass in `0` as the last -argument, or any other number that you'd like to slice off the start of -the list. 
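
Before moving on to the implementation files below, the custom-type
mechanism described in the Types section is easiest to see in a short
sketch. Everything here is invented for illustration: the `csv` type,
its sentinel object, and the sample arguments are hypothetical, not
part of nopt itself.

```js
const nopt = require('nopt')

// sentinel object used in type lists; any object with a distinct
// identity works (the `name` lets nopt match it by name as well)
const CSV = { name: 'csv' }

// register the type: `type` is what appears in a type list, and
// `validate` either assigns the cleaned-up value to data[key] or
// returns false to reject the value
nopt.typeDefs.csv = {
  type: CSV,
  validate (data, key, val) {
    if (typeof val !== 'string') {
      return false // reject anything that is not a string
    }
    data[key] = val.split(',').map(s => s.trim())
  },
}

// '--tags "a, b,c"' is handed to the csv validator, which stores an array
const parsed = nopt({ tags: CSV }, {}, ['--tags', 'a, b,c'], 0)
console.log(parsed.tags) // [ 'a', 'b', 'c' ]
```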
diff --git a/deps/npm/node_modules/node-gyp/node_modules/nopt/bin/nopt.js b/deps/npm/node_modules/node-gyp/node_modules/nopt/bin/nopt.js deleted file mode 100755 index 6ed2082064b5ea..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/nopt/bin/nopt.js +++ /dev/null @@ -1,29 +0,0 @@ -#!/usr/bin/env node -const nopt = require('../lib/nopt') -const path = require('path') -console.log('parsed', nopt({ - num: Number, - bool: Boolean, - help: Boolean, - list: Array, - 'num-list': [Number, Array], - 'str-list': [String, Array], - 'bool-list': [Boolean, Array], - str: String, - clear: Boolean, - config: Boolean, - length: Number, - file: path, -}, { - s: ['--str', 'astring'], - b: ['--bool'], - nb: ['--no-bool'], - tft: ['--bool-list', '--no-bool-list', '--bool-list', 'true'], - '?': ['--help'], - h: ['--help'], - H: ['--help'], - n: ['--num', '125'], - c: ['--config'], - l: ['--length'], - f: ['--file'], -}, process.argv, 2)) diff --git a/deps/npm/node_modules/node-gyp/node_modules/nopt/lib/debug.js b/deps/npm/node_modules/node-gyp/node_modules/nopt/lib/debug.js deleted file mode 100644 index 544ab382ca85c0..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/nopt/lib/debug.js +++ /dev/null @@ -1,5 +0,0 @@ -/* istanbul ignore next */ -module.exports = process.env.DEBUG_NOPT || process.env.NOPT_DEBUG - // eslint-disable-next-line no-console - ? (...a) => console.error(...a) - : () => {} diff --git a/deps/npm/node_modules/node-gyp/node_modules/nopt/lib/nopt-lib.js b/deps/npm/node_modules/node-gyp/node_modules/nopt/lib/nopt-lib.js deleted file mode 100644 index d3d1de0255ba9b..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/nopt/lib/nopt-lib.js +++ /dev/null @@ -1,479 +0,0 @@ -const abbrev = require('abbrev') -const debug = require('./debug') -const defaultTypeDefs = require('./type-defs') - -const hasOwn = (o, k) => Object.prototype.hasOwnProperty.call(o, k) - -const getType = (k, { types, dynamicTypes }) => { - let hasType = hasOwn(types, k) - let type = types[k] - if (!hasType && typeof dynamicTypes === 'function') { - const matchedType = dynamicTypes(k) - if (matchedType !== undefined) { - type = matchedType - hasType = true - } - } - return [hasType, type] -} - -const isTypeDef = (type, def) => def && type === def -const hasTypeDef = (type, def) => def && type.indexOf(def) !== -1 -const doesNotHaveTypeDef = (type, def) => def && !hasTypeDef(type, def) - -function nopt (args, { - types, - shorthands, - typeDefs, - invalidHandler, - typeDefault, - dynamicTypes, -} = {}) { - debug(types, shorthands, args, typeDefs) - - const data = {} - const argv = { - remain: [], - cooked: args, - original: args.slice(0), - } - - parse(args, data, argv.remain, { typeDefs, types, dynamicTypes, shorthands }) - - // now data is full - clean(data, { types, dynamicTypes, typeDefs, invalidHandler, typeDefault }) - data.argv = argv - - Object.defineProperty(data.argv, 'toString', { - value: function () { - return this.original.map(JSON.stringify).join(' ') - }, - enumerable: false, - }) - - return data -} - -function clean (data, { - types = {}, - typeDefs = {}, - dynamicTypes, - invalidHandler, - typeDefault, -} = {}) { - const StringType = typeDefs.String?.type - const NumberType = typeDefs.Number?.type - const ArrayType = typeDefs.Array?.type - const BooleanType = typeDefs.Boolean?.type - const DateType = typeDefs.Date?.type - - const hasTypeDefault = typeof typeDefault !== 'undefined' - if (!hasTypeDefault) { - typeDefault = [false, true, null] - if (StringType) { - 
typeDefault.push(StringType) - } - if (ArrayType) { - typeDefault.push(ArrayType) - } - } - - const remove = {} - - Object.keys(data).forEach((k) => { - if (k === 'argv') { - return - } - let val = data[k] - debug('val=%j', val) - const isArray = Array.isArray(val) - let [hasType, rawType] = getType(k, { types, dynamicTypes }) - let type = rawType - if (!isArray) { - val = [val] - } - if (!type) { - type = typeDefault - } - if (isTypeDef(type, ArrayType)) { - type = typeDefault.concat(ArrayType) - } - if (!Array.isArray(type)) { - type = [type] - } - - debug('val=%j', val) - debug('types=', type) - val = val.map((v) => { - // if it's an unknown value, then parse false/true/null/numbers/dates - if (typeof v === 'string') { - debug('string %j', v) - v = v.trim() - if ((v === 'null' && ~type.indexOf(null)) - || (v === 'true' && - (~type.indexOf(true) || hasTypeDef(type, BooleanType))) - || (v === 'false' && - (~type.indexOf(false) || hasTypeDef(type, BooleanType)))) { - v = JSON.parse(v) - debug('jsonable %j', v) - } else if (hasTypeDef(type, NumberType) && !isNaN(v)) { - debug('convert to number', v) - v = +v - } else if (hasTypeDef(type, DateType) && !isNaN(Date.parse(v))) { - debug('convert to date', v) - v = new Date(v) - } - } - - if (!hasType) { - if (!hasTypeDefault) { - return v - } - // if the default type has been passed in then we want to validate the - // unknown data key instead of bailing out earlier. we also set the raw - // type which is passed to the invalid handler so that it can be - // determined if during validation if it is unknown vs invalid - rawType = typeDefault - } - - // allow `--no-blah` to set 'blah' to null if null is allowed - if (v === false && ~type.indexOf(null) && - !(~type.indexOf(false) || hasTypeDef(type, BooleanType))) { - v = null - } - - const d = {} - d[k] = v - debug('prevalidated val', d, v, rawType) - if (!validate(d, k, v, rawType, { typeDefs })) { - if (invalidHandler) { - invalidHandler(k, v, rawType, data) - } else if (invalidHandler !== false) { - debug('invalid: ' + k + '=' + v, rawType) - } - return remove - } - debug('validated v', d, v, rawType) - return d[k] - }).filter((v) => v !== remove) - - // if we allow Array specifically, then an empty array is how we - // express 'no value here', not null. Allow it. - if (!val.length && doesNotHaveTypeDef(type, ArrayType)) { - debug('VAL HAS NO LENGTH, DELETE IT', val, k, type.indexOf(ArrayType)) - delete data[k] - } else if (isArray) { - debug(isArray, data[k], val) - data[k] = val - } else { - data[k] = val[0] - } - - debug('k=%s val=%j', k, val, data[k]) - }) -} - -function validate (data, k, val, type, { typeDefs } = {}) { - const ArrayType = typeDefs?.Array?.type - // arrays are lists of types. - if (Array.isArray(type)) { - for (let i = 0, l = type.length; i < l; i++) { - if (isTypeDef(type[i], ArrayType)) { - continue - } - if (validate(data, k, val, type[i], { typeDefs })) { - return true - } - } - delete data[k] - return false - } - - // an array of anything? - if (isTypeDef(type, ArrayType)) { - return true - } - - // Original comment: - // NaN is poisonous. Means that something is not allowed. - // New comment: Changing this to an isNaN check breaks a lot of tests. - // Something is being assumed here that is not actually what happens in - // practice. Fixing it is outside the scope of getting linting to pass in - // this repo. Leaving as-is for now. 
- /* eslint-disable-next-line no-self-compare */ - if (type !== type) { - debug('Poison NaN', k, val, type) - delete data[k] - return false - } - - // explicit list of values - if (val === type) { - debug('Explicitly allowed %j', val) - data[k] = val - return true - } - - // now go through the list of typeDefs, validate against each one. - let ok = false - const types = Object.keys(typeDefs) - for (let i = 0, l = types.length; i < l; i++) { - debug('test type %j %j %j', k, val, types[i]) - const t = typeDefs[types[i]] - if (t && ( - (type && type.name && t.type && t.type.name) ? - (type.name === t.type.name) : - (type === t.type) - )) { - const d = {} - ok = t.validate(d, k, val) !== false - val = d[k] - if (ok) { - data[k] = val - break - } - } - } - debug('OK? %j (%j %j %j)', ok, k, val, types[types.length - 1]) - - if (!ok) { - delete data[k] - } - return ok -} - -function parse (args, data, remain, { - types = {}, - typeDefs = {}, - shorthands = {}, - dynamicTypes, -} = {}) { - const StringType = typeDefs.String?.type - const NumberType = typeDefs.Number?.type - const ArrayType = typeDefs.Array?.type - const BooleanType = typeDefs.Boolean?.type - - debug('parse', args, data, remain) - - const abbrevs = abbrev(Object.keys(types)) - debug('abbrevs=%j', abbrevs) - const shortAbbr = abbrev(Object.keys(shorthands)) - - for (let i = 0; i < args.length; i++) { - let arg = args[i] - debug('arg', arg) - - if (arg.match(/^-{2,}$/)) { - // done with keys. - // the rest are args. - remain.push.apply(remain, args.slice(i + 1)) - args[i] = '--' - break - } - let hadEq = false - if (arg.charAt(0) === '-' && arg.length > 1) { - const at = arg.indexOf('=') - if (at > -1) { - hadEq = true - const v = arg.slice(at + 1) - arg = arg.slice(0, at) - args.splice(i, 1, arg, v) - } - - // see if it's a shorthand - // if so, splice and back up to re-parse it. - const shRes = resolveShort(arg, shortAbbr, abbrevs, { shorthands }) - debug('arg=%j shRes=%j', arg, shRes) - if (shRes) { - args.splice.apply(args, [i, 1].concat(shRes)) - if (arg !== shRes[0]) { - i-- - continue - } - } - arg = arg.replace(/^-+/, '') - let no = null - while (arg.toLowerCase().indexOf('no-') === 0) { - no = !no - arg = arg.slice(3) - } - - if (abbrevs[arg]) { - arg = abbrevs[arg] - } - - let [hasType, argType] = getType(arg, { types, dynamicTypes }) - let isTypeArray = Array.isArray(argType) - if (isTypeArray && argType.length === 1) { - isTypeArray = false - argType = argType[0] - } - - let isArray = isTypeDef(argType, ArrayType) || - isTypeArray && hasTypeDef(argType, ArrayType) - - // allow unknown things to be arrays if specified multiple times. 
- if (!hasType && hasOwn(data, arg)) { - if (!Array.isArray(data[arg])) { - data[arg] = [data[arg]] - } - isArray = true - } - - let val - let la = args[i + 1] - - const isBool = typeof no === 'boolean' || - isTypeDef(argType, BooleanType) || - isTypeArray && hasTypeDef(argType, BooleanType) || - (typeof argType === 'undefined' && !hadEq) || - (la === 'false' && - (argType === null || - isTypeArray && ~argType.indexOf(null))) - - if (isBool) { - // just set and move along - val = !no - // however, also support --bool true or --bool false - if (la === 'true' || la === 'false') { - val = JSON.parse(la) - la = null - if (no) { - val = !val - } - i++ - } - - // also support "foo":[Boolean, "bar"] and "--foo bar" - if (isTypeArray && la) { - if (~argType.indexOf(la)) { - // an explicit type - val = la - i++ - } else if (la === 'null' && ~argType.indexOf(null)) { - // null allowed - val = null - i++ - } else if (!la.match(/^-{2,}[^-]/) && - !isNaN(la) && - hasTypeDef(argType, NumberType)) { - // number - val = +la - i++ - } else if (!la.match(/^-[^-]/) && hasTypeDef(argType, StringType)) { - // string - val = la - i++ - } - } - - if (isArray) { - (data[arg] = data[arg] || []).push(val) - } else { - data[arg] = val - } - - continue - } - - if (isTypeDef(argType, StringType)) { - if (la === undefined) { - la = '' - } else if (la.match(/^-{1,2}[^-]+/)) { - la = '' - i-- - } - } - - if (la && la.match(/^-{2,}$/)) { - la = undefined - i-- - } - - val = la === undefined ? true : la - if (isArray) { - (data[arg] = data[arg] || []).push(val) - } else { - data[arg] = val - } - - i++ - continue - } - remain.push(arg) - } -} - -const SINGLES = Symbol('singles') -const singleCharacters = (arg, shorthands) => { - let singles = shorthands[SINGLES] - if (!singles) { - singles = Object.keys(shorthands).filter((s) => s.length === 1).reduce((l, r) => { - l[r] = true - return l - }, {}) - shorthands[SINGLES] = singles - debug('shorthand singles', singles) - } - const chrs = arg.split('').filter((c) => singles[c]) - return chrs.join('') === arg ? chrs : null -} - -function resolveShort (arg, ...rest) { - const { types = {}, shorthands = {} } = rest.length ? rest.pop() : {} - const shortAbbr = rest[0] ?? abbrev(Object.keys(shorthands)) - const abbrevs = rest[1] ?? abbrev(Object.keys(types)) - - // handle single-char shorthands glommed together, like - // npm ls -glp, but only if there is one dash, and only if - // all of the chars are single-char shorthands, and it's - // not a match to some other abbrev. 
- arg = arg.replace(/^-+/, '') - - // if it's an exact known option, then don't go any further - if (abbrevs[arg] === arg) { - return null - } - - // if it's an exact known shortopt, same deal - if (shorthands[arg]) { - // make it an array, if it's a list of words - if (shorthands[arg] && !Array.isArray(shorthands[arg])) { - shorthands[arg] = shorthands[arg].split(/\s+/) - } - - return shorthands[arg] - } - - // first check to see if this arg is a set of single-char shorthands - const chrs = singleCharacters(arg, shorthands) - if (chrs) { - return chrs.map((c) => shorthands[c]).reduce((l, r) => l.concat(r), []) - } - - // if it's an arg abbrev, and not a literal shorthand, then prefer the arg - if (abbrevs[arg] && !shorthands[arg]) { - return null - } - - // if it's an abbr for a shorthand, then use that - if (shortAbbr[arg]) { - arg = shortAbbr[arg] - } - - // make it an array, if it's a list of words - if (shorthands[arg] && !Array.isArray(shorthands[arg])) { - shorthands[arg] = shorthands[arg].split(/\s+/) - } - - return shorthands[arg] -} - -module.exports = { - nopt, - clean, - parse, - validate, - resolveShort, - typeDefs: defaultTypeDefs, -} diff --git a/deps/npm/node_modules/node-gyp/node_modules/nopt/lib/nopt.js b/deps/npm/node_modules/node-gyp/node_modules/nopt/lib/nopt.js deleted file mode 100644 index 37f01a08783f87..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/nopt/lib/nopt.js +++ /dev/null @@ -1,30 +0,0 @@ -const lib = require('./nopt-lib') -const defaultTypeDefs = require('./type-defs') - -// This is the version of nopt's API that requires setting typeDefs and invalidHandler -// on the required `nopt` object since it is a singleton. To not do a breaking change -// an API that requires all options be passed in is located in `nopt-lib.js` and -// exported here as lib. -// TODO(breaking): make API only work in non-singleton mode - -module.exports = exports = nopt -exports.clean = clean -exports.typeDefs = defaultTypeDefs -exports.lib = lib - -function nopt (types, shorthands, args = process.argv, slice = 2) { - return lib.nopt(args.slice(slice), { - types: types || {}, - shorthands: shorthands || {}, - typeDefs: exports.typeDefs, - invalidHandler: exports.invalidHandler, - }) -} - -function clean (data, types, typeDefs = exports.typeDefs) { - return lib.clean(data, { - types: types || {}, - typeDefs, - invalidHandler: exports.invalidHandler, - }) -} diff --git a/deps/npm/node_modules/node-gyp/node_modules/nopt/lib/type-defs.js b/deps/npm/node_modules/node-gyp/node_modules/nopt/lib/type-defs.js deleted file mode 100644 index 608352ee248cc4..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/nopt/lib/type-defs.js +++ /dev/null @@ -1,91 +0,0 @@ -const url = require('url') -const path = require('path') -const Stream = require('stream').Stream -const os = require('os') -const debug = require('./debug') - -function validateString (data, k, val) { - data[k] = String(val) -} - -function validatePath (data, k, val) { - if (val === true) { - return false - } - if (val === null) { - return true - } - - val = String(val) - - const isWin = process.platform === 'win32' - const homePattern = isWin ? 
/^~(\/|\\)/ : /^~\// - const home = os.homedir() - - if (home && val.match(homePattern)) { - data[k] = path.resolve(home, val.slice(2)) - } else { - data[k] = path.resolve(val) - } - return true -} - -function validateNumber (data, k, val) { - debug('validate Number %j %j %j', k, val, isNaN(val)) - if (isNaN(val)) { - return false - } - data[k] = +val -} - -function validateDate (data, k, val) { - const s = Date.parse(val) - debug('validate Date %j %j %j', k, val, s) - if (isNaN(s)) { - return false - } - data[k] = new Date(val) -} - -function validateBoolean (data, k, val) { - if (typeof val === 'string') { - if (!isNaN(val)) { - val = !!(+val) - } else if (val === 'null' || val === 'false') { - val = false - } else { - val = true - } - } else { - val = !!val - } - data[k] = val -} - -function validateUrl (data, k, val) { - // Changing this would be a breaking change in the npm cli - /* eslint-disable-next-line node/no-deprecated-api */ - val = url.parse(String(val)) - if (!val.host) { - return false - } - data[k] = val.href -} - -function validateStream (data, k, val) { - if (!(val instanceof Stream)) { - return false - } - data[k] = val -} - -module.exports = { - String: { type: String, validate: validateString }, - Boolean: { type: Boolean, validate: validateBoolean }, - url: { type: url, validate: validateUrl }, - Number: { type: Number, validate: validateNumber }, - path: { type: path, validate: validatePath }, - Stream: { type: Stream, validate: validateStream }, - Date: { type: Date, validate: validateDate }, - Array: { type: Array }, -} diff --git a/deps/npm/node_modules/node-gyp/node_modules/nopt/package.json b/deps/npm/node_modules/node-gyp/node_modules/nopt/package.json deleted file mode 100644 index 37b770ad487711..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/nopt/package.json +++ /dev/null @@ -1,51 +0,0 @@ -{ - "name": "nopt", - "version": "7.2.1", - "description": "Option parsing for Node, supporting types, shorthands, etc. Used by npm.", - "author": "GitHub Inc.", - "main": "lib/nopt.js", - "scripts": { - "test": "tap", - "lint": "eslint \"**/*.{js,cjs,ts,mjs,jsx,tsx}\"", - "postlint": "template-oss-check", - "template-oss-apply": "template-oss-apply --force", - "lintfix": "npm run lint -- --fix", - "snap": "tap", - "posttest": "npm run lint" - }, - "repository": { - "type": "git", - "url": "git+https://github.com/npm/nopt.git" - }, - "bin": { - "nopt": "bin/nopt.js" - }, - "license": "ISC", - "dependencies": { - "abbrev": "^2.0.0" - }, - "devDependencies": { - "@npmcli/eslint-config": "^4.0.0", - "@npmcli/template-oss": "4.22.0", - "tap": "^16.3.0" - }, - "tap": { - "nyc-arg": [ - "--exclude", - "tap-snapshots/**" - ] - }, - "files": [ - "bin/", - "lib/" - ], - "engines": { - "node": "^14.17.0 || ^16.13.0 || >=18.0.0" - }, - "templateOSS": { - "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. Edits may be overwritten.", - "windowsCI": false, - "version": "4.22.0", - "publish": true - } -} diff --git a/deps/npm/node_modules/node-gyp/node_modules/proc-log/LICENSE b/deps/npm/node_modules/node-gyp/node_modules/proc-log/LICENSE deleted file mode 100644 index 83837797202b70..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/proc-log/LICENSE +++ /dev/null @@ -1,15 +0,0 @@ -The ISC License - -Copyright (c) GitHub, Inc. 
- -Permission to use, copy, modify, and/or distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR -IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/deps/npm/node_modules/node-gyp/node_modules/proc-log/lib/index.js b/deps/npm/node_modules/node-gyp/node_modules/proc-log/lib/index.js deleted file mode 100644 index 86d90861078dab..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/proc-log/lib/index.js +++ /dev/null @@ -1,153 +0,0 @@ -const META = Symbol('proc-log.meta') -module.exports = { - META: META, - output: { - LEVELS: [ - 'standard', - 'error', - 'buffer', - 'flush', - ], - KEYS: { - standard: 'standard', - error: 'error', - buffer: 'buffer', - flush: 'flush', - }, - standard: function (...args) { - return process.emit('output', 'standard', ...args) - }, - error: function (...args) { - return process.emit('output', 'error', ...args) - }, - buffer: function (...args) { - return process.emit('output', 'buffer', ...args) - }, - flush: function (...args) { - return process.emit('output', 'flush', ...args) - }, - }, - log: { - LEVELS: [ - 'notice', - 'error', - 'warn', - 'info', - 'verbose', - 'http', - 'silly', - 'timing', - 'pause', - 'resume', - ], - KEYS: { - notice: 'notice', - error: 'error', - warn: 'warn', - info: 'info', - verbose: 'verbose', - http: 'http', - silly: 'silly', - timing: 'timing', - pause: 'pause', - resume: 'resume', - }, - error: function (...args) { - return process.emit('log', 'error', ...args) - }, - notice: function (...args) { - return process.emit('log', 'notice', ...args) - }, - warn: function (...args) { - return process.emit('log', 'warn', ...args) - }, - info: function (...args) { - return process.emit('log', 'info', ...args) - }, - verbose: function (...args) { - return process.emit('log', 'verbose', ...args) - }, - http: function (...args) { - return process.emit('log', 'http', ...args) - }, - silly: function (...args) { - return process.emit('log', 'silly', ...args) - }, - timing: function (...args) { - return process.emit('log', 'timing', ...args) - }, - pause: function () { - return process.emit('log', 'pause') - }, - resume: function () { - return process.emit('log', 'resume') - }, - }, - time: { - LEVELS: [ - 'start', - 'end', - ], - KEYS: { - start: 'start', - end: 'end', - }, - start: function (name, fn) { - process.emit('time', 'start', name) - function end () { - return process.emit('time', 'end', name) - } - if (typeof fn === 'function') { - const res = fn() - if (res && res.finally) { - return res.finally(end) - } - end() - return res - } - return end - }, - end: function (name) { - return process.emit('time', 'end', name) - }, - }, - input: { - LEVELS: [ - 'start', - 'end', - 'read', - ], - KEYS: { - start: 'start', - end: 'end', - read: 'read', - }, - start: function (fn) { - process.emit('input', 'start') - function end () { - return process.emit('input', 'end') - } - if (typeof fn === 'function') { - const res = fn() - if (res && res.finally) { - return 
res.finally(end) - } - end() - return res - } - return end - }, - end: function () { - return process.emit('input', 'end') - }, - read: function (...args) { - let resolve, reject - const promise = new Promise((_resolve, _reject) => { - resolve = _resolve - reject = _reject - }) - process.emit('input', 'read', resolve, reject, ...args) - return promise - }, - }, -} diff --git a/deps/npm/node_modules/node-gyp/node_modules/proc-log/package.json b/deps/npm/node_modules/node-gyp/node_modules/proc-log/package.json deleted file mode 100644 index 4ab89102ecc9b5..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/proc-log/package.json +++ /dev/null @@ -1,45 +0,0 @@ -{ - "name": "proc-log", - "version": "4.2.0", - "files": [ - "bin/", - "lib/" - ], - "main": "lib/index.js", - "description": "just emit 'log' events on the process object", - "repository": { - "type": "git", - "url": "https://github.com/npm/proc-log.git" - }, - "author": "GitHub Inc.", - "license": "ISC", - "scripts": { - "test": "tap", - "snap": "tap", - "posttest": "npm run lint", - "postsnap": "eslint index.js test/*.js --fix", - "lint": "eslint \"**/*.{js,cjs,ts,mjs,jsx,tsx}\"", - "postlint": "template-oss-check", - "lintfix": "npm run lint -- --fix", - "template-oss-apply": "template-oss-apply --force" - }, - "devDependencies": { - "@npmcli/eslint-config": "^4.0.0", - "@npmcli/template-oss": "4.21.3", - "tap": "^16.0.1" - }, - "engines": { - "node": "^14.17.0 || ^16.13.0 || >=18.0.0" - }, - "templateOSS": { - "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. Edits may be overwritten.", - "version": "4.21.3", - "publish": true - }, - "tap": { - "nyc-arg": [ - "--exclude", - "tap-snapshots/**" - ] - } -} diff --git a/deps/npm/node_modules/node-gyp/node_modules/ssri/LICENSE.md b/deps/npm/node_modules/node-gyp/node_modules/ssri/LICENSE.md deleted file mode 100644 index e335388869f50f..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/ssri/LICENSE.md +++ /dev/null @@ -1,16 +0,0 @@ -ISC License - -Copyright 2021 (c) npm, Inc. - -Permission to use, copy, modify, and/or distribute this software for -any purpose with or without fee is hereby granted, provided that the -above copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE COPYRIGHT HOLDER DISCLAIMS -ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE -COPYRIGHT HOLDER BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR -CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS -OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE -OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE -USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/deps/npm/node_modules/node-gyp/node_modules/ssri/lib/index.js b/deps/npm/node_modules/node-gyp/node_modules/ssri/lib/index.js deleted file mode 100644 index 7d749ed480fb98..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/ssri/lib/index.js +++ /dev/null @@ -1,580 +0,0 @@ -'use strict' - -const crypto = require('crypto') -const { Minipass } = require('minipass') - -const SPEC_ALGORITHMS = ['sha512', 'sha384', 'sha256'] -const DEFAULT_ALGORITHMS = ['sha512'] - -// TODO: this should really be a hardcoded list of algorithms we support, -// rather than [a-z0-9]. 
-const BASE64_REGEX = /^[a-z0-9+/]+(?:=?=?)$/i -const SRI_REGEX = /^([a-z0-9]+)-([^?]+)([?\S*]*)$/ -const STRICT_SRI_REGEX = /^([a-z0-9]+)-([A-Za-z0-9+/=]{44,88})(\?[\x21-\x7E]*)?$/ -const VCHAR_REGEX = /^[\x21-\x7E]+$/ - -const getOptString = options => options?.length ? `?${options.join('?')}` : '' - -class IntegrityStream extends Minipass { - #emittedIntegrity - #emittedSize - #emittedVerified - - constructor (opts) { - super() - this.size = 0 - this.opts = opts - - // may be overridden later, but set now for class consistency - this.#getOptions() - - // options used for calculating stream. can't be changed. - if (opts?.algorithms) { - this.algorithms = [...opts.algorithms] - } else { - this.algorithms = [...DEFAULT_ALGORITHMS] - } - if (this.algorithm !== null && !this.algorithms.includes(this.algorithm)) { - this.algorithms.push(this.algorithm) - } - - this.hashes = this.algorithms.map(crypto.createHash) - } - - #getOptions () { - // For verification - this.sri = this.opts?.integrity ? parse(this.opts?.integrity, this.opts) : null - this.expectedSize = this.opts?.size - - if (!this.sri) { - this.algorithm = null - } else if (this.sri.isHash) { - this.goodSri = true - this.algorithm = this.sri.algorithm - } else { - this.goodSri = !this.sri.isEmpty() - this.algorithm = this.sri.pickAlgorithm(this.opts) - } - - this.digests = this.goodSri ? this.sri[this.algorithm] : null - this.optString = getOptString(this.opts?.options) - } - - on (ev, handler) { - if (ev === 'size' && this.#emittedSize) { - return handler(this.#emittedSize) - } - - if (ev === 'integrity' && this.#emittedIntegrity) { - return handler(this.#emittedIntegrity) - } - - if (ev === 'verified' && this.#emittedVerified) { - return handler(this.#emittedVerified) - } - - return super.on(ev, handler) - } - - emit (ev, data) { - if (ev === 'end') { - this.#onEnd() - } - return super.emit(ev, data) - } - - write (data) { - this.size += data.length - this.hashes.forEach(h => h.update(data)) - return super.write(data) - } - - #onEnd () { - if (!this.goodSri) { - this.#getOptions() - } - const newSri = parse(this.hashes.map((h, i) => { - return `${this.algorithms[i]}-${h.digest('base64')}${this.optString}` - }).join(' '), this.opts) - // Integrity verification mode - const match = this.goodSri && newSri.match(this.sri, this.opts) - if (typeof this.expectedSize === 'number' && this.size !== this.expectedSize) { - /* eslint-disable-next-line max-len */ - const err = new Error(`stream size mismatch when checking ${this.sri}.\n Wanted: ${this.expectedSize}\n Found: ${this.size}`) - err.code = 'EBADSIZE' - err.found = this.size - err.expected = this.expectedSize - err.sri = this.sri - this.emit('error', err) - } else if (this.sri && !match) { - /* eslint-disable-next-line max-len */ - const err = new Error(`${this.sri} integrity checksum failed when using ${this.algorithm}: wanted ${this.digests} but got ${newSri}. 
(${this.size} bytes)`) - err.code = 'EINTEGRITY' - err.found = newSri - err.expected = this.digests - err.algorithm = this.algorithm - err.sri = this.sri - this.emit('error', err) - } else { - this.#emittedSize = this.size - this.emit('size', this.size) - this.#emittedIntegrity = newSri - this.emit('integrity', newSri) - if (match) { - this.#emittedVerified = match - this.emit('verified', match) - } - } - } -} - -class Hash { - get isHash () { - return true - } - - constructor (hash, opts) { - const strict = opts?.strict - this.source = hash.trim() - - // set default values so that we make V8 happy to - // always see a familiar object template. - this.digest = '' - this.algorithm = '' - this.options = [] - - // 3.1. Integrity metadata (called "Hash" by ssri) - // https://w3c.github.io/webappsec-subresource-integrity/#integrity-metadata-description - const match = this.source.match( - strict - ? STRICT_SRI_REGEX - : SRI_REGEX - ) - if (!match) { - return - } - if (strict && !SPEC_ALGORITHMS.includes(match[1])) { - return - } - this.algorithm = match[1] - this.digest = match[2] - - const rawOpts = match[3] - if (rawOpts) { - this.options = rawOpts.slice(1).split('?') - } - } - - hexDigest () { - return this.digest && Buffer.from(this.digest, 'base64').toString('hex') - } - - toJSON () { - return this.toString() - } - - match (integrity, opts) { - const other = parse(integrity, opts) - if (!other) { - return false - } - if (other.isIntegrity) { - const algo = other.pickAlgorithm(opts, [this.algorithm]) - - if (!algo) { - return false - } - - const foundHash = other[algo].find(hash => hash.digest === this.digest) - - if (foundHash) { - return foundHash - } - - return false - } - return other.digest === this.digest ? other : false - } - - toString (opts) { - if (opts?.strict) { - // Strict mode enforces the standard as close to the foot of the - // letter as it can. - if (!( - // The spec has very restricted productions for algorithms. - // https://www.w3.org/TR/CSP2/#source-list-syntax - SPEC_ALGORITHMS.includes(this.algorithm) && - // Usually, if someone insists on using a "different" base64, we - // leave it as-is, since there's multiple standards, and the - // specified is not a URL-safe variant. - // https://www.w3.org/TR/CSP2/#base64_value - this.digest.match(BASE64_REGEX) && - // Option syntax is strictly visual chars. 
- // https://w3c.github.io/webappsec-subresource-integrity/#grammardef-option-expression - // https://tools.ietf.org/html/rfc5234#appendix-B.1 - this.options.every(opt => opt.match(VCHAR_REGEX)) - )) { - return '' - } - } - return `${this.algorithm}-${this.digest}${getOptString(this.options)}` - } -} - -function integrityHashToString (toString, sep, opts, hashes) { - const toStringIsNotEmpty = toString !== '' - - let shouldAddFirstSep = false - let complement = '' - - const lastIndex = hashes.length - 1 - - for (let i = 0; i < lastIndex; i++) { - const hashString = Hash.prototype.toString.call(hashes[i], opts) - - if (hashString) { - shouldAddFirstSep = true - - complement += hashString - complement += sep - } - } - - const finalHashString = Hash.prototype.toString.call(hashes[lastIndex], opts) - - if (finalHashString) { - shouldAddFirstSep = true - complement += finalHashString - } - - if (toStringIsNotEmpty && shouldAddFirstSep) { - return toString + sep + complement - } - - return toString + complement -} - -class Integrity { - get isIntegrity () { - return true - } - - toJSON () { - return this.toString() - } - - isEmpty () { - return Object.keys(this).length === 0 - } - - toString (opts) { - let sep = opts?.sep || ' ' - let toString = '' - - if (opts?.strict) { - // Entries must be separated by whitespace, according to spec. - sep = sep.replace(/\S+/g, ' ') - - for (const hash of SPEC_ALGORITHMS) { - if (this[hash]) { - toString = integrityHashToString(toString, sep, opts, this[hash]) - } - } - } else { - for (const hash of Object.keys(this)) { - toString = integrityHashToString(toString, sep, opts, this[hash]) - } - } - - return toString - } - - concat (integrity, opts) { - const other = typeof integrity === 'string' - ? integrity - : stringify(integrity, opts) - return parse(`${this.toString(opts)} ${other}`, opts) - } - - hexDigest () { - return parse(this, { single: true }).hexDigest() - } - - // add additional hashes to an integrity value, but prevent - // *changing* an existing integrity hash. - merge (integrity, opts) { - const other = parse(integrity, opts) - for (const algo in other) { - if (this[algo]) { - if (!this[algo].find(hash => - other[algo].find(otherhash => - hash.digest === otherhash.digest))) { - throw new Error('hashes do not match, cannot update integrity') - } - } else { - this[algo] = other[algo] - } - } - } - - match (integrity, opts) { - const other = parse(integrity, opts) - if (!other) { - return false - } - const algo = other.pickAlgorithm(opts, Object.keys(this)) - return ( - !!algo && - this[algo] && - other[algo] && - this[algo].find(hash => - other[algo].find(otherhash => - hash.digest === otherhash.digest - ) - ) - ) || false - } - - // Pick the highest priority algorithm present, optionally also limited to a - // set of hashes found in another integrity. When limiting it may return - // nothing. 
- pickAlgorithm (opts, hashes) { - const pickAlgorithm = opts?.pickAlgorithm || getPrioritizedHash - const keys = Object.keys(this).filter(k => { - if (hashes?.length) { - return hashes.includes(k) - } - return true - }) - if (keys.length) { - return keys.reduce((acc, algo) => pickAlgorithm(acc, algo) || acc) - } - // no intersection between this and hashes, - return null - } -} - -module.exports.parse = parse -function parse (sri, opts) { - if (!sri) { - return null - } - if (typeof sri === 'string') { - return _parse(sri, opts) - } else if (sri.algorithm && sri.digest) { - const fullSri = new Integrity() - fullSri[sri.algorithm] = [sri] - return _parse(stringify(fullSri, opts), opts) - } else { - return _parse(stringify(sri, opts), opts) - } -} - -function _parse (integrity, opts) { - // 3.4.3. Parse metadata - // https://w3c.github.io/webappsec-subresource-integrity/#parse-metadata - if (opts?.single) { - return new Hash(integrity, opts) - } - const hashes = integrity.trim().split(/\s+/).reduce((acc, string) => { - const hash = new Hash(string, opts) - if (hash.algorithm && hash.digest) { - const algo = hash.algorithm - if (!acc[algo]) { - acc[algo] = [] - } - acc[algo].push(hash) - } - return acc - }, new Integrity()) - return hashes.isEmpty() ? null : hashes -} - -module.exports.stringify = stringify -function stringify (obj, opts) { - if (obj.algorithm && obj.digest) { - return Hash.prototype.toString.call(obj, opts) - } else if (typeof obj === 'string') { - return stringify(parse(obj, opts), opts) - } else { - return Integrity.prototype.toString.call(obj, opts) - } -} - -module.exports.fromHex = fromHex -function fromHex (hexDigest, algorithm, opts) { - const optString = getOptString(opts?.options) - return parse( - `${algorithm}-${ - Buffer.from(hexDigest, 'hex').toString('base64') - }${optString}`, opts - ) -} - -module.exports.fromData = fromData -function fromData (data, opts) { - const algorithms = opts?.algorithms || [...DEFAULT_ALGORITHMS] - const optString = getOptString(opts?.options) - return algorithms.reduce((acc, algo) => { - const digest = crypto.createHash(algo).update(data).digest('base64') - const hash = new Hash( - `${algo}-${digest}${optString}`, - opts - ) - /* istanbul ignore else - it would be VERY strange if the string we - * just calculated with an algo did not have an algo or digest. 
- */ - if (hash.algorithm && hash.digest) { - const hashAlgo = hash.algorithm - if (!acc[hashAlgo]) { - acc[hashAlgo] = [] - } - acc[hashAlgo].push(hash) - } - return acc - }, new Integrity()) -} - -module.exports.fromStream = fromStream -function fromStream (stream, opts) { - const istream = integrityStream(opts) - return new Promise((resolve, reject) => { - stream.pipe(istream) - stream.on('error', reject) - istream.on('error', reject) - let sri - istream.on('integrity', s => { - sri = s - }) - istream.on('end', () => resolve(sri)) - istream.resume() - }) -} - -module.exports.checkData = checkData -function checkData (data, sri, opts) { - sri = parse(sri, opts) - if (!sri || !Object.keys(sri).length) { - if (opts?.error) { - throw Object.assign( - new Error('No valid integrity hashes to check against'), { - code: 'EINTEGRITY', - } - ) - } else { - return false - } - } - const algorithm = sri.pickAlgorithm(opts) - const digest = crypto.createHash(algorithm).update(data).digest('base64') - const newSri = parse({ algorithm, digest }) - const match = newSri.match(sri, opts) - opts = opts || {} - if (match || !(opts.error)) { - return match - } else if (typeof opts.size === 'number' && (data.length !== opts.size)) { - /* eslint-disable-next-line max-len */ - const err = new Error(`data size mismatch when checking ${sri}.\n Wanted: ${opts.size}\n Found: ${data.length}`) - err.code = 'EBADSIZE' - err.found = data.length - err.expected = opts.size - err.sri = sri - throw err - } else { - /* eslint-disable-next-line max-len */ - const err = new Error(`Integrity checksum failed when using ${algorithm}: Wanted ${sri}, but got ${newSri}. (${data.length} bytes)`) - err.code = 'EINTEGRITY' - err.found = newSri - err.expected = sri - err.algorithm = algorithm - err.sri = sri - throw err - } -} - -module.exports.checkStream = checkStream -function checkStream (stream, sri, opts) { - opts = opts || Object.create(null) - opts.integrity = sri - sri = parse(sri, opts) - if (!sri || !Object.keys(sri).length) { - return Promise.reject(Object.assign( - new Error('No valid integrity hashes to check against'), { - code: 'EINTEGRITY', - } - )) - } - const checker = integrityStream(opts) - return new Promise((resolve, reject) => { - stream.pipe(checker) - stream.on('error', reject) - checker.on('error', reject) - let verified - checker.on('verified', s => { - verified = s - }) - checker.on('end', () => resolve(verified)) - checker.resume() - }) -} - -module.exports.integrityStream = integrityStream -function integrityStream (opts = Object.create(null)) { - return new IntegrityStream(opts) -} - -module.exports.create = createIntegrity -function createIntegrity (opts) { - const algorithms = opts?.algorithms || [...DEFAULT_ALGORITHMS] - const optString = getOptString(opts?.options) - - const hashes = algorithms.map(crypto.createHash) - - return { - update: function (chunk, enc) { - hashes.forEach(h => h.update(chunk, enc)) - return this - }, - digest: function () { - const integrity = algorithms.reduce((acc, algo) => { - const digest = hashes.shift().digest('base64') - const hash = new Hash( - `${algo}-${digest}${optString}`, - opts - ) - /* istanbul ignore else - it would be VERY strange if the hash we - * just calculated with an algo did not have an algo or digest. 
- */ - if (hash.algorithm && hash.digest) { - const hashAlgo = hash.algorithm - if (!acc[hashAlgo]) { - acc[hashAlgo] = [] - } - acc[hashAlgo].push(hash) - } - return acc - }, new Integrity()) - - return integrity - }, - } -} - -const NODE_HASHES = crypto.getHashes() - -// This is a Best Effort™ at a reasonable priority for hash algos -const DEFAULT_PRIORITY = [ - 'md5', 'whirlpool', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512', - // TODO - it's unclear _which_ of these Node will actually use as its name - // for the algorithm, so we guesswork it based on the OpenSSL names. - 'sha3', - 'sha3-256', 'sha3-384', 'sha3-512', - 'sha3_256', 'sha3_384', 'sha3_512', -].filter(algo => NODE_HASHES.includes(algo)) - -function getPrioritizedHash (algo1, algo2) { - /* eslint-disable-next-line max-len */ - return DEFAULT_PRIORITY.indexOf(algo1.toLowerCase()) >= DEFAULT_PRIORITY.indexOf(algo2.toLowerCase()) - ? algo1 - : algo2 -} diff --git a/deps/npm/node_modules/node-gyp/node_modules/ssri/package.json b/deps/npm/node_modules/node-gyp/node_modules/ssri/package.json deleted file mode 100644 index 28395414e4643c..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/ssri/package.json +++ /dev/null @@ -1,65 +0,0 @@ -{ - "name": "ssri", - "version": "10.0.6", - "description": "Standard Subresource Integrity library -- parses, serializes, generates, and verifies integrity metadata according to the SRI spec.", - "main": "lib/index.js", - "files": [ - "bin/", - "lib/" - ], - "scripts": { - "prerelease": "npm t", - "postrelease": "npm publish", - "posttest": "npm run lint", - "test": "tap", - "coverage": "tap", - "lint": "eslint \"**/*.{js,cjs,ts,mjs,jsx,tsx}\"", - "postlint": "template-oss-check", - "template-oss-apply": "template-oss-apply --force", - "lintfix": "npm run lint -- --fix", - "snap": "tap" - }, - "tap": { - "check-coverage": true, - "nyc-arg": [ - "--exclude", - "tap-snapshots/**" - ] - }, - "repository": { - "type": "git", - "url": "git+https://github.com/npm/ssri.git" - }, - "keywords": [ - "w3c", - "web", - "security", - "integrity", - "checksum", - "hashing", - "subresource integrity", - "sri", - "sri hash", - "sri string", - "sri generator", - "html" - ], - "author": "GitHub Inc.", - "license": "ISC", - "dependencies": { - "minipass": "^7.0.3" - }, - "devDependencies": { - "@npmcli/eslint-config": "^4.0.0", - "@npmcli/template-oss": "4.22.0", - "tap": "^16.0.1" - }, - "engines": { - "node": "^14.17.0 || ^16.13.0 || >=18.0.0" - }, - "templateOSS": { - "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. Edits may be overwritten.", - "version": "4.22.0", - "publish": "true" - } -} diff --git a/deps/npm/node_modules/node-gyp/node_modules/nopt/LICENSE b/deps/npm/node_modules/node-gyp/node_modules/tar/LICENSE similarity index 100% rename from deps/npm/node_modules/node-gyp/node_modules/nopt/LICENSE rename to deps/npm/node_modules/node-gyp/node_modules/tar/LICENSE diff --git a/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/create.js b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/create.js new file mode 100644 index 00000000000000..3190afc48318f9 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/create.js @@ -0,0 +1,83 @@ +"use strict"; +var __importDefault = (this && this.__importDefault) || function (mod) { + return (mod && mod.__esModule) ? 
mod : { "default": mod }; +}; +Object.defineProperty(exports, "__esModule", { value: true }); +exports.create = void 0; +const fs_minipass_1 = require("@isaacs/fs-minipass"); +const node_path_1 = __importDefault(require("node:path")); +const list_js_1 = require("./list.js"); +const make_command_js_1 = require("./make-command.js"); +const pack_js_1 = require("./pack.js"); +const createFileSync = (opt, files) => { + const p = new pack_js_1.PackSync(opt); + const stream = new fs_minipass_1.WriteStreamSync(opt.file, { + mode: opt.mode || 0o666, + }); + p.pipe(stream); + addFilesSync(p, files); +}; +const createFile = (opt, files) => { + const p = new pack_js_1.Pack(opt); + const stream = new fs_minipass_1.WriteStream(opt.file, { + mode: opt.mode || 0o666, + }); + p.pipe(stream); + const promise = new Promise((res, rej) => { + stream.on('error', rej); + stream.on('close', res); + p.on('error', rej); + }); + addFilesAsync(p, files); + return promise; +}; +const addFilesSync = (p, files) => { + files.forEach(file => { + if (file.charAt(0) === '@') { + (0, list_js_1.list)({ + file: node_path_1.default.resolve(p.cwd, file.slice(1)), + sync: true, + noResume: true, + onReadEntry: entry => p.add(entry), + }); + } + else { + p.add(file); + } + }); + p.end(); +}; +const addFilesAsync = async (p, files) => { + for (let i = 0; i < files.length; i++) { + const file = String(files[i]); + if (file.charAt(0) === '@') { + await (0, list_js_1.list)({ + file: node_path_1.default.resolve(String(p.cwd), file.slice(1)), + noResume: true, + onReadEntry: entry => { + p.add(entry); + }, + }); + } + else { + p.add(file); + } + } + p.end(); +}; +const createSync = (opt, files) => { + const p = new pack_js_1.PackSync(opt); + addFilesSync(p, files); + return p; +}; +const createAsync = (opt, files) => { + const p = new pack_js_1.Pack(opt); + addFilesAsync(p, files); + return p; +}; +exports.create = (0, make_command_js_1.makeCommand)(createFileSync, createFile, createSync, createAsync, (_opt, files) => { + if (!files?.length) { + throw new TypeError('no paths specified to add to archive'); + } +}); +//# sourceMappingURL=create.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/cwd-error.js b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/cwd-error.js new file mode 100644 index 00000000000000..d703a7772be3a5 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/cwd-error.js @@ -0,0 +1,18 @@ +"use strict"; +Object.defineProperty(exports, "__esModule", { value: true }); +exports.CwdError = void 0; +class CwdError extends Error { + path; + code; + syscall = 'chdir'; + constructor(path, code) { + super(`${code}: Cannot cd into '${path}'`); + this.path = path; + this.code = code; + } + get name() { + return 'CwdError'; + } +} +exports.CwdError = CwdError; +//# sourceMappingURL=cwd-error.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/extract.js b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/extract.js new file mode 100644 index 00000000000000..f848cbcbf779e8 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/extract.js @@ -0,0 +1,78 @@ +"use strict"; +var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) { + if (k2 === undefined) k2 = k; + var desc = Object.getOwnPropertyDescriptor(m, k); + if (!desc || ("get" in desc ? 
!m.__esModule : desc.writable || desc.configurable)) { + desc = { enumerable: true, get: function() { return m[k]; } }; + } + Object.defineProperty(o, k2, desc); +}) : (function(o, m, k, k2) { + if (k2 === undefined) k2 = k; + o[k2] = m[k]; +})); +var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) { + Object.defineProperty(o, "default", { enumerable: true, value: v }); +}) : function(o, v) { + o["default"] = v; +}); +var __importStar = (this && this.__importStar) || function (mod) { + if (mod && mod.__esModule) return mod; + var result = {}; + if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k); + __setModuleDefault(result, mod); + return result; +}; +var __importDefault = (this && this.__importDefault) || function (mod) { + return (mod && mod.__esModule) ? mod : { "default": mod }; +}; +Object.defineProperty(exports, "__esModule", { value: true }); +exports.extract = void 0; +// tar -x +const fsm = __importStar(require("@isaacs/fs-minipass")); +const node_fs_1 = __importDefault(require("node:fs")); +const list_js_1 = require("./list.js"); +const make_command_js_1 = require("./make-command.js"); +const unpack_js_1 = require("./unpack.js"); +const extractFileSync = (opt) => { + const u = new unpack_js_1.UnpackSync(opt); + const file = opt.file; + const stat = node_fs_1.default.statSync(file); + // This trades a zero-byte read() syscall for a stat + // However, it will usually result in less memory allocation + const readSize = opt.maxReadSize || 16 * 1024 * 1024; + const stream = new fsm.ReadStreamSync(file, { + readSize: readSize, + size: stat.size, + }); + stream.pipe(u); +}; +const extractFile = (opt, _) => { + const u = new unpack_js_1.Unpack(opt); + const readSize = opt.maxReadSize || 16 * 1024 * 1024; + const file = opt.file; + const p = new Promise((resolve, reject) => { + u.on('error', reject); + u.on('close', resolve); + // This trades a zero-byte read() syscall for a stat + // However, it will usually result in less memory allocation + node_fs_1.default.stat(file, (er, stat) => { + if (er) { + reject(er); + } + else { + const stream = new fsm.ReadStream(file, { + readSize: readSize, + size: stat.size, + }); + stream.on('error', reject); + stream.pipe(u); + } + }); + }); + return p; +}; +exports.extract = (0, make_command_js_1.makeCommand)(extractFileSync, extractFile, opt => new unpack_js_1.UnpackSync(opt), opt => new unpack_js_1.Unpack(opt), (opt, files) => { + if (files?.length) + (0, list_js_1.filesFilter)(opt, files); +}); +//# sourceMappingURL=extract.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/get-write-flag.js b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/get-write-flag.js new file mode 100644 index 00000000000000..94add8f6b2231c --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/get-write-flag.js @@ -0,0 +1,29 @@ +"use strict"; +// Get the appropriate flag to use for creating files +// We use fmap on Windows platforms for files less than +// 512kb. This is a fairly low limit, but avoids making +// things slower in some cases. Since most of what this +// library is used for is extracting tarballs of many +// relatively small files in npm packages and the like, +// it can be a big boost on Windows platforms. +var __importDefault = (this && this.__importDefault) || function (mod) { + return (mod && mod.__esModule) ? 
mod : { "default": mod }; +}; +Object.defineProperty(exports, "__esModule", { value: true }); +exports.getWriteFlag = void 0; +const fs_1 = __importDefault(require("fs")); +const platform = process.env.__FAKE_PLATFORM__ || process.platform; +const isWindows = platform === 'win32'; +/* c8 ignore start */ +const { O_CREAT, O_TRUNC, O_WRONLY } = fs_1.default.constants; +const UV_FS_O_FILEMAP = Number(process.env.__FAKE_FS_O_FILENAME__) || + fs_1.default.constants.UV_FS_O_FILEMAP || + 0; +/* c8 ignore stop */ +const fMapEnabled = isWindows && !!UV_FS_O_FILEMAP; +const fMapLimit = 512 * 1024; +const fMapFlag = UV_FS_O_FILEMAP | O_TRUNC | O_CREAT | O_WRONLY; +exports.getWriteFlag = !fMapEnabled ? + () => 'w' + : (size) => (size < fMapLimit ? fMapFlag : 'w'); +//# sourceMappingURL=get-write-flag.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/header.js b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/header.js new file mode 100644 index 00000000000000..b3a48037b849ab --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/header.js @@ -0,0 +1,306 @@ +"use strict"; +// parse a 512-byte header block to a data object, or vice-versa +// encode returns `true` if a pax extended header is needed, because +// the data could not be faithfully encoded in a simple header. +// (Also, check header.needPax to see if it needs a pax header.) +var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) { + if (k2 === undefined) k2 = k; + var desc = Object.getOwnPropertyDescriptor(m, k); + if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) { + desc = { enumerable: true, get: function() { return m[k]; } }; + } + Object.defineProperty(o, k2, desc); +}) : (function(o, m, k, k2) { + if (k2 === undefined) k2 = k; + o[k2] = m[k]; +})); +var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? 
(function(o, v) { + Object.defineProperty(o, "default", { enumerable: true, value: v }); +}) : function(o, v) { + o["default"] = v; +}); +var __importStar = (this && this.__importStar) || function (mod) { + if (mod && mod.__esModule) return mod; + var result = {}; + if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k); + __setModuleDefault(result, mod); + return result; +}; +Object.defineProperty(exports, "__esModule", { value: true }); +exports.Header = void 0; +const node_path_1 = require("node:path"); +const large = __importStar(require("./large-numbers.js")); +const types = __importStar(require("./types.js")); +class Header { + cksumValid = false; + needPax = false; + nullBlock = false; + block; + path; + mode; + uid; + gid; + size; + cksum; + #type = 'Unsupported'; + linkpath; + uname; + gname; + devmaj = 0; + devmin = 0; + atime; + ctime; + mtime; + charset; + comment; + constructor(data, off = 0, ex, gex) { + if (Buffer.isBuffer(data)) { + this.decode(data, off || 0, ex, gex); + } + else if (data) { + this.#slurp(data); + } + } + decode(buf, off, ex, gex) { + if (!off) { + off = 0; + } + if (!buf || !(buf.length >= off + 512)) { + throw new Error('need 512 bytes for header'); + } + this.path = decString(buf, off, 100); + this.mode = decNumber(buf, off + 100, 8); + this.uid = decNumber(buf, off + 108, 8); + this.gid = decNumber(buf, off + 116, 8); + this.size = decNumber(buf, off + 124, 12); + this.mtime = decDate(buf, off + 136, 12); + this.cksum = decNumber(buf, off + 148, 12); + // if we have extended or global extended headers, apply them now + // See https://github.com/npm/node-tar/pull/187 + // Apply global before local, so it overrides + if (gex) + this.#slurp(gex, true); + if (ex) + this.#slurp(ex); + // old tar versions marked dirs as a file with a trailing / + const t = decString(buf, off + 156, 1); + if (types.isCode(t)) { + this.#type = t || '0'; + } + if (this.#type === '0' && this.path.slice(-1) === '/') { + this.#type = '5'; + } + // tar implementations sometimes incorrectly put the stat(dir).size + // as the size in the tarball, even though Directory entries are + // not able to have any body at all. In the very rare chance that + // it actually DOES have a body, we weren't going to do anything with + // it anyway, and it'll just be a warning about an invalid header. + if (this.#type === '5') { + this.size = 0; + } + this.linkpath = decString(buf, off + 157, 100); + if (buf.subarray(off + 257, off + 265).toString() === + 'ustar\u000000') { + this.uname = decString(buf, off + 265, 32); + this.gname = decString(buf, off + 297, 32); + /* c8 ignore start */ + this.devmaj = decNumber(buf, off + 329, 8) ?? 0; + this.devmin = decNumber(buf, off + 337, 8) ?? 0; + /* c8 ignore stop */ + if (buf[off + 475] !== 0) { + // definitely a prefix, definitely >130 chars. 
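// Editorial annotation (not part of the upstream diff): the check just
// above on byte off+475 follows the ustar layout. Bytes 345..499 of the
// header hold a path prefix; when byte 475 is nonzero the prefix must
// occupy the full 155 bytes, otherwise only the first 130 bytes are
// prefix and bytes 476..487 / 488..499 carry atime / ctime, matching
// the decString and decDate reads that follow.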
+ const prefix = decString(buf, off + 345, 155); + this.path = prefix + '/' + this.path; + } + else { + const prefix = decString(buf, off + 345, 130); + if (prefix) { + this.path = prefix + '/' + this.path; + } + this.atime = decDate(buf, off + 476, 12); + this.ctime = decDate(buf, off + 488, 12); + } + } + let sum = 8 * 0x20; + for (let i = off; i < off + 148; i++) { + sum += buf[i]; + } + for (let i = off + 156; i < off + 512; i++) { + sum += buf[i]; + } + this.cksumValid = sum === this.cksum; + if (this.cksum === undefined && sum === 8 * 0x20) { + this.nullBlock = true; + } + } + #slurp(ex, gex = false) { + Object.assign(this, Object.fromEntries(Object.entries(ex).filter(([k, v]) => { + // we slurp in everything except for the path attribute in + // a global extended header, because that's weird. Also, any + // null/undefined values are ignored. + return !(v === null || + v === undefined || + (k === 'path' && gex) || + (k === 'linkpath' && gex) || + k === 'global'); + }))); + } + encode(buf, off = 0) { + if (!buf) { + buf = this.block = Buffer.alloc(512); + } + if (this.#type === 'Unsupported') { + this.#type = '0'; + } + if (!(buf.length >= off + 512)) { + throw new Error('need 512 bytes for header'); + } + const prefixSize = this.ctime || this.atime ? 130 : 155; + const split = splitPrefix(this.path || '', prefixSize); + const path = split[0]; + const prefix = split[1]; + this.needPax = !!split[2]; + this.needPax = encString(buf, off, 100, path) || this.needPax; + this.needPax = + encNumber(buf, off + 100, 8, this.mode) || this.needPax; + this.needPax = + encNumber(buf, off + 108, 8, this.uid) || this.needPax; + this.needPax = + encNumber(buf, off + 116, 8, this.gid) || this.needPax; + this.needPax = + encNumber(buf, off + 124, 12, this.size) || this.needPax; + this.needPax = + encDate(buf, off + 136, 12, this.mtime) || this.needPax; + buf[off + 156] = this.#type.charCodeAt(0); + this.needPax = + encString(buf, off + 157, 100, this.linkpath) || this.needPax; + buf.write('ustar\u000000', off + 257, 8); + this.needPax = + encString(buf, off + 265, 32, this.uname) || this.needPax; + this.needPax = + encString(buf, off + 297, 32, this.gname) || this.needPax; + this.needPax = + encNumber(buf, off + 329, 8, this.devmaj) || this.needPax; + this.needPax = + encNumber(buf, off + 337, 8, this.devmin) || this.needPax; + this.needPax = + encString(buf, off + 345, prefixSize, prefix) || this.needPax; + if (buf[off + 475] !== 0) { + this.needPax = + encString(buf, off + 345, 155, prefix) || this.needPax; + } + else { + this.needPax = + encString(buf, off + 345, 130, prefix) || this.needPax; + this.needPax = + encDate(buf, off + 476, 12, this.atime) || this.needPax; + this.needPax = + encDate(buf, off + 488, 12, this.ctime) || this.needPax; + } + let sum = 8 * 0x20; + for (let i = off; i < off + 148; i++) { + sum += buf[i]; + } + for (let i = off + 156; i < off + 512; i++) { + sum += buf[i]; + } + this.cksum = sum; + encNumber(buf, off + 148, 8, this.cksum); + this.cksumValid = true; + return this.needPax; + } + get type() { + return (this.#type === 'Unsupported' ? 
+ this.#type + : types.name.get(this.#type)); + } + get typeKey() { + return this.#type; + } + set type(type) { + const c = String(types.code.get(type)); + if (types.isCode(c) || c === 'Unsupported') { + this.#type = c; + } + else if (types.isCode(type)) { + this.#type = type; + } + else { + throw new TypeError('invalid entry type: ' + type); + } + } +} +exports.Header = Header; +const splitPrefix = (p, prefixSize) => { + const pathSize = 100; + let pp = p; + let prefix = ''; + let ret = undefined; + const root = node_path_1.posix.parse(p).root || '.'; + if (Buffer.byteLength(pp) < pathSize) { + ret = [pp, prefix, false]; + } + else { + // first set prefix to the dir, and path to the base + prefix = node_path_1.posix.dirname(pp); + pp = node_path_1.posix.basename(pp); + do { + if (Buffer.byteLength(pp) <= pathSize && + Buffer.byteLength(prefix) <= prefixSize) { + // both fit! + ret = [pp, prefix, false]; + } + else if (Buffer.byteLength(pp) > pathSize && + Buffer.byteLength(prefix) <= prefixSize) { + // prefix fits in prefix, but path doesn't fit in path + ret = [pp.slice(0, pathSize - 1), prefix, true]; + } + else { + // make path take a bit from prefix + pp = node_path_1.posix.join(node_path_1.posix.basename(prefix), pp); + prefix = node_path_1.posix.dirname(prefix); + } + } while (prefix !== root && ret === undefined); + // at this point, found no resolution, just truncate + if (!ret) { + ret = [p.slice(0, pathSize - 1), '', true]; + } + } + return ret; +}; +const decString = (buf, off, size) => buf + .subarray(off, off + size) + .toString('utf8') + .replace(/\0.*/, ''); +const decDate = (buf, off, size) => numToDate(decNumber(buf, off, size)); +const numToDate = (num) => num === undefined ? undefined : new Date(num * 1000); +const decNumber = (buf, off, size) => Number(buf[off]) & 0x80 ? + large.parse(buf.subarray(off, off + size)) + : decSmallNumber(buf, off, size); +const nanUndef = (value) => (isNaN(value) ? undefined : value); +const decSmallNumber = (buf, off, size) => nanUndef(parseInt(buf + .subarray(off, off + size) + .toString('utf8') + .replace(/\0.*$/, '') + .trim(), 8)); +// the maximum encodable as a null-terminated octal, by field size +const MAXNUM = { + 12: 0o77777777777, + 8: 0o7777777, +}; +const encNumber = (buf, off, size, num) => num === undefined ? false + : num > MAXNUM[size] || num < 0 ? + (large.encode(num, buf.subarray(off, off + size)), true) + : (encSmallNumber(buf, off, size, num), false); +const encSmallNumber = (buf, off, size, num) => buf.write(octalString(num, size), off, size, 'ascii'); +const octalString = (num, size) => padOctal(Math.floor(num).toString(8), size); +const padOctal = (str, size) => (str.length === size - 1 ? + str + : new Array(size - str.length - 1).join('0') + str + ' ') + '\0'; +const encDate = (buf, off, size, date) => date === undefined ? false : (encNumber(buf, off, size, date.getTime() / 1000)); +// enough to fill the longest string we've got +const NULLS = new Array(156).join('\0'); +// pad with nulls, return true if it's longer or non-ascii +const encString = (buf, off, size, str) => str === undefined ? 
false : ((buf.write(str + NULLS, off, size, 'utf8'), + str.length !== Buffer.byteLength(str) || str.length > size)); +//# sourceMappingURL=header.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/index.js b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/index.js new file mode 100644 index 00000000000000..e93ed5ad54aa6e --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/index.js @@ -0,0 +1,54 @@ +"use strict"; +var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) { + if (k2 === undefined) k2 = k; + var desc = Object.getOwnPropertyDescriptor(m, k); + if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) { + desc = { enumerable: true, get: function() { return m[k]; } }; + } + Object.defineProperty(o, k2, desc); +}) : (function(o, m, k, k2) { + if (k2 === undefined) k2 = k; + o[k2] = m[k]; +})); +var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) { + Object.defineProperty(o, "default", { enumerable: true, value: v }); +}) : function(o, v) { + o["default"] = v; +}); +var __exportStar = (this && this.__exportStar) || function(m, exports) { + for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p); +}; +var __importStar = (this && this.__importStar) || function (mod) { + if (mod && mod.__esModule) return mod; + var result = {}; + if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k); + __setModuleDefault(result, mod); + return result; +}; +Object.defineProperty(exports, "__esModule", { value: true }); +exports.u = exports.types = exports.r = exports.t = exports.x = exports.c = void 0; +__exportStar(require("./create.js"), exports); +var create_js_1 = require("./create.js"); +Object.defineProperty(exports, "c", { enumerable: true, get: function () { return create_js_1.create; } }); +__exportStar(require("./extract.js"), exports); +var extract_js_1 = require("./extract.js"); +Object.defineProperty(exports, "x", { enumerable: true, get: function () { return extract_js_1.extract; } }); +__exportStar(require("./header.js"), exports); +__exportStar(require("./list.js"), exports); +var list_js_1 = require("./list.js"); +Object.defineProperty(exports, "t", { enumerable: true, get: function () { return list_js_1.list; } }); +// classes +__exportStar(require("./pack.js"), exports); +__exportStar(require("./parse.js"), exports); +__exportStar(require("./pax.js"), exports); +__exportStar(require("./read-entry.js"), exports); +__exportStar(require("./replace.js"), exports); +var replace_js_1 = require("./replace.js"); +Object.defineProperty(exports, "r", { enumerable: true, get: function () { return replace_js_1.replace; } }); +exports.types = __importStar(require("./types.js")); +__exportStar(require("./unpack.js"), exports); +__exportStar(require("./update.js"), exports); +var update_js_1 = require("./update.js"); +Object.defineProperty(exports, "u", { enumerable: true, get: function () { return update_js_1.update; } }); +__exportStar(require("./write-entry.js"), exports); +//# sourceMappingURL=index.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/large-numbers.js b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/large-numbers.js new file mode 100644 index 
00000000000000..5b07aa7f71b48d --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/large-numbers.js @@ -0,0 +1,99 @@ +"use strict"; +// Tar can encode large and negative numbers using a leading byte of +// 0xff for negative, and 0x80 for positive. +Object.defineProperty(exports, "__esModule", { value: true }); +exports.parse = exports.encode = void 0; +const encode = (num, buf) => { + if (!Number.isSafeInteger(num)) { + // The number is so large that javascript cannot represent it with integer + // precision. + throw Error('cannot encode number outside of javascript safe integer range'); + } + else if (num < 0) { + encodeNegative(num, buf); + } + else { + encodePositive(num, buf); + } + return buf; +}; +exports.encode = encode; +const encodePositive = (num, buf) => { + buf[0] = 0x80; + for (var i = buf.length; i > 1; i--) { + buf[i - 1] = num & 0xff; + num = Math.floor(num / 0x100); + } +}; +const encodeNegative = (num, buf) => { + buf[0] = 0xff; + var flipped = false; + num = num * -1; + for (var i = buf.length; i > 1; i--) { + var byte = num & 0xff; + num = Math.floor(num / 0x100); + if (flipped) { + buf[i - 1] = onesComp(byte); + } + else if (byte === 0) { + buf[i - 1] = 0; + } + else { + flipped = true; + buf[i - 1] = twosComp(byte); + } + } +}; +const parse = (buf) => { + const pre = buf[0]; + const value = pre === 0x80 ? pos(buf.subarray(1, buf.length)) + : pre === 0xff ? twos(buf) + : null; + if (value === null) { + throw Error('invalid base256 encoding'); + } + if (!Number.isSafeInteger(value)) { + // The number is so large that javascript cannot represent it with integer + // precision. + throw Error('parsed number outside of javascript safe integer range'); + } + return value; +}; +exports.parse = parse; +const twos = (buf) => { + var len = buf.length; + var sum = 0; + var flipped = false; + for (var i = len - 1; i > -1; i--) { + var byte = Number(buf[i]); + var f; + if (flipped) { + f = onesComp(byte); + } + else if (byte === 0) { + f = byte; + } + else { + flipped = true; + f = twosComp(byte); + } + if (f !== 0) { + sum -= f * Math.pow(256, len - i - 1); + } + } + return sum; +}; +const pos = (buf) => { + var len = buf.length; + var sum = 0; + for (var i = len - 1; i > -1; i--) { + var byte = Number(buf[i]); + if (byte !== 0) { + sum += byte * Math.pow(256, len - i - 1); + } + } + return sum; +}; +const onesComp = (byte) => (0xff ^ byte) & 0xff; +const twosComp = (byte) => ((0xff ^ byte) + 1) & 0xff; +//# sourceMappingURL=large-numbers.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/list.js b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/list.js new file mode 100644 index 00000000000000..3cd34bb4bad481 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/list.js @@ -0,0 +1,136 @@ +"use strict"; +var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) { + if (k2 === undefined) k2 = k; + var desc = Object.getOwnPropertyDescriptor(m, k); + if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) { + desc = { enumerable: true, get: function() { return m[k]; } }; + } + Object.defineProperty(o, k2, desc); +}) : (function(o, m, k, k2) { + if (k2 === undefined) k2 = k; + o[k2] = m[k]; +})); +var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? 
(function(o, v) { + Object.defineProperty(o, "default", { enumerable: true, value: v }); +}) : function(o, v) { + o["default"] = v; +}); +var __importStar = (this && this.__importStar) || function (mod) { + if (mod && mod.__esModule) return mod; + var result = {}; + if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k); + __setModuleDefault(result, mod); + return result; +}; +var __importDefault = (this && this.__importDefault) || function (mod) { + return (mod && mod.__esModule) ? mod : { "default": mod }; +}; +Object.defineProperty(exports, "__esModule", { value: true }); +exports.list = exports.filesFilter = void 0; +// tar -t +const fsm = __importStar(require("@isaacs/fs-minipass")); +const node_fs_1 = __importDefault(require("node:fs")); +const path_1 = require("path"); +const make_command_js_1 = require("./make-command.js"); +const parse_js_1 = require("./parse.js"); +const strip_trailing_slashes_js_1 = require("./strip-trailing-slashes.js"); +const onReadEntryFunction = (opt) => { + const onReadEntry = opt.onReadEntry; + opt.onReadEntry = + onReadEntry ? + e => { + onReadEntry(e); + e.resume(); + } + : e => e.resume(); +}; +// construct a filter that limits the file entries listed +// include child entries if a dir is included +const filesFilter = (opt, files) => { + const map = new Map(files.map(f => [(0, strip_trailing_slashes_js_1.stripTrailingSlashes)(f), true])); + const filter = opt.filter; + const mapHas = (file, r = '') => { + const root = r || (0, path_1.parse)(file).root || '.'; + let ret; + if (file === root) + ret = false; + else { + const m = map.get(file); + if (m !== undefined) { + ret = m; + } + else { + ret = mapHas((0, path_1.dirname)(file), root); + } + } + map.set(file, ret); + return ret; + }; + opt.filter = + filter ? 
+ (file, entry) => filter(file, entry) && mapHas((0, strip_trailing_slashes_js_1.stripTrailingSlashes)(file)) + : file => mapHas((0, strip_trailing_slashes_js_1.stripTrailingSlashes)(file)); +}; +exports.filesFilter = filesFilter; +const listFileSync = (opt) => { + const p = new parse_js_1.Parser(opt); + const file = opt.file; + let fd; + try { + const stat = node_fs_1.default.statSync(file); + const readSize = opt.maxReadSize || 16 * 1024 * 1024; + if (stat.size < readSize) { + p.end(node_fs_1.default.readFileSync(file)); + } + else { + let pos = 0; + const buf = Buffer.allocUnsafe(readSize); + fd = node_fs_1.default.openSync(file, 'r'); + while (pos < stat.size) { + const bytesRead = node_fs_1.default.readSync(fd, buf, 0, readSize, pos); + pos += bytesRead; + p.write(buf.subarray(0, bytesRead)); + } + p.end(); + } + } + finally { + if (typeof fd === 'number') { + try { + node_fs_1.default.closeSync(fd); + /* c8 ignore next */ + } + catch (er) { } + } + } +}; +const listFile = (opt, _files) => { + const parse = new parse_js_1.Parser(opt); + const readSize = opt.maxReadSize || 16 * 1024 * 1024; + const file = opt.file; + const p = new Promise((resolve, reject) => { + parse.on('error', reject); + parse.on('end', resolve); + node_fs_1.default.stat(file, (er, stat) => { + if (er) { + reject(er); + } + else { + const stream = new fsm.ReadStream(file, { + readSize: readSize, + size: stat.size, + }); + stream.on('error', reject); + stream.pipe(parse); + } + }); + }); + return p; +}; +exports.list = (0, make_command_js_1.makeCommand)(listFileSync, listFile, opt => new parse_js_1.Parser(opt), opt => new parse_js_1.Parser(opt), (opt, files) => { + if (files?.length) + (0, exports.filesFilter)(opt, files); + if (!opt.noResume) + onReadEntryFunction(opt); +}); +//# sourceMappingURL=list.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/make-command.js b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/make-command.js new file mode 100644 index 00000000000000..1814319e78bc62 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/make-command.js @@ -0,0 +1,61 @@ +"use strict"; +Object.defineProperty(exports, "__esModule", { value: true }); +exports.makeCommand = void 0; +const options_js_1 = require("./options.js"); +const makeCommand = (syncFile, asyncFile, syncNoFile, asyncNoFile, validate) => { + return Object.assign((opt_ = [], entries, cb) => { + if (Array.isArray(opt_)) { + entries = opt_; + opt_ = {}; + } + if (typeof entries === 'function') { + cb = entries; + entries = undefined; + } + if (!entries) { + entries = []; + } + else { + entries = Array.from(entries); + } + const opt = (0, options_js_1.dealias)(opt_); + validate?.(opt, entries); + if ((0, options_js_1.isSyncFile)(opt)) { + if (typeof cb === 'function') { + throw new TypeError('callback not supported for sync tar functions'); + } + return syncFile(opt, entries); + } + else if ((0, options_js_1.isAsyncFile)(opt)) { + const p = asyncFile(opt, entries); + // weirdness to make TS happy + const c = cb ? cb : undefined; + return c ? 
p.then(() => c(), c) : p; + } + else if ((0, options_js_1.isSyncNoFile)(opt)) { + if (typeof cb === 'function') { + throw new TypeError('callback not supported for sync tar functions'); + } + return syncNoFile(opt, entries); + } + else if ((0, options_js_1.isAsyncNoFile)(opt)) { + if (typeof cb === 'function') { + throw new TypeError('callback only supported with file option'); + } + return asyncNoFile(opt, entries); + /* c8 ignore start */ + } + else { + throw new Error('impossible options??'); + } + /* c8 ignore stop */ + }, { + syncFile, + asyncFile, + syncNoFile, + asyncNoFile, + validate, + }); +}; +exports.makeCommand = makeCommand; +//# sourceMappingURL=make-command.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/mkdir.js b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/mkdir.js new file mode 100644 index 00000000000000..2b13ecbab6723e --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/mkdir.js @@ -0,0 +1,209 @@ +"use strict"; +var __importDefault = (this && this.__importDefault) || function (mod) { + return (mod && mod.__esModule) ? mod : { "default": mod }; +}; +Object.defineProperty(exports, "__esModule", { value: true }); +exports.mkdirSync = exports.mkdir = void 0; +const chownr_1 = require("chownr"); +const fs_1 = __importDefault(require("fs")); +const mkdirp_1 = require("mkdirp"); +const node_path_1 = __importDefault(require("node:path")); +const cwd_error_js_1 = require("./cwd-error.js"); +const normalize_windows_path_js_1 = require("./normalize-windows-path.js"); +const symlink_error_js_1 = require("./symlink-error.js"); +const cGet = (cache, key) => cache.get((0, normalize_windows_path_js_1.normalizeWindowsPath)(key)); +const cSet = (cache, key, val) => cache.set((0, normalize_windows_path_js_1.normalizeWindowsPath)(key), val); +const checkCwd = (dir, cb) => { + fs_1.default.stat(dir, (er, st) => { + if (er || !st.isDirectory()) { + er = new cwd_error_js_1.CwdError(dir, er?.code || 'ENOTDIR'); + } + cb(er); + }); +}; +/** + * Wrapper around mkdirp for tar's needs. + * + * The main purpose is to avoid creating directories if we know that + * they already exist (and track which ones exist for this purpose), + * and prevent entries from being extracted into symlinked folders, + * if `preservePaths` is not set. + */ +const mkdir = (dir, opt, cb) => { + dir = (0, normalize_windows_path_js_1.normalizeWindowsPath)(dir); + // if there's any overlap between mask and mode, + // then we'll need an explicit chmod + /* c8 ignore next */ + const umask = opt.umask ?? 0o22; + const mode = opt.mode | 0o0700; + const needChmod = (mode & umask) !== 0; + const uid = opt.uid; + const gid = opt.gid; + const doChown = typeof uid === 'number' && + typeof gid === 'number' && + (uid !== opt.processUid || gid !== opt.processGid); + const preserve = opt.preserve; + const unlink = opt.unlink; + const cache = opt.cache; + const cwd = (0, normalize_windows_path_js_1.normalizeWindowsPath)(opt.cwd); + const done = (er, created) => { + if (er) { + cb(er); + } + else { + cSet(cache, dir, true); + if (created && doChown) { + (0, chownr_1.chownr)(created, uid, gid, er => done(er)); + } + else if (needChmod) { + fs_1.default.chmod(dir, mode, cb); + } + else { + cb(); + } + } + }; + if (cache && cGet(cache, dir) === true) { + return done(); + } + if (dir === cwd) { + return checkCwd(dir, done); + } + if (preserve) { + return (0, mkdirp_1.mkdirp)(dir, { mode }).then(made => done(null, made ?? 
undefined), // oh, ts + done); + } + const sub = (0, normalize_windows_path_js_1.normalizeWindowsPath)(node_path_1.default.relative(cwd, dir)); + const parts = sub.split('/'); + mkdir_(cwd, parts, mode, cache, unlink, cwd, undefined, done); +}; +exports.mkdir = mkdir; +const mkdir_ = (base, parts, mode, cache, unlink, cwd, created, cb) => { + if (!parts.length) { + return cb(null, created); + } + const p = parts.shift(); + const part = (0, normalize_windows_path_js_1.normalizeWindowsPath)(node_path_1.default.resolve(base + '/' + p)); + if (cGet(cache, part)) { + return mkdir_(part, parts, mode, cache, unlink, cwd, created, cb); + } + fs_1.default.mkdir(part, mode, onmkdir(part, parts, mode, cache, unlink, cwd, created, cb)); +}; +const onmkdir = (part, parts, mode, cache, unlink, cwd, created, cb) => (er) => { + if (er) { + fs_1.default.lstat(part, (statEr, st) => { + if (statEr) { + statEr.path = + statEr.path && (0, normalize_windows_path_js_1.normalizeWindowsPath)(statEr.path); + cb(statEr); + } + else if (st.isDirectory()) { + mkdir_(part, parts, mode, cache, unlink, cwd, created, cb); + } + else if (unlink) { + fs_1.default.unlink(part, er => { + if (er) { + return cb(er); + } + fs_1.default.mkdir(part, mode, onmkdir(part, parts, mode, cache, unlink, cwd, created, cb)); + }); + } + else if (st.isSymbolicLink()) { + return cb(new symlink_error_js_1.SymlinkError(part, part + '/' + parts.join('/'))); + } + else { + cb(er); + } + }); + } + else { + created = created || part; + mkdir_(part, parts, mode, cache, unlink, cwd, created, cb); + } +}; +const checkCwdSync = (dir) => { + let ok = false; + let code = undefined; + try { + ok = fs_1.default.statSync(dir).isDirectory(); + } + catch (er) { + code = er?.code; + } + finally { + if (!ok) { + throw new cwd_error_js_1.CwdError(dir, code ?? 'ENOTDIR'); + } + } +}; +const mkdirSync = (dir, opt) => { + dir = (0, normalize_windows_path_js_1.normalizeWindowsPath)(dir); + // if there's any overlap between mask and mode, + // then we'll need an explicit chmod + /* c8 ignore next */ + const umask = opt.umask ?? 0o22; + const mode = opt.mode | 0o700; + const needChmod = (mode & umask) !== 0; + const uid = opt.uid; + const gid = opt.gid; + const doChown = typeof uid === 'number' && + typeof gid === 'number' && + (uid !== opt.processUid || gid !== opt.processGid); + const preserve = opt.preserve; + const unlink = opt.unlink; + const cache = opt.cache; + const cwd = (0, normalize_windows_path_js_1.normalizeWindowsPath)(opt.cwd); + const done = (created) => { + cSet(cache, dir, true); + if (created && doChown) { + (0, chownr_1.chownrSync)(created, uid, gid); + } + if (needChmod) { + fs_1.default.chmodSync(dir, mode); + } + }; + if (cache && cGet(cache, dir) === true) { + return done(); + } + if (dir === cwd) { + checkCwdSync(cwd); + return done(); + } + if (preserve) { + return done((0, mkdirp_1.mkdirpSync)(dir, mode) ?? 
undefined); + } + const sub = (0, normalize_windows_path_js_1.normalizeWindowsPath)(node_path_1.default.relative(cwd, dir)); + const parts = sub.split('/'); + let created = undefined; + for (let p = parts.shift(), part = cwd; p && (part += '/' + p); p = parts.shift()) { + part = (0, normalize_windows_path_js_1.normalizeWindowsPath)(node_path_1.default.resolve(part)); + if (cGet(cache, part)) { + continue; + } + try { + fs_1.default.mkdirSync(part, mode); + created = created || part; + cSet(cache, part, true); + } + catch (er) { + const st = fs_1.default.lstatSync(part); + if (st.isDirectory()) { + cSet(cache, part, true); + continue; + } + else if (unlink) { + fs_1.default.unlinkSync(part); + fs_1.default.mkdirSync(part, mode); + created = created || part; + cSet(cache, part, true); + continue; + } + else if (st.isSymbolicLink()) { + return new symlink_error_js_1.SymlinkError(part, part + '/' + parts.join('/')); + } + } + } + return done(created); +}; +exports.mkdirSync = mkdirSync; +//# sourceMappingURL=mkdir.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/mode-fix.js b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/mode-fix.js new file mode 100644 index 00000000000000..49dd727961d290 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/mode-fix.js @@ -0,0 +1,29 @@ +"use strict"; +Object.defineProperty(exports, "__esModule", { value: true }); +exports.modeFix = void 0; +const modeFix = (mode, isDir, portable) => { + mode &= 0o7777; + // in portable mode, use the minimum reasonable umask + // if this system creates files with 0o664 by default + // (as some linux distros do), then we'll write the + // archive with 0o644 instead. Also, don't ever create + // a file that is not readable/writable by the owner. + if (portable) { + mode = (mode | 0o600) & ~0o22; + } + // if dirs are readable, then they should be listable + if (isDir) { + if (mode & 0o400) { + mode |= 0o100; + } + if (mode & 0o40) { + mode |= 0o10; + } + if (mode & 0o4) { + mode |= 0o1; + } + } + return mode; +}; +exports.modeFix = modeFix; +//# sourceMappingURL=mode-fix.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/normalize-unicode.js b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/normalize-unicode.js new file mode 100644 index 00000000000000..2f08ce46d98c4c --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/normalize-unicode.js @@ -0,0 +1,17 @@ +"use strict"; +Object.defineProperty(exports, "__esModule", { value: true }); +exports.normalizeUnicode = void 0; +// warning: extremely hot code path. +// This has been meticulously optimized for use +// within npm install on large package trees. +// Do not edit without careful benchmarking. 
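// Editorial annotation (not part of the upstream diff): the cache below
// memoizes String.prototype.normalize('NFD') per input string, trading
// memory for speed when the same path strings recur across a large
// package tree, e.g. normalizeUnicode('café') === 'cafe\u0301', with
// every call after the first being a plain property lookup.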
+const normalizeCache = Object.create(null); +const { hasOwnProperty } = Object.prototype; +const normalizeUnicode = (s) => { + if (!hasOwnProperty.call(normalizeCache, s)) { + normalizeCache[s] = s.normalize('NFD'); + } + return normalizeCache[s]; +}; +exports.normalizeUnicode = normalizeUnicode; +//# sourceMappingURL=normalize-unicode.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/normalize-windows-path.js b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/normalize-windows-path.js new file mode 100644 index 00000000000000..b0c7aaa9f2d175 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/normalize-windows-path.js @@ -0,0 +1,12 @@ +"use strict"; +// on windows, either \ or / are valid directory separators. +// on unix, \ is a valid character in filenames. +// so, on windows, and only on windows, we replace all \ chars with /, +// so that we can use / as our one and only directory separator char. +Object.defineProperty(exports, "__esModule", { value: true }); +exports.normalizeWindowsPath = void 0; +const platform = process.env.TESTING_TAR_FAKE_PLATFORM || process.platform; +exports.normalizeWindowsPath = platform !== 'win32' ? + (p) => p + : (p) => p && p.replace(/\\/g, '/'); +//# sourceMappingURL=normalize-windows-path.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/options.js b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/options.js new file mode 100644 index 00000000000000..4cd06505bc72b2 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/options.js @@ -0,0 +1,66 @@ +"use strict"; +// turn tar(1) style args like `C` into the more verbose things like `cwd` +Object.defineProperty(exports, "__esModule", { value: true }); +exports.dealias = exports.isNoFile = exports.isFile = exports.isAsync = exports.isSync = exports.isAsyncNoFile = exports.isSyncNoFile = exports.isAsyncFile = exports.isSyncFile = void 0; +const argmap = new Map([ + ['C', 'cwd'], + ['f', 'file'], + ['z', 'gzip'], + ['P', 'preservePaths'], + ['U', 'unlink'], + ['strip-components', 'strip'], + ['stripComponents', 'strip'], + ['keep-newer', 'newer'], + ['keepNewer', 'newer'], + ['keep-newer-files', 'newer'], + ['keepNewerFiles', 'newer'], + ['k', 'keep'], + ['keep-existing', 'keep'], + ['keepExisting', 'keep'], + ['m', 'noMtime'], + ['no-mtime', 'noMtime'], + ['p', 'preserveOwner'], + ['L', 'follow'], + ['h', 'follow'], + ['onentry', 'onReadEntry'], +]); +const isSyncFile = (o) => !!o.sync && !!o.file; +exports.isSyncFile = isSyncFile; +const isAsyncFile = (o) => !o.sync && !!o.file; +exports.isAsyncFile = isAsyncFile; +const isSyncNoFile = (o) => !!o.sync && !o.file; +exports.isSyncNoFile = isSyncNoFile; +const isAsyncNoFile = (o) => !o.sync && !o.file; +exports.isAsyncNoFile = isAsyncNoFile; +const isSync = (o) => !!o.sync; +exports.isSync = isSync; +const isAsync = (o) => !o.sync; +exports.isAsync = isAsync; +const isFile = (o) => !!o.file; +exports.isFile = isFile; +const isNoFile = (o) => !o.file; +exports.isNoFile = isNoFile; +const dealiasKey = (k) => { + const d = argmap.get(k); + if (d) + return d; + return k; +}; +const dealias = (opt = {}) => { + if (!opt) + return {}; + const result = {}; + for (const [key, v] of Object.entries(opt)) { + // TS doesn't know that aliases are going to always be the same type + const k = dealiasKey(key); + result[k] = v; + } + // affordance for deprecated noChmod -> 
chmod + if (result.chmod === undefined && result.noChmod === false) { + result.chmod = true; + } + delete result.noChmod; + return result; +}; +exports.dealias = dealias; +//# sourceMappingURL=options.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/pack.js b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/pack.js new file mode 100644 index 00000000000000..303e93063c2db4 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/pack.js @@ -0,0 +1,477 @@ +"use strict"; +// A readable tar stream creator +// Technically, this is a transform stream that you write paths into, +// and tar format comes out of. +// The `add()` method is like `write()` but returns this, +// and end() return `this` as well, so you can +// do `new Pack(opt).add('files').add('dir').end().pipe(output) +// You could also do something like: +// streamOfPaths().pipe(new Pack()).pipe(new fs.WriteStream('out.tar')) +var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) { + if (k2 === undefined) k2 = k; + var desc = Object.getOwnPropertyDescriptor(m, k); + if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) { + desc = { enumerable: true, get: function() { return m[k]; } }; + } + Object.defineProperty(o, k2, desc); +}) : (function(o, m, k, k2) { + if (k2 === undefined) k2 = k; + o[k2] = m[k]; +})); +var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) { + Object.defineProperty(o, "default", { enumerable: true, value: v }); +}) : function(o, v) { + o["default"] = v; +}); +var __importStar = (this && this.__importStar) || function (mod) { + if (mod && mod.__esModule) return mod; + var result = {}; + if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k); + __setModuleDefault(result, mod); + return result; +}; +var __importDefault = (this && this.__importDefault) || function (mod) { + return (mod && mod.__esModule) ? 
mod : { "default": mod }; +}; +Object.defineProperty(exports, "__esModule", { value: true }); +exports.PackSync = exports.Pack = exports.PackJob = void 0; +const fs_1 = __importDefault(require("fs")); +const write_entry_js_1 = require("./write-entry.js"); +class PackJob { + path; + absolute; + entry; + stat; + readdir; + pending = false; + ignore = false; + piped = false; + constructor(path, absolute) { + this.path = path || './'; + this.absolute = absolute; + } +} +exports.PackJob = PackJob; +const minipass_1 = require("minipass"); +const zlib = __importStar(require("minizlib")); +const yallist_1 = require("yallist"); +const read_entry_js_1 = require("./read-entry.js"); +const warn_method_js_1 = require("./warn-method.js"); +const EOF = Buffer.alloc(1024); +const ONSTAT = Symbol('onStat'); +const ENDED = Symbol('ended'); +const QUEUE = Symbol('queue'); +const CURRENT = Symbol('current'); +const PROCESS = Symbol('process'); +const PROCESSING = Symbol('processing'); +const PROCESSJOB = Symbol('processJob'); +const JOBS = Symbol('jobs'); +const JOBDONE = Symbol('jobDone'); +const ADDFSENTRY = Symbol('addFSEntry'); +const ADDTARENTRY = Symbol('addTarEntry'); +const STAT = Symbol('stat'); +const READDIR = Symbol('readdir'); +const ONREADDIR = Symbol('onreaddir'); +const PIPE = Symbol('pipe'); +const ENTRY = Symbol('entry'); +const ENTRYOPT = Symbol('entryOpt'); +const WRITEENTRYCLASS = Symbol('writeEntryClass'); +const WRITE = Symbol('write'); +const ONDRAIN = Symbol('ondrain'); +const path_1 = __importDefault(require("path")); +const normalize_windows_path_js_1 = require("./normalize-windows-path.js"); +class Pack extends minipass_1.Minipass { + opt; + cwd; + maxReadSize; + preservePaths; + strict; + noPax; + prefix; + linkCache; + statCache; + file; + portable; + zip; + readdirCache; + noDirRecurse; + follow; + noMtime; + mtime; + filter; + jobs; + [WRITEENTRYCLASS]; + onWriteEntry; + [QUEUE]; + [JOBS] = 0; + [PROCESSING] = false; + [ENDED] = false; + constructor(opt = {}) { + //@ts-ignore + super(); + this.opt = opt; + this.file = opt.file || ''; + this.cwd = opt.cwd || process.cwd(); + this.maxReadSize = opt.maxReadSize; + this.preservePaths = !!opt.preservePaths; + this.strict = !!opt.strict; + this.noPax = !!opt.noPax; + this.prefix = (0, normalize_windows_path_js_1.normalizeWindowsPath)(opt.prefix || ''); + this.linkCache = opt.linkCache || new Map(); + this.statCache = opt.statCache || new Map(); + this.readdirCache = opt.readdirCache || new Map(); + this.onWriteEntry = opt.onWriteEntry; + this[WRITEENTRYCLASS] = write_entry_js_1.WriteEntry; + if (typeof opt.onwarn === 'function') { + this.on('warn', opt.onwarn); + } + this.portable = !!opt.portable; + if (opt.gzip || opt.brotli) { + if (opt.gzip && opt.brotli) { + throw new TypeError('gzip and brotli are mutually exclusive'); + } + if (opt.gzip) { + if (typeof opt.gzip !== 'object') { + opt.gzip = {}; + } + if (this.portable) { + opt.gzip.portable = true; + } + this.zip = new zlib.Gzip(opt.gzip); + } + if (opt.brotli) { + if (typeof opt.brotli !== 'object') { + opt.brotli = {}; + } + this.zip = new zlib.BrotliCompress(opt.brotli); + } + /* c8 ignore next */ + if (!this.zip) + throw new Error('impossible'); + const zip = this.zip; + zip.on('data', chunk => super.write(chunk)); + zip.on('end', () => super.end()); + zip.on('drain', () => this[ONDRAIN]()); + this.on('resume', () => zip.resume()); + } + else { + this.on('drain', this[ONDRAIN]); + } + this.noDirRecurse = !!opt.noDirRecurse; + this.follow = !!opt.follow; + this.noMtime = 
!!opt.noMtime; + if (opt.mtime) + this.mtime = opt.mtime; + this.filter = + typeof opt.filter === 'function' ? opt.filter : () => true; + this[QUEUE] = new yallist_1.Yallist(); + this[JOBS] = 0; + this.jobs = Number(opt.jobs) || 4; + this[PROCESSING] = false; + this[ENDED] = false; + } + [WRITE](chunk) { + return super.write(chunk); + } + add(path) { + this.write(path); + return this; + } + end(path, encoding, cb) { + /* c8 ignore start */ + if (typeof path === 'function') { + cb = path; + path = undefined; + } + if (typeof encoding === 'function') { + cb = encoding; + encoding = undefined; + } + /* c8 ignore stop */ + if (path) { + this.add(path); + } + this[ENDED] = true; + this[PROCESS](); + /* c8 ignore next */ + if (cb) + cb(); + return this; + } + write(path) { + if (this[ENDED]) { + throw new Error('write after end'); + } + if (path instanceof read_entry_js_1.ReadEntry) { + this[ADDTARENTRY](path); + } + else { + this[ADDFSENTRY](path); + } + return this.flowing; + } + [ADDTARENTRY](p) { + const absolute = (0, normalize_windows_path_js_1.normalizeWindowsPath)(path_1.default.resolve(this.cwd, p.path)); + // in this case, we don't have to wait for the stat + if (!this.filter(p.path, p)) { + p.resume(); + } + else { + const job = new PackJob(p.path, absolute); + job.entry = new write_entry_js_1.WriteEntryTar(p, this[ENTRYOPT](job)); + job.entry.on('end', () => this[JOBDONE](job)); + this[JOBS] += 1; + this[QUEUE].push(job); + } + this[PROCESS](); + } + [ADDFSENTRY](p) { + const absolute = (0, normalize_windows_path_js_1.normalizeWindowsPath)(path_1.default.resolve(this.cwd, p)); + this[QUEUE].push(new PackJob(p, absolute)); + this[PROCESS](); + } + [STAT](job) { + job.pending = true; + this[JOBS] += 1; + const stat = this.follow ? 'stat' : 'lstat'; + fs_1.default[stat](job.absolute, (er, stat) => { + job.pending = false; + this[JOBS] -= 1; + if (er) { + this.emit('error', er); + } + else { + this[ONSTAT](job, stat); + } + }); + } + [ONSTAT](job, stat) { + this.statCache.set(job.absolute, stat); + job.stat = stat; + // now we have the stat, we can filter it. 
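// Editorial annotation (not part of the upstream diff): for filesystem
// entries the filter receives (path, stat) here, whereas [ADDTARENTRY]
// above calls this.filter(p.path, p) with the ReadEntry itself, so a
// single filter function must be prepared to accept either shape.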
+ if (!this.filter(job.path, stat)) { + job.ignore = true; + } + this[PROCESS](); + } + [READDIR](job) { + job.pending = true; + this[JOBS] += 1; + fs_1.default.readdir(job.absolute, (er, entries) => { + job.pending = false; + this[JOBS] -= 1; + if (er) { + return this.emit('error', er); + } + this[ONREADDIR](job, entries); + }); + } + [ONREADDIR](job, entries) { + this.readdirCache.set(job.absolute, entries); + job.readdir = entries; + this[PROCESS](); + } + [PROCESS]() { + if (this[PROCESSING]) { + return; + } + this[PROCESSING] = true; + for (let w = this[QUEUE].head; !!w && this[JOBS] < this.jobs; w = w.next) { + this[PROCESSJOB](w.value); + if (w.value.ignore) { + const p = w.next; + this[QUEUE].removeNode(w); + w.next = p; + } + } + this[PROCESSING] = false; + if (this[ENDED] && !this[QUEUE].length && this[JOBS] === 0) { + if (this.zip) { + this.zip.end(EOF); + } + else { + super.write(EOF); + super.end(); + } + } + } + get [CURRENT]() { + return this[QUEUE] && this[QUEUE].head && this[QUEUE].head.value; + } + [JOBDONE](_job) { + this[QUEUE].shift(); + this[JOBS] -= 1; + this[PROCESS](); + } + [PROCESSJOB](job) { + if (job.pending) { + return; + } + if (job.entry) { + if (job === this[CURRENT] && !job.piped) { + this[PIPE](job); + } + return; + } + if (!job.stat) { + const sc = this.statCache.get(job.absolute); + if (sc) { + this[ONSTAT](job, sc); + } + else { + this[STAT](job); + } + } + if (!job.stat) { + return; + } + // filtered out! + if (job.ignore) { + return; + } + if (!this.noDirRecurse && + job.stat.isDirectory() && + !job.readdir) { + const rc = this.readdirCache.get(job.absolute); + if (rc) { + this[ONREADDIR](job, rc); + } + else { + this[READDIR](job); + } + if (!job.readdir) { + return; + } + } + // we know it doesn't have an entry, because that got checked above + job.entry = this[ENTRY](job); + if (!job.entry) { + job.ignore = true; + return; + } + if (job === this[CURRENT] && !job.piped) { + this[PIPE](job); + } + } + [ENTRYOPT](job) { + return { + onwarn: (code, msg, data) => this.warn(code, msg, data), + noPax: this.noPax, + cwd: this.cwd, + absolute: job.absolute, + preservePaths: this.preservePaths, + maxReadSize: this.maxReadSize, + strict: this.strict, + portable: this.portable, + linkCache: this.linkCache, + statCache: this.statCache, + noMtime: this.noMtime, + mtime: this.mtime, + prefix: this.prefix, + onWriteEntry: this.onWriteEntry, + }; + } + [ENTRY](job) { + this[JOBS] += 1; + try { + const e = new this[WRITEENTRYCLASS](job.path, this[ENTRYOPT](job)); + return e + .on('end', () => this[JOBDONE](job)) + .on('error', er => this.emit('error', er)); + } + catch (er) { + this.emit('error', er); + } + } + [ONDRAIN]() { + if (this[CURRENT] && this[CURRENT].entry) { + this[CURRENT].entry.resume(); + } + } + // like .pipe() but using super, because our write() is special + [PIPE](job) { + job.piped = true; + if (job.readdir) { + job.readdir.forEach(entry => { + const p = job.path; + const base = p === './' ? 
'' : p.replace(/\/*$/, '/'); + this[ADDFSENTRY](base + entry); + }); + } + const source = job.entry; + const zip = this.zip; + /* c8 ignore start */ + if (!source) + throw new Error('cannot pipe without source'); + /* c8 ignore stop */ + if (zip) { + source.on('data', chunk => { + if (!zip.write(chunk)) { + source.pause(); + } + }); + } + else { + source.on('data', chunk => { + if (!super.write(chunk)) { + source.pause(); + } + }); + } + } + pause() { + if (this.zip) { + this.zip.pause(); + } + return super.pause(); + } + warn(code, message, data = {}) { + (0, warn_method_js_1.warnMethod)(this, code, message, data); + } +} +exports.Pack = Pack; +class PackSync extends Pack { + sync = true; + constructor(opt) { + super(opt); + this[WRITEENTRYCLASS] = write_entry_js_1.WriteEntrySync; + } + // pause/resume are no-ops in sync streams. + pause() { } + resume() { } + [STAT](job) { + const stat = this.follow ? 'statSync' : 'lstatSync'; + this[ONSTAT](job, fs_1.default[stat](job.absolute)); + } + [READDIR](job) { + this[ONREADDIR](job, fs_1.default.readdirSync(job.absolute)); + } + // gotta get it all in this tick + [PIPE](job) { + const source = job.entry; + const zip = this.zip; + if (job.readdir) { + job.readdir.forEach(entry => { + const p = job.path; + const base = p === './' ? '' : p.replace(/\/*$/, '/'); + this[ADDFSENTRY](base + entry); + }); + } + /* c8 ignore start */ + if (!source) + throw new Error('Cannot pipe without source'); + /* c8 ignore stop */ + if (zip) { + source.on('data', chunk => { + zip.write(chunk); + }); + } + else { + source.on('data', chunk => { + super[WRITE](chunk); + }); + } + } +} +exports.PackSync = PackSync; +//# sourceMappingURL=pack.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/package.json b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/package.json new file mode 100644 index 00000000000000..5bbefffbabee39 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/package.json @@ -0,0 +1,3 @@ +{ + "type": "commonjs" +} diff --git a/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/parse.js b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/parse.js new file mode 100644 index 00000000000000..1f7e5fd65e869f --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/parse.js @@ -0,0 +1,599 @@ +"use strict"; +// this[BUFFER] is the remainder of a chunk if we're waiting for +// the full 512 bytes of a header to come in. We will Buffer.concat() +// it to the next write(), which is a mem copy, but a small one. +// +// this[QUEUE] is a Yallist of entries that haven't been emitted +// yet this can only get filled up if the user keeps write()ing after +// a write() returns false, or does a write() with more than one entry +// +// We don't buffer chunks, we always parse them and either create an +// entry, or push it into the active entry. The ReadEntry class knows +// to throw data away if .ignore=true +// +// Shift entry off the buffer when it emits 'end', and emit 'entry' for +// the next one in the list. 
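// Editorial annotation (not part of the upstream diff): the Parser
// below is a state machine over 512-byte blocks. [STATE] moves between
// 'begin'/'header' (decode a Header), 'body' (stream file bytes into
// the current ReadEntry), 'meta' (accumulate pax data), and 'ignore'
// (drain filtered entries); see the switch in [CONSUMECHUNKSUB] for the
// dispatch.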
+// +// At any time, we're pushing body chunks into the entry at WRITEENTRY, +// and waiting for 'end' on the entry at READENTRY +// +// ignored entries get .resume() called on them straight away +Object.defineProperty(exports, "__esModule", { value: true }); +exports.Parser = void 0; +const events_1 = require("events"); +const minizlib_1 = require("minizlib"); +const yallist_1 = require("yallist"); +const header_js_1 = require("./header.js"); +const pax_js_1 = require("./pax.js"); +const read_entry_js_1 = require("./read-entry.js"); +const warn_method_js_1 = require("./warn-method.js"); +const maxMetaEntrySize = 1024 * 1024; +const gzipHeader = Buffer.from([0x1f, 0x8b]); +const STATE = Symbol('state'); +const WRITEENTRY = Symbol('writeEntry'); +const READENTRY = Symbol('readEntry'); +const NEXTENTRY = Symbol('nextEntry'); +const PROCESSENTRY = Symbol('processEntry'); +const EX = Symbol('extendedHeader'); +const GEX = Symbol('globalExtendedHeader'); +const META = Symbol('meta'); +const EMITMETA = Symbol('emitMeta'); +const BUFFER = Symbol('buffer'); +const QUEUE = Symbol('queue'); +const ENDED = Symbol('ended'); +const EMITTEDEND = Symbol('emittedEnd'); +const EMIT = Symbol('emit'); +const UNZIP = Symbol('unzip'); +const CONSUMECHUNK = Symbol('consumeChunk'); +const CONSUMECHUNKSUB = Symbol('consumeChunkSub'); +const CONSUMEBODY = Symbol('consumeBody'); +const CONSUMEMETA = Symbol('consumeMeta'); +const CONSUMEHEADER = Symbol('consumeHeader'); +const CONSUMING = Symbol('consuming'); +const BUFFERCONCAT = Symbol('bufferConcat'); +const MAYBEEND = Symbol('maybeEnd'); +const WRITING = Symbol('writing'); +const ABORTED = Symbol('aborted'); +const DONE = Symbol('onDone'); +const SAW_VALID_ENTRY = Symbol('sawValidEntry'); +const SAW_NULL_BLOCK = Symbol('sawNullBlock'); +const SAW_EOF = Symbol('sawEOF'); +const CLOSESTREAM = Symbol('closeStream'); +const noop = () => true; +class Parser extends events_1.EventEmitter { + file; + strict; + maxMetaEntrySize; + filter; + brotli; + writable = true; + readable = false; + [QUEUE] = new yallist_1.Yallist(); + [BUFFER]; + [READENTRY]; + [WRITEENTRY]; + [STATE] = 'begin'; + [META] = ''; + [EX]; + [GEX]; + [ENDED] = false; + [UNZIP]; + [ABORTED] = false; + [SAW_VALID_ENTRY]; + [SAW_NULL_BLOCK] = false; + [SAW_EOF] = false; + [WRITING] = false; + [CONSUMING] = false; + [EMITTEDEND] = false; + constructor(opt = {}) { + super(); + this.file = opt.file || ''; + // these BADARCHIVE errors can't be detected early. listen on DONE. + this.on(DONE, () => { + if (this[STATE] === 'begin' || + this[SAW_VALID_ENTRY] === false) { + // either less than 1 block of data, or all entries were invalid. + // Either way, probably not even a tarball. + this.warn('TAR_BAD_ARCHIVE', 'Unrecognized archive format'); + } + }); + if (opt.ondone) { + this.on(DONE, opt.ondone); + } + else { + this.on(DONE, () => { + this.emit('prefinish'); + this.emit('finish'); + this.emit('end'); + }); + } + this.strict = !!opt.strict; + this.maxMetaEntrySize = opt.maxMetaEntrySize || maxMetaEntrySize; + this.filter = typeof opt.filter === 'function' ? opt.filter : noop; + // Unlike gzip, brotli doesn't have any magic bytes to identify it + // Users need to explicitly tell us they're extracting a brotli file + // Or we infer from the file extension + const isTBR = opt.file && + (opt.file.endsWith('.tar.br') || opt.file.endsWith('.tbr')); + // if it's a tbr file it MIGHT be brotli, but we don't know until + // we look at it and verify it's not a valid tar file. 
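// Editorial annotation (not part of the upstream diff): this.brotli is
// deliberately tri-state below: true (caller asked for brotli), false
// (gzip requested, or not a .tbr/.tar.br file), or undefined ("sniff
// later"). write() resolves the undefined case by attempting to parse
// the first 512 bytes as a tar Header; success means plain tar, a
// throw means brotli.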
+ this.brotli = + !opt.gzip && opt.brotli !== undefined ? opt.brotli + : isTBR ? undefined + : false; + // have to set this so that streams are ok piping into it + this.on('end', () => this[CLOSESTREAM]()); + if (typeof opt.onwarn === 'function') { + this.on('warn', opt.onwarn); + } + if (typeof opt.onReadEntry === 'function') { + this.on('entry', opt.onReadEntry); + } + } + warn(code, message, data = {}) { + (0, warn_method_js_1.warnMethod)(this, code, message, data); + } + [CONSUMEHEADER](chunk, position) { + if (this[SAW_VALID_ENTRY] === undefined) { + this[SAW_VALID_ENTRY] = false; + } + let header; + try { + header = new header_js_1.Header(chunk, position, this[EX], this[GEX]); + } + catch (er) { + return this.warn('TAR_ENTRY_INVALID', er); + } + if (header.nullBlock) { + if (this[SAW_NULL_BLOCK]) { + this[SAW_EOF] = true; + // ending an archive with no entries. pointless, but legal. + if (this[STATE] === 'begin') { + this[STATE] = 'header'; + } + this[EMIT]('eof'); + } + else { + this[SAW_NULL_BLOCK] = true; + this[EMIT]('nullBlock'); + } + } + else { + this[SAW_NULL_BLOCK] = false; + if (!header.cksumValid) { + this.warn('TAR_ENTRY_INVALID', 'checksum failure', { header }); + } + else if (!header.path) { + this.warn('TAR_ENTRY_INVALID', 'path is required', { header }); + } + else { + const type = header.type; + if (/^(Symbolic)?Link$/.test(type) && !header.linkpath) { + this.warn('TAR_ENTRY_INVALID', 'linkpath required', { + header, + }); + } + else if (!/^(Symbolic)?Link$/.test(type) && + !/^(Global)?ExtendedHeader$/.test(type) && + header.linkpath) { + this.warn('TAR_ENTRY_INVALID', 'linkpath forbidden', { + header, + }); + } + else { + const entry = (this[WRITEENTRY] = new read_entry_js_1.ReadEntry(header, this[EX], this[GEX])); + // we do this for meta & ignored entries as well, because they + // are still valid tar, or else we wouldn't know to ignore them + if (!this[SAW_VALID_ENTRY]) { + if (entry.remain) { + // this might be the one! + const onend = () => { + if (!entry.invalid) { + this[SAW_VALID_ENTRY] = true; + } + }; + entry.on('end', onend); + } + else { + this[SAW_VALID_ENTRY] = true; + } + } + if (entry.meta) { + if (entry.size > this.maxMetaEntrySize) { + entry.ignore = true; + this[EMIT]('ignoredEntry', entry); + this[STATE] = 'ignore'; + entry.resume(); + } + else if (entry.size > 0) { + this[META] = ''; + entry.on('data', c => (this[META] += c)); + this[STATE] = 'meta'; + } + } + else { + this[EX] = undefined; + entry.ignore = + entry.ignore || !this.filter(entry.path, entry); + if (entry.ignore) { + // probably valid, just not something we care about + this[EMIT]('ignoredEntry', entry); + this[STATE] = entry.remain ? 
'ignore' : 'header'; + entry.resume(); + } + else { + if (entry.remain) { + this[STATE] = 'body'; + } + else { + this[STATE] = 'header'; + entry.end(); + } + if (!this[READENTRY]) { + this[QUEUE].push(entry); + this[NEXTENTRY](); + } + else { + this[QUEUE].push(entry); + } + } + } + } + } + } + } + [CLOSESTREAM]() { + queueMicrotask(() => this.emit('close')); + } + [PROCESSENTRY](entry) { + let go = true; + if (!entry) { + this[READENTRY] = undefined; + go = false; + } + else if (Array.isArray(entry)) { + const [ev, ...args] = entry; + this.emit(ev, ...args); + } + else { + this[READENTRY] = entry; + this.emit('entry', entry); + if (!entry.emittedEnd) { + entry.on('end', () => this[NEXTENTRY]()); + go = false; + } + } + return go; + } + [NEXTENTRY]() { + do { } while (this[PROCESSENTRY](this[QUEUE].shift())); + if (!this[QUEUE].length) { + // At this point, there's nothing in the queue, but we may have an + // entry which is being consumed (readEntry). + // If we don't, then we definitely can handle more data. + // If we do, and either it's flowing, or it has never had any data + // written to it, then it needs more. + // The only other possibility is that it has returned false from a + // write() call, so we wait for the next drain to continue. + const re = this[READENTRY]; + const drainNow = !re || re.flowing || re.size === re.remain; + if (drainNow) { + if (!this[WRITING]) { + this.emit('drain'); + } + } + else { + re.once('drain', () => this.emit('drain')); + } + } + } + [CONSUMEBODY](chunk, position) { + // write up to but no more than writeEntry.blockRemain + const entry = this[WRITEENTRY]; + /* c8 ignore start */ + if (!entry) { + throw new Error('attempt to consume body without entry??'); + } + const br = entry.blockRemain ?? 0; + /* c8 ignore stop */ + const c = br >= chunk.length && position === 0 ? + chunk + : chunk.subarray(position, position + br); + entry.write(c); + if (!entry.blockRemain) { + this[STATE] = 'header'; + this[WRITEENTRY] = undefined; + entry.end(); + } + return c.length; + } + [CONSUMEMETA](chunk, position) { + const entry = this[WRITEENTRY]; + const ret = this[CONSUMEBODY](chunk, position); + // if we finished, then the entry is reset + if (!this[WRITEENTRY] && entry) { + this[EMITMETA](entry); + } + return ret; + } + [EMIT](ev, data, extra) { + if (!this[QUEUE].length && !this[READENTRY]) { + this.emit(ev, data, extra); + } + else { + this[QUEUE].push([ev, data, extra]); + } + } + [EMITMETA](entry) { + this[EMIT]('meta', this[META]); + switch (entry.type) { + case 'ExtendedHeader': + case 'OldExtendedHeader': + this[EX] = pax_js_1.Pax.parse(this[META], this[EX], false); + break; + case 'GlobalExtendedHeader': + this[GEX] = pax_js_1.Pax.parse(this[META], this[GEX], true); + break; + case 'NextFileHasLongPath': + case 'OldGnuLongPath': { + const ex = this[EX] ?? 
Object.create(null); + this[EX] = ex; + ex.path = this[META].replace(/\0.*/, ''); + break; + } + case 'NextFileHasLongLinkpath': { + const ex = this[EX] || Object.create(null); + this[EX] = ex; + ex.linkpath = this[META].replace(/\0.*/, ''); + break; + } + /* c8 ignore start */ + default: + throw new Error('unknown meta: ' + entry.type); + /* c8 ignore stop */ + } + } + abort(error) { + this[ABORTED] = true; + this.emit('abort', error); + // always throws, even in non-strict mode + this.warn('TAR_ABORT', error, { recoverable: false }); + } + write(chunk, encoding, cb) { + if (typeof encoding === 'function') { + cb = encoding; + encoding = undefined; + } + if (typeof chunk === 'string') { + chunk = Buffer.from(chunk, + /* c8 ignore next */ + typeof encoding === 'string' ? encoding : 'utf8'); + } + if (this[ABORTED]) { + /* c8 ignore next */ + cb?.(); + return false; + } + // first write, might be gzipped + const needSniff = this[UNZIP] === undefined || + (this.brotli === undefined && this[UNZIP] === false); + if (needSniff && chunk) { + if (this[BUFFER]) { + chunk = Buffer.concat([this[BUFFER], chunk]); + this[BUFFER] = undefined; + } + if (chunk.length < gzipHeader.length) { + this[BUFFER] = chunk; + /* c8 ignore next */ + cb?.(); + return true; + } + // look for gzip header + for (let i = 0; this[UNZIP] === undefined && i < gzipHeader.length; i++) { + if (chunk[i] !== gzipHeader[i]) { + this[UNZIP] = false; + } + } + const maybeBrotli = this.brotli === undefined; + if (this[UNZIP] === false && maybeBrotli) { + // read the first header to see if it's a valid tar file. If so, + // we can safely assume that it's not actually brotli, despite the + // .tbr or .tar.br file extension. + // if we ended before getting a full chunk, yes, def brotli + if (chunk.length < 512) { + if (this[ENDED]) { + this.brotli = true; + } + else { + this[BUFFER] = chunk; + /* c8 ignore next */ + cb?.(); + return true; + } + } + else { + // if it's tar, it's pretty reliably not brotli, chances of + // that happening are astronomical. + try { + new header_js_1.Header(chunk.subarray(0, 512)); + this.brotli = false; + } + catch (_) { + this.brotli = true; + } + } + } + if (this[UNZIP] === undefined || + (this[UNZIP] === false && this.brotli)) { + const ended = this[ENDED]; + this[ENDED] = false; + this[UNZIP] = + this[UNZIP] === undefined ? + new minizlib_1.Unzip({}) + : new minizlib_1.BrotliDecompress({}); + this[UNZIP].on('data', chunk => this[CONSUMECHUNK](chunk)); + this[UNZIP].on('error', er => this.abort(er)); + this[UNZIP].on('end', () => { + this[ENDED] = true; + this[CONSUMECHUNK](); + }); + this[WRITING] = true; + const ret = !!this[UNZIP][ended ? 'end' : 'write'](chunk); + this[WRITING] = false; + cb?.(); + return ret; + } + } + this[WRITING] = true; + if (this[UNZIP]) { + this[UNZIP].write(chunk); + } + else { + this[CONSUMECHUNK](chunk); + } + this[WRITING] = false; + // return false if there's a queue, or if the current entry isn't flowing + const ret = this[QUEUE].length ? false + : this[READENTRY] ? this[READENTRY].flowing + : true; + // if we have no queue, then that means a clogged READENTRY + if (!ret && !this[QUEUE].length) { + this[READENTRY]?.once('drain', () => this.emit('drain')); + } + /* c8 ignore next */ + cb?.(); + return ret; + } + [BUFFERCONCAT](c) { + if (c && !this[ABORTED]) { + this[BUFFER] = + this[BUFFER] ? 
Buffer.concat([this[BUFFER], c]) : c; + } + } + [MAYBEEND]() { + if (this[ENDED] && + !this[EMITTEDEND] && + !this[ABORTED] && + !this[CONSUMING]) { + this[EMITTEDEND] = true; + const entry = this[WRITEENTRY]; + if (entry && entry.blockRemain) { + // truncated, likely a damaged file + const have = this[BUFFER] ? this[BUFFER].length : 0; + this.warn('TAR_BAD_ARCHIVE', `Truncated input (needed ${entry.blockRemain} more bytes, only ${have} available)`, { entry }); + if (this[BUFFER]) { + entry.write(this[BUFFER]); + } + entry.end(); + } + this[EMIT](DONE); + } + } + [CONSUMECHUNK](chunk) { + if (this[CONSUMING] && chunk) { + this[BUFFERCONCAT](chunk); + } + else if (!chunk && !this[BUFFER]) { + this[MAYBEEND](); + } + else if (chunk) { + this[CONSUMING] = true; + if (this[BUFFER]) { + this[BUFFERCONCAT](chunk); + const c = this[BUFFER]; + this[BUFFER] = undefined; + this[CONSUMECHUNKSUB](c); + } + else { + this[CONSUMECHUNKSUB](chunk); + } + while (this[BUFFER] && + this[BUFFER]?.length >= 512 && + !this[ABORTED] && + !this[SAW_EOF]) { + const c = this[BUFFER]; + this[BUFFER] = undefined; + this[CONSUMECHUNKSUB](c); + } + this[CONSUMING] = false; + } + if (!this[BUFFER] || this[ENDED]) { + this[MAYBEEND](); + } + } + [CONSUMECHUNKSUB](chunk) { + // we know that we are in CONSUMING mode, so anything written goes into + // the buffer. Advance the position and put any remainder in the buffer. + let position = 0; + const length = chunk.length; + while (position + 512 <= length && + !this[ABORTED] && + !this[SAW_EOF]) { + switch (this[STATE]) { + case 'begin': + case 'header': + this[CONSUMEHEADER](chunk, position); + position += 512; + break; + case 'ignore': + case 'body': + position += this[CONSUMEBODY](chunk, position); + break; + case 'meta': + position += this[CONSUMEMETA](chunk, position); + break; + /* c8 ignore start */ + default: + throw new Error('invalid state: ' + this[STATE]); + /* c8 ignore stop */ + } + } + if (position < length) { + if (this[BUFFER]) { + this[BUFFER] = Buffer.concat([ + chunk.subarray(position), + this[BUFFER], + ]); + } + else { + this[BUFFER] = chunk.subarray(position); + } + } + } + end(chunk, encoding, cb) { + if (typeof chunk === 'function') { + cb = chunk; + encoding = undefined; + chunk = undefined; + } + if (typeof encoding === 'function') { + cb = encoding; + encoding = undefined; + } + if (typeof chunk === 'string') { + chunk = Buffer.from(chunk, encoding); + } + if (cb) + this.once('finish', cb); + if (!this[ABORTED]) { + if (this[UNZIP]) { + /* c8 ignore start */ + if (chunk) + this[UNZIP].write(chunk); + /* c8 ignore stop */ + this[UNZIP].end(); + } + else { + this[ENDED] = true; + if (this.brotli === undefined) + chunk = chunk || Buffer.alloc(0); + if (chunk) + this.write(chunk); + this[MAYBEEND](); + } + } + return this; + } +} +exports.Parser = Parser; +//# sourceMappingURL=parse.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/path-reservations.js b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/path-reservations.js new file mode 100644 index 00000000000000..9ff391c44092c7 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/path-reservations.js @@ -0,0 +1,170 @@ +"use strict"; +// A path exclusive reservation system +// reserve([list, of, paths], fn) +// When the fn is first in line for all its paths, it +// is called with a cb that clears the reservation. 
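A rough consumer's-eye sketch of the contract this header describes (the require path and file paths are illustrative; per the implementation below, `reserve` hands each function a callback that releases its claim):

const { PathReservations } = require('./path-reservations.js');

const reservations = new PathReservations();
reservations.reserve(['a/b/file.txt'], clear => {
  // first in line for every reserved path: safe to touch a/b/file.txt
  // and its parent directories now
  clear(); // release the claim so the next waiter (if any) can run
});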
+// +// Used by async unpack to avoid clobbering paths in use, +// while still allowing maximal safe parallelization. +Object.defineProperty(exports, "__esModule", { value: true }); +exports.PathReservations = void 0; +const node_path_1 = require("node:path"); +const normalize_unicode_js_1 = require("./normalize-unicode.js"); +const strip_trailing_slashes_js_1 = require("./strip-trailing-slashes.js"); +const platform = process.env.TESTING_TAR_FAKE_PLATFORM || process.platform; +const isWindows = platform === 'win32'; +// return a set of parent dirs for a given path +// '/a/b/c/d' -> ['/', '/a', '/a/b', '/a/b/c', '/a/b/c/d'] +const getDirs = (path) => { + const dirs = path + .split('/') + .slice(0, -1) + .reduce((set, path) => { + const s = set[set.length - 1]; + if (s !== undefined) { + path = (0, node_path_1.join)(s, path); + } + set.push(path || '/'); + return set; + }, []); + return dirs; +}; +class PathReservations { + // path => [function or Set] + // A Set object means a directory reservation + // A fn is a direct reservation on that path + #queues = new Map(); + // fn => {paths:[path,...], dirs:[path, ...]} + #reservations = new Map(); + // functions currently running + #running = new Set(); + reserve(paths, fn) { + paths = + isWindows ? + ['win32 parallelization disabled'] + : paths.map(p => { + // don't need normPath, because we skip this entirely for windows + return (0, strip_trailing_slashes_js_1.stripTrailingSlashes)((0, node_path_1.join)((0, normalize_unicode_js_1.normalizeUnicode)(p))).toLowerCase(); + }); + const dirs = new Set(paths.map(path => getDirs(path)).reduce((a, b) => a.concat(b))); + this.#reservations.set(fn, { dirs, paths }); + for (const p of paths) { + const q = this.#queues.get(p); + if (!q) { + this.#queues.set(p, [fn]); + } + else { + q.push(fn); + } + } + for (const dir of dirs) { + const q = this.#queues.get(dir); + if (!q) { + this.#queues.set(dir, [new Set([fn])]); + } + else { + const l = q[q.length - 1]; + if (l instanceof Set) { + l.add(fn); + } + else { + q.push(new Set([fn])); + } + } + } + return this.#run(fn); + } + // return the queues for each path the function cares about + // fn => {paths, dirs} + #getQueues(fn) { + const res = this.#reservations.get(fn); + /* c8 ignore start */ + if (!res) { + throw new Error('function does not have any path reservations'); + } + /* c8 ignore stop */ + return { + paths: res.paths.map((path) => this.#queues.get(path)), + dirs: [...res.dirs].map(path => this.#queues.get(path)), + }; + } + // check if fn is first in line for all its paths, and is + // included in the first set for all its dir queues + check(fn) { + const { paths, dirs } = this.#getQueues(fn); + return (paths.every(q => q && q[0] === fn) && + dirs.every(q => q && q[0] instanceof Set && q[0].has(fn))); + } + // run the function if it's first in line and not already running + #run(fn) { + if (this.#running.has(fn) || !this.check(fn)) { + return false; + } + this.#running.add(fn); + fn(() => this.#clear(fn)); + return true; + } + #clear(fn) { + if (!this.#running.has(fn)) { + return false; + } + const res = this.#reservations.get(fn); + /* c8 ignore start */ + if (!res) { + throw new Error('invalid reservation'); + } + /* c8 ignore stop */ + const { paths, dirs } = res; + const next = new Set(); + for (const path of paths) { + const q = this.#queues.get(path); + /* c8 ignore start */ + if (!q || q?.[0] !== fn) { + continue; + } + /* c8 ignore stop */ + const q0 = q[1]; + if (!q0) { + this.#queues.delete(path); + continue; + } + q.shift(); + if 
(typeof q0 === 'function') { + next.add(q0); + } + else { + for (const f of q0) { + next.add(f); + } + } + } + for (const dir of dirs) { + const q = this.#queues.get(dir); + const q0 = q?.[0]; + /* c8 ignore next - type safety only */ + if (!q || !(q0 instanceof Set)) + continue; + if (q0.size === 1 && q.length === 1) { + this.#queues.delete(dir); + continue; + } + else if (q0.size === 1) { + q.shift(); + // next one must be a function, + // or else the Set would've been reused + const n = q[0]; + if (typeof n === 'function') { + next.add(n); + } + } + else { + q0.delete(fn); + } + } + this.#running.delete(fn); + next.forEach(fn => this.#run(fn)); + return true; + } +} +exports.PathReservations = PathReservations; +//# sourceMappingURL=path-reservations.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/pax.js b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/pax.js new file mode 100644 index 00000000000000..d30c0f3efbe9ea --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/pax.js @@ -0,0 +1,158 @@ +"use strict"; +Object.defineProperty(exports, "__esModule", { value: true }); +exports.Pax = void 0; +const node_path_1 = require("node:path"); +const header_js_1 = require("./header.js"); +class Pax { + atime; + mtime; + ctime; + charset; + comment; + gid; + uid; + gname; + uname; + linkpath; + dev; + ino; + nlink; + path; + size; + mode; + global; + constructor(obj, global = false) { + this.atime = obj.atime; + this.charset = obj.charset; + this.comment = obj.comment; + this.ctime = obj.ctime; + this.dev = obj.dev; + this.gid = obj.gid; + this.global = global; + this.gname = obj.gname; + this.ino = obj.ino; + this.linkpath = obj.linkpath; + this.mtime = obj.mtime; + this.nlink = obj.nlink; + this.path = obj.path; + this.size = obj.size; + this.uid = obj.uid; + this.uname = obj.uname; + } + encode() { + const body = this.encodeBody(); + if (body === '') { + return Buffer.allocUnsafe(0); + } + const bodyLen = Buffer.byteLength(body); + // round up to 512 bytes + // add 512 for header + const bufLen = 512 * Math.ceil(1 + bodyLen / 512); + const buf = Buffer.allocUnsafe(bufLen); + // 0-fill the header section, it might not hit every field + for (let i = 0; i < 512; i++) { + buf[i] = 0; + } + new header_js_1.Header({ + // XXX split the path + // then the path should be PaxHeader + basename, but less than 99, + // prepend with the dirname + /* c8 ignore start */ + path: ('PaxHeader/' + (0, node_path_1.basename)(this.path ?? '')).slice(0, 99), + /* c8 ignore stop */ + mode: this.mode || 0o644, + uid: this.uid, + gid: this.gid, + size: bodyLen, + mtime: this.mtime, + type: this.global ? 
'GlobalExtendedHeader' : 'ExtendedHeader', + linkpath: '', + uname: this.uname || '', + gname: this.gname || '', + devmaj: 0, + devmin: 0, + atime: this.atime, + ctime: this.ctime, + }).encode(buf); + buf.write(body, 512, bodyLen, 'utf8'); + // null pad after the body + for (let i = bodyLen + 512; i < buf.length; i++) { + buf[i] = 0; + } + return buf; + } + encodeBody() { + return (this.encodeField('path') + + this.encodeField('ctime') + + this.encodeField('atime') + + this.encodeField('dev') + + this.encodeField('ino') + + this.encodeField('nlink') + + this.encodeField('charset') + + this.encodeField('comment') + + this.encodeField('gid') + + this.encodeField('gname') + + this.encodeField('linkpath') + + this.encodeField('mtime') + + this.encodeField('size') + + this.encodeField('uid') + + this.encodeField('uname')); + } + encodeField(field) { + if (this[field] === undefined) { + return ''; + } + const r = this[field]; + const v = r instanceof Date ? r.getTime() / 1000 : r; + const s = ' ' + + (field === 'dev' || field === 'ino' || field === 'nlink' ? + 'SCHILY.' + : '') + + field + + '=' + + v + + '\n'; + const byteLen = Buffer.byteLength(s); + // the digits includes the length of the digits in ascii base-10 + // so if it's 9 characters, then adding 1 for the 9 makes it 10 + // which makes it 11 chars. + let digits = Math.floor(Math.log(byteLen) / Math.log(10)) + 1; + if (byteLen + digits >= Math.pow(10, digits)) { + digits += 1; + } + const len = digits + byteLen; + return len + s; + } + static parse(str, ex, g = false) { + return new Pax(merge(parseKV(str), ex), g); + } +} +exports.Pax = Pax; +const merge = (a, b) => b ? Object.assign({}, b, a) : a; +const parseKV = (str) => str + .replace(/\n$/, '') + .split('\n') + .reduce(parseKVLine, Object.create(null)); +const parseKVLine = (set, line) => { + const n = parseInt(line, 10); + // XXX Values with \n in them will fail this. + // Refactor to not be a naive line-by-line parse. + if (n !== Buffer.byteLength(line) + 1) { + return set; + } + line = line.slice((n + ' ').length); + const kv = line.split('='); + const r = kv.shift(); + if (!r) { + return set; + } + const k = r.replace(/^SCHILY\.(dev|ino|nlink)/, '$1'); + const v = kv.join('='); + set[k] = + /^([A-Z]+\.)?([mac]|birth|creation)time$/.test(k) ? + new Date(Number(v) * 1000) + : /^[0-9]+$/.test(v) ? +v + : v; + return set; +}; +//# sourceMappingURL=pax.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/read-entry.js b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/read-entry.js new file mode 100644 index 00000000000000..15e2d55c938a43 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/read-entry.js @@ -0,0 +1,140 @@ +"use strict"; +Object.defineProperty(exports, "__esModule", { value: true }); +exports.ReadEntry = void 0; +const minipass_1 = require("minipass"); +const normalize_windows_path_js_1 = require("./normalize-windows-path.js"); +class ReadEntry extends minipass_1.Minipass { + extended; + globalExtended; + header; + startBlockSize; + blockRemain; + remain; + type; + meta = false; + ignore = false; + path; + mode; + uid; + gid; + uname; + gname; + size = 0; + mtime; + atime; + ctime; + linkpath; + dev; + ino; + nlink; + invalid = false; + absolute; + unsupported = false; + constructor(header, ex, gex) { + super({}); + // read entries always start life paused. 
this is to avoid the + // situation where Minipass's auto-ending empty streams results + // in an entry ending before we're ready for it. + this.pause(); + this.extended = ex; + this.globalExtended = gex; + this.header = header; + /* c8 ignore start */ + this.remain = header.size ?? 0; + /* c8 ignore stop */ + this.startBlockSize = 512 * Math.ceil(this.remain / 512); + this.blockRemain = this.startBlockSize; + this.type = header.type; + switch (this.type) { + case 'File': + case 'OldFile': + case 'Link': + case 'SymbolicLink': + case 'CharacterDevice': + case 'BlockDevice': + case 'Directory': + case 'FIFO': + case 'ContiguousFile': + case 'GNUDumpDir': + break; + case 'NextFileHasLongLinkpath': + case 'NextFileHasLongPath': + case 'OldGnuLongPath': + case 'GlobalExtendedHeader': + case 'ExtendedHeader': + case 'OldExtendedHeader': + this.meta = true; + break; + // NOTE: gnutar and bsdtar treat unrecognized types as 'File' + // it may be worth doing the same, but with a warning. + default: + this.ignore = true; + } + /* c8 ignore start */ + if (!header.path) { + throw new Error('no path provided for tar.ReadEntry'); + } + /* c8 ignore stop */ + this.path = (0, normalize_windows_path_js_1.normalizeWindowsPath)(header.path); + this.mode = header.mode; + if (this.mode) { + this.mode = this.mode & 0o7777; + } + this.uid = header.uid; + this.gid = header.gid; + this.uname = header.uname; + this.gname = header.gname; + this.size = this.remain; + this.mtime = header.mtime; + this.atime = header.atime; + this.ctime = header.ctime; + /* c8 ignore start */ + this.linkpath = + header.linkpath ? + (0, normalize_windows_path_js_1.normalizeWindowsPath)(header.linkpath) + : undefined; + /* c8 ignore stop */ + this.uname = header.uname; + this.gname = header.gname; + if (ex) { + this.#slurp(ex); + } + if (gex) { + this.#slurp(gex, true); + } + } + write(data) { + const writeLen = data.length; + if (writeLen > this.blockRemain) { + throw new Error('writing more to entry than is appropriate'); + } + const r = this.remain; + const br = this.blockRemain; + this.remain = Math.max(0, r - writeLen); + this.blockRemain = Math.max(0, br - writeLen); + if (this.ignore) { + return true; + } + if (r >= writeLen) { + return super.write(data); + } + // r < writeLen + return super.write(data.subarray(0, r)); + } + #slurp(ex, gex = false) { + if (ex.path) + ex.path = (0, normalize_windows_path_js_1.normalizeWindowsPath)(ex.path); + if (ex.linkpath) + ex.linkpath = (0, normalize_windows_path_js_1.normalizeWindowsPath)(ex.linkpath); + Object.assign(this, Object.fromEntries(Object.entries(ex).filter(([k, v]) => { + // we slurp in everything except for the path attribute in + // a global extended header, because that's weird. Also, any + // null/undefined values are ignored. + return !(v === null || + v === undefined || + (k === 'path' && gex)); + }))); + } +} +exports.ReadEntry = ReadEntry; +//# sourceMappingURL=read-entry.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/replace.js b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/replace.js new file mode 100644 index 00000000000000..22eff246d4d75f --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/replace.js @@ -0,0 +1,231 @@ +"use strict"; +var __importDefault = (this && this.__importDefault) || function (mod) { + return (mod && mod.__esModule) ? 
mod : { "default": mod }; +}; +Object.defineProperty(exports, "__esModule", { value: true }); +exports.replace = void 0; +// tar -r +const fs_minipass_1 = require("@isaacs/fs-minipass"); +const node_fs_1 = __importDefault(require("node:fs")); +const node_path_1 = __importDefault(require("node:path")); +const header_js_1 = require("./header.js"); +const list_js_1 = require("./list.js"); +const make_command_js_1 = require("./make-command.js"); +const options_js_1 = require("./options.js"); +const pack_js_1 = require("./pack.js"); +// starting at the head of the file, read a Header +// If the checksum is invalid, that's our position to start writing +// If it is, jump forward by the specified size (round up to 512) +// and try again. +// Write the new Pack stream starting there. +const replaceSync = (opt, files) => { + const p = new pack_js_1.PackSync(opt); + let threw = true; + let fd; + let position; + try { + try { + fd = node_fs_1.default.openSync(opt.file, 'r+'); + } + catch (er) { + if (er?.code === 'ENOENT') { + fd = node_fs_1.default.openSync(opt.file, 'w+'); + } + else { + throw er; + } + } + const st = node_fs_1.default.fstatSync(fd); + const headBuf = Buffer.alloc(512); + POSITION: for (position = 0; position < st.size; position += 512) { + for (let bufPos = 0, bytes = 0; bufPos < 512; bufPos += bytes) { + bytes = node_fs_1.default.readSync(fd, headBuf, bufPos, headBuf.length - bufPos, position + bufPos); + if (position === 0 && + headBuf[0] === 0x1f && + headBuf[1] === 0x8b) { + throw new Error('cannot append to compressed archives'); + } + if (!bytes) { + break POSITION; + } + } + const h = new header_js_1.Header(headBuf); + if (!h.cksumValid) { + break; + } + const entryBlockSize = 512 * Math.ceil((h.size || 0) / 512); + if (position + entryBlockSize + 512 > st.size) { + break; + } + // the 512 for the header we just parsed will be added as well + // also jump ahead all the blocks for the body + position += entryBlockSize; + if (opt.mtimeCache && h.mtime) { + opt.mtimeCache.set(String(h.path), h.mtime); + } + } + threw = false; + streamSync(opt, p, position, fd, files); + } + finally { + if (threw) { + try { + node_fs_1.default.closeSync(fd); + } + catch (er) { } + } + } +}; +const streamSync = (opt, p, position, fd, files) => { + const stream = new fs_minipass_1.WriteStreamSync(opt.file, { + fd: fd, + start: position, + }); + p.pipe(stream); + addFilesSync(p, files); +}; +const replaceAsync = (opt, files) => { + files = Array.from(files); + const p = new pack_js_1.Pack(opt); + const getPos = (fd, size, cb_) => { + const cb = (er, pos) => { + if (er) { + node_fs_1.default.close(fd, _ => cb_(er)); + } + else { + cb_(null, pos); + } + }; + let position = 0; + if (size === 0) { + return cb(null, 0); + } + let bufPos = 0; + const headBuf = Buffer.alloc(512); + const onread = (er, bytes) => { + if (er || typeof bytes === 'undefined') { + return cb(er); + } + bufPos += bytes; + if (bufPos < 512 && bytes) { + return node_fs_1.default.read(fd, headBuf, bufPos, headBuf.length - bufPos, position + bufPos, onread); + } + if (position === 0 && + headBuf[0] === 0x1f && + headBuf[1] === 0x8b) { + return cb(new Error('cannot append to compressed archives')); + } + // truncated header + if (bufPos < 512) { + return cb(null, position); + } + const h = new header_js_1.Header(headBuf); + if (!h.cksumValid) { + return cb(null, position); + } + /* c8 ignore next */ + const entryBlockSize = 512 * Math.ceil((h.size ?? 
0) / 512); + if (position + entryBlockSize + 512 > size) { + return cb(null, position); + } + position += entryBlockSize + 512; + if (position >= size) { + return cb(null, position); + } + if (opt.mtimeCache && h.mtime) { + opt.mtimeCache.set(String(h.path), h.mtime); + } + bufPos = 0; + node_fs_1.default.read(fd, headBuf, 0, 512, position, onread); + }; + node_fs_1.default.read(fd, headBuf, 0, 512, position, onread); + }; + const promise = new Promise((resolve, reject) => { + p.on('error', reject); + let flag = 'r+'; + const onopen = (er, fd) => { + if (er && er.code === 'ENOENT' && flag === 'r+') { + flag = 'w+'; + return node_fs_1.default.open(opt.file, flag, onopen); + } + if (er || !fd) { + return reject(er); + } + node_fs_1.default.fstat(fd, (er, st) => { + if (er) { + return node_fs_1.default.close(fd, () => reject(er)); + } + getPos(fd, st.size, (er, position) => { + if (er) { + return reject(er); + } + const stream = new fs_minipass_1.WriteStream(opt.file, { + fd: fd, + start: position, + }); + p.pipe(stream); + stream.on('error', reject); + stream.on('close', resolve); + addFilesAsync(p, files); + }); + }); + }; + node_fs_1.default.open(opt.file, flag, onopen); + }); + return promise; +}; +const addFilesSync = (p, files) => { + files.forEach(file => { + if (file.charAt(0) === '@') { + (0, list_js_1.list)({ + file: node_path_1.default.resolve(p.cwd, file.slice(1)), + sync: true, + noResume: true, + onReadEntry: entry => p.add(entry), + }); + } + else { + p.add(file); + } + }); + p.end(); +}; +const addFilesAsync = async (p, files) => { + for (let i = 0; i < files.length; i++) { + const file = String(files[i]); + if (file.charAt(0) === '@') { + await (0, list_js_1.list)({ + file: node_path_1.default.resolve(String(p.cwd), file.slice(1)), + noResume: true, + onReadEntry: entry => p.add(entry), + }); + } + else { + p.add(file); + } + } + p.end(); +}; +exports.replace = (0, make_command_js_1.makeCommand)(replaceSync, replaceAsync, +/* c8 ignore start */ +() => { + throw new TypeError('file is required'); +}, () => { + throw new TypeError('file is required'); +}, +/* c8 ignore stop */ +(opt, entries) => { + if (!(0, options_js_1.isFile)(opt)) { + throw new TypeError('file is required'); + } + if (opt.gzip || + opt.brotli || + opt.file.endsWith('.br') || + opt.file.endsWith('.tbr')) { + throw new TypeError('cannot append to compressed archives'); + } + if (!entries?.length) { + throw new TypeError('no paths specified to add/replace'); + } +}); +//# sourceMappingURL=replace.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/strip-absolute-path.js b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/strip-absolute-path.js new file mode 100644 index 00000000000000..bb7639c35a1104 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/strip-absolute-path.js @@ -0,0 +1,29 @@ +"use strict"; +Object.defineProperty(exports, "__esModule", { value: true }); +exports.stripAbsolutePath = void 0; +// unix absolute paths are also absolute on win32, so we use this for both +const node_path_1 = require("node:path"); +const { isAbsolute, parse } = node_path_1.win32; +// returns [root, stripped] +// Note that windows will think that //x/y/z/a has a "root" of //x/y, and in +// those cases, we want to sanitize it to x/y/z/a, not z/a, so we strip / +// explicitly if it's the first character. 
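For example, the splits described above (a quick check; the relative require path is an assumption about where this module lives):

const { stripAbsolutePath } = require('./strip-absolute-path.js');

console.log(stripAbsolutePath('/a/b/c'));    // [ '/', 'a/b/c' ]
console.log(stripAbsolutePath('//x/y/z/a')); // [ '//', 'x/y/z/a' ], slashes peeled one at a time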
+// drive-specific relative paths on Windows get their root stripped off even +// though they are not absolute, so `c:../foo` becomes ['c:', '../foo'] +const stripAbsolutePath = (path) => { + let r = ''; + let parsed = parse(path); + while (isAbsolute(path) || parsed.root) { + // windows will think that //x/y/z has a "root" of //x/y/ + // but strip the //?/C:/ off of //?/C:/path + const root = path.charAt(0) === '/' && path.slice(0, 4) !== '//?/' ? + '/' + : parsed.root; + path = path.slice(root.length); + r += root; + parsed = parse(path); + } + return [r, path]; +}; +exports.stripAbsolutePath = stripAbsolutePath; +//# sourceMappingURL=strip-absolute-path.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/strip-trailing-slashes.js b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/strip-trailing-slashes.js new file mode 100644 index 00000000000000..6fa74ad6a4ac93 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/strip-trailing-slashes.js @@ -0,0 +1,18 @@ +"use strict"; +Object.defineProperty(exports, "__esModule", { value: true }); +exports.stripTrailingSlashes = void 0; +// warning: extremely hot code path. +// This has been meticulously optimized for use +// within npm install on large package trees. +// Do not edit without careful benchmarking. +const stripTrailingSlashes = (str) => { + let i = str.length - 1; + let slashesStart = -1; + while (i > -1 && str.charAt(i) === '/') { + slashesStart = i; + i--; + } + return slashesStart === -1 ? str : str.slice(0, slashesStart); +}; +exports.stripTrailingSlashes = stripTrailingSlashes; +//# sourceMappingURL=strip-trailing-slashes.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/symlink-error.js b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/symlink-error.js new file mode 100644 index 00000000000000..cc19ac1a2e3c6b --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/symlink-error.js @@ -0,0 +1,19 @@ +"use strict"; +Object.defineProperty(exports, "__esModule", { value: true }); +exports.SymlinkError = void 0; +class SymlinkError extends Error { + path; + symlink; + syscall = 'symlink'; + code = 'TAR_SYMLINK_ERROR'; + constructor(symlink, path) { + super('TAR_SYMLINK_ERROR: Cannot extract through symbolic link'); + this.symlink = symlink; + this.path = path; + } + get name() { + return 'SymlinkError'; + } +} +exports.SymlinkError = SymlinkError; +//# sourceMappingURL=symlink-error.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/types.js b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/types.js new file mode 100644 index 00000000000000..cb9b684e843b72 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/types.js @@ -0,0 +1,50 @@ +"use strict"; +Object.defineProperty(exports, "__esModule", { value: true }); +exports.code = exports.name = exports.isName = exports.isCode = void 0; +const isCode = (c) => exports.name.has(c); +exports.isCode = isCode; +const isName = (c) => exports.code.has(c); +exports.isName = isName; +// map types from key to human-friendly name +exports.name = new Map([ + ['0', 'File'], + // same as File + ['', 'OldFile'], + ['1', 'Link'], + ['2', 'SymbolicLink'], + // Devices and FIFOs aren't fully supported + // they are parsed, but skipped when unpacking + ['3', 'CharacterDevice'], + ['4', 'BlockDevice'], + ['5', 
'Directory'], + ['6', 'FIFO'], + // same as File + ['7', 'ContiguousFile'], + // pax headers + ['g', 'GlobalExtendedHeader'], + ['x', 'ExtendedHeader'], + // vendor-specific stuff + // skip + ['A', 'SolarisACL'], + // like 5, but with data, which should be skipped + ['D', 'GNUDumpDir'], + // metadata only, skip + ['I', 'Inode'], + // data = link path of next file + ['K', 'NextFileHasLongLinkpath'], + // data = path of next file + ['L', 'NextFileHasLongPath'], + // skip + ['M', 'ContinuationFile'], + // like L + ['N', 'OldGnuLongPath'], + // skip + ['S', 'SparseFile'], + // skip + ['V', 'TapeVolumeHeader'], + // like x + ['X', 'OldExtendedHeader'], +]); +// map the other direction +exports.code = new Map(Array.from(exports.name).map(kv => [kv[1], kv[0]])); +//# sourceMappingURL=types.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/unpack.js b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/unpack.js new file mode 100644 index 00000000000000..edf8acbb18c408 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/unpack.js @@ -0,0 +1,919 @@ +"use strict"; +// the PEND/UNPEND stuff tracks whether we're ready to emit end/close yet. +// but the path reservations are required to avoid race conditions where +// parallelized unpack ops may mess with one another, due to dependencies +// (like a Link depending on its target) or destructive operations (like +// clobbering an fs object to create one of a different type.) +var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) { + if (k2 === undefined) k2 = k; + var desc = Object.getOwnPropertyDescriptor(m, k); + if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) { + desc = { enumerable: true, get: function() { return m[k]; } }; + } + Object.defineProperty(o, k2, desc); +}) : (function(o, m, k, k2) { + if (k2 === undefined) k2 = k; + o[k2] = m[k]; +})); +var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) { + Object.defineProperty(o, "default", { enumerable: true, value: v }); +}) : function(o, v) { + o["default"] = v; +}); +var __importStar = (this && this.__importStar) || function (mod) { + if (mod && mod.__esModule) return mod; + var result = {}; + if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k); + __setModuleDefault(result, mod); + return result; +}; +var __importDefault = (this && this.__importDefault) || function (mod) { + return (mod && mod.__esModule) ? 
mod : { "default": mod }; +}; +Object.defineProperty(exports, "__esModule", { value: true }); +exports.UnpackSync = exports.Unpack = void 0; +const fsm = __importStar(require("@isaacs/fs-minipass")); +const node_assert_1 = __importDefault(require("node:assert")); +const node_crypto_1 = require("node:crypto"); +const node_fs_1 = __importDefault(require("node:fs")); +const node_path_1 = __importDefault(require("node:path")); +const get_write_flag_js_1 = require("./get-write-flag.js"); +const mkdir_js_1 = require("./mkdir.js"); +const normalize_unicode_js_1 = require("./normalize-unicode.js"); +const normalize_windows_path_js_1 = require("./normalize-windows-path.js"); +const parse_js_1 = require("./parse.js"); +const strip_absolute_path_js_1 = require("./strip-absolute-path.js"); +const strip_trailing_slashes_js_1 = require("./strip-trailing-slashes.js"); +const wc = __importStar(require("./winchars.js")); +const path_reservations_js_1 = require("./path-reservations.js"); +const ONENTRY = Symbol('onEntry'); +const CHECKFS = Symbol('checkFs'); +const CHECKFS2 = Symbol('checkFs2'); +const PRUNECACHE = Symbol('pruneCache'); +const ISREUSABLE = Symbol('isReusable'); +const MAKEFS = Symbol('makeFs'); +const FILE = Symbol('file'); +const DIRECTORY = Symbol('directory'); +const LINK = Symbol('link'); +const SYMLINK = Symbol('symlink'); +const HARDLINK = Symbol('hardlink'); +const UNSUPPORTED = Symbol('unsupported'); +const CHECKPATH = Symbol('checkPath'); +const MKDIR = Symbol('mkdir'); +const ONERROR = Symbol('onError'); +const PENDING = Symbol('pending'); +const PEND = Symbol('pend'); +const UNPEND = Symbol('unpend'); +const ENDED = Symbol('ended'); +const MAYBECLOSE = Symbol('maybeClose'); +const SKIP = Symbol('skip'); +const DOCHOWN = Symbol('doChown'); +const UID = Symbol('uid'); +const GID = Symbol('gid'); +const CHECKED_CWD = Symbol('checkedCwd'); +const platform = process.env.TESTING_TAR_FAKE_PLATFORM || process.platform; +const isWindows = platform === 'win32'; +const DEFAULT_MAX_DEPTH = 1024; +// Unlinks on Windows are not atomic. +// +// This means that if you have a file entry, followed by another +// file entry with an identical name, and you cannot re-use the file +// (because it's a hardlink, or because unlink:true is set, or it's +// Windows, which does not have useful nlink values), then the unlink +// will be committed to the disk AFTER the new file has been written +// over the old one, deleting the new file. +// +// To work around this, on Windows systems, we rename the file and then +// delete the renamed file. It's a sloppy kludge, but frankly, I do not +// know of a better way to do this, given windows' non-atomic unlink +// semantics. +// +// See: https://github.com/npm/node-tar/issues/183 +/* c8 ignore start */ +const unlinkFile = (path, cb) => { + if (!isWindows) { + return node_fs_1.default.unlink(path, cb); + } + const name = path + '.DELETE.' + (0, node_crypto_1.randomBytes)(16).toString('hex'); + node_fs_1.default.rename(path, name, er => { + if (er) { + return cb(er); + } + node_fs_1.default.unlink(name, cb); + }); +}; +/* c8 ignore stop */ +/* c8 ignore start */ +const unlinkFileSync = (path) => { + if (!isWindows) { + return node_fs_1.default.unlinkSync(path); + } + const name = path + '.DELETE.' + (0, node_crypto_1.randomBytes)(16).toString('hex'); + node_fs_1.default.renameSync(path, name); + node_fs_1.default.unlinkSync(name); +}; +/* c8 ignore stop */ +// this.gid, entry.gid, this.processUid +const uint32 = (a, b, c) => a !== undefined && a === a >>> 0 ? 
a
+        : b !== undefined && b === b >>> 0 ? b
+        : c;
+// clear the cache if it's a case-insensitive unicode-squashing match.
+// we can't know if the current file system is case-sensitive or supports
+// unicode fully, so we check for similarity on the maximally compatible
+// representation. Err on the side of pruning, since all it's doing is
+// preventing lstats, and it's not the end of the world if we get a false
+// positive.
+// Note that on windows, we always drop the entire cache whenever a
+// symbolic link is encountered, because 8.3 filenames are impossible
+// to reason about, and collisions are hazards rather than just failures.
+const cacheKeyNormalize = (path) => (0, strip_trailing_slashes_js_1.stripTrailingSlashes)((0, normalize_windows_path_js_1.normalizeWindowsPath)((0, normalize_unicode_js_1.normalizeUnicode)(path))).toLowerCase();
+// remove all cache entries matching ${abs}/**
+const pruneCache = (cache, abs) => {
+    abs = cacheKeyNormalize(abs);
+    for (const path of cache.keys()) {
+        const pnorm = cacheKeyNormalize(path);
+        if (pnorm === abs || pnorm.indexOf(abs + '/') === 0) {
+            cache.delete(path);
+        }
+    }
+};
+const dropCache = (cache) => {
+    for (const key of cache.keys()) {
+        cache.delete(key);
+    }
+};
+class Unpack extends parse_js_1.Parser {
+    [ENDED] = false;
+    [CHECKED_CWD] = false;
+    [PENDING] = 0;
+    reservations = new path_reservations_js_1.PathReservations();
+    transform;
+    writable = true;
+    readable = false;
+    dirCache;
+    uid;
+    gid;
+    setOwner;
+    preserveOwner;
+    processGid;
+    processUid;
+    maxDepth;
+    forceChown;
+    win32;
+    newer;
+    keep;
+    noMtime;
+    preservePaths;
+    unlink;
+    cwd;
+    strip;
+    processUmask;
+    umask;
+    dmode;
+    fmode;
+    chmod;
+    constructor(opt = {}) {
+        opt.ondone = () => {
+            this[ENDED] = true;
+            this[MAYBECLOSE]();
+        };
+        super(opt);
+        this.transform = opt.transform;
+        this.dirCache = opt.dirCache || new Map();
+        this.chmod = !!opt.chmod;
+        if (typeof opt.uid === 'number' || typeof opt.gid === 'number') {
+            // need both or neither
+            if (typeof opt.uid !== 'number' ||
+                typeof opt.gid !== 'number') {
+                throw new TypeError('cannot set owner without number uid and gid');
+            }
+            if (opt.preserveOwner) {
+                throw new TypeError('cannot preserve owner in archive and also set owner explicitly');
+            }
+            this.uid = opt.uid;
+            this.gid = opt.gid;
+            this.setOwner = true;
+        }
+        else {
+            this.uid = undefined;
+            this.gid = undefined;
+            this.setOwner = false;
+        }
+        // default true for root
+        if (opt.preserveOwner === undefined &&
+            typeof opt.uid !== 'number') {
+            this.preserveOwner = !!(process.getuid && process.getuid() === 0);
+        }
+        else {
+            this.preserveOwner = !!opt.preserveOwner;
+        }
+        this.processUid =
+            (this.preserveOwner || this.setOwner) && process.getuid ?
+                process.getuid()
+                : undefined;
+        this.processGid =
+            (this.preserveOwner || this.setOwner) && process.getgid ?
+                process.getgid()
+                : undefined;
+        // prevent excessively deep nesting of subfolders
+        // set to `Infinity` to remove this restriction
+        this.maxDepth =
+            typeof opt.maxDepth === 'number' ?
+                opt.maxDepth
+                : DEFAULT_MAX_DEPTH;
+        // mostly just for testing, but useful in some cases.
+        // Forcibly trigger a chown on every entry, no matter what
+        this.forceChown = opt.forceChown === true;
+        // turn ><?| in filenames into 0xf000-higher encoded forms
+        this.win32 = !!opt.win32 || isWindows;
+        // do not unpack over files that are newer than what's in the archive
+        this.newer = !!opt.newer;
+        // do not unpack over ANY files
+        this.keep = !!opt.keep;
+        // do not set mtime/atime of extracted entries
+        this.noMtime = !!opt.noMtime;
+        // allow .., absolute path entries, and writing through symlinks
+        this.preservePaths = !!opt.preservePaths;
+        // unlink files and links before writing. This breaks existing hard
+        // links, and removes symlink directories rather than erroring
+        this.unlink = !!opt.unlink;
+        this.cwd = (0, normalize_windows_path_js_1.normalizeWindowsPath)(node_path_1.default.resolve(opt.cwd || process.cwd()));
+        this.strip = Number(opt.strip) || 0;
+        // if we're not chmodding, then we don't need the process umask
+        this.processUmask =
+            !this.chmod ? 0
+                : typeof opt.processUmask === 'number' ? opt.processUmask
+                    : process.umask();
+        this.umask =
+            typeof opt.umask === 'number' ? opt.umask : this.processUmask;
+        // default mode for dirs created as parents
+        this.dmode = opt.dmode || 0o0777 & ~this.umask;
+        this.fmode = opt.fmode || 0o0666 & ~this.umask;
+        this.on('entry', entry => this[ONENTRY](entry));
+    }
+    // a bad or damaged archive is a warning for Parser, but an error
+    // when extracting. Mark those errors as unrecoverable, because
+    // the Unpack contract cannot be met.
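In consumer terms, a sketch of that contract against the package's public API (the archive and directory names are made up): listing a damaged archive surfaces TAR_BAD_ARCHIVE through onwarn, while extracting the same archive rejects outright.

const tar = require('tar');

// Parser path: damage arrives as a recoverable warning.
tar.list({ file: 'damaged.tar', onwarn: (code, msg) => console.warn(code, msg) });

// Unpack path: the same damage is marked unrecoverable and becomes an error.
tar.extract({ file: 'damaged.tar', cwd: 'out' }).catch(er => console.error(er.tarCode));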
+ warn(code, msg, data = {}) { + if (code === 'TAR_BAD_ARCHIVE' || code === 'TAR_ABORT') { + data.recoverable = false; + } + return super.warn(code, msg, data); + } + [MAYBECLOSE]() { + if (this[ENDED] && this[PENDING] === 0) { + this.emit('prefinish'); + this.emit('finish'); + this.emit('end'); + } + } + [CHECKPATH](entry) { + const p = (0, normalize_windows_path_js_1.normalizeWindowsPath)(entry.path); + const parts = p.split('/'); + if (this.strip) { + if (parts.length < this.strip) { + return false; + } + if (entry.type === 'Link') { + const linkparts = (0, normalize_windows_path_js_1.normalizeWindowsPath)(String(entry.linkpath)).split('/'); + if (linkparts.length >= this.strip) { + entry.linkpath = linkparts.slice(this.strip).join('/'); + } + else { + return false; + } + } + parts.splice(0, this.strip); + entry.path = parts.join('/'); + } + if (isFinite(this.maxDepth) && parts.length > this.maxDepth) { + this.warn('TAR_ENTRY_ERROR', 'path excessively deep', { + entry, + path: p, + depth: parts.length, + maxDepth: this.maxDepth, + }); + return false; + } + if (!this.preservePaths) { + if (parts.includes('..') || + /* c8 ignore next */ + (isWindows && /^[a-z]:\.\.$/i.test(parts[0] ?? ''))) { + this.warn('TAR_ENTRY_ERROR', `path contains '..'`, { + entry, + path: p, + }); + return false; + } + // strip off the root + const [root, stripped] = (0, strip_absolute_path_js_1.stripAbsolutePath)(p); + if (root) { + entry.path = String(stripped); + this.warn('TAR_ENTRY_INFO', `stripping ${root} from absolute path`, { + entry, + path: p, + }); + } + } + if (node_path_1.default.isAbsolute(entry.path)) { + entry.absolute = (0, normalize_windows_path_js_1.normalizeWindowsPath)(node_path_1.default.resolve(entry.path)); + } + else { + entry.absolute = (0, normalize_windows_path_js_1.normalizeWindowsPath)(node_path_1.default.resolve(this.cwd, entry.path)); + } + // if we somehow ended up with a path that escapes the cwd, and we are + // not in preservePaths mode, then something is fishy! This should have + // been prevented above, so ignore this for coverage. + /* c8 ignore start - defense in depth */ + if (!this.preservePaths && + typeof entry.absolute === 'string' && + entry.absolute.indexOf(this.cwd + '/') !== 0 && + entry.absolute !== this.cwd) { + this.warn('TAR_ENTRY_ERROR', 'path escaped extraction target', { + entry, + path: (0, normalize_windows_path_js_1.normalizeWindowsPath)(entry.path), + resolvedPath: entry.absolute, + cwd: this.cwd, + }); + return false; + } + /* c8 ignore stop */ + // an archive can set properties on the extraction directory, but it + // may not replace the cwd with a different kind of thing entirely. 
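Distilled into a standalone sketch (not the module's code; POSIX paths assumed), the containment rule the checks above enforce:

const path = require('node:path');

// An entry passes only if, after dropping any absolute root, it contains
// no '..' segments and resolves inside the extraction cwd.
const resolvesInside = (cwd, entryPath) => {
  if (entryPath.split('/').includes('..')) return false; // reject traversal
  const abs = path.resolve(cwd, entryPath.replace(/^\/+/, ''));
  return abs === cwd || abs.startsWith(cwd + '/');
};

console.log(resolvesInside('/tmp/out', 'a/b.txt'));   // true
console.log(resolvesInside('/tmp/out', '../escape')); // false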
+ if (entry.absolute === this.cwd && + entry.type !== 'Directory' && + entry.type !== 'GNUDumpDir') { + return false; + } + // only encode : chars that aren't drive letter indicators + if (this.win32) { + const { root: aRoot } = node_path_1.default.win32.parse(String(entry.absolute)); + entry.absolute = + aRoot + wc.encode(String(entry.absolute).slice(aRoot.length)); + const { root: pRoot } = node_path_1.default.win32.parse(entry.path); + entry.path = pRoot + wc.encode(entry.path.slice(pRoot.length)); + } + return true; + } + [ONENTRY](entry) { + if (!this[CHECKPATH](entry)) { + return entry.resume(); + } + node_assert_1.default.equal(typeof entry.absolute, 'string'); + switch (entry.type) { + case 'Directory': + case 'GNUDumpDir': + if (entry.mode) { + entry.mode = entry.mode | 0o700; + } + // eslint-disable-next-line no-fallthrough + case 'File': + case 'OldFile': + case 'ContiguousFile': + case 'Link': + case 'SymbolicLink': + return this[CHECKFS](entry); + case 'CharacterDevice': + case 'BlockDevice': + case 'FIFO': + default: + return this[UNSUPPORTED](entry); + } + } + [ONERROR](er, entry) { + // Cwd has to exist, or else nothing works. That's serious. + // Other errors are warnings, which raise the error in strict + // mode, but otherwise continue on. + if (er.name === 'CwdError') { + this.emit('error', er); + } + else { + this.warn('TAR_ENTRY_ERROR', er, { entry }); + this[UNPEND](); + entry.resume(); + } + } + [MKDIR](dir, mode, cb) { + (0, mkdir_js_1.mkdir)((0, normalize_windows_path_js_1.normalizeWindowsPath)(dir), { + uid: this.uid, + gid: this.gid, + processUid: this.processUid, + processGid: this.processGid, + umask: this.processUmask, + preserve: this.preservePaths, + unlink: this.unlink, + cache: this.dirCache, + cwd: this.cwd, + mode: mode, + }, cb); + } + [DOCHOWN](entry) { + // in preserve owner mode, chown if the entry doesn't match process + // in set owner mode, chown if setting doesn't match process + return (this.forceChown || + (this.preserveOwner && + ((typeof entry.uid === 'number' && + entry.uid !== this.processUid) || + (typeof entry.gid === 'number' && + entry.gid !== this.processGid))) || + (typeof this.uid === 'number' && + this.uid !== this.processUid) || + (typeof this.gid === 'number' && this.gid !== this.processGid)); + } + [UID](entry) { + return uint32(this.uid, entry.uid, this.processUid); + } + [GID](entry) { + return uint32(this.gid, entry.gid, this.processGid); + } + [FILE](entry, fullyDone) { + const mode = typeof entry.mode === 'number' ? + entry.mode & 0o7777 + : this.fmode; + const stream = new fsm.WriteStream(String(entry.absolute), { + // slight lie, but it can be numeric flags + flags: (0, get_write_flag_js_1.getWriteFlag)(entry.size), + mode: mode, + autoClose: false, + }); + stream.on('error', (er) => { + if (stream.fd) { + node_fs_1.default.close(stream.fd, () => { }); + } + // flush all the data out so that we aren't left hanging + // if the error wasn't actually fatal. otherwise the parse + // is blocked, and we never proceed. 
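The same unblocking idea in miniature (illustrative only, using Node's stream primitives): a sink that acknowledges every chunk lets an upstream producer run to completion even though the data is discarded, which is what the next line does to the failed file stream.

const { Writable } = require('node:stream');

// Accepts and discards every chunk, immediately signalling success, so
// whatever is piped in can always drain and end.
const devNull = new Writable({ write: (chunk, enc, cb) => cb() });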
+ stream.write = () => true; + this[ONERROR](er, entry); + fullyDone(); + }); + let actions = 1; + const done = (er) => { + if (er) { + /* c8 ignore start - we should always have a fd by now */ + if (stream.fd) { + node_fs_1.default.close(stream.fd, () => { }); + } + /* c8 ignore stop */ + this[ONERROR](er, entry); + fullyDone(); + return; + } + if (--actions === 0) { + if (stream.fd !== undefined) { + node_fs_1.default.close(stream.fd, er => { + if (er) { + this[ONERROR](er, entry); + } + else { + this[UNPEND](); + } + fullyDone(); + }); + } + } + }; + stream.on('finish', () => { + // if futimes fails, try utimes + // if utimes fails, fail with the original error + // same for fchown/chown + const abs = String(entry.absolute); + const fd = stream.fd; + if (typeof fd === 'number' && entry.mtime && !this.noMtime) { + actions++; + const atime = entry.atime || new Date(); + const mtime = entry.mtime; + node_fs_1.default.futimes(fd, atime, mtime, er => er ? + node_fs_1.default.utimes(abs, atime, mtime, er2 => done(er2 && er)) + : done()); + } + if (typeof fd === 'number' && this[DOCHOWN](entry)) { + actions++; + const uid = this[UID](entry); + const gid = this[GID](entry); + if (typeof uid === 'number' && typeof gid === 'number') { + node_fs_1.default.fchown(fd, uid, gid, er => er ? + node_fs_1.default.chown(abs, uid, gid, er2 => done(er2 && er)) + : done()); + } + } + done(); + }); + const tx = this.transform ? this.transform(entry) || entry : entry; + if (tx !== entry) { + tx.on('error', (er) => { + this[ONERROR](er, entry); + fullyDone(); + }); + entry.pipe(tx); + } + tx.pipe(stream); + } + [DIRECTORY](entry, fullyDone) { + const mode = typeof entry.mode === 'number' ? + entry.mode & 0o7777 + : this.dmode; + this[MKDIR](String(entry.absolute), mode, er => { + if (er) { + this[ONERROR](er, entry); + fullyDone(); + return; + } + let actions = 1; + const done = () => { + if (--actions === 0) { + fullyDone(); + this[UNPEND](); + entry.resume(); + } + }; + if (entry.mtime && !this.noMtime) { + actions++; + node_fs_1.default.utimes(String(entry.absolute), entry.atime || new Date(), entry.mtime, done); + } + if (this[DOCHOWN](entry)) { + actions++; + node_fs_1.default.chown(String(entry.absolute), Number(this[UID](entry)), Number(this[GID](entry)), done); + } + done(); + }); + } + [UNSUPPORTED](entry) { + entry.unsupported = true; + this.warn('TAR_ENTRY_UNSUPPORTED', `unsupported entry type: ${entry.type}`, { entry }); + entry.resume(); + } + [SYMLINK](entry, done) { + this[LINK](entry, String(entry.linkpath), 'symlink', done); + } + [HARDLINK](entry, done) { + const linkpath = (0, normalize_windows_path_js_1.normalizeWindowsPath)(node_path_1.default.resolve(this.cwd, String(entry.linkpath))); + this[LINK](entry, linkpath, 'link', done); + } + [PEND]() { + this[PENDING]++; + } + [UNPEND]() { + this[PENDING]--; + this[MAYBECLOSE](); + } + [SKIP](entry) { + this[UNPEND](); + entry.resume(); + } + // Check if we can reuse an existing filesystem entry safely and + // overwrite it, rather than unlinking and recreating + // Windows doesn't report a useful nlink, so we just never reuse entries + [ISREUSABLE](entry, st) { + return (entry.type === 'File' && + !this.unlink && + st.isFile() && + st.nlink <= 1 && + !isWindows); + } + // check if a thing is there, and if so, try to clobber it + [CHECKFS](entry) { + this[PEND](); + const paths = [entry.path]; + if (entry.linkpath) { + paths.push(entry.linkpath); + } + this.reservations.reserve(paths, done => this[CHECKFS2](entry, done)); + } + 
[PRUNECACHE](entry) { + // if we are not creating a directory, and the path is in the dirCache, + // then that means we are about to delete the directory we created + // previously, and it is no longer going to be a directory, and neither + // is any of its children. + // If a symbolic link is encountered, all bets are off. There is no + // reasonable way to sanitize the cache in such a way we will be able to + // avoid having filesystem collisions. If this happens with a non-symlink + // entry, it'll just fail to unpack, but a symlink to a directory, using an + // 8.3 shortname or certain unicode attacks, can evade detection and lead + // to arbitrary writes to anywhere on the system. + if (entry.type === 'SymbolicLink') { + dropCache(this.dirCache); + } + else if (entry.type !== 'Directory') { + pruneCache(this.dirCache, String(entry.absolute)); + } + } + [CHECKFS2](entry, fullyDone) { + this[PRUNECACHE](entry); + const done = (er) => { + this[PRUNECACHE](entry); + fullyDone(er); + }; + const checkCwd = () => { + this[MKDIR](this.cwd, this.dmode, er => { + if (er) { + this[ONERROR](er, entry); + done(); + return; + } + this[CHECKED_CWD] = true; + start(); + }); + }; + const start = () => { + if (entry.absolute !== this.cwd) { + const parent = (0, normalize_windows_path_js_1.normalizeWindowsPath)(node_path_1.default.dirname(String(entry.absolute))); + if (parent !== this.cwd) { + return this[MKDIR](parent, this.dmode, er => { + if (er) { + this[ONERROR](er, entry); + done(); + return; + } + afterMakeParent(); + }); + } + } + afterMakeParent(); + }; + const afterMakeParent = () => { + node_fs_1.default.lstat(String(entry.absolute), (lstatEr, st) => { + if (st && + (this.keep || + /* c8 ignore next */ + (this.newer && st.mtime > (entry.mtime ?? st.mtime)))) { + this[SKIP](entry); + done(); + return; + } + if (lstatEr || this[ISREUSABLE](entry, st)) { + return this[MAKEFS](null, entry, done); + } + if (st.isDirectory()) { + if (entry.type === 'Directory') { + const needChmod = this.chmod && + entry.mode && + (st.mode & 0o7777) !== entry.mode; + const afterChmod = (er) => this[MAKEFS](er ?? null, entry, done); + if (!needChmod) { + return afterChmod(); + } + return node_fs_1.default.chmod(String(entry.absolute), Number(entry.mode), afterChmod); + } + // Not a dir entry, have to remove it. + // NB: the only way to end up with an entry that is the cwd + // itself, in such a way that == does not detect, is a + // tricky windows absolute path with UNC or 8.3 parts (and + // preservePaths:true, or else it will have been stripped). + // In that case, the user has opted out of path protections + // explicitly, so if they blow away the cwd, c'est la vie. + if (entry.absolute !== this.cwd) { + return node_fs_1.default.rmdir(String(entry.absolute), (er) => this[MAKEFS](er ?? null, entry, done)); + } + } + // not a dir, and not reusable + // don't remove if the cwd, we want that error + if (entry.absolute === this.cwd) { + return this[MAKEFS](null, entry, done); + } + unlinkFile(String(entry.absolute), er => this[MAKEFS](er ?? 
null, entry, done)); + }); + }; + if (this[CHECKED_CWD]) { + start(); + } + else { + checkCwd(); + } + } + [MAKEFS](er, entry, done) { + if (er) { + this[ONERROR](er, entry); + done(); + return; + } + switch (entry.type) { + case 'File': + case 'OldFile': + case 'ContiguousFile': + return this[FILE](entry, done); + case 'Link': + return this[HARDLINK](entry, done); + case 'SymbolicLink': + return this[SYMLINK](entry, done); + case 'Directory': + case 'GNUDumpDir': + return this[DIRECTORY](entry, done); + } + } + [LINK](entry, linkpath, link, done) { + // XXX: get the type ('symlink' or 'junction') for windows + node_fs_1.default[link](linkpath, String(entry.absolute), er => { + if (er) { + this[ONERROR](er, entry); + } + else { + this[UNPEND](); + entry.resume(); + } + done(); + }); + } +} +exports.Unpack = Unpack; +const callSync = (fn) => { + try { + return [null, fn()]; + } + catch (er) { + return [er, null]; + } +}; +class UnpackSync extends Unpack { + sync = true; + [MAKEFS](er, entry) { + return super[MAKEFS](er, entry, () => { }); + } + [CHECKFS](entry) { + this[PRUNECACHE](entry); + if (!this[CHECKED_CWD]) { + const er = this[MKDIR](this.cwd, this.dmode); + if (er) { + return this[ONERROR](er, entry); + } + this[CHECKED_CWD] = true; + } + // don't bother to make the parent if the current entry is the cwd, + // we've already checked it. + if (entry.absolute !== this.cwd) { + const parent = (0, normalize_windows_path_js_1.normalizeWindowsPath)(node_path_1.default.dirname(String(entry.absolute))); + if (parent !== this.cwd) { + const mkParent = this[MKDIR](parent, this.dmode); + if (mkParent) { + return this[ONERROR](mkParent, entry); + } + } + } + const [lstatEr, st] = callSync(() => node_fs_1.default.lstatSync(String(entry.absolute))); + if (st && + (this.keep || + /* c8 ignore next */ + (this.newer && st.mtime > (entry.mtime ?? st.mtime)))) { + return this[SKIP](entry); + } + if (lstatEr || this[ISREUSABLE](entry, st)) { + return this[MAKEFS](null, entry); + } + if (st.isDirectory()) { + if (entry.type === 'Directory') { + const needChmod = this.chmod && + entry.mode && + (st.mode & 0o7777) !== entry.mode; + const [er] = needChmod ? + callSync(() => { + node_fs_1.default.chmodSync(String(entry.absolute), Number(entry.mode)); + }) + : []; + return this[MAKEFS](er, entry); + } + // not a dir entry, have to remove it + const [er] = callSync(() => node_fs_1.default.rmdirSync(String(entry.absolute))); + this[MAKEFS](er, entry); + } + // not a dir, and not reusable. + // don't remove if it's the cwd, since we want that error. + const [er] = entry.absolute === this.cwd ? + [] + : callSync(() => unlinkFileSync(String(entry.absolute))); + this[MAKEFS](er, entry); + } + [FILE](entry, done) { + const mode = typeof entry.mode === 'number' ? + entry.mode & 0o7777 + : this.fmode; + const oner = (er) => { + let closeError; + try { + node_fs_1.default.closeSync(fd); + } + catch (e) { + closeError = e; + } + if (er || closeError) { + this[ONERROR](er || closeError, entry); + } + done(); + }; + let fd; + try { + fd = node_fs_1.default.openSync(String(entry.absolute), (0, get_write_flag_js_1.getWriteFlag)(entry.size), mode); + } + catch (er) { + return oner(er); + } + const tx = this.transform ? 
this.transform(entry) || entry : entry; + if (tx !== entry) { + tx.on('error', (er) => this[ONERROR](er, entry)); + entry.pipe(tx); + } + tx.on('data', (chunk) => { + try { + node_fs_1.default.writeSync(fd, chunk, 0, chunk.length); + } + catch (er) { + oner(er); + } + }); + tx.on('end', () => { + let er = null; + // try both, falling futimes back to utimes + // if either fails, handle the first error + if (entry.mtime && !this.noMtime) { + const atime = entry.atime || new Date(); + const mtime = entry.mtime; + try { + node_fs_1.default.futimesSync(fd, atime, mtime); + } + catch (futimeser) { + try { + node_fs_1.default.utimesSync(String(entry.absolute), atime, mtime); + } + catch (utimeser) { + er = futimeser; + } + } + } + if (this[DOCHOWN](entry)) { + const uid = this[UID](entry); + const gid = this[GID](entry); + try { + node_fs_1.default.fchownSync(fd, Number(uid), Number(gid)); + } + catch (fchowner) { + try { + node_fs_1.default.chownSync(String(entry.absolute), Number(uid), Number(gid)); + } + catch (chowner) { + er = er || fchowner; + } + } + } + oner(er); + }); + } + [DIRECTORY](entry, done) { + const mode = typeof entry.mode === 'number' ? + entry.mode & 0o7777 + : this.dmode; + const er = this[MKDIR](String(entry.absolute), mode); + if (er) { + this[ONERROR](er, entry); + done(); + return; + } + if (entry.mtime && !this.noMtime) { + try { + node_fs_1.default.utimesSync(String(entry.absolute), entry.atime || new Date(), entry.mtime); + /* c8 ignore next */ + } + catch (er) { } + } + if (this[DOCHOWN](entry)) { + try { + node_fs_1.default.chownSync(String(entry.absolute), Number(this[UID](entry)), Number(this[GID](entry))); + } + catch (er) { } + } + done(); + entry.resume(); + } + [MKDIR](dir, mode) { + try { + return (0, mkdir_js_1.mkdirSync)((0, normalize_windows_path_js_1.normalizeWindowsPath)(dir), { + uid: this.uid, + gid: this.gid, + processUid: this.processUid, + processGid: this.processGid, + umask: this.processUmask, + preserve: this.preservePaths, + unlink: this.unlink, + cache: this.dirCache, + cwd: this.cwd, + mode: mode, + }); + } + catch (er) { + return er; + } + } + [LINK](entry, linkpath, link, done) { + const ls = `${link}Sync`; + try { + node_fs_1.default[ls](linkpath, String(entry.absolute)); + done(); + entry.resume(); + } + catch (er) { + return this[ONERROR](er, entry); + } + } +} +exports.UnpackSync = UnpackSync; +//# sourceMappingURL=unpack.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/update.js b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/update.js new file mode 100644 index 00000000000000..7687896f4bfeeb --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/update.js @@ -0,0 +1,33 @@ +"use strict"; +// tar -u +Object.defineProperty(exports, "__esModule", { value: true }); +exports.update = void 0; +const make_command_js_1 = require("./make-command.js"); +const replace_js_1 = require("./replace.js"); +// just call tar.r with the filter and mtimeCache +exports.update = (0, make_command_js_1.makeCommand)(replace_js_1.replace.syncFile, replace_js_1.replace.asyncFile, replace_js_1.replace.syncNoFile, replace_js_1.replace.asyncNoFile, (opt, entries = []) => { + replace_js_1.replace.validate?.(opt, entries); + mtimeFilter(opt); +}); +const mtimeFilter = (opt) => { + const filter = opt.filter; + if (!opt.mtimeCache) { + opt.mtimeCache = new Map(); + } + opt.filter = + filter ? 
+ (path, stat) => filter(path, stat) && + !( + /* c8 ignore start */ + ((opt.mtimeCache?.get(path) ?? stat.mtime ?? 0) > + (stat.mtime ?? 0)) + /* c8 ignore stop */ + ) + : (path, stat) => !( + /* c8 ignore start */ + ((opt.mtimeCache?.get(path) ?? stat.mtime ?? 0) > + (stat.mtime ?? 0)) + /* c8 ignore stop */ + ); +}; +//# sourceMappingURL=update.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/warn-method.js b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/warn-method.js new file mode 100644 index 00000000000000..f25502776e36a3 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/warn-method.js @@ -0,0 +1,31 @@ +"use strict"; +Object.defineProperty(exports, "__esModule", { value: true }); +exports.warnMethod = void 0; +const warnMethod = (self, code, message, data = {}) => { + if (self.file) { + data.file = self.file; + } + if (self.cwd) { + data.cwd = self.cwd; + } + data.code = + (message instanceof Error && + message.code) || + code; + data.tarCode = code; + if (!self.strict && data.recoverable !== false) { + if (message instanceof Error) { + data = Object.assign(message, data); + message = message.message; + } + self.emit('warn', code, message, data); + } + else if (message instanceof Error) { + self.emit('error', Object.assign(message, data)); + } + else { + self.emit('error', Object.assign(new Error(`${code}: ${message}`), data)); + } +}; +exports.warnMethod = warnMethod; +//# sourceMappingURL=warn-method.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/winchars.js b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/winchars.js new file mode 100644 index 00000000000000..c0a4405812929e --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/winchars.js @@ -0,0 +1,14 @@ +"use strict"; +// When writing files on Windows, translate the characters to their +// 0xf000 higher-encoded versions. +Object.defineProperty(exports, "__esModule", { value: true }); +exports.decode = exports.encode = void 0; +const raw = ['|', '<', '>', '?', ':']; +const win = raw.map(char => String.fromCharCode(0xf000 + char.charCodeAt(0))); +const toWin = new Map(raw.map((char, i) => [char, win[i]])); +const toRaw = new Map(win.map((char, i) => [char, raw[i]])); +const encode = (s) => raw.reduce((s, c) => s.split(c).join(toWin.get(c)), s); +exports.encode = encode; +const decode = (s) => win.reduce((s, c) => s.split(c).join(toRaw.get(c)), s); +exports.decode = decode; +//# sourceMappingURL=winchars.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/write-entry.js b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/write-entry.js new file mode 100644 index 00000000000000..45b7efeb795027 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/commonjs/write-entry.js @@ -0,0 +1,689 @@ +"use strict"; +var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) { + if (k2 === undefined) k2 = k; + var desc = Object.getOwnPropertyDescriptor(m, k); + if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) { + desc = { enumerable: true, get: function() { return m[k]; } }; + } + Object.defineProperty(o, k2, desc); +}) : (function(o, m, k, k2) { + if (k2 === undefined) k2 = k; + o[k2] = m[k]; +})); +var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? 
(function(o, v) { + Object.defineProperty(o, "default", { enumerable: true, value: v }); +}) : function(o, v) { + o["default"] = v; +}); +var __importStar = (this && this.__importStar) || function (mod) { + if (mod && mod.__esModule) return mod; + var result = {}; + if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k); + __setModuleDefault(result, mod); + return result; +}; +var __importDefault = (this && this.__importDefault) || function (mod) { + return (mod && mod.__esModule) ? mod : { "default": mod }; +}; +Object.defineProperty(exports, "__esModule", { value: true }); +exports.WriteEntryTar = exports.WriteEntrySync = exports.WriteEntry = void 0; +const fs_1 = __importDefault(require("fs")); +const minipass_1 = require("minipass"); +const path_1 = __importDefault(require("path")); +const header_js_1 = require("./header.js"); +const mode_fix_js_1 = require("./mode-fix.js"); +const normalize_windows_path_js_1 = require("./normalize-windows-path.js"); +const options_js_1 = require("./options.js"); +const pax_js_1 = require("./pax.js"); +const strip_absolute_path_js_1 = require("./strip-absolute-path.js"); +const strip_trailing_slashes_js_1 = require("./strip-trailing-slashes.js"); +const warn_method_js_1 = require("./warn-method.js"); +const winchars = __importStar(require("./winchars.js")); +const prefixPath = (path, prefix) => { + if (!prefix) { + return (0, normalize_windows_path_js_1.normalizeWindowsPath)(path); + } + path = (0, normalize_windows_path_js_1.normalizeWindowsPath)(path).replace(/^\.(\/|$)/, ''); + return (0, strip_trailing_slashes_js_1.stripTrailingSlashes)(prefix) + '/' + path; +}; +const maxReadSize = 16 * 1024 * 1024; +const PROCESS = Symbol('process'); +const FILE = Symbol('file'); +const DIRECTORY = Symbol('directory'); +const SYMLINK = Symbol('symlink'); +const HARDLINK = Symbol('hardlink'); +const HEADER = Symbol('header'); +const READ = Symbol('read'); +const LSTAT = Symbol('lstat'); +const ONLSTAT = Symbol('onlstat'); +const ONREAD = Symbol('onread'); +const ONREADLINK = Symbol('onreadlink'); +const OPENFILE = Symbol('openfile'); +const ONOPENFILE = Symbol('onopenfile'); +const CLOSE = Symbol('close'); +const MODE = Symbol('mode'); +const AWAITDRAIN = Symbol('awaitDrain'); +const ONDRAIN = Symbol('ondrain'); +const PREFIX = Symbol('prefix'); +class WriteEntry extends minipass_1.Minipass { + path; + portable; + myuid = (process.getuid && process.getuid()) || 0; + // until node has builtin pwnam functions, this'll have to do + myuser = process.env.USER || ''; + maxReadSize; + linkCache; + statCache; + preservePaths; + cwd; + strict; + mtime; + noPax; + noMtime; + prefix; + fd; + blockLen = 0; + blockRemain = 0; + buf; + pos = 0; + remain = 0; + length = 0; + offset = 0; + win32; + absolute; + header; + type; + linkpath; + stat; + onWriteEntry; + #hadError = false; + constructor(p, opt_ = {}) { + const opt = (0, options_js_1.dealias)(opt_); + super(); + this.path = (0, normalize_windows_path_js_1.normalizeWindowsPath)(p); + // suppress atime, ctime, uid, gid, uname, gname + this.portable = !!opt.portable; + this.maxReadSize = opt.maxReadSize || maxReadSize; + this.linkCache = opt.linkCache || new Map(); + this.statCache = opt.statCache || new Map(); + this.preservePaths = !!opt.preservePaths; + this.cwd = (0, normalize_windows_path_js_1.normalizeWindowsPath)(opt.cwd || process.cwd()); + this.strict = !!opt.strict; + this.noPax = !!opt.noPax; + this.noMtime = !!opt.noMtime; + 
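// an explicit mtime option, if provided, overrides the file's stat.mtime + // when the header is written + 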
this.mtime = opt.mtime; + this.prefix = + opt.prefix ? (0, normalize_windows_path_js_1.normalizeWindowsPath)(opt.prefix) : undefined; + this.onWriteEntry = opt.onWriteEntry; + if (typeof opt.onwarn === 'function') { + this.on('warn', opt.onwarn); + } + let pathWarn = false; + if (!this.preservePaths) { + const [root, stripped] = (0, strip_absolute_path_js_1.stripAbsolutePath)(this.path); + if (root && typeof stripped === 'string') { + this.path = stripped; + pathWarn = root; + } + } + this.win32 = !!opt.win32 || process.platform === 'win32'; + if (this.win32) { + // force the \ to / normalization, since we might not *actually* + // be on windows, but want \ to be considered a path separator. + this.path = winchars.decode(this.path.replace(/\\/g, '/')); + p = p.replace(/\\/g, '/'); + } + this.absolute = (0, normalize_windows_path_js_1.normalizeWindowsPath)(opt.absolute || path_1.default.resolve(this.cwd, p)); + if (this.path === '') { + this.path = './'; + } + if (pathWarn) { + this.warn('TAR_ENTRY_INFO', `stripping ${pathWarn} from absolute path`, { + entry: this, + path: pathWarn + this.path, + }); + } + const cs = this.statCache.get(this.absolute); + if (cs) { + this[ONLSTAT](cs); + } + else { + this[LSTAT](); + } + } + warn(code, message, data = {}) { + return (0, warn_method_js_1.warnMethod)(this, code, message, data); + } + emit(ev, ...data) { + if (ev === 'error') { + this.#hadError = true; + } + return super.emit(ev, ...data); + } + [LSTAT]() { + fs_1.default.lstat(this.absolute, (er, stat) => { + if (er) { + return this.emit('error', er); + } + this[ONLSTAT](stat); + }); + } + [ONLSTAT](stat) { + this.statCache.set(this.absolute, stat); + this.stat = stat; + if (!stat.isFile()) { + stat.size = 0; + } + this.type = getType(stat); + this.emit('stat', stat); + this[PROCESS](); + } + [PROCESS]() { + switch (this.type) { + case 'File': + return this[FILE](); + case 'Directory': + return this[DIRECTORY](); + case 'SymbolicLink': + return this[SYMLINK](); + // unsupported types are ignored. + default: + return this.end(); + } + } + [MODE](mode) { + return (0, mode_fix_js_1.modeFix)(mode, this.type === 'Directory', this.portable); + } + [PREFIX](path) { + return prefixPath(path, this.prefix); + } + [HEADER]() { + /* c8 ignore start */ + if (!this.stat) { + throw new Error('cannot write header before stat'); + } + /* c8 ignore stop */ + if (this.type === 'Directory' && this.portable) { + this.noMtime = true; + } + this.onWriteEntry?.(this); + this.header = new header_js_1.Header({ + path: this[PREFIX](this.path), + // only apply the prefix to hard links. + linkpath: this.type === 'Link' && this.linkpath !== undefined ? + this[PREFIX](this.linkpath) + : this.linkpath, + // only the permissions and setuid/setgid/sticky bitflags + // not the higher-order bits that specify file type + mode: this[MODE](this.stat.mode), + uid: this.portable ? undefined : this.stat.uid, + gid: this.portable ? undefined : this.stat.gid, + size: this.stat.size, + mtime: this.noMtime ? undefined : this.mtime || this.stat.mtime, + /* c8 ignore next */ + type: this.type === 'Unsupported' ? undefined : this.type, + uname: this.portable ? undefined + : this.stat.uid === this.myuid ? this.myuser + : '', + atime: this.portable ? undefined : this.stat.atime, + ctime: this.portable ? undefined : this.stat.ctime, + }); + if (this.header.encode() && !this.noPax) { + super.write(new pax_js_1.Pax({ + atime: this.portable ? undefined : this.header.atime, + ctime: this.portable ? 
undefined : this.header.ctime, + gid: this.portable ? undefined : this.header.gid, + mtime: this.noMtime ? undefined : (this.mtime || this.header.mtime), + path: this[PREFIX](this.path), + linkpath: this.type === 'Link' && this.linkpath !== undefined ? + this[PREFIX](this.linkpath) + : this.linkpath, + size: this.header.size, + uid: this.portable ? undefined : this.header.uid, + uname: this.portable ? undefined : this.header.uname, + dev: this.portable ? undefined : this.stat.dev, + ino: this.portable ? undefined : this.stat.ino, + nlink: this.portable ? undefined : this.stat.nlink, + }).encode()); + } + const block = this.header?.block; + /* c8 ignore start */ + if (!block) { + throw new Error('failed to encode header'); + } + /* c8 ignore stop */ + super.write(block); + } + [DIRECTORY]() { + /* c8 ignore start */ + if (!this.stat) { + throw new Error('cannot create directory entry without stat'); + } + /* c8 ignore stop */ + if (this.path.slice(-1) !== '/') { + this.path += '/'; + } + this.stat.size = 0; + this[HEADER](); + this.end(); + } + [SYMLINK]() { + fs_1.default.readlink(this.absolute, (er, linkpath) => { + if (er) { + return this.emit('error', er); + } + this[ONREADLINK](linkpath); + }); + } + [ONREADLINK](linkpath) { + this.linkpath = (0, normalize_windows_path_js_1.normalizeWindowsPath)(linkpath); + this[HEADER](); + this.end(); + } + [HARDLINK](linkpath) { + /* c8 ignore start */ + if (!this.stat) { + throw new Error('cannot create link entry without stat'); + } + /* c8 ignore stop */ + this.type = 'Link'; + this.linkpath = (0, normalize_windows_path_js_1.normalizeWindowsPath)(path_1.default.relative(this.cwd, linkpath)); + this.stat.size = 0; + this[HEADER](); + this.end(); + } + [FILE]() { + /* c8 ignore start */ + if (!this.stat) { + throw new Error('cannot create file entry without stat'); + } + /* c8 ignore stop */ + if (this.stat.nlink > 1) { + const linkKey = `${this.stat.dev}:${this.stat.ino}`; + const linkpath = this.linkCache.get(linkKey); + if (linkpath?.indexOf(this.cwd) === 0) { + return this[HARDLINK](linkpath); + } + this.linkCache.set(linkKey, this.absolute); + } + this[HEADER](); + if (this.stat.size === 0) { + return this.end(); + } + this[OPENFILE](); + } + [OPENFILE]() { + fs_1.default.open(this.absolute, 'r', (er, fd) => { + if (er) { + return this.emit('error', er); + } + this[ONOPENFILE](fd); + }); + } + [ONOPENFILE](fd) { + this.fd = fd; + if (this.#hadError) { + return this[CLOSE](); + } + /* c8 ignore start */ + if (!this.stat) { + throw new Error('should stat before calling onopenfile'); + } + /* c8 ignore start */ + this.blockLen = 512 * Math.ceil(this.stat.size / 512); + this.blockRemain = this.blockLen; + const bufLen = Math.min(this.blockLen, this.maxReadSize); + this.buf = Buffer.allocUnsafe(bufLen); + this.offset = 0; + this.pos = 0; + this.remain = this.stat.size; + this.length = this.buf.length; + this[READ](); + } + [READ]() { + const { fd, buf, offset, length, pos } = this; + if (fd === undefined || buf === undefined) { + throw new Error('cannot read file without first opening'); + } + fs_1.default.read(fd, buf, offset, length, pos, (er, bytesRead) => { + if (er) { + // ignoring the error from close(2) is a bad practice, but at + // this point we already have an error, don't need another one + return this[CLOSE](() => this.emit('error', er)); + } + this[ONREAD](bytesRead); + }); + } + /* c8 ignore start */ + [CLOSE](cb = () => { }) { + /* c8 ignore stop */ + if (this.fd !== undefined) + fs_1.default.close(this.fd, cb); + } + 
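// [ONREAD] sanity-checks each read: a short read with bytes still + // expected is an unexpected EOF, and an overlong read means the file + // changed size after stat; the final chunk is null-padded toward the + // 512-byte block boundary, with any remainder flushed in [ONDRAIN]. + 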
[ONREAD](bytesRead) { + if (bytesRead <= 0 && this.remain > 0) { + const er = Object.assign(new Error('encountered unexpected EOF'), { + path: this.absolute, + syscall: 'read', + code: 'EOF', + }); + return this[CLOSE](() => this.emit('error', er)); + } + if (bytesRead > this.remain) { + const er = Object.assign(new Error('did not encounter expected EOF'), { + path: this.absolute, + syscall: 'read', + code: 'EOF', + }); + return this[CLOSE](() => this.emit('error', er)); + } + /* c8 ignore start */ + if (!this.buf) { + throw new Error('should have created buffer prior to reading'); + } + /* c8 ignore stop */ + // null out the rest of the buffer, if we could fit the block padding + // at the end of this loop, we've incremented bytesRead and this.remain + // to be incremented up to the blockRemain level, as if we had expected + // to get a null-padded file, and read it until the end. then we will + // decrement both remain and blockRemain by bytesRead, and know that we + // reached the expected EOF, without any null buffer to append. + if (bytesRead === this.remain) { + for (let i = bytesRead; i < this.length && bytesRead < this.blockRemain; i++) { + this.buf[i + this.offset] = 0; + bytesRead++; + this.remain++; + } + } + const chunk = this.offset === 0 && bytesRead === this.buf.length ? + this.buf + : this.buf.subarray(this.offset, this.offset + bytesRead); + const flushed = this.write(chunk); + if (!flushed) { + this[AWAITDRAIN](() => this[ONDRAIN]()); + } + else { + this[ONDRAIN](); + } + } + [AWAITDRAIN](cb) { + this.once('drain', cb); + } + write(chunk, encoding, cb) { + /* c8 ignore start - just junk to comply with NodeJS.WritableStream */ + if (typeof encoding === 'function') { + cb = encoding; + encoding = undefined; + } + if (typeof chunk === 'string') { + chunk = Buffer.from(chunk, typeof encoding === 'string' ? encoding : 'utf8'); + } + /* c8 ignore stop */ + if (this.blockRemain < chunk.length) { + const er = Object.assign(new Error('writing more data than expected'), { + path: this.absolute, + }); + return this.emit('error', er); + } + this.remain -= chunk.length; + this.blockRemain -= chunk.length; + this.pos += chunk.length; + this.offset += chunk.length; + return super.write(chunk, null, cb); + } + [ONDRAIN]() { + if (!this.remain) { + if (this.blockRemain) { + super.write(Buffer.alloc(this.blockRemain)); + } + return this[CLOSE](er => er ? this.emit('error', er) : this.end()); + } + /* c8 ignore start */ + if (!this.buf) { + throw new Error('buffer lost somehow in ONDRAIN'); + } + /* c8 ignore stop */ + if (this.offset >= this.length) { + // if we only have a smaller bit left to read, alloc a smaller buffer + // otherwise, keep it the same length it was before. 
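+ // (a fresh buffer is allocated either way, since the previous one may + // still be referenced by the chunk passed to write() above)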
+ this.buf = Buffer.allocUnsafe(Math.min(this.blockRemain, this.buf.length)); + this.offset = 0; + } + this.length = this.buf.length - this.offset; + this[READ](); + } +} +exports.WriteEntry = WriteEntry; +class WriteEntrySync extends WriteEntry { + sync = true; + [LSTAT]() { + this[ONLSTAT](fs_1.default.lstatSync(this.absolute)); + } + [SYMLINK]() { + this[ONREADLINK](fs_1.default.readlinkSync(this.absolute)); + } + [OPENFILE]() { + this[ONOPENFILE](fs_1.default.openSync(this.absolute, 'r')); + } + [READ]() { + let threw = true; + try { + const { fd, buf, offset, length, pos } = this; + /* c8 ignore start */ + if (fd === undefined || buf === undefined) { + throw new Error('fd and buf must be set in READ method'); + } + /* c8 ignore stop */ + const bytesRead = fs_1.default.readSync(fd, buf, offset, length, pos); + this[ONREAD](bytesRead); + threw = false; + } + finally { + // ignoring the error from close(2) is a bad practice, but at + // this point we already have an error, don't need another one + if (threw) { + try { + this[CLOSE](() => { }); + } + catch (er) { } + } + } + } + [AWAITDRAIN](cb) { + cb(); + } + /* c8 ignore start */ + [CLOSE](cb = () => { }) { + /* c8 ignore stop */ + if (this.fd !== undefined) + fs_1.default.closeSync(this.fd); + cb(); + } +} +exports.WriteEntrySync = WriteEntrySync; +class WriteEntryTar extends minipass_1.Minipass { + blockLen = 0; + blockRemain = 0; + buf = 0; + pos = 0; + remain = 0; + length = 0; + preservePaths; + portable; + strict; + noPax; + noMtime; + readEntry; + type; + prefix; + path; + mode; + uid; + gid; + uname; + gname; + header; + mtime; + atime; + ctime; + linkpath; + size; + onWriteEntry; + warn(code, message, data = {}) { + return (0, warn_method_js_1.warnMethod)(this, code, message, data); + } + constructor(readEntry, opt_ = {}) { + const opt = (0, options_js_1.dealias)(opt_); + super(); + this.preservePaths = !!opt.preservePaths; + this.portable = !!opt.portable; + this.strict = !!opt.strict; + this.noPax = !!opt.noPax; + this.noMtime = !!opt.noMtime; + this.onWriteEntry = opt.onWriteEntry; + this.readEntry = readEntry; + const { type } = readEntry; + /* c8 ignore start */ + if (type === 'Unsupported') { + throw new Error('writing entry that should be ignored'); + } + /* c8 ignore stop */ + this.type = type; + if (this.type === 'Directory' && this.portable) { + this.noMtime = true; + } + this.prefix = opt.prefix; + this.path = (0, normalize_windows_path_js_1.normalizeWindowsPath)(readEntry.path); + this.mode = + readEntry.mode !== undefined ? + this[MODE](readEntry.mode) + : undefined; + this.uid = this.portable ? undefined : readEntry.uid; + this.gid = this.portable ? undefined : readEntry.gid; + this.uname = this.portable ? undefined : readEntry.uname; + this.gname = this.portable ? undefined : readEntry.gname; + this.size = readEntry.size; + this.mtime = + this.noMtime ? undefined : opt.mtime || readEntry.mtime; + this.atime = this.portable ? undefined : readEntry.atime; + this.ctime = this.portable ? undefined : readEntry.ctime; + this.linkpath = + readEntry.linkpath !== undefined ? 
+ (0, normalize_windows_path_js_1.normalizeWindowsPath)(readEntry.linkpath) + : undefined; + if (typeof opt.onwarn === 'function') { + this.on('warn', opt.onwarn); + } + let pathWarn = false; + if (!this.preservePaths) { + const [root, stripped] = (0, strip_absolute_path_js_1.stripAbsolutePath)(this.path); + if (root && typeof stripped === 'string') { + this.path = stripped; + pathWarn = root; + } + } + this.remain = readEntry.size; + this.blockRemain = readEntry.startBlockSize; + this.onWriteEntry?.(this); + this.header = new header_js_1.Header({ + path: this[PREFIX](this.path), + linkpath: this.type === 'Link' && this.linkpath !== undefined ? + this[PREFIX](this.linkpath) + : this.linkpath, + // only the permissions and setuid/setgid/sticky bitflags + // not the higher-order bits that specify file type + mode: this.mode, + uid: this.portable ? undefined : this.uid, + gid: this.portable ? undefined : this.gid, + size: this.size, + mtime: this.noMtime ? undefined : this.mtime, + type: this.type, + uname: this.portable ? undefined : this.uname, + atime: this.portable ? undefined : this.atime, + ctime: this.portable ? undefined : this.ctime, + }); + if (pathWarn) { + this.warn('TAR_ENTRY_INFO', `stripping ${pathWarn} from absolute path`, { + entry: this, + path: pathWarn + this.path, + }); + } + if (this.header.encode() && !this.noPax) { + super.write(new pax_js_1.Pax({ + atime: this.portable ? undefined : this.atime, + ctime: this.portable ? undefined : this.ctime, + gid: this.portable ? undefined : this.gid, + mtime: this.noMtime ? undefined : this.mtime, + path: this[PREFIX](this.path), + linkpath: this.type === 'Link' && this.linkpath !== undefined ? + this[PREFIX](this.linkpath) + : this.linkpath, + size: this.size, + uid: this.portable ? undefined : this.uid, + uname: this.portable ? undefined : this.uname, + dev: this.portable ? undefined : this.readEntry.dev, + ino: this.portable ? undefined : this.readEntry.ino, + nlink: this.portable ? undefined : this.readEntry.nlink, + }).encode()); + } + const b = this.header?.block; + /* c8 ignore start */ + if (!b) + throw new Error('failed to encode header'); + /* c8 ignore stop */ + super.write(b); + readEntry.pipe(this); + } + [PREFIX](path) { + return prefixPath(path, this.prefix); + } + [MODE](mode) { + return (0, mode_fix_js_1.modeFix)(mode, this.type === 'Directory', this.portable); + } + write(chunk, encoding, cb) { + /* c8 ignore start - just junk to comply with NodeJS.WritableStream */ + if (typeof encoding === 'function') { + cb = encoding; + encoding = undefined; + } + if (typeof chunk === 'string') { + chunk = Buffer.from(chunk, typeof encoding === 'string' ? encoding : 'utf8'); + } + /* c8 ignore stop */ + const writeLen = chunk.length; + if (writeLen > this.blockRemain) { + throw new Error('writing more to entry than is appropriate'); + } + this.blockRemain -= writeLen; + return super.write(chunk, cb); + } + end(chunk, encoding, cb) { + if (this.blockRemain) { + super.write(Buffer.alloc(this.blockRemain)); + } + /* c8 ignore start - just junk to comply with NodeJS.WritableStream */ + if (typeof chunk === 'function') { + cb = chunk; + encoding = undefined; + chunk = undefined; + } + if (typeof encoding === 'function') { + cb = encoding; + encoding = undefined; + } + if (typeof chunk === 'string') { + chunk = Buffer.from(chunk, encoding ?? 'utf8'); + } + if (cb) + this.once('finish', cb); + chunk ? 
super.end(chunk, cb) : super.end(cb); + /* c8 ignore stop */ + return this; + } +} +exports.WriteEntryTar = WriteEntryTar; +const getType = (stat) => stat.isFile() ? 'File' + : stat.isDirectory() ? 'Directory' + : stat.isSymbolicLink() ? 'SymbolicLink' + : 'Unsupported'; +//# sourceMappingURL=write-entry.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/create.js b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/create.js new file mode 100644 index 00000000000000..512a9911d70d5b --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/create.js @@ -0,0 +1,77 @@ +import { WriteStream, WriteStreamSync } from '@isaacs/fs-minipass'; +import path from 'node:path'; +import { list } from './list.js'; +import { makeCommand } from './make-command.js'; +import { Pack, PackSync } from './pack.js'; +const createFileSync = (opt, files) => { + const p = new PackSync(opt); + const stream = new WriteStreamSync(opt.file, { + mode: opt.mode || 0o666, + }); + p.pipe(stream); + addFilesSync(p, files); +}; +const createFile = (opt, files) => { + const p = new Pack(opt); + const stream = new WriteStream(opt.file, { + mode: opt.mode || 0o666, + }); + p.pipe(stream); + const promise = new Promise((res, rej) => { + stream.on('error', rej); + stream.on('close', res); + p.on('error', rej); + }); + addFilesAsync(p, files); + return promise; +}; +const addFilesSync = (p, files) => { + files.forEach(file => { + if (file.charAt(0) === '@') { + list({ + file: path.resolve(p.cwd, file.slice(1)), + sync: true, + noResume: true, + onReadEntry: entry => p.add(entry), + }); + } + else { + p.add(file); + } + }); + p.end(); +}; +const addFilesAsync = async (p, files) => { + for (let i = 0; i < files.length; i++) { + const file = String(files[i]); + if (file.charAt(0) === '@') { + await list({ + file: path.resolve(String(p.cwd), file.slice(1)), + noResume: true, + onReadEntry: entry => { + p.add(entry); + }, + }); + } + else { + p.add(file); + } + } + p.end(); +}; +const createSync = (opt, files) => { + const p = new PackSync(opt); + addFilesSync(p, files); + return p; +}; +const createAsync = (opt, files) => { + const p = new Pack(opt); + addFilesAsync(p, files); + return p; +}; +export const create = makeCommand(createFileSync, createFile, createSync, createAsync, (_opt, files) => { + if (!files?.length) { + throw new TypeError('no paths specified to add to archive'); + } +}); +//# sourceMappingURL=create.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/cwd-error.js b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/cwd-error.js new file mode 100644 index 00000000000000..289a066b8e0317 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/cwd-error.js @@ -0,0 +1,14 @@ +export class CwdError extends Error { + path; + code; + syscall = 'chdir'; + constructor(path, code) { + super(`${code}: Cannot cd into '${path}'`); + this.path = path; + this.code = code; + } + get name() { + return 'CwdError'; + } +} +//# sourceMappingURL=cwd-error.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/extract.js b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/extract.js new file mode 100644 index 00000000000000..2274feef26e78f --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/extract.js @@ -0,0 +1,49 @@ +// tar -x +import * as fsm from '@isaacs/fs-minipass'; +import fs from 'node:fs'; 
+import { filesFilter } from './list.js'; +import { makeCommand } from './make-command.js'; +import { Unpack, UnpackSync } from './unpack.js'; +const extractFileSync = (opt) => { + const u = new UnpackSync(opt); + const file = opt.file; + const stat = fs.statSync(file); + // This trades a zero-byte read() syscall for a stat + // However, it will usually result in less memory allocation + const readSize = opt.maxReadSize || 16 * 1024 * 1024; + const stream = new fsm.ReadStreamSync(file, { + readSize: readSize, + size: stat.size, + }); + stream.pipe(u); +}; +const extractFile = (opt, _) => { + const u = new Unpack(opt); + const readSize = opt.maxReadSize || 16 * 1024 * 1024; + const file = opt.file; + const p = new Promise((resolve, reject) => { + u.on('error', reject); + u.on('close', resolve); + // This trades a zero-byte read() syscall for a stat + // However, it will usually result in less memory allocation + fs.stat(file, (er, stat) => { + if (er) { + reject(er); + } + else { + const stream = new fsm.ReadStream(file, { + readSize: readSize, + size: stat.size, + }); + stream.on('error', reject); + stream.pipe(u); + } + }); + }); + return p; +}; +export const extract = makeCommand(extractFileSync, extractFile, opt => new UnpackSync(opt), opt => new Unpack(opt), (opt, files) => { + if (files?.length) + filesFilter(opt, files); +}); +//# sourceMappingURL=extract.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/get-write-flag.js b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/get-write-flag.js new file mode 100644 index 00000000000000..2c7f3e8b28fdaf --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/get-write-flag.js @@ -0,0 +1,23 @@ +// Get the appropriate flag to use for creating files +// We use fmap on Windows platforms for files less than +// 512kb. This is a fairly low limit, but avoids making +// things slower in some cases. Since most of what this +// library is used for is extracting tarballs of many +// relatively small files in npm packages and the like, +// it can be a big boost on Windows platforms. +import fs from 'fs'; +const platform = process.env.__FAKE_PLATFORM__ || process.platform; +const isWindows = platform === 'win32'; +/* c8 ignore start */ +const { O_CREAT, O_TRUNC, O_WRONLY } = fs.constants; +const UV_FS_O_FILEMAP = Number(process.env.__FAKE_FS_O_FILENAME__) || + fs.constants.UV_FS_O_FILEMAP || + 0; +/* c8 ignore stop */ +const fMapEnabled = isWindows && !!UV_FS_O_FILEMAP; +const fMapLimit = 512 * 1024; +const fMapFlag = UV_FS_O_FILEMAP | O_TRUNC | O_CREAT | O_WRONLY; +export const getWriteFlag = !fMapEnabled ? + () => 'w' + : (size) => (size < fMapLimit ? fMapFlag : 'w'); +//# sourceMappingURL=get-write-flag.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/header.js b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/header.js new file mode 100644 index 00000000000000..e15192b14b16e1 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/header.js @@ -0,0 +1,279 @@ +// parse a 512-byte header block to a data object, or vice-versa +// encode returns `true` if a pax extended header is needed, because +// the data could not be faithfully encoded in a simple header. +// (Also, check header.needPax to see if it needs a pax header.) 
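+// A ustar header is one 512-byte record: name(100) mode(8) uid(8) gid(8) +// size(12) mtime(12) cksum(8) typeflag(1) linkname(100) magic+version(8) +// uname(32) gname(32) devmajor(8) devminor(8) prefix(155), NUL-padded to +// 512 bytes; when atime/ctime are stored, prefix shrinks to 130 bytes, +// leaving room for atime(12) at offset 476 and ctime(12) at offset 488.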
+import { posix as pathModule } from 'node:path'; +import * as large from './large-numbers.js'; +import * as types from './types.js'; +export class Header { + cksumValid = false; + needPax = false; + nullBlock = false; + block; + path; + mode; + uid; + gid; + size; + cksum; + #type = 'Unsupported'; + linkpath; + uname; + gname; + devmaj = 0; + devmin = 0; + atime; + ctime; + mtime; + charset; + comment; + constructor(data, off = 0, ex, gex) { + if (Buffer.isBuffer(data)) { + this.decode(data, off || 0, ex, gex); + } + else if (data) { + this.#slurp(data); + } + } + decode(buf, off, ex, gex) { + if (!off) { + off = 0; + } + if (!buf || !(buf.length >= off + 512)) { + throw new Error('need 512 bytes for header'); + } + this.path = decString(buf, off, 100); + this.mode = decNumber(buf, off + 100, 8); + this.uid = decNumber(buf, off + 108, 8); + this.gid = decNumber(buf, off + 116, 8); + this.size = decNumber(buf, off + 124, 12); + this.mtime = decDate(buf, off + 136, 12); + this.cksum = decNumber(buf, off + 148, 12); + // if we have extended or global extended headers, apply them now + // See https://github.com/npm/node-tar/pull/187 + // Apply global before local, so it overrides + if (gex) + this.#slurp(gex, true); + if (ex) + this.#slurp(ex); + // old tar versions marked dirs as a file with a trailing / + const t = decString(buf, off + 156, 1); + if (types.isCode(t)) { + this.#type = t || '0'; + } + if (this.#type === '0' && this.path.slice(-1) === '/') { + this.#type = '5'; + } + // tar implementations sometimes incorrectly put the stat(dir).size + // as the size in the tarball, even though Directory entries are + // not able to have any body at all. In the very rare chance that + // it actually DOES have a body, we weren't going to do anything with + // it anyway, and it'll just be a warning about an invalid header. + if (this.#type === '5') { + this.size = 0; + } + this.linkpath = decString(buf, off + 157, 100); + if (buf.subarray(off + 257, off + 265).toString() === + 'ustar\u000000') { + this.uname = decString(buf, off + 265, 32); + this.gname = decString(buf, off + 297, 32); + /* c8 ignore start */ + this.devmaj = decNumber(buf, off + 329, 8) ?? 0; + this.devmin = decNumber(buf, off + 337, 8) ?? 0; + /* c8 ignore stop */ + if (buf[off + 475] !== 0) { + // definitely a prefix, definitely >130 chars. + const prefix = decString(buf, off + 345, 155); + this.path = prefix + '/' + this.path; + } + else { + const prefix = decString(buf, off + 345, 130); + if (prefix) { + this.path = prefix + '/' + this.path; + } + this.atime = decDate(buf, off + 476, 12); + this.ctime = decDate(buf, off + 488, 12); + } + } + let sum = 8 * 0x20; + for (let i = off; i < off + 148; i++) { + sum += buf[i]; + } + for (let i = off + 156; i < off + 512; i++) { + sum += buf[i]; + } + this.cksumValid = sum === this.cksum; + if (this.cksum === undefined && sum === 8 * 0x20) { + this.nullBlock = true; + } + } + #slurp(ex, gex = false) { + Object.assign(this, Object.fromEntries(Object.entries(ex).filter(([k, v]) => { + // we slurp in everything except for the path attribute in + // a global extended header, because that's weird. Also, any + // null/undefined values are ignored. 
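+ // (the same applies to linkpath in a global header, and the 'global' + // marker itself is never copied onto the entry)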
+ return !(v === null || + v === undefined || + (k === 'path' && gex) || + (k === 'linkpath' && gex) || + k === 'global'); + }))); + } + encode(buf, off = 0) { + if (!buf) { + buf = this.block = Buffer.alloc(512); + } + if (this.#type === 'Unsupported') { + this.#type = '0'; + } + if (!(buf.length >= off + 512)) { + throw new Error('need 512 bytes for header'); + } + const prefixSize = this.ctime || this.atime ? 130 : 155; + const split = splitPrefix(this.path || '', prefixSize); + const path = split[0]; + const prefix = split[1]; + this.needPax = !!split[2]; + this.needPax = encString(buf, off, 100, path) || this.needPax; + this.needPax = + encNumber(buf, off + 100, 8, this.mode) || this.needPax; + this.needPax = + encNumber(buf, off + 108, 8, this.uid) || this.needPax; + this.needPax = + encNumber(buf, off + 116, 8, this.gid) || this.needPax; + this.needPax = + encNumber(buf, off + 124, 12, this.size) || this.needPax; + this.needPax = + encDate(buf, off + 136, 12, this.mtime) || this.needPax; + buf[off + 156] = this.#type.charCodeAt(0); + this.needPax = + encString(buf, off + 157, 100, this.linkpath) || this.needPax; + buf.write('ustar\u000000', off + 257, 8); + this.needPax = + encString(buf, off + 265, 32, this.uname) || this.needPax; + this.needPax = + encString(buf, off + 297, 32, this.gname) || this.needPax; + this.needPax = + encNumber(buf, off + 329, 8, this.devmaj) || this.needPax; + this.needPax = + encNumber(buf, off + 337, 8, this.devmin) || this.needPax; + this.needPax = + encString(buf, off + 345, prefixSize, prefix) || this.needPax; + if (buf[off + 475] !== 0) { + this.needPax = + encString(buf, off + 345, 155, prefix) || this.needPax; + } + else { + this.needPax = + encString(buf, off + 345, 130, prefix) || this.needPax; + this.needPax = + encDate(buf, off + 476, 12, this.atime) || this.needPax; + this.needPax = + encDate(buf, off + 488, 12, this.ctime) || this.needPax; + } + let sum = 8 * 0x20; + for (let i = off; i < off + 148; i++) { + sum += buf[i]; + } + for (let i = off + 156; i < off + 512; i++) { + sum += buf[i]; + } + this.cksum = sum; + encNumber(buf, off + 148, 8, this.cksum); + this.cksumValid = true; + return this.needPax; + } + get type() { + return (this.#type === 'Unsupported' ? + this.#type + : types.name.get(this.#type)); + } + get typeKey() { + return this.#type; + } + set type(type) { + const c = String(types.code.get(type)); + if (types.isCode(c) || c === 'Unsupported') { + this.#type = c; + } + else if (types.isCode(type)) { + this.#type = type; + } + else { + throw new TypeError('invalid entry type: ' + type); + } + } +} +const splitPrefix = (p, prefixSize) => { + const pathSize = 100; + let pp = p; + let prefix = ''; + let ret = undefined; + const root = pathModule.parse(p).root || '.'; + if (Buffer.byteLength(pp) < pathSize) { + ret = [pp, prefix, false]; + } + else { + // first set prefix to the dir, and path to the base + prefix = pathModule.dirname(pp); + pp = pathModule.basename(pp); + do { + if (Buffer.byteLength(pp) <= pathSize && + Buffer.byteLength(prefix) <= prefixSize) { + // both fit! 
+ ret = [pp, prefix, false]; + } + else if (Buffer.byteLength(pp) > pathSize && + Buffer.byteLength(prefix) <= prefixSize) { + // prefix fits in prefix, but path doesn't fit in path + ret = [pp.slice(0, pathSize - 1), prefix, true]; + } + else { + // make path take a bit from prefix + pp = pathModule.join(pathModule.basename(prefix), pp); + prefix = pathModule.dirname(prefix); + } + } while (prefix !== root && ret === undefined); + // at this point, found no resolution, just truncate + if (!ret) { + ret = [p.slice(0, pathSize - 1), '', true]; + } + } + return ret; +}; +const decString = (buf, off, size) => buf + .subarray(off, off + size) + .toString('utf8') + .replace(/\0.*/, ''); +const decDate = (buf, off, size) => numToDate(decNumber(buf, off, size)); +const numToDate = (num) => num === undefined ? undefined : new Date(num * 1000); +const decNumber = (buf, off, size) => Number(buf[off]) & 0x80 ? + large.parse(buf.subarray(off, off + size)) + : decSmallNumber(buf, off, size); +const nanUndef = (value) => (isNaN(value) ? undefined : value); +const decSmallNumber = (buf, off, size) => nanUndef(parseInt(buf + .subarray(off, off + size) + .toString('utf8') + .replace(/\0.*$/, '') + .trim(), 8)); +// the maximum encodable as a null-terminated octal, by field size +const MAXNUM = { + 12: 0o77777777777, + 8: 0o7777777, +}; +const encNumber = (buf, off, size, num) => num === undefined ? false + : num > MAXNUM[size] || num < 0 ? + (large.encode(num, buf.subarray(off, off + size)), true) + : (encSmallNumber(buf, off, size, num), false); +const encSmallNumber = (buf, off, size, num) => buf.write(octalString(num, size), off, size, 'ascii'); +const octalString = (num, size) => padOctal(Math.floor(num).toString(8), size); +const padOctal = (str, size) => (str.length === size - 1 ? + str + : new Array(size - str.length - 1).join('0') + str + ' ') + '\0'; +const encDate = (buf, off, size, date) => date === undefined ? false : (encNumber(buf, off, size, date.getTime() / 1000)); +// enough to fill the longest string we've got +const NULLS = new Array(156).join('\0'); +// pad with nulls, return true if it's longer or non-ascii +const encString = (buf, off, size, str) => str === undefined ? 
false : ((buf.write(str + NULLS, off, size, 'utf8'), + str.length !== Buffer.byteLength(str) || str.length > size)); +//# sourceMappingURL=header.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/index.js b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/index.js new file mode 100644 index 00000000000000..1bac6415c8d732 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/index.js @@ -0,0 +1,20 @@ +export * from './create.js'; +export { create as c } from './create.js'; +export * from './extract.js'; +export { extract as x } from './extract.js'; +export * from './header.js'; +export * from './list.js'; +export { list as t } from './list.js'; +// classes +export * from './pack.js'; +export * from './parse.js'; +export * from './pax.js'; +export * from './read-entry.js'; +export * from './replace.js'; +export { replace as r } from './replace.js'; +export * as types from './types.js'; +export * from './unpack.js'; +export * from './update.js'; +export { update as u } from './update.js'; +export * from './write-entry.js'; +//# sourceMappingURL=index.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/large-numbers.js b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/large-numbers.js new file mode 100644 index 00000000000000..4f2f7e5f14fc1b --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/large-numbers.js @@ -0,0 +1,94 @@ +// Tar can encode large and negative numbers using a leading byte of +// 0xff for negative, and 0x80 for positive. +export const encode = (num, buf) => { + if (!Number.isSafeInteger(num)) { + // The number is so large that javascript cannot represent it with integer + // precision. + throw Error('cannot encode number outside of javascript safe integer range'); + } + else if (num < 0) { + encodeNegative(num, buf); + } + else { + encodePositive(num, buf); + } + return buf; +}; +const encodePositive = (num, buf) => { + buf[0] = 0x80; + for (var i = buf.length; i > 1; i--) { + buf[i - 1] = num & 0xff; + num = Math.floor(num / 0x100); + } +}; +const encodeNegative = (num, buf) => { + buf[0] = 0xff; + var flipped = false; + num = num * -1; + for (var i = buf.length; i > 1; i--) { + var byte = num & 0xff; + num = Math.floor(num / 0x100); + if (flipped) { + buf[i - 1] = onesComp(byte); + } + else if (byte === 0) { + buf[i - 1] = 0; + } + else { + flipped = true; + buf[i - 1] = twosComp(byte); + } + } +}; +export const parse = (buf) => { + const pre = buf[0]; + const value = pre === 0x80 ? pos(buf.subarray(1, buf.length)) + : pre === 0xff ? twos(buf) + : null; + if (value === null) { + throw Error('invalid base256 encoding'); + } + if (!Number.isSafeInteger(value)) { + // The number is so large that javascript cannot represent it with integer + // precision. 
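+ // (precision is lost beyond Number.MAX_SAFE_INTEGER, i.e. 2 ** 53 - 1)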
+ throw Error('parsed number outside of javascript safe integer range'); + } + return value; +}; +const twos = (buf) => { + var len = buf.length; + var sum = 0; + var flipped = false; + for (var i = len - 1; i > -1; i--) { + var byte = Number(buf[i]); + var f; + if (flipped) { + f = onesComp(byte); + } + else if (byte === 0) { + f = byte; + } + else { + flipped = true; + f = twosComp(byte); + } + if (f !== 0) { + sum -= f * Math.pow(256, len - i - 1); + } + } + return sum; +}; +const pos = (buf) => { + var len = buf.length; + var sum = 0; + for (var i = len - 1; i > -1; i--) { + var byte = Number(buf[i]); + if (byte !== 0) { + sum += byte * Math.pow(256, len - i - 1); + } + } + return sum; +}; +const onesComp = (byte) => (0xff ^ byte) & 0xff; +const twosComp = (byte) => ((0xff ^ byte) + 1) & 0xff; +//# sourceMappingURL=large-numbers.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/list.js b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/list.js new file mode 100644 index 00000000000000..f49068400b6c92 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/list.js @@ -0,0 +1,106 @@ +// tar -t +import * as fsm from '@isaacs/fs-minipass'; +import fs from 'node:fs'; +import { dirname, parse } from 'path'; +import { makeCommand } from './make-command.js'; +import { Parser } from './parse.js'; +import { stripTrailingSlashes } from './strip-trailing-slashes.js'; +const onReadEntryFunction = (opt) => { + const onReadEntry = opt.onReadEntry; + opt.onReadEntry = + onReadEntry ? + e => { + onReadEntry(e); + e.resume(); + } + : e => e.resume(); +}; +// construct a filter that limits the file entries listed +// include child entries if a dir is included +export const filesFilter = (opt, files) => { + const map = new Map(files.map(f => [stripTrailingSlashes(f), true])); + const filter = opt.filter; + const mapHas = (file, r = '') => { + const root = r || parse(file).root || '.'; + let ret; + if (file === root) + ret = false; + else { + const m = map.get(file); + if (m !== undefined) { + ret = m; + } + else { + ret = mapHas(dirname(file), root); + } + } + map.set(file, ret); + return ret; + }; + opt.filter = + filter ? 
+ (file, entry) => filter(file, entry) && mapHas(stripTrailingSlashes(file)) + : file => mapHas(stripTrailingSlashes(file)); +}; +const listFileSync = (opt) => { + const p = new Parser(opt); + const file = opt.file; + let fd; + try { + const stat = fs.statSync(file); + const readSize = opt.maxReadSize || 16 * 1024 * 1024; + if (stat.size < readSize) { + p.end(fs.readFileSync(file)); + } + else { + let pos = 0; + const buf = Buffer.allocUnsafe(readSize); + fd = fs.openSync(file, 'r'); + while (pos < stat.size) { + const bytesRead = fs.readSync(fd, buf, 0, readSize, pos); + pos += bytesRead; + p.write(buf.subarray(0, bytesRead)); + } + p.end(); + } + } + finally { + if (typeof fd === 'number') { + try { + fs.closeSync(fd); + /* c8 ignore next */ + } + catch (er) { } + } + } +}; +const listFile = (opt, _files) => { + const parse = new Parser(opt); + const readSize = opt.maxReadSize || 16 * 1024 * 1024; + const file = opt.file; + const p = new Promise((resolve, reject) => { + parse.on('error', reject); + parse.on('end', resolve); + fs.stat(file, (er, stat) => { + if (er) { + reject(er); + } + else { + const stream = new fsm.ReadStream(file, { + readSize: readSize, + size: stat.size, + }); + stream.on('error', reject); + stream.pipe(parse); + } + }); + }); + return p; +}; +export const list = makeCommand(listFileSync, listFile, opt => new Parser(opt), opt => new Parser(opt), (opt, files) => { + if (files?.length) + filesFilter(opt, files); + if (!opt.noResume) + onReadEntryFunction(opt); +}); +//# sourceMappingURL=list.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/make-command.js b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/make-command.js new file mode 100644 index 00000000000000..f2f737bca78fd7 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/make-command.js @@ -0,0 +1,57 @@ +import { dealias, isAsyncFile, isAsyncNoFile, isSyncFile, isSyncNoFile, } from './options.js'; +export const makeCommand = (syncFile, asyncFile, syncNoFile, asyncNoFile, validate) => { + return Object.assign((opt_ = [], entries, cb) => { + if (Array.isArray(opt_)) { + entries = opt_; + opt_ = {}; + } + if (typeof entries === 'function') { + cb = entries; + entries = undefined; + } + if (!entries) { + entries = []; + } + else { + entries = Array.from(entries); + } + const opt = dealias(opt_); + validate?.(opt, entries); + if (isSyncFile(opt)) { + if (typeof cb === 'function') { + throw new TypeError('callback not supported for sync tar functions'); + } + return syncFile(opt, entries); + } + else if (isAsyncFile(opt)) { + const p = asyncFile(opt, entries); + // weirdness to make TS happy + const c = cb ? cb : undefined; + return c ? 
p.then(() => c(), c) : p; + } + else if (isSyncNoFile(opt)) { + if (typeof cb === 'function') { + throw new TypeError('callback not supported for sync tar functions'); + } + return syncNoFile(opt, entries); + } + else if (isAsyncNoFile(opt)) { + if (typeof cb === 'function') { + throw new TypeError('callback only supported with file option'); + } + return asyncNoFile(opt, entries); + /* c8 ignore start */ + } + else { + throw new Error('impossible options??'); + } + /* c8 ignore stop */ + }, { + syncFile, + asyncFile, + syncNoFile, + asyncNoFile, + validate, + }); +}; +//# sourceMappingURL=make-command.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/mkdir.js b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/mkdir.js new file mode 100644 index 00000000000000..13498ef0082f0b --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/mkdir.js @@ -0,0 +1,201 @@ +import { chownr, chownrSync } from 'chownr'; +import fs from 'fs'; +import { mkdirp, mkdirpSync } from 'mkdirp'; +import path from 'node:path'; +import { CwdError } from './cwd-error.js'; +import { normalizeWindowsPath } from './normalize-windows-path.js'; +import { SymlinkError } from './symlink-error.js'; +const cGet = (cache, key) => cache.get(normalizeWindowsPath(key)); +const cSet = (cache, key, val) => cache.set(normalizeWindowsPath(key), val); +const checkCwd = (dir, cb) => { + fs.stat(dir, (er, st) => { + if (er || !st.isDirectory()) { + er = new CwdError(dir, er?.code || 'ENOTDIR'); + } + cb(er); + }); +}; +/** + * Wrapper around mkdirp for tar's needs. + * + * The main purpose is to avoid creating directories if we know that + * they already exist (and track which ones exist for this purpose), + * and prevent entries from being extracted into symlinked folders, + * if `preservePaths` is not set. + */ +export const mkdir = (dir, opt, cb) => { + dir = normalizeWindowsPath(dir); + // if there's any overlap between mask and mode, + // then we'll need an explicit chmod + /* c8 ignore next */ + const umask = opt.umask ?? 0o22; + const mode = opt.mode | 0o0700; + const needChmod = (mode & umask) !== 0; + const uid = opt.uid; + const gid = opt.gid; + const doChown = typeof uid === 'number' && + typeof gid === 'number' && + (uid !== opt.processUid || gid !== opt.processGid); + const preserve = opt.preserve; + const unlink = opt.unlink; + const cache = opt.cache; + const cwd = normalizeWindowsPath(opt.cwd); + const done = (er, created) => { + if (er) { + cb(er); + } + else { + cSet(cache, dir, true); + if (created && doChown) { + chownr(created, uid, gid, er => done(er)); + } + else if (needChmod) { + fs.chmod(dir, mode, cb); + } + else { + cb(); + } + } + }; + if (cache && cGet(cache, dir) === true) { + return done(); + } + if (dir === cwd) { + return checkCwd(dir, done); + } + if (preserve) { + return mkdirp(dir, { mode }).then(made => done(null, made ?? 
undefined), // oh, ts + done); + } + const sub = normalizeWindowsPath(path.relative(cwd, dir)); + const parts = sub.split('/'); + mkdir_(cwd, parts, mode, cache, unlink, cwd, undefined, done); +}; +const mkdir_ = (base, parts, mode, cache, unlink, cwd, created, cb) => { + if (!parts.length) { + return cb(null, created); + } + const p = parts.shift(); + const part = normalizeWindowsPath(path.resolve(base + '/' + p)); + if (cGet(cache, part)) { + return mkdir_(part, parts, mode, cache, unlink, cwd, created, cb); + } + fs.mkdir(part, mode, onmkdir(part, parts, mode, cache, unlink, cwd, created, cb)); +}; +const onmkdir = (part, parts, mode, cache, unlink, cwd, created, cb) => (er) => { + if (er) { + fs.lstat(part, (statEr, st) => { + if (statEr) { + statEr.path = + statEr.path && normalizeWindowsPath(statEr.path); + cb(statEr); + } + else if (st.isDirectory()) { + mkdir_(part, parts, mode, cache, unlink, cwd, created, cb); + } + else if (unlink) { + fs.unlink(part, er => { + if (er) { + return cb(er); + } + fs.mkdir(part, mode, onmkdir(part, parts, mode, cache, unlink, cwd, created, cb)); + }); + } + else if (st.isSymbolicLink()) { + return cb(new SymlinkError(part, part + '/' + parts.join('/'))); + } + else { + cb(er); + } + }); + } + else { + created = created || part; + mkdir_(part, parts, mode, cache, unlink, cwd, created, cb); + } +}; +const checkCwdSync = (dir) => { + let ok = false; + let code = undefined; + try { + ok = fs.statSync(dir).isDirectory(); + } + catch (er) { + code = er?.code; + } + finally { + if (!ok) { + throw new CwdError(dir, code ?? 'ENOTDIR'); + } + } +}; +export const mkdirSync = (dir, opt) => { + dir = normalizeWindowsPath(dir); + // if there's any overlap between mask and mode, + // then we'll need an explicit chmod + /* c8 ignore next */ + const umask = opt.umask ?? 0o22; + const mode = opt.mode | 0o700; + const needChmod = (mode & umask) !== 0; + const uid = opt.uid; + const gid = opt.gid; + const doChown = typeof uid === 'number' && + typeof gid === 'number' && + (uid !== opt.processUid || gid !== opt.processGid); + const preserve = opt.preserve; + const unlink = opt.unlink; + const cache = opt.cache; + const cwd = normalizeWindowsPath(opt.cwd); + const done = (created) => { + cSet(cache, dir, true); + if (created && doChown) { + chownrSync(created, uid, gid); + } + if (needChmod) { + fs.chmodSync(dir, mode); + } + }; + if (cache && cGet(cache, dir) === true) { + return done(); + } + if (dir === cwd) { + checkCwdSync(cwd); + return done(); + } + if (preserve) { + return done(mkdirpSync(dir, mode) ?? 
undefined); + } + const sub = normalizeWindowsPath(path.relative(cwd, dir)); + const parts = sub.split('/'); + let created = undefined; + for (let p = parts.shift(), part = cwd; p && (part += '/' + p); p = parts.shift()) { + part = normalizeWindowsPath(path.resolve(part)); + if (cGet(cache, part)) { + continue; + } + try { + fs.mkdirSync(part, mode); + created = created || part; + cSet(cache, part, true); + } + catch (er) { + const st = fs.lstatSync(part); + if (st.isDirectory()) { + cSet(cache, part, true); + continue; + } + else if (unlink) { + fs.unlinkSync(part); + fs.mkdirSync(part, mode); + created = created || part; + cSet(cache, part, true); + continue; + } + else if (st.isSymbolicLink()) { + return new SymlinkError(part, part + '/' + parts.join('/')); + } + } + } + return done(created); +}; +//# sourceMappingURL=mkdir.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/mode-fix.js b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/mode-fix.js new file mode 100644 index 00000000000000..5fd3bb88c1cb25 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/mode-fix.js @@ -0,0 +1,25 @@ +export const modeFix = (mode, isDir, portable) => { + mode &= 0o7777; + // in portable mode, use the minimum reasonable umask + // if this system creates files with 0o664 by default + // (as some linux distros do), then we'll write the + // archive with 0o644 instead. Also, don't ever create + // a file that is not readable/writable by the owner. + if (portable) { + mode = (mode | 0o600) & ~0o22; + } + // if dirs are readable, then they should be listable + if (isDir) { + if (mode & 0o400) { + mode |= 0o100; + } + if (mode & 0o40) { + mode |= 0o10; + } + if (mode & 0o4) { + mode |= 0o1; + } + } + return mode; +}; +//# sourceMappingURL=mode-fix.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/normalize-unicode.js b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/normalize-unicode.js new file mode 100644 index 00000000000000..94e5095476d6e0 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/normalize-unicode.js @@ -0,0 +1,13 @@ +// warning: extremely hot code path. +// This has been meticulously optimized for use +// within npm install on large package trees. +// Do not edit without careful benchmarking. +const normalizeCache = Object.create(null); +const { hasOwnProperty } = Object.prototype; +export const normalizeUnicode = (s) => { + if (!hasOwnProperty.call(normalizeCache, s)) { + normalizeCache[s] = s.normalize('NFD'); + } + return normalizeCache[s]; +}; +//# sourceMappingURL=normalize-unicode.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/normalize-windows-path.js b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/normalize-windows-path.js new file mode 100644 index 00000000000000..2d97d2b884e627 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/normalize-windows-path.js @@ -0,0 +1,9 @@ +// on windows, either \ or / are valid directory separators. +// on unix, \ is a valid character in filenames. +// so, on windows, and only on windows, we replace all \ chars with /, +// so that we can use / as our one and only directory separator char. +const platform = process.env.TESTING_TAR_FAKE_PLATFORM || process.platform; +export const normalizeWindowsPath = platform !== 'win32' ? 
+ (p) => p + : (p) => p && p.replace(/\\/g, '/'); +//# sourceMappingURL=normalize-windows-path.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/options.js b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/options.js new file mode 100644 index 00000000000000..a006d36c23c923 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/options.js @@ -0,0 +1,54 @@ +// turn tar(1) style args like `C` into the more verbose things like `cwd` +const argmap = new Map([ + ['C', 'cwd'], + ['f', 'file'], + ['z', 'gzip'], + ['P', 'preservePaths'], + ['U', 'unlink'], + ['strip-components', 'strip'], + ['stripComponents', 'strip'], + ['keep-newer', 'newer'], + ['keepNewer', 'newer'], + ['keep-newer-files', 'newer'], + ['keepNewerFiles', 'newer'], + ['k', 'keep'], + ['keep-existing', 'keep'], + ['keepExisting', 'keep'], + ['m', 'noMtime'], + ['no-mtime', 'noMtime'], + ['p', 'preserveOwner'], + ['L', 'follow'], + ['h', 'follow'], + ['onentry', 'onReadEntry'], +]); +export const isSyncFile = (o) => !!o.sync && !!o.file; +export const isAsyncFile = (o) => !o.sync && !!o.file; +export const isSyncNoFile = (o) => !!o.sync && !o.file; +export const isAsyncNoFile = (o) => !o.sync && !o.file; +export const isSync = (o) => !!o.sync; +export const isAsync = (o) => !o.sync; +export const isFile = (o) => !!o.file; +export const isNoFile = (o) => !o.file; +const dealiasKey = (k) => { + const d = argmap.get(k); + if (d) + return d; + return k; +}; +export const dealias = (opt = {}) => { + if (!opt) + return {}; + const result = {}; + for (const [key, v] of Object.entries(opt)) { + // TS doesn't know that aliases are going to always be the same type + const k = dealiasKey(key); + result[k] = v; + } + // affordance for deprecated noChmod -> chmod + if (result.chmod === undefined && result.noChmod === false) { + result.chmod = true; + } + delete result.noChmod; + return result; +}; +//# sourceMappingURL=options.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/pack.js b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/pack.js new file mode 100644 index 00000000000000..f59f32f94201fa --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/pack.js @@ -0,0 +1,445 @@ +// A readable tar stream creator +// Technically, this is a transform stream that you write paths into, +// and tar format comes out of. 
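+// Each path written in becomes a WriteEntry whose header and body bytes +// are emitted in sequence, optionally compressed with gzip or brotli.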
+// The `add()` method is like `write()` but returns this, +// and end() return `this` as well, so you can +// do `new Pack(opt).add('files').add('dir').end().pipe(output) +// You could also do something like: +// streamOfPaths().pipe(new Pack()).pipe(new fs.WriteStream('out.tar')) +import fs from 'fs'; +import { WriteEntry, WriteEntrySync, WriteEntryTar, } from './write-entry.js'; +export class PackJob { + path; + absolute; + entry; + stat; + readdir; + pending = false; + ignore = false; + piped = false; + constructor(path, absolute) { + this.path = path || './'; + this.absolute = absolute; + } +} +import { Minipass } from 'minipass'; +import * as zlib from 'minizlib'; +import { Yallist } from 'yallist'; +import { ReadEntry } from './read-entry.js'; +import { warnMethod, } from './warn-method.js'; +const EOF = Buffer.alloc(1024); +const ONSTAT = Symbol('onStat'); +const ENDED = Symbol('ended'); +const QUEUE = Symbol('queue'); +const CURRENT = Symbol('current'); +const PROCESS = Symbol('process'); +const PROCESSING = Symbol('processing'); +const PROCESSJOB = Symbol('processJob'); +const JOBS = Symbol('jobs'); +const JOBDONE = Symbol('jobDone'); +const ADDFSENTRY = Symbol('addFSEntry'); +const ADDTARENTRY = Symbol('addTarEntry'); +const STAT = Symbol('stat'); +const READDIR = Symbol('readdir'); +const ONREADDIR = Symbol('onreaddir'); +const PIPE = Symbol('pipe'); +const ENTRY = Symbol('entry'); +const ENTRYOPT = Symbol('entryOpt'); +const WRITEENTRYCLASS = Symbol('writeEntryClass'); +const WRITE = Symbol('write'); +const ONDRAIN = Symbol('ondrain'); +import path from 'path'; +import { normalizeWindowsPath } from './normalize-windows-path.js'; +export class Pack extends Minipass { + opt; + cwd; + maxReadSize; + preservePaths; + strict; + noPax; + prefix; + linkCache; + statCache; + file; + portable; + zip; + readdirCache; + noDirRecurse; + follow; + noMtime; + mtime; + filter; + jobs; + [WRITEENTRYCLASS]; + onWriteEntry; + [QUEUE]; + [JOBS] = 0; + [PROCESSING] = false; + [ENDED] = false; + constructor(opt = {}) { + //@ts-ignore + super(); + this.opt = opt; + this.file = opt.file || ''; + this.cwd = opt.cwd || process.cwd(); + this.maxReadSize = opt.maxReadSize; + this.preservePaths = !!opt.preservePaths; + this.strict = !!opt.strict; + this.noPax = !!opt.noPax; + this.prefix = normalizeWindowsPath(opt.prefix || ''); + this.linkCache = opt.linkCache || new Map(); + this.statCache = opt.statCache || new Map(); + this.readdirCache = opt.readdirCache || new Map(); + this.onWriteEntry = opt.onWriteEntry; + this[WRITEENTRYCLASS] = WriteEntry; + if (typeof opt.onwarn === 'function') { + this.on('warn', opt.onwarn); + } + this.portable = !!opt.portable; + if (opt.gzip || opt.brotli) { + if (opt.gzip && opt.brotli) { + throw new TypeError('gzip and brotli are mutually exclusive'); + } + if (opt.gzip) { + if (typeof opt.gzip !== 'object') { + opt.gzip = {}; + } + if (this.portable) { + opt.gzip.portable = true; + } + this.zip = new zlib.Gzip(opt.gzip); + } + if (opt.brotli) { + if (typeof opt.brotli !== 'object') { + opt.brotli = {}; + } + this.zip = new zlib.BrotliCompress(opt.brotli); + } + /* c8 ignore next */ + if (!this.zip) + throw new Error('impossible'); + const zip = this.zip; + zip.on('data', chunk => super.write(chunk)); + zip.on('end', () => super.end()); + zip.on('drain', () => this[ONDRAIN]()); + this.on('resume', () => zip.resume()); + } + else { + this.on('drain', this[ONDRAIN]); + } + this.noDirRecurse = !!opt.noDirRecurse; + this.follow = !!opt.follow; + this.noMtime = !!opt.noMtime; 
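A minimal usage sketch for the Pack stream above, assuming the public `tar` entry point re-exports the class (the import specifier, paths, and output name are illustrative, not from the patch):

// create a gzipped tarball from two paths; add() is a write() that
// returns `this`, so calls chain as described in the header comment
import fs from 'node:fs';
import { Pack } from 'tar';

const pack = new Pack({ cwd: '/some/project', gzip: true });
pack.add('src').add('package.json').end();
pack.pipe(fs.createWriteStream('/tmp/out.tgz'));

Note that the constructor logic above makes gzip and brotli mutually exclusive; requesting both throws a TypeError before any entry is processed.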
+ if (opt.mtime) + this.mtime = opt.mtime; + this.filter = + typeof opt.filter === 'function' ? opt.filter : () => true; + this[QUEUE] = new Yallist(); + this[JOBS] = 0; + this.jobs = Number(opt.jobs) || 4; + this[PROCESSING] = false; + this[ENDED] = false; + } + [WRITE](chunk) { + return super.write(chunk); + } + add(path) { + this.write(path); + return this; + } + end(path, encoding, cb) { + /* c8 ignore start */ + if (typeof path === 'function') { + cb = path; + path = undefined; + } + if (typeof encoding === 'function') { + cb = encoding; + encoding = undefined; + } + /* c8 ignore stop */ + if (path) { + this.add(path); + } + this[ENDED] = true; + this[PROCESS](); + /* c8 ignore next */ + if (cb) + cb(); + return this; + } + write(path) { + if (this[ENDED]) { + throw new Error('write after end'); + } + if (path instanceof ReadEntry) { + this[ADDTARENTRY](path); + } + else { + this[ADDFSENTRY](path); + } + return this.flowing; + } + [ADDTARENTRY](p) { + const absolute = normalizeWindowsPath(path.resolve(this.cwd, p.path)); + // in this case, we don't have to wait for the stat + if (!this.filter(p.path, p)) { + p.resume(); + } + else { + const job = new PackJob(p.path, absolute); + job.entry = new WriteEntryTar(p, this[ENTRYOPT](job)); + job.entry.on('end', () => this[JOBDONE](job)); + this[JOBS] += 1; + this[QUEUE].push(job); + } + this[PROCESS](); + } + [ADDFSENTRY](p) { + const absolute = normalizeWindowsPath(path.resolve(this.cwd, p)); + this[QUEUE].push(new PackJob(p, absolute)); + this[PROCESS](); + } + [STAT](job) { + job.pending = true; + this[JOBS] += 1; + const stat = this.follow ? 'stat' : 'lstat'; + fs[stat](job.absolute, (er, stat) => { + job.pending = false; + this[JOBS] -= 1; + if (er) { + this.emit('error', er); + } + else { + this[ONSTAT](job, stat); + } + }); + } + [ONSTAT](job, stat) { + this.statCache.set(job.absolute, stat); + job.stat = stat; + // now we have the stat, we can filter it. + if (!this.filter(job.path, stat)) { + job.ignore = true; + } + this[PROCESS](); + } + [READDIR](job) { + job.pending = true; + this[JOBS] += 1; + fs.readdir(job.absolute, (er, entries) => { + job.pending = false; + this[JOBS] -= 1; + if (er) { + return this.emit('error', er); + } + this[ONREADDIR](job, entries); + }); + } + [ONREADDIR](job, entries) { + this.readdirCache.set(job.absolute, entries); + job.readdir = entries; + this[PROCESS](); + } + [PROCESS]() { + if (this[PROCESSING]) { + return; + } + this[PROCESSING] = true; + for (let w = this[QUEUE].head; !!w && this[JOBS] < this.jobs; w = w.next) { + this[PROCESSJOB](w.value); + if (w.value.ignore) { + const p = w.next; + this[QUEUE].removeNode(w); + w.next = p; + } + } + this[PROCESSING] = false; + if (this[ENDED] && !this[QUEUE].length && this[JOBS] === 0) { + if (this.zip) { + this.zip.end(EOF); + } + else { + super.write(EOF); + super.end(); + } + } + } + get [CURRENT]() { + return this[QUEUE] && this[QUEUE].head && this[QUEUE].head.value; + } + [JOBDONE](_job) { + this[QUEUE].shift(); + this[JOBS] -= 1; + this[PROCESS](); + } + [PROCESSJOB](job) { + if (job.pending) { + return; + } + if (job.entry) { + if (job === this[CURRENT] && !job.piped) { + this[PIPE](job); + } + return; + } + if (!job.stat) { + const sc = this.statCache.get(job.absolute); + if (sc) { + this[ONSTAT](job, sc); + } + else { + this[STAT](job); + } + } + if (!job.stat) { + return; + } + // filtered out! 
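As the ADDTARENTRY branch above shows, write() accepts a ReadEntry as well as a path string, so entries streamed out of one archive can be repacked into another without touching disk. A sketch under the assumption that `tar` exports both classes (file names are made up):

import fs from 'node:fs';
import { Pack, Parser } from 'tar';

const pack = new Pack();
const parser = new Parser({
  // forward each parsed entry straight into the packer
  onReadEntry: entry => pack.write(entry),
});
parser.on('end', () => pack.end());
fs.createReadStream('in.tar').pipe(parser);
pack.pipe(fs.createWriteStream('out.tar'));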
+ if (job.ignore) { + return; + } + if (!this.noDirRecurse && + job.stat.isDirectory() && + !job.readdir) { + const rc = this.readdirCache.get(job.absolute); + if (rc) { + this[ONREADDIR](job, rc); + } + else { + this[READDIR](job); + } + if (!job.readdir) { + return; + } + } + // we know it doesn't have an entry, because that got checked above + job.entry = this[ENTRY](job); + if (!job.entry) { + job.ignore = true; + return; + } + if (job === this[CURRENT] && !job.piped) { + this[PIPE](job); + } + } + [ENTRYOPT](job) { + return { + onwarn: (code, msg, data) => this.warn(code, msg, data), + noPax: this.noPax, + cwd: this.cwd, + absolute: job.absolute, + preservePaths: this.preservePaths, + maxReadSize: this.maxReadSize, + strict: this.strict, + portable: this.portable, + linkCache: this.linkCache, + statCache: this.statCache, + noMtime: this.noMtime, + mtime: this.mtime, + prefix: this.prefix, + onWriteEntry: this.onWriteEntry, + }; + } + [ENTRY](job) { + this[JOBS] += 1; + try { + const e = new this[WRITEENTRYCLASS](job.path, this[ENTRYOPT](job)); + return e + .on('end', () => this[JOBDONE](job)) + .on('error', er => this.emit('error', er)); + } + catch (er) { + this.emit('error', er); + } + } + [ONDRAIN]() { + if (this[CURRENT] && this[CURRENT].entry) { + this[CURRENT].entry.resume(); + } + } + // like .pipe() but using super, because our write() is special + [PIPE](job) { + job.piped = true; + if (job.readdir) { + job.readdir.forEach(entry => { + const p = job.path; + const base = p === './' ? '' : p.replace(/\/*$/, '/'); + this[ADDFSENTRY](base + entry); + }); + } + const source = job.entry; + const zip = this.zip; + /* c8 ignore start */ + if (!source) + throw new Error('cannot pipe without source'); + /* c8 ignore stop */ + if (zip) { + source.on('data', chunk => { + if (!zip.write(chunk)) { + source.pause(); + } + }); + } + else { + source.on('data', chunk => { + if (!super.write(chunk)) { + source.pause(); + } + }); + } + } + pause() { + if (this.zip) { + this.zip.pause(); + } + return super.pause(); + } + warn(code, message, data = {}) { + warnMethod(this, code, message, data); + } +} +export class PackSync extends Pack { + sync = true; + constructor(opt) { + super(opt); + this[WRITEENTRYCLASS] = WriteEntrySync; + } + // pause/resume are no-ops in sync streams. + pause() { } + resume() { } + [STAT](job) { + const stat = this.follow ? 'statSync' : 'lstatSync'; + this[ONSTAT](job, fs[stat](job.absolute)); + } + [READDIR](job) { + this[ONREADDIR](job, fs.readdirSync(job.absolute)); + } + // gotta get it all in this tick + [PIPE](job) { + const source = job.entry; + const zip = this.zip; + if (job.readdir) { + job.readdir.forEach(entry => { + const p = job.path; + const base = p === './' ? 
'' : p.replace(/\/*$/, '/'); + this[ADDFSENTRY](base + entry); + }); + } + /* c8 ignore start */ + if (!source) + throw new Error('Cannot pipe without source'); + /* c8 ignore stop */ + if (zip) { + source.on('data', chunk => { + zip.write(chunk); + }); + } + else { + source.on('data', chunk => { + super[WRITE](chunk); + }); + } + } +} +//# sourceMappingURL=pack.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/package.json b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/package.json new file mode 100644 index 00000000000000..3dbc1ca591c055 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/package.json @@ -0,0 +1,3 @@ +{ + "type": "module" +} diff --git a/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/parse.js b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/parse.js new file mode 100644 index 00000000000000..f2c802e6eef04d --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/parse.js @@ -0,0 +1,595 @@ +// this[BUFFER] is the remainder of a chunk if we're waiting for +// the full 512 bytes of a header to come in. We will Buffer.concat() +// it to the next write(), which is a mem copy, but a small one. +// +// this[QUEUE] is a Yallist of entries that haven't been emitted +// yet this can only get filled up if the user keeps write()ing after +// a write() returns false, or does a write() with more than one entry +// +// We don't buffer chunks, we always parse them and either create an +// entry, or push it into the active entry. The ReadEntry class knows +// to throw data away if .ignore=true +// +// Shift entry off the buffer when it emits 'end', and emit 'entry' for +// the next one in the list. +// +// At any time, we're pushing body chunks into the entry at WRITEENTRY, +// and waiting for 'end' on the entry at READENTRY +// +// ignored entries get .resume() called on them straight away +import { EventEmitter as EE } from 'events'; +import { BrotliDecompress, Unzip } from 'minizlib'; +import { Yallist } from 'yallist'; +import { Header } from './header.js'; +import { Pax } from './pax.js'; +import { ReadEntry } from './read-entry.js'; +import { warnMethod, } from './warn-method.js'; +const maxMetaEntrySize = 1024 * 1024; +const gzipHeader = Buffer.from([0x1f, 0x8b]); +const STATE = Symbol('state'); +const WRITEENTRY = Symbol('writeEntry'); +const READENTRY = Symbol('readEntry'); +const NEXTENTRY = Symbol('nextEntry'); +const PROCESSENTRY = Symbol('processEntry'); +const EX = Symbol('extendedHeader'); +const GEX = Symbol('globalExtendedHeader'); +const META = Symbol('meta'); +const EMITMETA = Symbol('emitMeta'); +const BUFFER = Symbol('buffer'); +const QUEUE = Symbol('queue'); +const ENDED = Symbol('ended'); +const EMITTEDEND = Symbol('emittedEnd'); +const EMIT = Symbol('emit'); +const UNZIP = Symbol('unzip'); +const CONSUMECHUNK = Symbol('consumeChunk'); +const CONSUMECHUNKSUB = Symbol('consumeChunkSub'); +const CONSUMEBODY = Symbol('consumeBody'); +const CONSUMEMETA = Symbol('consumeMeta'); +const CONSUMEHEADER = Symbol('consumeHeader'); +const CONSUMING = Symbol('consuming'); +const BUFFERCONCAT = Symbol('bufferConcat'); +const MAYBEEND = Symbol('maybeEnd'); +const WRITING = Symbol('writing'); +const ABORTED = Symbol('aborted'); +const DONE = Symbol('onDone'); +const SAW_VALID_ENTRY = Symbol('sawValidEntry'); +const SAW_NULL_BLOCK = Symbol('sawNullBlock'); +const SAW_EOF = Symbol('sawEOF'); +const CLOSESTREAM = Symbol('closeStream'); +const noop = () 
=> true; +export class Parser extends EE { + file; + strict; + maxMetaEntrySize; + filter; + brotli; + writable = true; + readable = false; + [QUEUE] = new Yallist(); + [BUFFER]; + [READENTRY]; + [WRITEENTRY]; + [STATE] = 'begin'; + [META] = ''; + [EX]; + [GEX]; + [ENDED] = false; + [UNZIP]; + [ABORTED] = false; + [SAW_VALID_ENTRY]; + [SAW_NULL_BLOCK] = false; + [SAW_EOF] = false; + [WRITING] = false; + [CONSUMING] = false; + [EMITTEDEND] = false; + constructor(opt = {}) { + super(); + this.file = opt.file || ''; + // these BADARCHIVE errors can't be detected early. listen on DONE. + this.on(DONE, () => { + if (this[STATE] === 'begin' || + this[SAW_VALID_ENTRY] === false) { + // either less than 1 block of data, or all entries were invalid. + // Either way, probably not even a tarball. + this.warn('TAR_BAD_ARCHIVE', 'Unrecognized archive format'); + } + }); + if (opt.ondone) { + this.on(DONE, opt.ondone); + } + else { + this.on(DONE, () => { + this.emit('prefinish'); + this.emit('finish'); + this.emit('end'); + }); + } + this.strict = !!opt.strict; + this.maxMetaEntrySize = opt.maxMetaEntrySize || maxMetaEntrySize; + this.filter = typeof opt.filter === 'function' ? opt.filter : noop; + // Unlike gzip, brotli doesn't have any magic bytes to identify it + // Users need to explicitly tell us they're extracting a brotli file + // Or we infer from the file extension + const isTBR = opt.file && + (opt.file.endsWith('.tar.br') || opt.file.endsWith('.tbr')); + // if it's a tbr file it MIGHT be brotli, but we don't know until + // we look at it and verify it's not a valid tar file. + this.brotli = + !opt.gzip && opt.brotli !== undefined ? opt.brotli + : isTBR ? undefined + : false; + // have to set this so that streams are ok piping into it + this.on('end', () => this[CLOSESTREAM]()); + if (typeof opt.onwarn === 'function') { + this.on('warn', opt.onwarn); + } + if (typeof opt.onReadEntry === 'function') { + this.on('entry', opt.onReadEntry); + } + } + warn(code, message, data = {}) { + warnMethod(this, code, message, data); + } + [CONSUMEHEADER](chunk, position) { + if (this[SAW_VALID_ENTRY] === undefined) { + this[SAW_VALID_ENTRY] = false; + } + let header; + try { + header = new Header(chunk, position, this[EX], this[GEX]); + } + catch (er) { + return this.warn('TAR_ENTRY_INVALID', er); + } + if (header.nullBlock) { + if (this[SAW_NULL_BLOCK]) { + this[SAW_EOF] = true; + // ending an archive with no entries. pointless, but legal. + if (this[STATE] === 'begin') { + this[STATE] = 'header'; + } + this[EMIT]('eof'); + } + else { + this[SAW_NULL_BLOCK] = true; + this[EMIT]('nullBlock'); + } + } + else { + this[SAW_NULL_BLOCK] = false; + if (!header.cksumValid) { + this.warn('TAR_ENTRY_INVALID', 'checksum failure', { header }); + } + else if (!header.path) { + this.warn('TAR_ENTRY_INVALID', 'path is required', { header }); + } + else { + const type = header.type; + if (/^(Symbolic)?Link$/.test(type) && !header.linkpath) { + this.warn('TAR_ENTRY_INVALID', 'linkpath required', { + header, + }); + } + else if (!/^(Symbolic)?Link$/.test(type) && + !/^(Global)?ExtendedHeader$/.test(type) && + header.linkpath) { + this.warn('TAR_ENTRY_INVALID', 'linkpath forbidden', { + header, + }); + } + else { + const entry = (this[WRITEENTRY] = new ReadEntry(header, this[EX], this[GEX])); + // we do this for meta & ignored entries as well, because they + // are still valid tar, or else we wouldn't know to ignore them + if (!this[SAW_VALID_ENTRY]) { + if (entry.remain) { + // this might be the one! 
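Since brotli has no magic bytes, the extension-based inference above only applies when a `file` option names a .tbr or .tar.br archive; a raw stream consumer has to opt in explicitly. A sketch (archive name illustrative, `tar` export assumed):

import fs from 'node:fs';
import { Parser } from 'tar';

// no filename to infer from when piping, so request brotli up front
const parser = new Parser({ brotli: true });
parser.on('entry', entry => {
  console.log(entry.path);
  entry.resume(); // drain the body so parsing can continue
});
fs.createReadStream('archive.tar.br').pipe(parser);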
+ const onend = () => { + if (!entry.invalid) { + this[SAW_VALID_ENTRY] = true; + } + }; + entry.on('end', onend); + } + else { + this[SAW_VALID_ENTRY] = true; + } + } + if (entry.meta) { + if (entry.size > this.maxMetaEntrySize) { + entry.ignore = true; + this[EMIT]('ignoredEntry', entry); + this[STATE] = 'ignore'; + entry.resume(); + } + else if (entry.size > 0) { + this[META] = ''; + entry.on('data', c => (this[META] += c)); + this[STATE] = 'meta'; + } + } + else { + this[EX] = undefined; + entry.ignore = + entry.ignore || !this.filter(entry.path, entry); + if (entry.ignore) { + // probably valid, just not something we care about + this[EMIT]('ignoredEntry', entry); + this[STATE] = entry.remain ? 'ignore' : 'header'; + entry.resume(); + } + else { + if (entry.remain) { + this[STATE] = 'body'; + } + else { + this[STATE] = 'header'; + entry.end(); + } + if (!this[READENTRY]) { + this[QUEUE].push(entry); + this[NEXTENTRY](); + } + else { + this[QUEUE].push(entry); + } + } + } + } + } + } + } + [CLOSESTREAM]() { + queueMicrotask(() => this.emit('close')); + } + [PROCESSENTRY](entry) { + let go = true; + if (!entry) { + this[READENTRY] = undefined; + go = false; + } + else if (Array.isArray(entry)) { + const [ev, ...args] = entry; + this.emit(ev, ...args); + } + else { + this[READENTRY] = entry; + this.emit('entry', entry); + if (!entry.emittedEnd) { + entry.on('end', () => this[NEXTENTRY]()); + go = false; + } + } + return go; + } + [NEXTENTRY]() { + do { } while (this[PROCESSENTRY](this[QUEUE].shift())); + if (!this[QUEUE].length) { + // At this point, there's nothing in the queue, but we may have an + // entry which is being consumed (readEntry). + // If we don't, then we definitely can handle more data. + // If we do, and either it's flowing, or it has never had any data + // written to it, then it needs more. + // The only other possibility is that it has returned false from a + // write() call, so we wait for the next drain to continue. + const re = this[READENTRY]; + const drainNow = !re || re.flowing || re.size === re.remain; + if (drainNow) { + if (!this[WRITING]) { + this.emit('drain'); + } + } + else { + re.once('drain', () => this.emit('drain')); + } + } + } + [CONSUMEBODY](chunk, position) { + // write up to but no more than writeEntry.blockRemain + const entry = this[WRITEENTRY]; + /* c8 ignore start */ + if (!entry) { + throw new Error('attempt to consume body without entry??'); + } + const br = entry.blockRemain ?? 0; + /* c8 ignore stop */ + const c = br >= chunk.length && position === 0 ? + chunk + : chunk.subarray(position, position + br); + entry.write(c); + if (!entry.blockRemain) { + this[STATE] = 'header'; + this[WRITEENTRY] = undefined; + entry.end(); + } + return c.length; + } + [CONSUMEMETA](chunk, position) { + const entry = this[WRITEENTRY]; + const ret = this[CONSUMEBODY](chunk, position); + // if we finished, then the entry is reset + if (!this[WRITEENTRY] && entry) { + this[EMITMETA](entry); + } + return ret; + } + [EMIT](ev, data, extra) { + if (!this[QUEUE].length && !this[READENTRY]) { + this.emit(ev, data, extra); + } + else { + this[QUEUE].push([ev, data, extra]); + } + } + [EMITMETA](entry) { + this[EMIT]('meta', this[META]); + switch (entry.type) { + case 'ExtendedHeader': + case 'OldExtendedHeader': + this[EX] = Pax.parse(this[META], this[EX], false); + break; + case 'GlobalExtendedHeader': + this[GEX] = Pax.parse(this[META], this[GEX], true); + break; + case 'NextFileHasLongPath': + case 'OldGnuLongPath': { + const ex = this[EX] ?? 
Object.create(null); + this[EX] = ex; + ex.path = this[META].replace(/\0.*/, ''); + break; + } + case 'NextFileHasLongLinkpath': { + const ex = this[EX] || Object.create(null); + this[EX] = ex; + ex.linkpath = this[META].replace(/\0.*/, ''); + break; + } + /* c8 ignore start */ + default: + throw new Error('unknown meta: ' + entry.type); + /* c8 ignore stop */ + } + } + abort(error) { + this[ABORTED] = true; + this.emit('abort', error); + // always throws, even in non-strict mode + this.warn('TAR_ABORT', error, { recoverable: false }); + } + write(chunk, encoding, cb) { + if (typeof encoding === 'function') { + cb = encoding; + encoding = undefined; + } + if (typeof chunk === 'string') { + chunk = Buffer.from(chunk, + /* c8 ignore next */ + typeof encoding === 'string' ? encoding : 'utf8'); + } + if (this[ABORTED]) { + /* c8 ignore next */ + cb?.(); + return false; + } + // first write, might be gzipped + const needSniff = this[UNZIP] === undefined || + (this.brotli === undefined && this[UNZIP] === false); + if (needSniff && chunk) { + if (this[BUFFER]) { + chunk = Buffer.concat([this[BUFFER], chunk]); + this[BUFFER] = undefined; + } + if (chunk.length < gzipHeader.length) { + this[BUFFER] = chunk; + /* c8 ignore next */ + cb?.(); + return true; + } + // look for gzip header + for (let i = 0; this[UNZIP] === undefined && i < gzipHeader.length; i++) { + if (chunk[i] !== gzipHeader[i]) { + this[UNZIP] = false; + } + } + const maybeBrotli = this.brotli === undefined; + if (this[UNZIP] === false && maybeBrotli) { + // read the first header to see if it's a valid tar file. If so, + // we can safely assume that it's not actually brotli, despite the + // .tbr or .tar.br file extension. + // if we ended before getting a full chunk, yes, def brotli + if (chunk.length < 512) { + if (this[ENDED]) { + this.brotli = true; + } + else { + this[BUFFER] = chunk; + /* c8 ignore next */ + cb?.(); + return true; + } + } + else { + // if it's tar, it's pretty reliably not brotli, chances of + // that happening are astronomical. + try { + new Header(chunk.subarray(0, 512)); + this.brotli = false; + } + catch (_) { + this.brotli = true; + } + } + } + if (this[UNZIP] === undefined || + (this[UNZIP] === false && this.brotli)) { + const ended = this[ENDED]; + this[ENDED] = false; + this[UNZIP] = + this[UNZIP] === undefined ? + new Unzip({}) + : new BrotliDecompress({}); + this[UNZIP].on('data', chunk => this[CONSUMECHUNK](chunk)); + this[UNZIP].on('error', er => this.abort(er)); + this[UNZIP].on('end', () => { + this[ENDED] = true; + this[CONSUMECHUNK](); + }); + this[WRITING] = true; + const ret = !!this[UNZIP][ended ? 'end' : 'write'](chunk); + this[WRITING] = false; + cb?.(); + return ret; + } + } + this[WRITING] = true; + if (this[UNZIP]) { + this[UNZIP].write(chunk); + } + else { + this[CONSUMECHUNK](chunk); + } + this[WRITING] = false; + // return false if there's a queue, or if the current entry isn't flowing + const ret = this[QUEUE].length ? false + : this[READENTRY] ? this[READENTRY].flowing + : true; + // if we have no queue, then that means a clogged READENTRY + if (!ret && !this[QUEUE].length) { + this[READENTRY]?.once('drain', () => this.emit('drain')); + } + /* c8 ignore next */ + cb?.(); + return ret; + } + [BUFFERCONCAT](c) { + if (c && !this[ABORTED]) { + this[BUFFER] = + this[BUFFER] ? 
Buffer.concat([this[BUFFER], c]) : c; + } + } + [MAYBEEND]() { + if (this[ENDED] && + !this[EMITTEDEND] && + !this[ABORTED] && + !this[CONSUMING]) { + this[EMITTEDEND] = true; + const entry = this[WRITEENTRY]; + if (entry && entry.blockRemain) { + // truncated, likely a damaged file + const have = this[BUFFER] ? this[BUFFER].length : 0; + this.warn('TAR_BAD_ARCHIVE', `Truncated input (needed ${entry.blockRemain} more bytes, only ${have} available)`, { entry }); + if (this[BUFFER]) { + entry.write(this[BUFFER]); + } + entry.end(); + } + this[EMIT](DONE); + } + } + [CONSUMECHUNK](chunk) { + if (this[CONSUMING] && chunk) { + this[BUFFERCONCAT](chunk); + } + else if (!chunk && !this[BUFFER]) { + this[MAYBEEND](); + } + else if (chunk) { + this[CONSUMING] = true; + if (this[BUFFER]) { + this[BUFFERCONCAT](chunk); + const c = this[BUFFER]; + this[BUFFER] = undefined; + this[CONSUMECHUNKSUB](c); + } + else { + this[CONSUMECHUNKSUB](chunk); + } + while (this[BUFFER] && + this[BUFFER]?.length >= 512 && + !this[ABORTED] && + !this[SAW_EOF]) { + const c = this[BUFFER]; + this[BUFFER] = undefined; + this[CONSUMECHUNKSUB](c); + } + this[CONSUMING] = false; + } + if (!this[BUFFER] || this[ENDED]) { + this[MAYBEEND](); + } + } + [CONSUMECHUNKSUB](chunk) { + // we know that we are in CONSUMING mode, so anything written goes into + // the buffer. Advance the position and put any remainder in the buffer. + let position = 0; + const length = chunk.length; + while (position + 512 <= length && + !this[ABORTED] && + !this[SAW_EOF]) { + switch (this[STATE]) { + case 'begin': + case 'header': + this[CONSUMEHEADER](chunk, position); + position += 512; + break; + case 'ignore': + case 'body': + position += this[CONSUMEBODY](chunk, position); + break; + case 'meta': + position += this[CONSUMEMETA](chunk, position); + break; + /* c8 ignore start */ + default: + throw new Error('invalid state: ' + this[STATE]); + /* c8 ignore stop */ + } + } + if (position < length) { + if (this[BUFFER]) { + this[BUFFER] = Buffer.concat([ + chunk.subarray(position), + this[BUFFER], + ]); + } + else { + this[BUFFER] = chunk.subarray(position); + } + } + } + end(chunk, encoding, cb) { + if (typeof chunk === 'function') { + cb = chunk; + encoding = undefined; + chunk = undefined; + } + if (typeof encoding === 'function') { + cb = encoding; + encoding = undefined; + } + if (typeof chunk === 'string') { + chunk = Buffer.from(chunk, encoding); + } + if (cb) + this.once('finish', cb); + if (!this[ABORTED]) { + if (this[UNZIP]) { + /* c8 ignore start */ + if (chunk) + this[UNZIP].write(chunk); + /* c8 ignore stop */ + this[UNZIP].end(); + } + else { + this[ENDED] = true; + if (this.brotli === undefined) + chunk = chunk || Buffer.alloc(0); + if (chunk) + this.write(chunk); + this[MAYBEEND](); + } + } + return this; + } +} +//# sourceMappingURL=parse.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/path-reservations.js b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/path-reservations.js new file mode 100644 index 00000000000000..e63b9c91e9a808 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/path-reservations.js @@ -0,0 +1,166 @@ +// A path exclusive reservation system +// reserve([list, of, paths], fn) +// When the fn is first in line for all its paths, it +// is called with a cb that clears the reservation. +// +// Used by async unpack to avoid clobbering paths in use, +// while still allowing maximal safe parallelization. 
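In use, the contract described above reads roughly like this (a sketch against the PathReservations class defined just below; the paths are made up):

const reservations = new PathReservations();
// fn runs once it is first in line for every reserved path; calling
// done() releases the reservation and wakes the next waiter
reservations.reserve(['a/b/file', 'a/b/link-target'], done => {
  // ... perform the fs operations for this entry ...
  done();
});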
+import { join } from 'node:path'; +import { normalizeUnicode } from './normalize-unicode.js'; +import { stripTrailingSlashes } from './strip-trailing-slashes.js'; +const platform = process.env.TESTING_TAR_FAKE_PLATFORM || process.platform; +const isWindows = platform === 'win32'; +// return a set of parent dirs for a given path +// '/a/b/c/d' -> ['/', '/a', '/a/b', '/a/b/c', '/a/b/c/d'] +const getDirs = (path) => { + const dirs = path + .split('/') + .slice(0, -1) + .reduce((set, path) => { + const s = set[set.length - 1]; + if (s !== undefined) { + path = join(s, path); + } + set.push(path || '/'); + return set; + }, []); + return dirs; +}; +export class PathReservations { + // path => [function or Set] + // A Set object means a directory reservation + // A fn is a direct reservation on that path + #queues = new Map(); + // fn => {paths:[path,...], dirs:[path, ...]} + #reservations = new Map(); + // functions currently running + #running = new Set(); + reserve(paths, fn) { + paths = + isWindows ? + ['win32 parallelization disabled'] + : paths.map(p => { + // don't need normPath, because we skip this entirely for windows + return stripTrailingSlashes(join(normalizeUnicode(p))).toLowerCase(); + }); + const dirs = new Set(paths.map(path => getDirs(path)).reduce((a, b) => a.concat(b))); + this.#reservations.set(fn, { dirs, paths }); + for (const p of paths) { + const q = this.#queues.get(p); + if (!q) { + this.#queues.set(p, [fn]); + } + else { + q.push(fn); + } + } + for (const dir of dirs) { + const q = this.#queues.get(dir); + if (!q) { + this.#queues.set(dir, [new Set([fn])]); + } + else { + const l = q[q.length - 1]; + if (l instanceof Set) { + l.add(fn); + } + else { + q.push(new Set([fn])); + } + } + } + return this.#run(fn); + } + // return the queues for each path the function cares about + // fn => {paths, dirs} + #getQueues(fn) { + const res = this.#reservations.get(fn); + /* c8 ignore start */ + if (!res) { + throw new Error('function does not have any path reservations'); + } + /* c8 ignore stop */ + return { + paths: res.paths.map((path) => this.#queues.get(path)), + dirs: [...res.dirs].map(path => this.#queues.get(path)), + }; + } + // check if fn is first in line for all its paths, and is + // included in the first set for all its dir queues + check(fn) { + const { paths, dirs } = this.#getQueues(fn); + return (paths.every(q => q && q[0] === fn) && + dirs.every(q => q && q[0] instanceof Set && q[0].has(fn))); + } + // run the function if it's first in line and not already running + #run(fn) { + if (this.#running.has(fn) || !this.check(fn)) { + return false; + } + this.#running.add(fn); + fn(() => this.#clear(fn)); + return true; + } + #clear(fn) { + if (!this.#running.has(fn)) { + return false; + } + const res = this.#reservations.get(fn); + /* c8 ignore start */ + if (!res) { + throw new Error('invalid reservation'); + } + /* c8 ignore stop */ + const { paths, dirs } = res; + const next = new Set(); + for (const path of paths) { + const q = this.#queues.get(path); + /* c8 ignore start */ + if (!q || q?.[0] !== fn) { + continue; + } + /* c8 ignore stop */ + const q0 = q[1]; + if (!q0) { + this.#queues.delete(path); + continue; + } + q.shift(); + if (typeof q0 === 'function') { + next.add(q0); + } + else { + for (const f of q0) { + next.add(f); + } + } + } + for (const dir of dirs) { + const q = this.#queues.get(dir); + const q0 = q?.[0]; + /* c8 ignore next - type safety only */ + if (!q || !(q0 instanceof Set)) + continue; + if (q0.size === 1 && q.length === 1) { + 
this.#queues.delete(dir); + continue; + } + else if (q0.size === 1) { + q.shift(); + // next one must be a function, + // or else the Set would've been reused + const n = q[0]; + if (typeof n === 'function') { + next.add(n); + } + } + else { + q0.delete(fn); + } + } + this.#running.delete(fn); + next.forEach(fn => this.#run(fn)); + return true; + } +} +//# sourceMappingURL=path-reservations.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/pax.js b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/pax.js new file mode 100644 index 00000000000000..832808f344da53 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/pax.js @@ -0,0 +1,154 @@ +import { basename } from 'node:path'; +import { Header } from './header.js'; +export class Pax { + atime; + mtime; + ctime; + charset; + comment; + gid; + uid; + gname; + uname; + linkpath; + dev; + ino; + nlink; + path; + size; + mode; + global; + constructor(obj, global = false) { + this.atime = obj.atime; + this.charset = obj.charset; + this.comment = obj.comment; + this.ctime = obj.ctime; + this.dev = obj.dev; + this.gid = obj.gid; + this.global = global; + this.gname = obj.gname; + this.ino = obj.ino; + this.linkpath = obj.linkpath; + this.mtime = obj.mtime; + this.nlink = obj.nlink; + this.path = obj.path; + this.size = obj.size; + this.uid = obj.uid; + this.uname = obj.uname; + } + encode() { + const body = this.encodeBody(); + if (body === '') { + return Buffer.allocUnsafe(0); + } + const bodyLen = Buffer.byteLength(body); + // round up to 512 bytes + // add 512 for header + const bufLen = 512 * Math.ceil(1 + bodyLen / 512); + const buf = Buffer.allocUnsafe(bufLen); + // 0-fill the header section, it might not hit every field + for (let i = 0; i < 512; i++) { + buf[i] = 0; + } + new Header({ + // XXX split the path + // then the path should be PaxHeader + basename, but less than 99, + // prepend with the dirname + /* c8 ignore start */ + path: ('PaxHeader/' + basename(this.path ?? '')).slice(0, 99), + /* c8 ignore stop */ + mode: this.mode || 0o644, + uid: this.uid, + gid: this.gid, + size: bodyLen, + mtime: this.mtime, + type: this.global ? 'GlobalExtendedHeader' : 'ExtendedHeader', + linkpath: '', + uname: this.uname || '', + gname: this.gname || '', + devmaj: 0, + devmin: 0, + atime: this.atime, + ctime: this.ctime, + }).encode(buf); + buf.write(body, 512, bodyLen, 'utf8'); + // null pad after the body + for (let i = bodyLen + 512; i < buf.length; i++) { + buf[i] = 0; + } + return buf; + } + encodeBody() { + return (this.encodeField('path') + + this.encodeField('ctime') + + this.encodeField('atime') + + this.encodeField('dev') + + this.encodeField('ino') + + this.encodeField('nlink') + + this.encodeField('charset') + + this.encodeField('comment') + + this.encodeField('gid') + + this.encodeField('gname') + + this.encodeField('linkpath') + + this.encodeField('mtime') + + this.encodeField('size') + + this.encodeField('uid') + + this.encodeField('uname')); + } + encodeField(field) { + if (this[field] === undefined) { + return ''; + } + const r = this[field]; + const v = r instanceof Date ? r.getTime() / 1000 : r; + const s = ' ' + + (field === 'dev' || field === 'ino' || field === 'nlink' ? + 'SCHILY.' + : '') + + field + + '=' + + v + + '\n'; + const byteLen = Buffer.byteLength(s); + // the digits includes the length of the digits in ascii base-10 + // so if it's 9 characters, then adding 1 for the 9 makes it 10 + // which makes it 11 chars. 
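A worked example of that self-referential length: for the body ` path=foo\n`, byteLength is 10, so digits starts at 2, and since 10 + 2 = 12 is under 100 the record becomes `12 path=foo\n`, itself exactly 12 bytes. The bump just below fires when adding the digits crosses a power of ten: a 98-byte body gives 98 + 2 = 100 >= 100, so three digits and a total of 101. The same arithmetic in isolation (sketch):

const recordLen = body => {
  const byteLen = Buffer.byteLength(body);
  let digits = Math.floor(Math.log(byteLen) / Math.log(10)) + 1;
  if (byteLen + digits >= Math.pow(10, digits)) digits += 1;
  return digits + byteLen;
};
recordLen(' path=foo\n'); // 12
recordLen(' comment=' + 'a'.repeat(88) + '\n'); // 101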
+ let digits = Math.floor(Math.log(byteLen) / Math.log(10)) + 1; + if (byteLen + digits >= Math.pow(10, digits)) { + digits += 1; + } + const len = digits + byteLen; + return len + s; + } + static parse(str, ex, g = false) { + return new Pax(merge(parseKV(str), ex), g); + } +} +const merge = (a, b) => b ? Object.assign({}, b, a) : a; +const parseKV = (str) => str + .replace(/\n$/, '') + .split('\n') + .reduce(parseKVLine, Object.create(null)); +const parseKVLine = (set, line) => { + const n = parseInt(line, 10); + // XXX Values with \n in them will fail this. + // Refactor to not be a naive line-by-line parse. + if (n !== Buffer.byteLength(line) + 1) { + return set; + } + line = line.slice((n + ' ').length); + const kv = line.split('='); + const r = kv.shift(); + if (!r) { + return set; + } + const k = r.replace(/^SCHILY\.(dev|ino|nlink)/, '$1'); + const v = kv.join('='); + set[k] = + /^([A-Z]+\.)?([mac]|birth|creation)time$/.test(k) ? + new Date(Number(v) * 1000) + : /^[0-9]+$/.test(v) ? +v + : v; + return set; +}; +//# sourceMappingURL=pax.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/read-entry.js b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/read-entry.js new file mode 100644 index 00000000000000..23cc673e610879 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/read-entry.js @@ -0,0 +1,136 @@ +import { Minipass } from 'minipass'; +import { normalizeWindowsPath } from './normalize-windows-path.js'; +export class ReadEntry extends Minipass { + extended; + globalExtended; + header; + startBlockSize; + blockRemain; + remain; + type; + meta = false; + ignore = false; + path; + mode; + uid; + gid; + uname; + gname; + size = 0; + mtime; + atime; + ctime; + linkpath; + dev; + ino; + nlink; + invalid = false; + absolute; + unsupported = false; + constructor(header, ex, gex) { + super({}); + // read entries always start life paused. this is to avoid the + // situation where Minipass's auto-ending empty streams results + // in an entry ending before we're ready for it. + this.pause(); + this.extended = ex; + this.globalExtended = gex; + this.header = header; + /* c8 ignore start */ + this.remain = header.size ?? 0; + /* c8 ignore stop */ + this.startBlockSize = 512 * Math.ceil(this.remain / 512); + this.blockRemain = this.startBlockSize; + this.type = header.type; + switch (this.type) { + case 'File': + case 'OldFile': + case 'Link': + case 'SymbolicLink': + case 'CharacterDevice': + case 'BlockDevice': + case 'Directory': + case 'FIFO': + case 'ContiguousFile': + case 'GNUDumpDir': + break; + case 'NextFileHasLongLinkpath': + case 'NextFileHasLongPath': + case 'OldGnuLongPath': + case 'GlobalExtendedHeader': + case 'ExtendedHeader': + case 'OldExtendedHeader': + this.meta = true; + break; + // NOTE: gnutar and bsdtar treat unrecognized types as 'File' + // it may be worth doing the same, but with a warning. + default: + this.ignore = true; + } + /* c8 ignore start */ + if (!header.path) { + throw new Error('no path provided for tar.ReadEntry'); + } + /* c8 ignore stop */ + this.path = normalizeWindowsPath(header.path); + this.mode = header.mode; + if (this.mode) { + this.mode = this.mode & 0o7777; + } + this.uid = header.uid; + this.gid = header.gid; + this.uname = header.uname; + this.gname = header.gname; + this.size = this.remain; + this.mtime = header.mtime; + this.atime = header.atime; + this.ctime = header.ctime; + /* c8 ignore start */ + this.linkpath = + header.linkpath ? 
+ normalizeWindowsPath(header.linkpath) + : undefined; + /* c8 ignore stop */ + this.uname = header.uname; + this.gname = header.gname; + if (ex) { + this.#slurp(ex); + } + if (gex) { + this.#slurp(gex, true); + } + } + write(data) { + const writeLen = data.length; + if (writeLen > this.blockRemain) { + throw new Error('writing more to entry than is appropriate'); + } + const r = this.remain; + const br = this.blockRemain; + this.remain = Math.max(0, r - writeLen); + this.blockRemain = Math.max(0, br - writeLen); + if (this.ignore) { + return true; + } + if (r >= writeLen) { + return super.write(data); + } + // r < writeLen + return super.write(data.subarray(0, r)); + } + #slurp(ex, gex = false) { + if (ex.path) + ex.path = normalizeWindowsPath(ex.path); + if (ex.linkpath) + ex.linkpath = normalizeWindowsPath(ex.linkpath); + Object.assign(this, Object.fromEntries(Object.entries(ex).filter(([k, v]) => { + // we slurp in everything except for the path attribute in + // a global extended header, because that's weird. Also, any + // null/undefined values are ignored. + return !(v === null || + v === undefined || + (k === 'path' && gex)); + }))); + } +} +//# sourceMappingURL=read-entry.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/replace.js b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/replace.js new file mode 100644 index 00000000000000..c461a4c7d8b63c --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/replace.js @@ -0,0 +1,225 @@ +// tar -r +import { WriteStream, WriteStreamSync } from '@isaacs/fs-minipass'; +import fs from 'node:fs'; +import path from 'node:path'; +import { Header } from './header.js'; +import { list } from './list.js'; +import { makeCommand } from './make-command.js'; +import { isFile, } from './options.js'; +import { Pack, PackSync } from './pack.js'; +// starting at the head of the file, read a Header +// If the checksum is invalid, that's our position to start writing +// If it is, jump forward by the specified size (round up to 512) +// and try again. +// Write the new Pack stream starting there. 
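In block terms, each valid entry occupies a 512-byte header plus its body rounded up to a whole number of 512-byte blocks, so the scan can hop from header to header without reading bodies. A sketch of the arithmetic used below, where `h` is a parsed Header found at byte offset `position`:

// body size, rounded up to the 512-byte block size
const entryBlockSize = 512 * Math.ceil((h.size || 0) / 512);
// the next header, if any, starts one header block after the body
const nextHeader = position + 512 + entryBlockSize;
// e.g. a 100-byte file consumes 512 (header) + 512 (body) = 1024 bytes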
+const replaceSync = (opt, files) => { + const p = new PackSync(opt); + let threw = true; + let fd; + let position; + try { + try { + fd = fs.openSync(opt.file, 'r+'); + } + catch (er) { + if (er?.code === 'ENOENT') { + fd = fs.openSync(opt.file, 'w+'); + } + else { + throw er; + } + } + const st = fs.fstatSync(fd); + const headBuf = Buffer.alloc(512); + POSITION: for (position = 0; position < st.size; position += 512) { + for (let bufPos = 0, bytes = 0; bufPos < 512; bufPos += bytes) { + bytes = fs.readSync(fd, headBuf, bufPos, headBuf.length - bufPos, position + bufPos); + if (position === 0 && + headBuf[0] === 0x1f && + headBuf[1] === 0x8b) { + throw new Error('cannot append to compressed archives'); + } + if (!bytes) { + break POSITION; + } + } + const h = new Header(headBuf); + if (!h.cksumValid) { + break; + } + const entryBlockSize = 512 * Math.ceil((h.size || 0) / 512); + if (position + entryBlockSize + 512 > st.size) { + break; + } + // the 512 for the header we just parsed will be added as well + // also jump ahead all the blocks for the body + position += entryBlockSize; + if (opt.mtimeCache && h.mtime) { + opt.mtimeCache.set(String(h.path), h.mtime); + } + } + threw = false; + streamSync(opt, p, position, fd, files); + } + finally { + if (threw) { + try { + fs.closeSync(fd); + } + catch (er) { } + } + } +}; +const streamSync = (opt, p, position, fd, files) => { + const stream = new WriteStreamSync(opt.file, { + fd: fd, + start: position, + }); + p.pipe(stream); + addFilesSync(p, files); +}; +const replaceAsync = (opt, files) => { + files = Array.from(files); + const p = new Pack(opt); + const getPos = (fd, size, cb_) => { + const cb = (er, pos) => { + if (er) { + fs.close(fd, _ => cb_(er)); + } + else { + cb_(null, pos); + } + }; + let position = 0; + if (size === 0) { + return cb(null, 0); + } + let bufPos = 0; + const headBuf = Buffer.alloc(512); + const onread = (er, bytes) => { + if (er || typeof bytes === 'undefined') { + return cb(er); + } + bufPos += bytes; + if (bufPos < 512 && bytes) { + return fs.read(fd, headBuf, bufPos, headBuf.length - bufPos, position + bufPos, onread); + } + if (position === 0 && + headBuf[0] === 0x1f && + headBuf[1] === 0x8b) { + return cb(new Error('cannot append to compressed archives')); + } + // truncated header + if (bufPos < 512) { + return cb(null, position); + } + const h = new Header(headBuf); + if (!h.cksumValid) { + return cb(null, position); + } + /* c8 ignore next */ + const entryBlockSize = 512 * Math.ceil((h.size ?? 
0) / 512); + if (position + entryBlockSize + 512 > size) { + return cb(null, position); + } + position += entryBlockSize + 512; + if (position >= size) { + return cb(null, position); + } + if (opt.mtimeCache && h.mtime) { + opt.mtimeCache.set(String(h.path), h.mtime); + } + bufPos = 0; + fs.read(fd, headBuf, 0, 512, position, onread); + }; + fs.read(fd, headBuf, 0, 512, position, onread); + }; + const promise = new Promise((resolve, reject) => { + p.on('error', reject); + let flag = 'r+'; + const onopen = (er, fd) => { + if (er && er.code === 'ENOENT' && flag === 'r+') { + flag = 'w+'; + return fs.open(opt.file, flag, onopen); + } + if (er || !fd) { + return reject(er); + } + fs.fstat(fd, (er, st) => { + if (er) { + return fs.close(fd, () => reject(er)); + } + getPos(fd, st.size, (er, position) => { + if (er) { + return reject(er); + } + const stream = new WriteStream(opt.file, { + fd: fd, + start: position, + }); + p.pipe(stream); + stream.on('error', reject); + stream.on('close', resolve); + addFilesAsync(p, files); + }); + }); + }; + fs.open(opt.file, flag, onopen); + }); + return promise; +}; +const addFilesSync = (p, files) => { + files.forEach(file => { + if (file.charAt(0) === '@') { + list({ + file: path.resolve(p.cwd, file.slice(1)), + sync: true, + noResume: true, + onReadEntry: entry => p.add(entry), + }); + } + else { + p.add(file); + } + }); + p.end(); +}; +const addFilesAsync = async (p, files) => { + for (let i = 0; i < files.length; i++) { + const file = String(files[i]); + if (file.charAt(0) === '@') { + await list({ + file: path.resolve(String(p.cwd), file.slice(1)), + noResume: true, + onReadEntry: entry => p.add(entry), + }); + } + else { + p.add(file); + } + } + p.end(); +}; +export const replace = makeCommand(replaceSync, replaceAsync, +/* c8 ignore start */ +() => { + throw new TypeError('file is required'); +}, () => { + throw new TypeError('file is required'); +}, +/* c8 ignore stop */ +(opt, entries) => { + if (!isFile(opt)) { + throw new TypeError('file is required'); + } + if (opt.gzip || + opt.brotli || + opt.file.endsWith('.br') || + opt.file.endsWith('.tbr')) { + throw new TypeError('cannot append to compressed archives'); + } + if (!entries?.length) { + throw new TypeError('no paths specified to add/replace'); + } +}); +//# sourceMappingURL=replace.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/strip-absolute-path.js b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/strip-absolute-path.js new file mode 100644 index 00000000000000..cce5ff80b00db3 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/strip-absolute-path.js @@ -0,0 +1,25 @@ +// unix absolute paths are also absolute on win32, so we use this for both +import { win32 } from 'node:path'; +const { isAbsolute, parse } = win32; +// returns [root, stripped] +// Note that windows will think that //x/y/z/a has a "root" of //x/y, and in +// those cases, we want to sanitize it to x/y/z/a, not z/a, so we strip / +// explicitly if it's the first character. +// drive-specific relative paths on Windows get their root stripped off even +// though they are not absolute, so `c:../foo` becomes ['c:', '../foo'] +export const stripAbsolutePath = (path) => { + let r = ''; + let parsed = parse(path); + while (isAbsolute(path) || parsed.root) { + // windows will think that //x/y/z has a "root" of //x/y/ + // but strip the //?/C:/ off of //?/C:/path + const root = path.charAt(0) === '/' && path.slice(0, 4) !== '//?/' ? 
+ '/' + : parsed.root; + path = path.slice(root.length); + r += root; + parsed = parse(path); + } + return [r, path]; +}; +//# sourceMappingURL=strip-absolute-path.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/strip-trailing-slashes.js b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/strip-trailing-slashes.js new file mode 100644 index 00000000000000..ace4218a7547bf --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/strip-trailing-slashes.js @@ -0,0 +1,14 @@ +// warning: extremely hot code path. +// This has been meticulously optimized for use +// within npm install on large package trees. +// Do not edit without careful benchmarking. +export const stripTrailingSlashes = (str) => { + let i = str.length - 1; + let slashesStart = -1; + while (i > -1 && str.charAt(i) === '/') { + slashesStart = i; + i--; + } + return slashesStart === -1 ? str : str.slice(0, slashesStart); +}; +//# sourceMappingURL=strip-trailing-slashes.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/symlink-error.js b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/symlink-error.js new file mode 100644 index 00000000000000..d31766e2e0afa0 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/symlink-error.js @@ -0,0 +1,15 @@ +export class SymlinkError extends Error { + path; + symlink; + syscall = 'symlink'; + code = 'TAR_SYMLINK_ERROR'; + constructor(symlink, path) { + super('TAR_SYMLINK_ERROR: Cannot extract through symbolic link'); + this.symlink = symlink; + this.path = path; + } + get name() { + return 'SymlinkError'; + } +} +//# sourceMappingURL=symlink-error.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/types.js b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/types.js new file mode 100644 index 00000000000000..27b982ae1e0922 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/types.js @@ -0,0 +1,45 @@ +export const isCode = (c) => name.has(c); +export const isName = (c) => code.has(c); +// map types from key to human-friendly name +export const name = new Map([ + ['0', 'File'], + // same as File + ['', 'OldFile'], + ['1', 'Link'], + ['2', 'SymbolicLink'], + // Devices and FIFOs aren't fully supported + // they are parsed, but skipped when unpacking + ['3', 'CharacterDevice'], + ['4', 'BlockDevice'], + ['5', 'Directory'], + ['6', 'FIFO'], + // same as File + ['7', 'ContiguousFile'], + // pax headers + ['g', 'GlobalExtendedHeader'], + ['x', 'ExtendedHeader'], + // vendor-specific stuff + // skip + ['A', 'SolarisACL'], + // like 5, but with data, which should be skipped + ['D', 'GNUDumpDir'], + // metadata only, skip + ['I', 'Inode'], + // data = link path of next file + ['K', 'NextFileHasLongLinkpath'], + // data = path of next file + ['L', 'NextFileHasLongPath'], + // skip + ['M', 'ContinuationFile'], + // like L + ['N', 'OldGnuLongPath'], + // skip + ['S', 'SparseFile'], + // skip + ['V', 'TapeVolumeHeader'], + // like x + ['X', 'OldExtendedHeader'], +]); +// map the other direction +export const code = new Map(Array.from(name).map(kv => [kv[1], kv[0]])); +//# sourceMappingURL=types.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/unpack.js b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/unpack.js new file mode 100644 index 00000000000000..6e744cfc1a6f9f --- /dev/null +++ 
b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/unpack.js @@ -0,0 +1,888 @@ +// the PEND/UNPEND stuff tracks whether we're ready to emit end/close yet. +// but the path reservations are required to avoid race conditions where +// parallelized unpack ops may mess with one another, due to dependencies +// (like a Link depending on its target) or destructive operations (like +// clobbering an fs object to create one of a different type.) +import * as fsm from '@isaacs/fs-minipass'; +import assert from 'node:assert'; +import { randomBytes } from 'node:crypto'; +import fs from 'node:fs'; +import path from 'node:path'; +import { getWriteFlag } from './get-write-flag.js'; +import { mkdir, mkdirSync } from './mkdir.js'; +import { normalizeUnicode } from './normalize-unicode.js'; +import { normalizeWindowsPath } from './normalize-windows-path.js'; +import { Parser } from './parse.js'; +import { stripAbsolutePath } from './strip-absolute-path.js'; +import { stripTrailingSlashes } from './strip-trailing-slashes.js'; +import * as wc from './winchars.js'; +import { PathReservations } from './path-reservations.js'; +const ONENTRY = Symbol('onEntry'); +const CHECKFS = Symbol('checkFs'); +const CHECKFS2 = Symbol('checkFs2'); +const PRUNECACHE = Symbol('pruneCache'); +const ISREUSABLE = Symbol('isReusable'); +const MAKEFS = Symbol('makeFs'); +const FILE = Symbol('file'); +const DIRECTORY = Symbol('directory'); +const LINK = Symbol('link'); +const SYMLINK = Symbol('symlink'); +const HARDLINK = Symbol('hardlink'); +const UNSUPPORTED = Symbol('unsupported'); +const CHECKPATH = Symbol('checkPath'); +const MKDIR = Symbol('mkdir'); +const ONERROR = Symbol('onError'); +const PENDING = Symbol('pending'); +const PEND = Symbol('pend'); +const UNPEND = Symbol('unpend'); +const ENDED = Symbol('ended'); +const MAYBECLOSE = Symbol('maybeClose'); +const SKIP = Symbol('skip'); +const DOCHOWN = Symbol('doChown'); +const UID = Symbol('uid'); +const GID = Symbol('gid'); +const CHECKED_CWD = Symbol('checkedCwd'); +const platform = process.env.TESTING_TAR_FAKE_PLATFORM || process.platform; +const isWindows = platform === 'win32'; +const DEFAULT_MAX_DEPTH = 1024; +// Unlinks on Windows are not atomic. +// +// This means that if you have a file entry, followed by another +// file entry with an identical name, and you cannot re-use the file +// (because it's a hardlink, or because unlink:true is set, or it's +// Windows, which does not have useful nlink values), then the unlink +// will be committed to the disk AFTER the new file has been written +// over the old one, deleting the new file. +// +// To work around this, on Windows systems, we rename the file and then +// delete the renamed file. It's a sloppy kludge, but frankly, I do not +// know of a better way to do this, given windows' non-atomic unlink +// semantics. +// +// See: https://github.com/npm/node-tar/issues/183 +/* c8 ignore start */ +const unlinkFile = (path, cb) => { + if (!isWindows) { + return fs.unlink(path, cb); + } + const name = path + '.DELETE.' + randomBytes(16).toString('hex'); + fs.rename(path, name, er => { + if (er) { + return cb(er); + } + fs.unlink(name, cb); + }); +}; +/* c8 ignore stop */ +/* c8 ignore start */ +const unlinkFileSync = (path) => { + if (!isWindows) { + return fs.unlinkSync(path); + } + const name = path + '.DELETE.' 
+ randomBytes(16).toString('hex'); + fs.renameSync(path, name); + fs.unlinkSync(name); +}; +/* c8 ignore stop */ +// this.gid, entry.gid, this.processUid +const uint32 = (a, b, c) => a !== undefined && a === a >>> 0 ? a + : b !== undefined && b === b >>> 0 ? b + : c; +// clear the cache if it's a case-insensitive unicode-squashing match. +// we can't know if the current file system is case-sensitive or supports +// unicode fully, so we check for similarity on the maximally compatible +// representation. Err on the side of pruning, since all it's doing is +// preventing lstats, and it's not the end of the world if we get a false +// positive. +// Note that on windows, we always drop the entire cache whenever a +// symbolic link is encountered, because 8.3 filenames are impossible +// to reason about, and collisions are hazards rather than just failures. +const cacheKeyNormalize = (path) => stripTrailingSlashes(normalizeWindowsPath(normalizeUnicode(path))).toLowerCase(); +// remove all cache entries matching ${abs}/** +const pruneCache = (cache, abs) => { + abs = cacheKeyNormalize(abs); + for (const path of cache.keys()) { + const pnorm = cacheKeyNormalize(path); + if (pnorm === abs || pnorm.indexOf(abs + '/') === 0) { + cache.delete(path); + } + } +}; +const dropCache = (cache) => { + for (const key of cache.keys()) { + cache.delete(key); + } +}; +export class Unpack extends Parser { + [ENDED] = false; + [CHECKED_CWD] = false; + [PENDING] = 0; + reservations = new PathReservations(); + transform; + writable = true; + readable = false; + dirCache; + uid; + gid; + setOwner; + preserveOwner; + processGid; + processUid; + maxDepth; + forceChown; + win32; + newer; + keep; + noMtime; + preservePaths; + unlink; + cwd; + strip; + processUmask; + umask; + dmode; + fmode; + chmod; + constructor(opt = {}) { + opt.ondone = () => { + this[ENDED] = true; + this[MAYBECLOSE](); + }; + super(opt); + this.transform = opt.transform; + this.dirCache = opt.dirCache || new Map(); + this.chmod = !!opt.chmod; + if (typeof opt.uid === 'number' || typeof opt.gid === 'number') { + // need both or neither + if (typeof opt.uid !== 'number' || + typeof opt.gid !== 'number') { + throw new TypeError('cannot set owner without number uid and gid'); + } + if (opt.preserveOwner) { + throw new TypeError('cannot preserve owner in archive and also set owner explicitly'); + } + this.uid = opt.uid; + this.gid = opt.gid; + this.setOwner = true; + } + else { + this.uid = undefined; + this.gid = undefined; + this.setOwner = false; + } + // default true for root + if (opt.preserveOwner === undefined && + typeof opt.uid !== 'number') { + this.preserveOwner = !!(process.getuid && process.getuid() === 0); + } + else { + this.preserveOwner = !!opt.preserveOwner; + } + this.processUid = + (this.preserveOwner || this.setOwner) && process.getuid ? + process.getuid() + : undefined; + this.processGid = + (this.preserveOwner || this.setOwner) && process.getgid ? + process.getgid() + : undefined; + // prevent excessively deep nesting of subfolders + // set to `Infinity` to remove this restriction + this.maxDepth = + typeof opt.maxDepth === 'number' ? + opt.maxDepth + : DEFAULT_MAX_DEPTH; + // mostly just for testing, but useful in some cases. + // Forcibly trigger a chown on every entry, no matter what + this.forceChown = opt.forceChown === true; + // turn ><?| in filenames into 0xf000-higher encoded forms + this.win32 = !!opt.win32 || isWindows; + // do not unpack over files that are newer than they are + this.newer = !!opt.newer; + // do not unpack over ANY files + this.keep = !!opt.keep; + // do not set mtime/atime of extracted entries + this.noMtime = !!opt.noMtime; + // allow .. in path entries + this.preservePaths = !!opt.preservePaths; + // unlink files and links before writing. This breaks existing hard + // links, and removes symlink directories rather than erroring + this.unlink = !!opt.unlink; + this.cwd = normalizeWindowsPath(path.resolve(opt.cwd || process.cwd())); + this.strip = Number(opt.strip) || 0; + // if we're not chmodding, then we don't need the process umask + this.processUmask = this.chmod ? process.umask() : 0; + this.umask = typeof opt.umask === 'number' ? opt.umask : this.processUmask; + // default mode for dirs created as parents + this.dmode = opt.dmode || 0o0777 & ~this.umask; + this.fmode = opt.fmode || 0o0666 & ~this.umask; + this.on('entry', entry => this[ONENTRY](entry)); + } + // a bad or damaged archive is a warning for Parser, but an error
Mark those errors as unrecoverable, because + // the Unpack contract cannot be met. + warn(code, msg, data = {}) { + if (code === 'TAR_BAD_ARCHIVE' || code === 'TAR_ABORT') { + data.recoverable = false; + } + return super.warn(code, msg, data); + } + [MAYBECLOSE]() { + if (this[ENDED] && this[PENDING] === 0) { + this.emit('prefinish'); + this.emit('finish'); + this.emit('end'); + } + } + [CHECKPATH](entry) { + const p = normalizeWindowsPath(entry.path); + const parts = p.split('/'); + if (this.strip) { + if (parts.length < this.strip) { + return false; + } + if (entry.type === 'Link') { + const linkparts = normalizeWindowsPath(String(entry.linkpath)).split('/'); + if (linkparts.length >= this.strip) { + entry.linkpath = linkparts.slice(this.strip).join('/'); + } + else { + return false; + } + } + parts.splice(0, this.strip); + entry.path = parts.join('/'); + } + if (isFinite(this.maxDepth) && parts.length > this.maxDepth) { + this.warn('TAR_ENTRY_ERROR', 'path excessively deep', { + entry, + path: p, + depth: parts.length, + maxDepth: this.maxDepth, + }); + return false; + } + if (!this.preservePaths) { + if (parts.includes('..') || + /* c8 ignore next */ + (isWindows && /^[a-z]:\.\.$/i.test(parts[0] ?? ''))) { + this.warn('TAR_ENTRY_ERROR', `path contains '..'`, { + entry, + path: p, + }); + return false; + } + // strip off the root + const [root, stripped] = stripAbsolutePath(p); + if (root) { + entry.path = String(stripped); + this.warn('TAR_ENTRY_INFO', `stripping ${root} from absolute path`, { + entry, + path: p, + }); + } + } + if (path.isAbsolute(entry.path)) { + entry.absolute = normalizeWindowsPath(path.resolve(entry.path)); + } + else { + entry.absolute = normalizeWindowsPath(path.resolve(this.cwd, entry.path)); + } + // if we somehow ended up with a path that escapes the cwd, and we are + // not in preservePaths mode, then something is fishy! This should have + // been prevented above, so ignore this for coverage. + /* c8 ignore start - defense in depth */ + if (!this.preservePaths && + typeof entry.absolute === 'string' && + entry.absolute.indexOf(this.cwd + '/') !== 0 && + entry.absolute !== this.cwd) { + this.warn('TAR_ENTRY_ERROR', 'path escaped extraction target', { + entry, + path: normalizeWindowsPath(entry.path), + resolvedPath: entry.absolute, + cwd: this.cwd, + }); + return false; + } + /* c8 ignore stop */ + // an archive can set properties on the extraction directory, but it + // may not replace the cwd with a different kind of thing entirely. 
+ if (entry.absolute === this.cwd && + entry.type !== 'Directory' && + entry.type !== 'GNUDumpDir') { + return false; + } + // only encode : chars that aren't drive letter indicators + if (this.win32) { + const { root: aRoot } = path.win32.parse(String(entry.absolute)); + entry.absolute = + aRoot + wc.encode(String(entry.absolute).slice(aRoot.length)); + const { root: pRoot } = path.win32.parse(entry.path); + entry.path = pRoot + wc.encode(entry.path.slice(pRoot.length)); + } + return true; + } + [ONENTRY](entry) { + if (!this[CHECKPATH](entry)) { + return entry.resume(); + } + assert.equal(typeof entry.absolute, 'string'); + switch (entry.type) { + case 'Directory': + case 'GNUDumpDir': + if (entry.mode) { + entry.mode = entry.mode | 0o700; + } + // eslint-disable-next-line no-fallthrough + case 'File': + case 'OldFile': + case 'ContiguousFile': + case 'Link': + case 'SymbolicLink': + return this[CHECKFS](entry); + case 'CharacterDevice': + case 'BlockDevice': + case 'FIFO': + default: + return this[UNSUPPORTED](entry); + } + } + [ONERROR](er, entry) { + // Cwd has to exist, or else nothing works. That's serious. + // Other errors are warnings, which raise the error in strict + // mode, but otherwise continue on. + if (er.name === 'CwdError') { + this.emit('error', er); + } + else { + this.warn('TAR_ENTRY_ERROR', er, { entry }); + this[UNPEND](); + entry.resume(); + } + } + [MKDIR](dir, mode, cb) { + mkdir(normalizeWindowsPath(dir), { + uid: this.uid, + gid: this.gid, + processUid: this.processUid, + processGid: this.processGid, + umask: this.processUmask, + preserve: this.preservePaths, + unlink: this.unlink, + cache: this.dirCache, + cwd: this.cwd, + mode: mode, + }, cb); + } + [DOCHOWN](entry) { + // in preserve owner mode, chown if the entry doesn't match process + // in set owner mode, chown if setting doesn't match process + return (this.forceChown || + (this.preserveOwner && + ((typeof entry.uid === 'number' && + entry.uid !== this.processUid) || + (typeof entry.gid === 'number' && + entry.gid !== this.processGid))) || + (typeof this.uid === 'number' && + this.uid !== this.processUid) || + (typeof this.gid === 'number' && this.gid !== this.processGid)); + } + [UID](entry) { + return uint32(this.uid, entry.uid, this.processUid); + } + [GID](entry) { + return uint32(this.gid, entry.gid, this.processGid); + } + [FILE](entry, fullyDone) { + const mode = typeof entry.mode === 'number' ? + entry.mode & 0o7777 + : this.fmode; + const stream = new fsm.WriteStream(String(entry.absolute), { + // slight lie, but it can be numeric flags + flags: getWriteFlag(entry.size), + mode: mode, + autoClose: false, + }); + stream.on('error', (er) => { + if (stream.fd) { + fs.close(stream.fd, () => { }); + } + // flush all the data out so that we aren't left hanging + // if the error wasn't actually fatal. otherwise the parse + // is blocked, and we never proceed. 
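+ // a no-op write() that reports success sends the rest of this entry's + // bytes to a bit bucket, roughly like piping them to /dev/null, so the + // parser is never left waiting on a dead stream.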
+ stream.write = () => true; + this[ONERROR](er, entry); + fullyDone(); + }); + let actions = 1; + const done = (er) => { + if (er) { + /* c8 ignore start - we should always have a fd by now */ + if (stream.fd) { + fs.close(stream.fd, () => { }); + } + /* c8 ignore stop */ + this[ONERROR](er, entry); + fullyDone(); + return; + } + if (--actions === 0) { + if (stream.fd !== undefined) { + fs.close(stream.fd, er => { + if (er) { + this[ONERROR](er, entry); + } + else { + this[UNPEND](); + } + fullyDone(); + }); + } + } + }; + stream.on('finish', () => { + // if futimes fails, try utimes + // if utimes fails, fail with the original error + // same for fchown/chown + const abs = String(entry.absolute); + const fd = stream.fd; + if (typeof fd === 'number' && entry.mtime && !this.noMtime) { + actions++; + const atime = entry.atime || new Date(); + const mtime = entry.mtime; + fs.futimes(fd, atime, mtime, er => er ? + fs.utimes(abs, atime, mtime, er2 => done(er2 && er)) + : done()); + } + if (typeof fd === 'number' && this[DOCHOWN](entry)) { + actions++; + const uid = this[UID](entry); + const gid = this[GID](entry); + if (typeof uid === 'number' && typeof gid === 'number') { + fs.fchown(fd, uid, gid, er => er ? + fs.chown(abs, uid, gid, er2 => done(er2 && er)) + : done()); + } + } + done(); + }); + const tx = this.transform ? this.transform(entry) || entry : entry; + if (tx !== entry) { + tx.on('error', (er) => { + this[ONERROR](er, entry); + fullyDone(); + }); + entry.pipe(tx); + } + tx.pipe(stream); + } + [DIRECTORY](entry, fullyDone) { + const mode = typeof entry.mode === 'number' ? + entry.mode & 0o7777 + : this.dmode; + this[MKDIR](String(entry.absolute), mode, er => { + if (er) { + this[ONERROR](er, entry); + fullyDone(); + return; + } + let actions = 1; + const done = () => { + if (--actions === 0) { + fullyDone(); + this[UNPEND](); + entry.resume(); + } + }; + if (entry.mtime && !this.noMtime) { + actions++; + fs.utimes(String(entry.absolute), entry.atime || new Date(), entry.mtime, done); + } + if (this[DOCHOWN](entry)) { + actions++; + fs.chown(String(entry.absolute), Number(this[UID](entry)), Number(this[GID](entry)), done); + } + done(); + }); + } + [UNSUPPORTED](entry) { + entry.unsupported = true; + this.warn('TAR_ENTRY_UNSUPPORTED', `unsupported entry type: ${entry.type}`, { entry }); + entry.resume(); + } + [SYMLINK](entry, done) { + this[LINK](entry, String(entry.linkpath), 'symlink', done); + } + [HARDLINK](entry, done) { + const linkpath = normalizeWindowsPath(path.resolve(this.cwd, String(entry.linkpath))); + this[LINK](entry, linkpath, 'link', done); + } + [PEND]() { + this[PENDING]++; + } + [UNPEND]() { + this[PENDING]--; + this[MAYBECLOSE](); + } + [SKIP](entry) { + this[UNPEND](); + entry.resume(); + } + // Check if we can reuse an existing filesystem entry safely and + // overwrite it, rather than unlinking and recreating + // Windows doesn't report a useful nlink, so we just never reuse entries + [ISREUSABLE](entry, st) { + return (entry.type === 'File' && + !this.unlink && + st.isFile() && + st.nlink <= 1 && + !isWindows); + } + // check if a thing is there, and if so, try to clobber it + [CHECKFS](entry) { + this[PEND](); + const paths = [entry.path]; + if (entry.linkpath) { + paths.push(entry.linkpath); + } + this.reservations.reserve(paths, done => this[CHECKFS2](entry, done)); + } + [PRUNECACHE](entry) { + // if we are not creating a directory, and the path is in the dirCache, + // then that means we are about to delete the directory we created + // previously, and 
it is no longer going to be a directory, and neither + // is any of its children. + // If a symbolic link is encountered, all bets are off. There is no + // reasonable way to sanitize the cache in such a way we will be able to + // avoid having filesystem collisions. If this happens with a non-symlink + // entry, it'll just fail to unpack, but a symlink to a directory, using an + // 8.3 shortname or certain unicode attacks, can evade detection and lead + // to arbitrary writes to anywhere on the system. + if (entry.type === 'SymbolicLink') { + dropCache(this.dirCache); + } + else if (entry.type !== 'Directory') { + pruneCache(this.dirCache, String(entry.absolute)); + } + } + [CHECKFS2](entry, fullyDone) { + this[PRUNECACHE](entry); + const done = (er) => { + this[PRUNECACHE](entry); + fullyDone(er); + }; + const checkCwd = () => { + this[MKDIR](this.cwd, this.dmode, er => { + if (er) { + this[ONERROR](er, entry); + done(); + return; + } + this[CHECKED_CWD] = true; + start(); + }); + }; + const start = () => { + if (entry.absolute !== this.cwd) { + const parent = normalizeWindowsPath(path.dirname(String(entry.absolute))); + if (parent !== this.cwd) { + return this[MKDIR](parent, this.dmode, er => { + if (er) { + this[ONERROR](er, entry); + done(); + return; + } + afterMakeParent(); + }); + } + } + afterMakeParent(); + }; + const afterMakeParent = () => { + fs.lstat(String(entry.absolute), (lstatEr, st) => { + if (st && + (this.keep || + /* c8 ignore next */ + (this.newer && st.mtime > (entry.mtime ?? st.mtime)))) { + this[SKIP](entry); + done(); + return; + } + if (lstatEr || this[ISREUSABLE](entry, st)) { + return this[MAKEFS](null, entry, done); + } + if (st.isDirectory()) { + if (entry.type === 'Directory') { + const needChmod = this.chmod && + entry.mode && + (st.mode & 0o7777) !== entry.mode; + const afterChmod = (er) => this[MAKEFS](er ?? null, entry, done); + if (!needChmod) { + return afterChmod(); + } + return fs.chmod(String(entry.absolute), Number(entry.mode), afterChmod); + } + // Not a dir entry, have to remove it. + // NB: the only way to end up with an entry that is the cwd + // itself, in such a way that == does not detect, is a + // tricky windows absolute path with UNC or 8.3 parts (and + // preservePaths:true, or else it will have been stripped). + // In that case, the user has opted out of path protections + // explicitly, so if they blow away the cwd, c'est la vie. + if (entry.absolute !== this.cwd) { + return fs.rmdir(String(entry.absolute), (er) => this[MAKEFS](er ?? null, entry, done)); + } + } + // not a dir, and not reusable + // don't remove if the cwd, we want that error + if (entry.absolute === this.cwd) { + return this[MAKEFS](null, entry, done); + } + unlinkFile(String(entry.absolute), er => this[MAKEFS](er ?? 
null, entry, done)); + }); + }; + if (this[CHECKED_CWD]) { + start(); + } + else { + checkCwd(); + } + } + [MAKEFS](er, entry, done) { + if (er) { + this[ONERROR](er, entry); + done(); + return; + } + switch (entry.type) { + case 'File': + case 'OldFile': + case 'ContiguousFile': + return this[FILE](entry, done); + case 'Link': + return this[HARDLINK](entry, done); + case 'SymbolicLink': + return this[SYMLINK](entry, done); + case 'Directory': + case 'GNUDumpDir': + return this[DIRECTORY](entry, done); + } + } + [LINK](entry, linkpath, link, done) { + // XXX: get the type ('symlink' or 'junction') for windows + fs[link](linkpath, String(entry.absolute), er => { + if (er) { + this[ONERROR](er, entry); + } + else { + this[UNPEND](); + entry.resume(); + } + done(); + }); + } +} +const callSync = (fn) => { + try { + return [null, fn()]; + } + catch (er) { + return [er, null]; + } +}; +export class UnpackSync extends Unpack { + sync = true; + [MAKEFS](er, entry) { + return super[MAKEFS](er, entry, () => { }); + } + [CHECKFS](entry) { + this[PRUNECACHE](entry); + if (!this[CHECKED_CWD]) { + const er = this[MKDIR](this.cwd, this.dmode); + if (er) { + return this[ONERROR](er, entry); + } + this[CHECKED_CWD] = true; + } + // don't bother to make the parent if the current entry is the cwd, + // we've already checked it. + if (entry.absolute !== this.cwd) { + const parent = normalizeWindowsPath(path.dirname(String(entry.absolute))); + if (parent !== this.cwd) { + const mkParent = this[MKDIR](parent, this.dmode); + if (mkParent) { + return this[ONERROR](mkParent, entry); + } + } + } + const [lstatEr, st] = callSync(() => fs.lstatSync(String(entry.absolute))); + if (st && + (this.keep || + /* c8 ignore next */ + (this.newer && st.mtime > (entry.mtime ?? st.mtime)))) { + return this[SKIP](entry); + } + if (lstatEr || this[ISREUSABLE](entry, st)) { + return this[MAKEFS](null, entry); + } + if (st.isDirectory()) { + if (entry.type === 'Directory') { + const needChmod = this.chmod && + entry.mode && + (st.mode & 0o7777) !== entry.mode; + const [er] = needChmod ? + callSync(() => { + fs.chmodSync(String(entry.absolute), Number(entry.mode)); + }) + : []; + return this[MAKEFS](er, entry); + } + // not a dir entry, have to remove it + const [er] = callSync(() => fs.rmdirSync(String(entry.absolute))); + this[MAKEFS](er, entry); + } + // not a dir, and not reusable. + // don't remove if it's the cwd, since we want that error. + const [er] = entry.absolute === this.cwd ? + [] + : callSync(() => unlinkFileSync(String(entry.absolute))); + this[MAKEFS](er, entry); + } + [FILE](entry, done) { + const mode = typeof entry.mode === 'number' ? + entry.mode & 0o7777 + : this.fmode; + const oner = (er) => { + let closeError; + try { + fs.closeSync(fd); + } + catch (e) { + closeError = e; + } + if (er || closeError) { + this[ONERROR](er || closeError, entry); + } + done(); + }; + let fd; + try { + fd = fs.openSync(String(entry.absolute), getWriteFlag(entry.size), mode); + } + catch (er) { + return oner(er); + } + const tx = this.transform ? 
this.transform(entry) || entry : entry; + if (tx !== entry) { + tx.on('error', (er) => this[ONERROR](er, entry)); + entry.pipe(tx); + } + tx.on('data', (chunk) => { + try { + fs.writeSync(fd, chunk, 0, chunk.length); + } + catch (er) { + oner(er); + } + }); + tx.on('end', () => { + let er = null; + // try both, falling futimes back to utimes + // if either fails, handle the first error + if (entry.mtime && !this.noMtime) { + const atime = entry.atime || new Date(); + const mtime = entry.mtime; + try { + fs.futimesSync(fd, atime, mtime); + } + catch (futimeser) { + try { + fs.utimesSync(String(entry.absolute), atime, mtime); + } + catch (utimeser) { + er = futimeser; + } + } + } + if (this[DOCHOWN](entry)) { + const uid = this[UID](entry); + const gid = this[GID](entry); + try { + fs.fchownSync(fd, Number(uid), Number(gid)); + } + catch (fchowner) { + try { + fs.chownSync(String(entry.absolute), Number(uid), Number(gid)); + } + catch (chowner) { + er = er || fchowner; + } + } + } + oner(er); + }); + } + [DIRECTORY](entry, done) { + const mode = typeof entry.mode === 'number' ? + entry.mode & 0o7777 + : this.dmode; + const er = this[MKDIR](String(entry.absolute), mode); + if (er) { + this[ONERROR](er, entry); + done(); + return; + } + if (entry.mtime && !this.noMtime) { + try { + fs.utimesSync(String(entry.absolute), entry.atime || new Date(), entry.mtime); + /* c8 ignore next */ + } + catch (er) { } + } + if (this[DOCHOWN](entry)) { + try { + fs.chownSync(String(entry.absolute), Number(this[UID](entry)), Number(this[GID](entry))); + } + catch (er) { } + } + done(); + entry.resume(); + } + [MKDIR](dir, mode) { + try { + return mkdirSync(normalizeWindowsPath(dir), { + uid: this.uid, + gid: this.gid, + processUid: this.processUid, + processGid: this.processGid, + umask: this.processUmask, + preserve: this.preservePaths, + unlink: this.unlink, + cache: this.dirCache, + cwd: this.cwd, + mode: mode, + }); + } + catch (er) { + return er; + } + } + [LINK](entry, linkpath, link, done) { + const ls = `${link}Sync`; + try { + fs[ls](linkpath, String(entry.absolute)); + done(); + entry.resume(); + } + catch (er) { + return this[ONERROR](er, entry); + } + } +} +//# sourceMappingURL=unpack.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/update.js b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/update.js new file mode 100644 index 00000000000000..21398e9766663d --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/update.js @@ -0,0 +1,30 @@ +// tar -u +import { makeCommand } from './make-command.js'; +import { replace as r } from './replace.js'; +// just call tar.r with the filter and mtimeCache +export const update = makeCommand(r.syncFile, r.asyncFile, r.syncNoFile, r.asyncNoFile, (opt, entries = []) => { + r.validate?.(opt, entries); + mtimeFilter(opt); +}); +const mtimeFilter = (opt) => { + const filter = opt.filter; + if (!opt.mtimeCache) { + opt.mtimeCache = new Map(); + } + opt.filter = + filter ? + (path, stat) => filter(path, stat) && + !( + /* c8 ignore start */ + ((opt.mtimeCache?.get(path) ?? stat.mtime ?? 0) > + (stat.mtime ?? 0)) + /* c8 ignore stop */ + ) + : (path, stat) => !( + /* c8 ignore start */ + ((opt.mtimeCache?.get(path) ?? stat.mtime ?? 0) > + (stat.mtime ?? 
0)) + /* c8 ignore stop */ + ); +}; +//# sourceMappingURL=update.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/warn-method.js b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/warn-method.js new file mode 100644 index 00000000000000..13e798afefc85e --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/warn-method.js @@ -0,0 +1,27 @@ +export const warnMethod = (self, code, message, data = {}) => { + if (self.file) { + data.file = self.file; + } + if (self.cwd) { + data.cwd = self.cwd; + } + data.code = + (message instanceof Error && + message.code) || + code; + data.tarCode = code; + if (!self.strict && data.recoverable !== false) { + if (message instanceof Error) { + data = Object.assign(message, data); + message = message.message; + } + self.emit('warn', code, message, data); + } + else if (message instanceof Error) { + self.emit('error', Object.assign(message, data)); + } + else { + self.emit('error', Object.assign(new Error(`${code}: ${message}`), data)); + } +}; +//# sourceMappingURL=warn-method.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/winchars.js b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/winchars.js new file mode 100644 index 00000000000000..c41eb86d69a4bb --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/winchars.js @@ -0,0 +1,9 @@ +// When writing files on Windows, translate the characters to their +// 0xf000 higher-encoded versions. +const raw = ['|', '<', '>', '?', ':']; +const win = raw.map(char => String.fromCharCode(0xf000 + char.charCodeAt(0))); +const toWin = new Map(raw.map((char, i) => [char, win[i]])); +const toRaw = new Map(win.map((char, i) => [char, raw[i]])); +export const encode = (s) => raw.reduce((s, c) => s.split(c).join(toWin.get(c)), s); +export const decode = (s) => win.reduce((s, c) => s.split(c).join(toRaw.get(c)), s); +//# sourceMappingURL=winchars.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/write-entry.js b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/write-entry.js new file mode 100644 index 00000000000000..9028cd676b4cd2 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/tar/dist/esm/write-entry.js @@ -0,0 +1,657 @@ +import fs from 'fs'; +import { Minipass } from 'minipass'; +import path from 'path'; +import { Header } from './header.js'; +import { modeFix } from './mode-fix.js'; +import { normalizeWindowsPath } from './normalize-windows-path.js'; +import { dealias, } from './options.js'; +import { Pax } from './pax.js'; +import { stripAbsolutePath } from './strip-absolute-path.js'; +import { stripTrailingSlashes } from './strip-trailing-slashes.js'; +import { warnMethod, } from './warn-method.js'; +import * as winchars from './winchars.js'; +const prefixPath = (path, prefix) => { + if (!prefix) { + return normalizeWindowsPath(path); + } + path = normalizeWindowsPath(path).replace(/^\.(\/|$)/, ''); + return stripTrailingSlashes(prefix) + '/' + path; +}; +const maxReadSize = 16 * 1024 * 1024; +const PROCESS = Symbol('process'); +const FILE = Symbol('file'); +const DIRECTORY = Symbol('directory'); +const SYMLINK = Symbol('symlink'); +const HARDLINK = Symbol('hardlink'); +const HEADER = Symbol('header'); +const READ = Symbol('read'); +const LSTAT = Symbol('lstat'); +const ONLSTAT = Symbol('onlstat'); +const ONREAD = Symbol('onread'); +const ONREADLINK = Symbol('onreadlink'); 
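+// Module-scoped symbols like these serve as quasi-private method keys: +// each Symbol() is unique and none are exported, so WriteEntrySync below +// can override e.g. [LSTAT] or [READ], while outside code cannot reach +// these methods by name.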
+const OPENFILE = Symbol('openfile'); +const ONOPENFILE = Symbol('onopenfile'); +const CLOSE = Symbol('close'); +const MODE = Symbol('mode'); +const AWAITDRAIN = Symbol('awaitDrain'); +const ONDRAIN = Symbol('ondrain'); +const PREFIX = Symbol('prefix'); +export class WriteEntry extends Minipass { + path; + portable; + myuid = (process.getuid && process.getuid()) || 0; + // until node has builtin pwnam functions, this'll have to do + myuser = process.env.USER || ''; + maxReadSize; + linkCache; + statCache; + preservePaths; + cwd; + strict; + mtime; + noPax; + noMtime; + prefix; + fd; + blockLen = 0; + blockRemain = 0; + buf; + pos = 0; + remain = 0; + length = 0; + offset = 0; + win32; + absolute; + header; + type; + linkpath; + stat; + onWriteEntry; + #hadError = false; + constructor(p, opt_ = {}) { + const opt = dealias(opt_); + super(); + this.path = normalizeWindowsPath(p); + // suppress atime, ctime, uid, gid, uname, gname + this.portable = !!opt.portable; + this.maxReadSize = opt.maxReadSize || maxReadSize; + this.linkCache = opt.linkCache || new Map(); + this.statCache = opt.statCache || new Map(); + this.preservePaths = !!opt.preservePaths; + this.cwd = normalizeWindowsPath(opt.cwd || process.cwd()); + this.strict = !!opt.strict; + this.noPax = !!opt.noPax; + this.noMtime = !!opt.noMtime; + this.mtime = opt.mtime; + this.prefix = + opt.prefix ? normalizeWindowsPath(opt.prefix) : undefined; + this.onWriteEntry = opt.onWriteEntry; + if (typeof opt.onwarn === 'function') { + this.on('warn', opt.onwarn); + } + let pathWarn = false; + if (!this.preservePaths) { + const [root, stripped] = stripAbsolutePath(this.path); + if (root && typeof stripped === 'string') { + this.path = stripped; + pathWarn = root; + } + } + this.win32 = !!opt.win32 || process.platform === 'win32'; + if (this.win32) { + // force the \ to / normalization, since we might not *actually* + // be on windows, but want \ to be considered a path separator. + this.path = winchars.decode(this.path.replace(/\\/g, '/')); + p = p.replace(/\\/g, '/'); + } + this.absolute = normalizeWindowsPath(opt.absolute || path.resolve(this.cwd, p)); + if (this.path === '') { + this.path = './'; + } + if (pathWarn) { + this.warn('TAR_ENTRY_INFO', `stripping ${pathWarn} from absolute path`, { + entry: this, + path: pathWarn + this.path, + }); + } + const cs = this.statCache.get(this.absolute); + if (cs) { + this[ONLSTAT](cs); + } + else { + this[LSTAT](); + } + } + warn(code, message, data = {}) { + return warnMethod(this, code, message, data); + } + emit(ev, ...data) { + if (ev === 'error') { + this.#hadError = true; + } + return super.emit(ev, ...data); + } + [LSTAT]() { + fs.lstat(this.absolute, (er, stat) => { + if (er) { + return this.emit('error', er); + } + this[ONLSTAT](stat); + }); + } + [ONLSTAT](stat) { + this.statCache.set(this.absolute, stat); + this.stat = stat; + if (!stat.isFile()) { + stat.size = 0; + } + this.type = getType(stat); + this.emit('stat', stat); + this[PROCESS](); + } + [PROCESS]() { + switch (this.type) { + case 'File': + return this[FILE](); + case 'Directory': + return this[DIRECTORY](); + case 'SymbolicLink': + return this[SYMLINK](); + // unsupported types are ignored. 
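+ // block/character devices, FIFOs, and sockets are typed 'Unsupported' + // by getType below, so they end here without a header being written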
+ default: + return this.end(); + } + } + [MODE](mode) { + return modeFix(mode, this.type === 'Directory', this.portable); + } + [PREFIX](path) { + return prefixPath(path, this.prefix); + } + [HEADER]() { + /* c8 ignore start */ + if (!this.stat) { + throw new Error('cannot write header before stat'); + } + /* c8 ignore stop */ + if (this.type === 'Directory' && this.portable) { + this.noMtime = true; + } + this.onWriteEntry?.(this); + this.header = new Header({ + path: this[PREFIX](this.path), + // only apply the prefix to hard links. + linkpath: this.type === 'Link' && this.linkpath !== undefined ? + this[PREFIX](this.linkpath) + : this.linkpath, + // only the permissions and setuid/setgid/sticky bitflags + // not the higher-order bits that specify file type + mode: this[MODE](this.stat.mode), + uid: this.portable ? undefined : this.stat.uid, + gid: this.portable ? undefined : this.stat.gid, + size: this.stat.size, + mtime: this.noMtime ? undefined : this.mtime || this.stat.mtime, + /* c8 ignore next */ + type: this.type === 'Unsupported' ? undefined : this.type, + uname: this.portable ? undefined + : this.stat.uid === this.myuid ? this.myuser + : '', + atime: this.portable ? undefined : this.stat.atime, + ctime: this.portable ? undefined : this.stat.ctime, + }); + if (this.header.encode() && !this.noPax) { + super.write(new Pax({ + atime: this.portable ? undefined : this.header.atime, + ctime: this.portable ? undefined : this.header.ctime, + gid: this.portable ? undefined : this.header.gid, + mtime: this.noMtime ? undefined : (this.mtime || this.header.mtime), + path: this[PREFIX](this.path), + linkpath: this.type === 'Link' && this.linkpath !== undefined ? + this[PREFIX](this.linkpath) + : this.linkpath, + size: this.header.size, + uid: this.portable ? undefined : this.header.uid, + uname: this.portable ? undefined : this.header.uname, + dev: this.portable ? undefined : this.stat.dev, + ino: this.portable ? undefined : this.stat.ino, + nlink: this.portable ? 
undefined : this.stat.nlink, + }).encode()); + } + const block = this.header?.block; + /* c8 ignore start */ + if (!block) { + throw new Error('failed to encode header'); + } + /* c8 ignore stop */ + super.write(block); + } + [DIRECTORY]() { + /* c8 ignore start */ + if (!this.stat) { + throw new Error('cannot create directory entry without stat'); + } + /* c8 ignore stop */ + if (this.path.slice(-1) !== '/') { + this.path += '/'; + } + this.stat.size = 0; + this[HEADER](); + this.end(); + } + [SYMLINK]() { + fs.readlink(this.absolute, (er, linkpath) => { + if (er) { + return this.emit('error', er); + } + this[ONREADLINK](linkpath); + }); + } + [ONREADLINK](linkpath) { + this.linkpath = normalizeWindowsPath(linkpath); + this[HEADER](); + this.end(); + } + [HARDLINK](linkpath) { + /* c8 ignore start */ + if (!this.stat) { + throw new Error('cannot create link entry without stat'); + } + /* c8 ignore stop */ + this.type = 'Link'; + this.linkpath = normalizeWindowsPath(path.relative(this.cwd, linkpath)); + this.stat.size = 0; + this[HEADER](); + this.end(); + } + [FILE]() { + /* c8 ignore start */ + if (!this.stat) { + throw new Error('cannot create file entry without stat'); + } + /* c8 ignore stop */ + if (this.stat.nlink > 1) { + const linkKey = `${this.stat.dev}:${this.stat.ino}`; + const linkpath = this.linkCache.get(linkKey); + if (linkpath?.indexOf(this.cwd) === 0) { + return this[HARDLINK](linkpath); + } + this.linkCache.set(linkKey, this.absolute); + } + this[HEADER](); + if (this.stat.size === 0) { + return this.end(); + } + this[OPENFILE](); + } + [OPENFILE]() { + fs.open(this.absolute, 'r', (er, fd) => { + if (er) { + return this.emit('error', er); + } + this[ONOPENFILE](fd); + }); + } + [ONOPENFILE](fd) { + this.fd = fd; + if (this.#hadError) { + return this[CLOSE](); + } + /* c8 ignore start */ + if (!this.stat) { + throw new Error('should stat before calling onopenfile'); + } + /* c8 ignore start */ + this.blockLen = 512 * Math.ceil(this.stat.size / 512); + this.blockRemain = this.blockLen; + const bufLen = Math.min(this.blockLen, this.maxReadSize); + this.buf = Buffer.allocUnsafe(bufLen); + this.offset = 0; + this.pos = 0; + this.remain = this.stat.size; + this.length = this.buf.length; + this[READ](); + } + [READ]() { + const { fd, buf, offset, length, pos } = this; + if (fd === undefined || buf === undefined) { + throw new Error('cannot read file without first opening'); + } + fs.read(fd, buf, offset, length, pos, (er, bytesRead) => { + if (er) { + // ignoring the error from close(2) is a bad practice, but at + // this point we already have an error, don't need another one + return this[CLOSE](() => this.emit('error', er)); + } + this[ONREAD](bytesRead); + }); + } + /* c8 ignore start */ + [CLOSE](cb = () => { }) { + /* c8 ignore stop */ + if (this.fd !== undefined) + fs.close(this.fd, cb); + } + [ONREAD](bytesRead) { + if (bytesRead <= 0 && this.remain > 0) { + const er = Object.assign(new Error('encountered unexpected EOF'), { + path: this.absolute, + syscall: 'read', + code: 'EOF', + }); + return this[CLOSE](() => this.emit('error', er)); + } + if (bytesRead > this.remain) { + const er = Object.assign(new Error('did not encounter expected EOF'), { + path: this.absolute, + syscall: 'read', + code: 'EOF', + }); + return this[CLOSE](() => this.emit('error', er)); + } + /* c8 ignore start */ + if (!this.buf) { + throw new Error('should have created buffer prior to reading'); + } + /* c8 ignore stop */ + // null out the rest of the buffer, if we could fit the block padding + 
// at the end of this loop, we've incremented bytesRead and this.remain + // to be incremented up to the blockRemain level, as if we had expected + // to get a null-padded file, and read it until the end. then we will + // decrement both remain and blockRemain by bytesRead, and know that we + // reached the expected EOF, without any null buffer to append. + if (bytesRead === this.remain) { + for (let i = bytesRead; i < this.length && bytesRead < this.blockRemain; i++) { + this.buf[i + this.offset] = 0; + bytesRead++; + this.remain++; + } + } + const chunk = this.offset === 0 && bytesRead === this.buf.length ? + this.buf + : this.buf.subarray(this.offset, this.offset + bytesRead); + const flushed = this.write(chunk); + if (!flushed) { + this[AWAITDRAIN](() => this[ONDRAIN]()); + } + else { + this[ONDRAIN](); + } + } + [AWAITDRAIN](cb) { + this.once('drain', cb); + } + write(chunk, encoding, cb) { + /* c8 ignore start - just junk to comply with NodeJS.WritableStream */ + if (typeof encoding === 'function') { + cb = encoding; + encoding = undefined; + } + if (typeof chunk === 'string') { + chunk = Buffer.from(chunk, typeof encoding === 'string' ? encoding : 'utf8'); + } + /* c8 ignore stop */ + if (this.blockRemain < chunk.length) { + const er = Object.assign(new Error('writing more data than expected'), { + path: this.absolute, + }); + return this.emit('error', er); + } + this.remain -= chunk.length; + this.blockRemain -= chunk.length; + this.pos += chunk.length; + this.offset += chunk.length; + return super.write(chunk, null, cb); + } + [ONDRAIN]() { + if (!this.remain) { + if (this.blockRemain) { + super.write(Buffer.alloc(this.blockRemain)); + } + return this[CLOSE](er => er ? this.emit('error', er) : this.end()); + } + /* c8 ignore start */ + if (!this.buf) { + throw new Error('buffer lost somehow in ONDRAIN'); + } + /* c8 ignore stop */ + if (this.offset >= this.length) { + // if we only have a smaller bit left to read, alloc a smaller buffer + // otherwise, keep it the same length it was before. 
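+ // the Math.min below means the buffer can only shrink on reallocation, + // keeping peak memory bounded by the initial min(blockLen, maxReadSize)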
+ this.buf = Buffer.allocUnsafe(Math.min(this.blockRemain, this.buf.length)); + this.offset = 0; + } + this.length = this.buf.length - this.offset; + this[READ](); + } +} +export class WriteEntrySync extends WriteEntry { + sync = true; + [LSTAT]() { + this[ONLSTAT](fs.lstatSync(this.absolute)); + } + [SYMLINK]() { + this[ONREADLINK](fs.readlinkSync(this.absolute)); + } + [OPENFILE]() { + this[ONOPENFILE](fs.openSync(this.absolute, 'r')); + } + [READ]() { + let threw = true; + try { + const { fd, buf, offset, length, pos } = this; + /* c8 ignore start */ + if (fd === undefined || buf === undefined) { + throw new Error('fd and buf must be set in READ method'); + } + /* c8 ignore stop */ + const bytesRead = fs.readSync(fd, buf, offset, length, pos); + this[ONREAD](bytesRead); + threw = false; + } + finally { + // ignoring the error from close(2) is a bad practice, but at + // this point we already have an error, don't need another one + if (threw) { + try { + this[CLOSE](() => { }); + } + catch (er) { } + } + } + } + [AWAITDRAIN](cb) { + cb(); + } + /* c8 ignore start */ + [CLOSE](cb = () => { }) { + /* c8 ignore stop */ + if (this.fd !== undefined) + fs.closeSync(this.fd); + cb(); + } +} +export class WriteEntryTar extends Minipass { + blockLen = 0; + blockRemain = 0; + buf = 0; + pos = 0; + remain = 0; + length = 0; + preservePaths; + portable; + strict; + noPax; + noMtime; + readEntry; + type; + prefix; + path; + mode; + uid; + gid; + uname; + gname; + header; + mtime; + atime; + ctime; + linkpath; + size; + onWriteEntry; + warn(code, message, data = {}) { + return warnMethod(this, code, message, data); + } + constructor(readEntry, opt_ = {}) { + const opt = dealias(opt_); + super(); + this.preservePaths = !!opt.preservePaths; + this.portable = !!opt.portable; + this.strict = !!opt.strict; + this.noPax = !!opt.noPax; + this.noMtime = !!opt.noMtime; + this.onWriteEntry = opt.onWriteEntry; + this.readEntry = readEntry; + const { type } = readEntry; + /* c8 ignore start */ + if (type === 'Unsupported') { + throw new Error('writing entry that should be ignored'); + } + /* c8 ignore stop */ + this.type = type; + if (this.type === 'Directory' && this.portable) { + this.noMtime = true; + } + this.prefix = opt.prefix; + this.path = normalizeWindowsPath(readEntry.path); + this.mode = + readEntry.mode !== undefined ? + this[MODE](readEntry.mode) + : undefined; + this.uid = this.portable ? undefined : readEntry.uid; + this.gid = this.portable ? undefined : readEntry.gid; + this.uname = this.portable ? undefined : readEntry.uname; + this.gname = this.portable ? undefined : readEntry.gname; + this.size = readEntry.size; + this.mtime = + this.noMtime ? undefined : opt.mtime || readEntry.mtime; + this.atime = this.portable ? undefined : readEntry.atime; + this.ctime = this.portable ? undefined : readEntry.ctime; + this.linkpath = + readEntry.linkpath !== undefined ? + normalizeWindowsPath(readEntry.linkpath) + : undefined; + if (typeof opt.onwarn === 'function') { + this.on('warn', opt.onwarn); + } + let pathWarn = false; + if (!this.preservePaths) { + const [root, stripped] = stripAbsolutePath(this.path); + if (root && typeof stripped === 'string') { + this.path = stripped; + pathWarn = root; + } + } + this.remain = readEntry.size; + this.blockRemain = readEntry.startBlockSize; + this.onWriteEntry?.(this); + this.header = new Header({ + path: this[PREFIX](this.path), + linkpath: this.type === 'Link' && this.linkpath !== undefined ? 
+ this[PREFIX](this.linkpath) + : this.linkpath, + // only the permissions and setuid/setgid/sticky bitflags + // not the higher-order bits that specify file type + mode: this.mode, + uid: this.portable ? undefined : this.uid, + gid: this.portable ? undefined : this.gid, + size: this.size, + mtime: this.noMtime ? undefined : this.mtime, + type: this.type, + uname: this.portable ? undefined : this.uname, + atime: this.portable ? undefined : this.atime, + ctime: this.portable ? undefined : this.ctime, + }); + if (pathWarn) { + this.warn('TAR_ENTRY_INFO', `stripping ${pathWarn} from absolute path`, { + entry: this, + path: pathWarn + this.path, + }); + } + if (this.header.encode() && !this.noPax) { + super.write(new Pax({ + atime: this.portable ? undefined : this.atime, + ctime: this.portable ? undefined : this.ctime, + gid: this.portable ? undefined : this.gid, + mtime: this.noMtime ? undefined : this.mtime, + path: this[PREFIX](this.path), + linkpath: this.type === 'Link' && this.linkpath !== undefined ? + this[PREFIX](this.linkpath) + : this.linkpath, + size: this.size, + uid: this.portable ? undefined : this.uid, + uname: this.portable ? undefined : this.uname, + dev: this.portable ? undefined : this.readEntry.dev, + ino: this.portable ? undefined : this.readEntry.ino, + nlink: this.portable ? undefined : this.readEntry.nlink, + }).encode()); + } + const b = this.header?.block; + /* c8 ignore start */ + if (!b) + throw new Error('failed to encode header'); + /* c8 ignore stop */ + super.write(b); + readEntry.pipe(this); + } + [PREFIX](path) { + return prefixPath(path, this.prefix); + } + [MODE](mode) { + return modeFix(mode, this.type === 'Directory', this.portable); + } + write(chunk, encoding, cb) { + /* c8 ignore start - just junk to comply with NodeJS.WritableStream */ + if (typeof encoding === 'function') { + cb = encoding; + encoding = undefined; + } + if (typeof chunk === 'string') { + chunk = Buffer.from(chunk, typeof encoding === 'string' ? encoding : 'utf8'); + } + /* c8 ignore stop */ + const writeLen = chunk.length; + if (writeLen > this.blockRemain) { + throw new Error('writing more to entry than is appropriate'); + } + this.blockRemain -= writeLen; + return super.write(chunk, cb); + } + end(chunk, encoding, cb) { + if (this.blockRemain) { + super.write(Buffer.alloc(this.blockRemain)); + } + /* c8 ignore start - just junk to comply with NodeJS.WritableStream */ + if (typeof chunk === 'function') { + cb = chunk; + encoding = undefined; + chunk = undefined; + } + if (typeof encoding === 'function') { + cb = encoding; + encoding = undefined; + } + if (typeof chunk === 'string') { + chunk = Buffer.from(chunk, encoding ?? 'utf8'); + } + if (cb) + this.once('finish', cb); + chunk ? super.end(chunk, cb) : super.end(cb); + /* c8 ignore stop */ + return this; + } +} +const getType = (stat) => stat.isFile() ? 'File' + : stat.isDirectory() ? 'Directory' + : stat.isSymbolicLink() ? 'SymbolicLink' + : 'Unsupported'; +//# sourceMappingURL=write-entry.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/tar/package.json b/deps/npm/node_modules/node-gyp/node_modules/tar/package.json new file mode 100644 index 00000000000000..0283103ee9eaf9 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/tar/package.json @@ -0,0 +1,325 @@ +{ + "author": "Isaac Z. 
Schlueter", + "name": "tar", + "description": "tar for node", + "version": "7.4.3", + "repository": { + "type": "git", + "url": "https://github.com/isaacs/node-tar.git" + }, + "scripts": { + "genparse": "node scripts/generate-parse-fixtures.js", + "snap": "tap", + "test": "tap", + "pretest": "npm run prepare", + "presnap": "npm run prepare", + "prepare": "tshy", + "preversion": "npm test", + "postversion": "npm publish", + "prepublishOnly": "git push origin --follow-tags", + "format": "prettier --write . --log-level warn", + "typedoc": "typedoc --tsconfig .tshy/esm.json ./src/*.ts" + }, + "dependencies": { + "@isaacs/fs-minipass": "^4.0.0", + "chownr": "^3.0.0", + "minipass": "^7.1.2", + "minizlib": "^3.0.1", + "mkdirp": "^3.0.1", + "yallist": "^5.0.0" + }, + "devDependencies": { + "chmodr": "^1.2.0", + "end-of-stream": "^1.4.3", + "events-to-array": "^2.0.3", + "mutate-fs": "^2.1.1", + "nock": "^13.5.4", + "prettier": "^3.2.5", + "rimraf": "^5.0.5", + "tap": "^18.7.2", + "tshy": "^1.13.1", + "typedoc": "^0.25.13" + }, + "license": "ISC", + "engines": { + "node": ">=18" + }, + "files": [ + "dist" + ], + "tap": { + "coverage-map": "map.js", + "timeout": 0, + "typecheck": true + }, + "prettier": { + "experimentalTernaries": true, + "semi": false, + "printWidth": 70, + "tabWidth": 2, + "useTabs": false, + "singleQuote": true, + "jsxSingleQuote": false, + "bracketSameLine": true, + "arrowParens": "avoid", + "endOfLine": "lf" + }, + "tshy": { + "exports": { + "./package.json": "./package.json", + ".": "./src/index.ts", + "./c": "./src/create.ts", + "./create": "./src/create.ts", + "./replace": "./src/create.ts", + "./r": "./src/create.ts", + "./list": "./src/list.ts", + "./t": "./src/list.ts", + "./update": "./src/update.ts", + "./u": "./src/update.ts", + "./extract": "./src/extract.ts", + "./x": "./src/extract.ts", + "./pack": "./src/pack.ts", + "./unpack": "./src/unpack.ts", + "./parse": "./src/parse.ts", + "./read-entry": "./src/read-entry.ts", + "./write-entry": "./src/write-entry.ts", + "./header": "./src/header.ts", + "./pax": "./src/pax.ts", + "./types": "./src/types.ts" + } + }, + "exports": { + "./package.json": "./package.json", + ".": { + "import": { + "source": "./src/index.ts", + "types": "./dist/esm/index.d.ts", + "default": "./dist/esm/index.js" + }, + "require": { + "source": "./src/index.ts", + "types": "./dist/commonjs/index.d.ts", + "default": "./dist/commonjs/index.js" + } + }, + "./c": { + "import": { + "source": "./src/create.ts", + "types": "./dist/esm/create.d.ts", + "default": "./dist/esm/create.js" + }, + "require": { + "source": "./src/create.ts", + "types": "./dist/commonjs/create.d.ts", + "default": "./dist/commonjs/create.js" + } + }, + "./create": { + "import": { + "source": "./src/create.ts", + "types": "./dist/esm/create.d.ts", + "default": "./dist/esm/create.js" + }, + "require": { + "source": "./src/create.ts", + "types": "./dist/commonjs/create.d.ts", + "default": "./dist/commonjs/create.js" + } + }, + "./replace": { + "import": { + "source": "./src/create.ts", + "types": "./dist/esm/create.d.ts", + "default": "./dist/esm/create.js" + }, + "require": { + "source": "./src/create.ts", + "types": "./dist/commonjs/create.d.ts", + "default": "./dist/commonjs/create.js" + } + }, + "./r": { + "import": { + "source": "./src/create.ts", + "types": "./dist/esm/create.d.ts", + "default": "./dist/esm/create.js" + }, + "require": { + "source": "./src/create.ts", + "types": "./dist/commonjs/create.d.ts", + "default": "./dist/commonjs/create.js" + } + }, + "./list": { + 
"import": { + "source": "./src/list.ts", + "types": "./dist/esm/list.d.ts", + "default": "./dist/esm/list.js" + }, + "require": { + "source": "./src/list.ts", + "types": "./dist/commonjs/list.d.ts", + "default": "./dist/commonjs/list.js" + } + }, + "./t": { + "import": { + "source": "./src/list.ts", + "types": "./dist/esm/list.d.ts", + "default": "./dist/esm/list.js" + }, + "require": { + "source": "./src/list.ts", + "types": "./dist/commonjs/list.d.ts", + "default": "./dist/commonjs/list.js" + } + }, + "./update": { + "import": { + "source": "./src/update.ts", + "types": "./dist/esm/update.d.ts", + "default": "./dist/esm/update.js" + }, + "require": { + "source": "./src/update.ts", + "types": "./dist/commonjs/update.d.ts", + "default": "./dist/commonjs/update.js" + } + }, + "./u": { + "import": { + "source": "./src/update.ts", + "types": "./dist/esm/update.d.ts", + "default": "./dist/esm/update.js" + }, + "require": { + "source": "./src/update.ts", + "types": "./dist/commonjs/update.d.ts", + "default": "./dist/commonjs/update.js" + } + }, + "./extract": { + "import": { + "source": "./src/extract.ts", + "types": "./dist/esm/extract.d.ts", + "default": "./dist/esm/extract.js" + }, + "require": { + "source": "./src/extract.ts", + "types": "./dist/commonjs/extract.d.ts", + "default": "./dist/commonjs/extract.js" + } + }, + "./x": { + "import": { + "source": "./src/extract.ts", + "types": "./dist/esm/extract.d.ts", + "default": "./dist/esm/extract.js" + }, + "require": { + "source": "./src/extract.ts", + "types": "./dist/commonjs/extract.d.ts", + "default": "./dist/commonjs/extract.js" + } + }, + "./pack": { + "import": { + "source": "./src/pack.ts", + "types": "./dist/esm/pack.d.ts", + "default": "./dist/esm/pack.js" + }, + "require": { + "source": "./src/pack.ts", + "types": "./dist/commonjs/pack.d.ts", + "default": "./dist/commonjs/pack.js" + } + }, + "./unpack": { + "import": { + "source": "./src/unpack.ts", + "types": "./dist/esm/unpack.d.ts", + "default": "./dist/esm/unpack.js" + }, + "require": { + "source": "./src/unpack.ts", + "types": "./dist/commonjs/unpack.d.ts", + "default": "./dist/commonjs/unpack.js" + } + }, + "./parse": { + "import": { + "source": "./src/parse.ts", + "types": "./dist/esm/parse.d.ts", + "default": "./dist/esm/parse.js" + }, + "require": { + "source": "./src/parse.ts", + "types": "./dist/commonjs/parse.d.ts", + "default": "./dist/commonjs/parse.js" + } + }, + "./read-entry": { + "import": { + "source": "./src/read-entry.ts", + "types": "./dist/esm/read-entry.d.ts", + "default": "./dist/esm/read-entry.js" + }, + "require": { + "source": "./src/read-entry.ts", + "types": "./dist/commonjs/read-entry.d.ts", + "default": "./dist/commonjs/read-entry.js" + } + }, + "./write-entry": { + "import": { + "source": "./src/write-entry.ts", + "types": "./dist/esm/write-entry.d.ts", + "default": "./dist/esm/write-entry.js" + }, + "require": { + "source": "./src/write-entry.ts", + "types": "./dist/commonjs/write-entry.d.ts", + "default": "./dist/commonjs/write-entry.js" + } + }, + "./header": { + "import": { + "source": "./src/header.ts", + "types": "./dist/esm/header.d.ts", + "default": "./dist/esm/header.js" + }, + "require": { + "source": "./src/header.ts", + "types": "./dist/commonjs/header.d.ts", + "default": "./dist/commonjs/header.js" + } + }, + "./pax": { + "import": { + "source": "./src/pax.ts", + "types": "./dist/esm/pax.d.ts", + "default": "./dist/esm/pax.js" + }, + "require": { + "source": "./src/pax.ts", + "types": "./dist/commonjs/pax.d.ts", + "default": 
"./dist/commonjs/pax.js" + } + }, + "./types": { + "import": { + "source": "./src/types.ts", + "types": "./dist/esm/types.d.ts", + "default": "./dist/esm/types.js" + }, + "require": { + "source": "./src/types.ts", + "types": "./dist/commonjs/types.d.ts", + "default": "./dist/commonjs/types.js" + } + } + }, + "type": "module", + "main": "./dist/commonjs/index.js", + "types": "./dist/commonjs/index.d.ts" +} diff --git a/deps/npm/node_modules/node-gyp/node_modules/unique-filename/LICENSE b/deps/npm/node_modules/node-gyp/node_modules/unique-filename/LICENSE deleted file mode 100644 index 69619c125ea7ef..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/unique-filename/LICENSE +++ /dev/null @@ -1,5 +0,0 @@ -Copyright npm, Inc - -Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/deps/npm/node_modules/node-gyp/node_modules/unique-filename/lib/index.js b/deps/npm/node_modules/node-gyp/node_modules/unique-filename/lib/index.js deleted file mode 100644 index d067d2e709809a..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/unique-filename/lib/index.js +++ /dev/null @@ -1,7 +0,0 @@ -var path = require('path') - -var uniqueSlug = require('unique-slug') - -module.exports = function (filepath, prefix, uniq) { - return path.join(filepath, (prefix ? prefix + '-' : '') + uniqueSlug(uniq)) -} diff --git a/deps/npm/node_modules/node-gyp/node_modules/unique-filename/package.json b/deps/npm/node_modules/node-gyp/node_modules/unique-filename/package.json deleted file mode 100644 index b2fbf0666489a6..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/unique-filename/package.json +++ /dev/null @@ -1,51 +0,0 @@ -{ - "name": "unique-filename", - "version": "3.0.0", - "description": "Generate a unique filename for use in temporary directories or caches.", - "main": "lib/index.js", - "scripts": { - "test": "tap", - "lint": "eslint \"**/*.js\"", - "postlint": "template-oss-check", - "template-oss-apply": "template-oss-apply --force", - "lintfix": "npm run lint -- --fix", - "snap": "tap", - "posttest": "npm run lint" - }, - "repository": { - "type": "git", - "url": "https://github.com/npm/unique-filename.git" - }, - "keywords": [], - "author": "GitHub Inc.", - "license": "ISC", - "bugs": { - "url": "https://github.com/iarna/unique-filename/issues" - }, - "homepage": "https://github.com/iarna/unique-filename", - "devDependencies": { - "@npmcli/eslint-config": "^4.0.0", - "@npmcli/template-oss": "4.5.1", - "tap": "^16.3.0" - }, - "dependencies": { - "unique-slug": "^4.0.0" - }, - "files": [ - "bin/", - "lib/" - ], - "engines": { - "node": "^14.17.0 || ^16.13.0 || >=18.0.0" - }, - "templateOSS": { - "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. 
Edits may be overwritten.", - "version": "4.5.1" - }, - "tap": { - "nyc-arg": [ - "--exclude", - "tap-snapshots/**" - ] - } -} diff --git a/deps/npm/node_modules/node-gyp/node_modules/unique-slug/LICENSE b/deps/npm/node_modules/node-gyp/node_modules/unique-slug/LICENSE deleted file mode 100644 index 7953647e7760b8..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/unique-slug/LICENSE +++ /dev/null @@ -1,15 +0,0 @@ -The ISC License - -Copyright npm, Inc - -Permission to use, copy, modify, and/or distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR -IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/deps/npm/node_modules/node-gyp/node_modules/unique-slug/lib/index.js b/deps/npm/node_modules/node-gyp/node_modules/unique-slug/lib/index.js deleted file mode 100644 index 1bac84d95d7307..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/unique-slug/lib/index.js +++ /dev/null @@ -1,11 +0,0 @@ -'use strict' -var MurmurHash3 = require('imurmurhash') - -module.exports = function (uniq) { - if (uniq) { - var hash = new MurmurHash3(uniq) - return ('00000000' + hash.result().toString(16)).slice(-8) - } else { - return (Math.random().toString(16) + '0000000').slice(2, 10) - } -} diff --git a/deps/npm/node_modules/node-gyp/node_modules/unique-slug/package.json b/deps/npm/node_modules/node-gyp/node_modules/unique-slug/package.json deleted file mode 100644 index 33732cdbb42859..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/unique-slug/package.json +++ /dev/null @@ -1,47 +0,0 @@ -{ - "name": "unique-slug", - "version": "4.0.0", - "description": "Generate a unique character string suitible for use in files and URLs.", - "main": "lib/index.js", - "scripts": { - "test": "tap", - "lint": "eslint \"**/*.js\"", - "postlint": "template-oss-check", - "template-oss-apply": "template-oss-apply --force", - "lintfix": "npm run lint -- --fix", - "snap": "tap", - "posttest": "npm run lint" - }, - "keywords": [], - "author": "GitHub Inc.", - "license": "ISC", - "devDependencies": { - "@npmcli/eslint-config": "^3.1.0", - "@npmcli/template-oss": "4.5.1", - "tap": "^16.3.0" - }, - "repository": { - "type": "git", - "url": "https://github.com/npm/unique-slug.git" - }, - "dependencies": { - "imurmurhash": "^0.1.4" - }, - "files": [ - "bin/", - "lib/" - ], - "engines": { - "node": "^14.17.0 || ^16.13.0 || >=18.0.0" - }, - "templateOSS": { - "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. Edits may be overwritten.", - "version": "4.5.1" - }, - "tap": { - "nyc-arg": [ - "--exclude", - "tap-snapshots/**" - ] - } -} diff --git a/deps/npm/node_modules/node-gyp/node_modules/which/LICENSE b/deps/npm/node_modules/node-gyp/node_modules/which/LICENSE deleted file mode 100644 index 19129e315fe593..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/which/LICENSE +++ /dev/null @@ -1,15 +0,0 @@ -The ISC License - -Copyright (c) Isaac Z. 
Schlueter and Contributors - -Permission to use, copy, modify, and/or distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR -IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/deps/npm/node_modules/node-gyp/node_modules/which/README.md b/deps/npm/node_modules/node-gyp/node_modules/which/README.md deleted file mode 100644 index 942bf1e0932b89..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/which/README.md +++ /dev/null @@ -1,51 +0,0 @@ -# which - -Like the unix `which` utility. - -Finds the first instance of a specified executable in the PATH -environment variable. Does not cache the results, so `hash -r` is not -needed when the PATH changes. - -## USAGE - -```javascript -const which = require('which') - -// async usage -// rejects if not found -const resolved = await which('node') - -// if nothrow option is used, returns null if not found -const resolvedOrNull = await which('node', { nothrow: true }) - -// sync usage -// throws if not found -const resolved = which.sync('node') - -// if nothrow option is used, returns null if not found -const resolvedOrNull = which.sync('node', { nothrow: true }) - -// Pass options to override the PATH and PATHEXT environment vars. -await which('node', { path: someOtherPath, pathExt: somePathExt }) -``` - -## CLI USAGE - -Just like the BSD `which(1)` binary but using `node-which`. - -``` -usage: node-which [-as] program ... -``` - -You can learn more about why the binary is `node-which` and not `which` -[here](https://github.com/npm/node-which/pull/67) - -## OPTIONS - -You may pass an options object as the second argument. - -- `path`: Use instead of the `PATH` environment variable. -- `pathExt`: Use instead of the `PATHEXT` environment variable. -- `all`: Return all matches, instead of just the first one. Note that - this means the function returns an array of strings instead of a - single string. 
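The `lib/index.js` being removed below is what implements this lookup: it builds candidate paths from `PATH` (plus `PATHEXT` on Windows) and returns the first executable hit. A minimal sketch of the same idea, using only Node built-ins (`findExecutableSync` is an illustrative name, not this package's API):

```javascript
// Rough sketch of a `which`-style PATH lookup (illustrative only; the
// real module below also handles quoted path parts, relative prefixes,
// the Windows cwd-first rule, and an async variant).
const { join, delimiter } = require('path')
const { accessSync, constants } = require('fs')

const findExecutableSync = (cmd, env = process.env) => {
  const dirs = (env.PATH || '').split(delimiter)
  // PATHEXT lists the suffixes Windows treats as executable
  const exts = process.platform === 'win32'
    ? (env.PATHEXT || '.EXE;.CMD;.BAT;.COM').split(delimiter)
    : ['']
  for (const dir of dirs) {
    for (const ext of exts) {
      const candidate = join(dir, cmd + ext)
      try {
        // throws when the candidate is missing or not executable
        accessSync(candidate, constants.X_OK)
        return candidate
      } catch {}
    }
  }
  return null // the real `which` throws ENOENT unless { nothrow: true }
}
```

For instance, `findExecutableSync('node')` returns an absolute path such as `/usr/bin/node`, or `null` when nothing on `PATH` matches.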
diff --git a/deps/npm/node_modules/node-gyp/node_modules/which/bin/which.js b/deps/npm/node_modules/node-gyp/node_modules/which/bin/which.js deleted file mode 100755 index 6df16f21acf930..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/which/bin/which.js +++ /dev/null @@ -1,52 +0,0 @@ -#!/usr/bin/env node - -const which = require('../lib') -const argv = process.argv.slice(2) - -const usage = (err) => { - if (err) { - console.error(`which: ${err}`) - } - console.error('usage: which [-as] program ...') - process.exit(1) -} - -if (!argv.length) { - return usage() -} - -let dashdash = false -const [commands, flags] = argv.reduce((acc, arg) => { - if (dashdash || arg === '--') { - dashdash = true - return acc - } - - if (!/^-/.test(arg)) { - acc[0].push(arg) - return acc - } - - for (const flag of arg.slice(1).split('')) { - if (flag === 's') { - acc[1].silent = true - } else if (flag === 'a') { - acc[1].all = true - } else { - usage(`illegal option -- ${flag}`) - } - } - - return acc -}, [[], {}]) - -for (const command of commands) { - try { - const res = which.sync(command, { all: flags.all }) - if (!flags.silent) { - console.log([].concat(res).join('\n')) - } - } catch (err) { - process.exitCode = 1 - } -} diff --git a/deps/npm/node_modules/node-gyp/node_modules/which/lib/index.js b/deps/npm/node_modules/node-gyp/node_modules/which/lib/index.js deleted file mode 100644 index 2fd358baf888fd..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/which/lib/index.js +++ /dev/null @@ -1,111 +0,0 @@ -const { isexe, sync: isexeSync } = require('isexe') -const { join, delimiter, sep, posix } = require('path') - -const isWindows = process.platform === 'win32' - -// used to check for slashed in commands passed in. always checks for the posix -// seperator on all platforms, and checks for the current separator when not on -// a posix platform. don't use the isWindows check for this since that is mocked -// in tests but we still need the code to actually work when called. that is also -// why it is ignored from coverage. -/* istanbul ignore next */ -const rSlash = new RegExp(`[${posix.sep}${sep === posix.sep ? '' : sep}]`.replace(/(\\)/g, '\\$1')) -const rRel = new RegExp(`^\\.${rSlash.source}`) - -const getNotFoundError = (cmd) => - Object.assign(new Error(`not found: ${cmd}`), { code: 'ENOENT' }) - -const getPathInfo = (cmd, { - path: optPath = process.env.PATH, - pathExt: optPathExt = process.env.PATHEXT, - delimiter: optDelimiter = delimiter, -}) => { - // If it has a slash, then we don't bother searching the pathenv. - // just check the file itself, and that's it. - const pathEnv = cmd.match(rSlash) ? [''] : [ - // windows always checks the cwd first - ...(isWindows ? [process.cwd()] : []), - ...(optPath || /* istanbul ignore next: very unusual */ '').split(optDelimiter), - ] - - if (isWindows) { - const pathExtExe = optPathExt || - ['.EXE', '.CMD', '.BAT', '.COM'].join(optDelimiter) - const pathExt = pathExtExe.split(optDelimiter).flatMap((item) => [item, item.toLowerCase()]) - if (cmd.includes('.') && pathExt[0] !== '') { - pathExt.unshift('') - } - return { pathEnv, pathExt, pathExtExe } - } - - return { pathEnv, pathExt: [''] } -} - -const getPathPart = (raw, cmd) => { - const pathPart = /^".*"$/.test(raw) ? raw.slice(1, -1) : raw - const prefix = !pathPart && rRel.test(cmd) ? 
cmd.slice(0, 2) : '' - return prefix + join(pathPart, cmd) -} - -const which = async (cmd, opt = {}) => { - const { pathEnv, pathExt, pathExtExe } = getPathInfo(cmd, opt) - const found = [] - - for (const envPart of pathEnv) { - const p = getPathPart(envPart, cmd) - - for (const ext of pathExt) { - const withExt = p + ext - const is = await isexe(withExt, { pathExt: pathExtExe, ignoreErrors: true }) - if (is) { - if (!opt.all) { - return withExt - } - found.push(withExt) - } - } - } - - if (opt.all && found.length) { - return found - } - - if (opt.nothrow) { - return null - } - - throw getNotFoundError(cmd) -} - -const whichSync = (cmd, opt = {}) => { - const { pathEnv, pathExt, pathExtExe } = getPathInfo(cmd, opt) - const found = [] - - for (const pathEnvPart of pathEnv) { - const p = getPathPart(pathEnvPart, cmd) - - for (const ext of pathExt) { - const withExt = p + ext - const is = isexeSync(withExt, { pathExt: pathExtExe, ignoreErrors: true }) - if (is) { - if (!opt.all) { - return withExt - } - found.push(withExt) - } - } - } - - if (opt.all && found.length) { - return found - } - - if (opt.nothrow) { - return null - } - - throw getNotFoundError(cmd) -} - -module.exports = which -which.sync = whichSync diff --git a/deps/npm/node_modules/node-gyp/node_modules/which/package.json b/deps/npm/node_modules/node-gyp/node_modules/which/package.json deleted file mode 100644 index 515bfb22ca0e1e..00000000000000 --- a/deps/npm/node_modules/node-gyp/node_modules/which/package.json +++ /dev/null @@ -1,57 +0,0 @@ -{ - "author": "GitHub Inc.", - "name": "which", - "description": "Like which(1) unix command. Find the first instance of an executable in the PATH.", - "version": "4.0.0", - "repository": { - "type": "git", - "url": "https://github.com/npm/node-which.git" - }, - "main": "lib/index.js", - "bin": { - "node-which": "./bin/which.js" - }, - "license": "ISC", - "dependencies": { - "isexe": "^3.1.1" - }, - "devDependencies": { - "@npmcli/eslint-config": "^4.0.0", - "@npmcli/template-oss": "4.18.0", - "tap": "^16.3.0" - }, - "scripts": { - "test": "tap", - "lint": "eslint \"**/*.js\"", - "postlint": "template-oss-check", - "template-oss-apply": "template-oss-apply --force", - "lintfix": "npm run lint -- --fix", - "snap": "tap", - "posttest": "npm run lint" - }, - "files": [ - "bin/", - "lib/" - ], - "tap": { - "check-coverage": true, - "nyc-arg": [ - "--exclude", - "tap-snapshots/**" - ] - }, - "engines": { - "node": "^16.13.0 || >=18.0.0" - }, - "templateOSS": { - "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. Edits may be overwritten.", - "ciVersions": [ - "16.13.0", - "16.x", - "18.0.0", - "18.x" - ], - "version": "4.18.0", - "publish": "true" - } -} diff --git a/deps/npm/node_modules/node-gyp/node_modules/yallist/LICENSE.md b/deps/npm/node_modules/node-gyp/node_modules/yallist/LICENSE.md new file mode 100644 index 00000000000000..881248b6d7f0ca --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/yallist/LICENSE.md @@ -0,0 +1,63 @@ +All packages under `src/` are licensed according to the terms in +their respective `LICENSE` or `LICENSE.md` files. + +The remainder of this project is licensed under the Blue Oak +Model License, as follows: + +----- + +# Blue Oak Model License + +Version 1.0.0 + +## Purpose + +This license gives everyone as much permission to work with +this software as possible, while protecting contributors +from liability. + +## Acceptance + +In order to receive this license, you must agree to its +rules. 
The rules of this license are both obligations +under that agreement and conditions to your license. +You must not do anything with this software that triggers +a rule that you cannot or will not follow. + +## Copyright + +Each contributor licenses you to do everything with this +software that would otherwise infringe that contributor's +copyright in it. + +## Notices + +You must ensure that everyone who gets a copy of +any part of this software from you, with or without +changes, also gets the text of this license or a link to +<https://blueoakcouncil.org/license/1.0.0>. + +## Excuse + +If anyone notifies you in writing that you have not +complied with [Notices](#notices), you can keep your +license by taking all practical steps to comply within 30 +days after the notice. If you do not do so, your license +ends immediately. + +## Patent + +Each contributor licenses you to do everything with this +software that would otherwise infringe any patent claims +they can license or become able to license. + +## Reliability + +No contributor can revoke this license. + +## No Liability + +***As far as the law allows, this software comes as is, +without any warranty or condition, and no contributor +will be liable to anyone for any damages related to this +software or this license, under any kind of legal claim.*** diff --git a/deps/npm/node_modules/node-gyp/node_modules/yallist/dist/commonjs/index.js b/deps/npm/node_modules/node-gyp/node_modules/yallist/dist/commonjs/index.js new file mode 100644 index 00000000000000..c1e1e4741689d9 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/yallist/dist/commonjs/index.js @@ -0,0 +1,384 @@ +"use strict"; +Object.defineProperty(exports, "__esModule", { value: true }); +exports.Node = exports.Yallist = void 0; +class Yallist { + tail; + head; + length = 0; + static create(list = []) { + return new Yallist(list); + } + constructor(list = []) { + for (const item of list) { + this.push(item); + } + } + *[Symbol.iterator]() { + for (let walker = this.head; walker; walker = walker.next) { + yield walker.value; + } + } + removeNode(node) { + if (node.list !== this) { + throw new Error('removing node which does not belong to this list'); + } + const next = node.next; + const prev = node.prev; + if (next) { + next.prev = prev; + } + if (prev) { + prev.next = next; + } + if (node === this.head) { + this.head = next; + } + if (node === this.tail) { + this.tail = prev; + } + this.length--; + node.next = undefined; + node.prev = undefined; + node.list = undefined; + return next; + } + unshiftNode(node) { + if (node === this.head) { + return; + } + if (node.list) { + node.list.removeNode(node); + } + const head = this.head; + node.list = this; + node.next = head; + if (head) { + head.prev = node; + } + this.head = node; + if (!this.tail) { + this.tail = node; + } + this.length++; + } + pushNode(node) { + if (node === this.tail) { + return; + } + if (node.list) { + node.list.removeNode(node); + } + const tail = this.tail; + node.list = this; + node.prev = tail; + if (tail) { + tail.next = node; + } + this.tail = node; + if (!this.head) { + this.head = node; + } + this.length++; + } + push(...args) { + for (let i = 0, l = args.length; i < l; i++) { + push(this, args[i]); + } + return this.length; + } + unshift(...args) { + for (var i = 0, l = args.length; i < l; i++) { + unshift(this, args[i]); + } + return this.length; + } + pop() { + if (!this.tail) { + return undefined; + } + const res = this.tail.value; + const t = this.tail; + this.tail = this.tail.prev; + if (this.tail) { + this.tail.next =
undefined; + } + else { + this.head = undefined; + } + t.list = undefined; + this.length--; + return res; + } + shift() { + if (!this.head) { + return undefined; + } + const res = this.head.value; + const h = this.head; + this.head = this.head.next; + if (this.head) { + this.head.prev = undefined; + } + else { + this.tail = undefined; + } + h.list = undefined; + this.length--; + return res; + } + forEach(fn, thisp) { + thisp = thisp || this; + for (let walker = this.head, i = 0; !!walker; i++) { + fn.call(thisp, walker.value, i, this); + walker = walker.next; + } + } + forEachReverse(fn, thisp) { + thisp = thisp || this; + for (let walker = this.tail, i = this.length - 1; !!walker; i--) { + fn.call(thisp, walker.value, i, this); + walker = walker.prev; + } + } + get(n) { + let i = 0; + let walker = this.head; + for (; !!walker && i < n; i++) { + walker = walker.next; + } + if (i === n && !!walker) { + return walker.value; + } + } + getReverse(n) { + let i = 0; + let walker = this.tail; + for (; !!walker && i < n; i++) { + // abort out of the list early if we hit a cycle + walker = walker.prev; + } + if (i === n && !!walker) { + return walker.value; + } + } + map(fn, thisp) { + thisp = thisp || this; + const res = new Yallist(); + for (let walker = this.head; !!walker;) { + res.push(fn.call(thisp, walker.value, this)); + walker = walker.next; + } + return res; + } + mapReverse(fn, thisp) { + thisp = thisp || this; + var res = new Yallist(); + for (let walker = this.tail; !!walker;) { + res.push(fn.call(thisp, walker.value, this)); + walker = walker.prev; + } + return res; + } + reduce(fn, initial) { + let acc; + let walker = this.head; + if (arguments.length > 1) { + acc = initial; + } + else if (this.head) { + walker = this.head.next; + acc = this.head.value; + } + else { + throw new TypeError('Reduce of empty list with no initial value'); + } + for (var i = 0; !!walker; i++) { + acc = fn(acc, walker.value, i); + walker = walker.next; + } + return acc; + } + reduceReverse(fn, initial) { + let acc; + let walker = this.tail; + if (arguments.length > 1) { + acc = initial; + } + else if (this.tail) { + walker = this.tail.prev; + acc = this.tail.value; + } + else { + throw new TypeError('Reduce of empty list with no initial value'); + } + for (let i = this.length - 1; !!walker; i--) { + acc = fn(acc, walker.value, i); + walker = walker.prev; + } + return acc; + } + toArray() { + const arr = new Array(this.length); + for (let i = 0, walker = this.head; !!walker; i++) { + arr[i] = walker.value; + walker = walker.next; + } + return arr; + } + toArrayReverse() { + const arr = new Array(this.length); + for (let i = 0, walker = this.tail; !!walker; i++) { + arr[i] = walker.value; + walker = walker.prev; + } + return arr; + } + slice(from = 0, to = this.length) { + if (to < 0) { + to += this.length; + } + if (from < 0) { + from += this.length; + } + const ret = new Yallist(); + if (to < from || to < 0) { + return ret; + } + if (from < 0) { + from = 0; + } + if (to > this.length) { + to = this.length; + } + let walker = this.head; + let i = 0; + for (i = 0; !!walker && i < from; i++) { + walker = walker.next; + } + for (; !!walker && i < to; i++, walker = walker.next) { + ret.push(walker.value); + } + return ret; + } + sliceReverse(from = 0, to = this.length) { + if (to < 0) { + to += this.length; + } + if (from < 0) { + from += this.length; + } + const ret = new Yallist(); + if (to < from || to < 0) { + return ret; + } + if (from < 0) { + from = 0; + } + if (to > this.length) { + to = this.length; + } 
+ let i = this.length; + let walker = this.tail; + for (; !!walker && i > to; i--) { + walker = walker.prev; + } + for (; !!walker && i > from; i--, walker = walker.prev) { + ret.push(walker.value); + } + return ret; + } + splice(start, deleteCount = 0, ...nodes) { + if (start > this.length) { + start = this.length - 1; + } + if (start < 0) { + start = this.length + start; + } + let walker = this.head; + for (let i = 0; !!walker && i < start; i++) { + walker = walker.next; + } + const ret = []; + for (let i = 0; !!walker && i < deleteCount; i++) { + ret.push(walker.value); + walker = this.removeNode(walker); + } + if (!walker) { + walker = this.tail; + } + else if (walker !== this.tail) { + walker = walker.prev; + } + for (const v of nodes) { + walker = insertAfter(this, walker, v); + } + return ret; + } + reverse() { + const head = this.head; + const tail = this.tail; + for (let walker = head; !!walker; walker = walker.prev) { + const p = walker.prev; + walker.prev = walker.next; + walker.next = p; + } + this.head = tail; + this.tail = head; + return this; + } +} +exports.Yallist = Yallist; +// insertAfter undefined means "make the node the new head of list" +function insertAfter(self, node, value) { + const prev = node; + const next = node ? node.next : self.head; + const inserted = new Node(value, prev, next, self); + if (inserted.next === undefined) { + self.tail = inserted; + } + if (inserted.prev === undefined) { + self.head = inserted; + } + self.length++; + return inserted; +} +function push(self, item) { + self.tail = new Node(item, self.tail, undefined, self); + if (!self.head) { + self.head = self.tail; + } + self.length++; +} +function unshift(self, item) { + self.head = new Node(item, undefined, self.head, self); + if (!self.tail) { + self.tail = self.head; + } + self.length++; +} +class Node { + list; + next; + prev; + value; + constructor(value, prev, next, list) { + this.list = list; + this.value = value; + if (prev) { + prev.next = this; + this.prev = prev; + } + else { + this.prev = undefined; + } + if (next) { + next.prev = this; + this.next = next; + } + else { + this.next = undefined; + } + } +} +exports.Node = Node; +//# sourceMappingURL=index.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/yallist/dist/commonjs/package.json b/deps/npm/node_modules/node-gyp/node_modules/yallist/dist/commonjs/package.json new file mode 100644 index 00000000000000..5bbefffbabee39 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/yallist/dist/commonjs/package.json @@ -0,0 +1,3 @@ +{ + "type": "commonjs" +} diff --git a/deps/npm/node_modules/node-gyp/node_modules/yallist/dist/esm/index.js b/deps/npm/node_modules/node-gyp/node_modules/yallist/dist/esm/index.js new file mode 100644 index 00000000000000..3d81c5113b93a8 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/yallist/dist/esm/index.js @@ -0,0 +1,379 @@ +export class Yallist { + tail; + head; + length = 0; + static create(list = []) { + return new Yallist(list); + } + constructor(list = []) { + for (const item of list) { + this.push(item); + } + } + *[Symbol.iterator]() { + for (let walker = this.head; walker; walker = walker.next) { + yield walker.value; + } + } + removeNode(node) { + if (node.list !== this) { + throw new Error('removing node which does not belong to this list'); + } + const next = node.next; + const prev = node.prev; + if (next) { + next.prev = prev; + } + if (prev) { + prev.next = next; + } + if (node === this.head) { + this.head = next; + } + 
if (node === this.tail) { + this.tail = prev; + } + this.length--; + node.next = undefined; + node.prev = undefined; + node.list = undefined; + return next; + } + unshiftNode(node) { + if (node === this.head) { + return; + } + if (node.list) { + node.list.removeNode(node); + } + const head = this.head; + node.list = this; + node.next = head; + if (head) { + head.prev = node; + } + this.head = node; + if (!this.tail) { + this.tail = node; + } + this.length++; + } + pushNode(node) { + if (node === this.tail) { + return; + } + if (node.list) { + node.list.removeNode(node); + } + const tail = this.tail; + node.list = this; + node.prev = tail; + if (tail) { + tail.next = node; + } + this.tail = node; + if (!this.head) { + this.head = node; + } + this.length++; + } + push(...args) { + for (let i = 0, l = args.length; i < l; i++) { + push(this, args[i]); + } + return this.length; + } + unshift(...args) { + for (var i = 0, l = args.length; i < l; i++) { + unshift(this, args[i]); + } + return this.length; + } + pop() { + if (!this.tail) { + return undefined; + } + const res = this.tail.value; + const t = this.tail; + this.tail = this.tail.prev; + if (this.tail) { + this.tail.next = undefined; + } + else { + this.head = undefined; + } + t.list = undefined; + this.length--; + return res; + } + shift() { + if (!this.head) { + return undefined; + } + const res = this.head.value; + const h = this.head; + this.head = this.head.next; + if (this.head) { + this.head.prev = undefined; + } + else { + this.tail = undefined; + } + h.list = undefined; + this.length--; + return res; + } + forEach(fn, thisp) { + thisp = thisp || this; + for (let walker = this.head, i = 0; !!walker; i++) { + fn.call(thisp, walker.value, i, this); + walker = walker.next; + } + } + forEachReverse(fn, thisp) { + thisp = thisp || this; + for (let walker = this.tail, i = this.length - 1; !!walker; i--) { + fn.call(thisp, walker.value, i, this); + walker = walker.prev; + } + } + get(n) { + let i = 0; + let walker = this.head; + for (; !!walker && i < n; i++) { + walker = walker.next; + } + if (i === n && !!walker) { + return walker.value; + } + } + getReverse(n) { + let i = 0; + let walker = this.tail; + for (; !!walker && i < n; i++) { + // abort out of the list early if we hit a cycle + walker = walker.prev; + } + if (i === n && !!walker) { + return walker.value; + } + } + map(fn, thisp) { + thisp = thisp || this; + const res = new Yallist(); + for (let walker = this.head; !!walker;) { + res.push(fn.call(thisp, walker.value, this)); + walker = walker.next; + } + return res; + } + mapReverse(fn, thisp) { + thisp = thisp || this; + var res = new Yallist(); + for (let walker = this.tail; !!walker;) { + res.push(fn.call(thisp, walker.value, this)); + walker = walker.prev; + } + return res; + } + reduce(fn, initial) { + let acc; + let walker = this.head; + if (arguments.length > 1) { + acc = initial; + } + else if (this.head) { + walker = this.head.next; + acc = this.head.value; + } + else { + throw new TypeError('Reduce of empty list with no initial value'); + } + for (var i = 0; !!walker; i++) { + acc = fn(acc, walker.value, i); + walker = walker.next; + } + return acc; + } + reduceReverse(fn, initial) { + let acc; + let walker = this.tail; + if (arguments.length > 1) { + acc = initial; + } + else if (this.tail) { + walker = this.tail.prev; + acc = this.tail.value; + } + else { + throw new TypeError('Reduce of empty list with no initial value'); + } + for (let i = this.length - 1; !!walker; i--) { + acc = fn(acc, walker.value, i); + 
walker = walker.prev; + } + return acc; + } + toArray() { + const arr = new Array(this.length); + for (let i = 0, walker = this.head; !!walker; i++) { + arr[i] = walker.value; + walker = walker.next; + } + return arr; + } + toArrayReverse() { + const arr = new Array(this.length); + for (let i = 0, walker = this.tail; !!walker; i++) { + arr[i] = walker.value; + walker = walker.prev; + } + return arr; + } + slice(from = 0, to = this.length) { + if (to < 0) { + to += this.length; + } + if (from < 0) { + from += this.length; + } + const ret = new Yallist(); + if (to < from || to < 0) { + return ret; + } + if (from < 0) { + from = 0; + } + if (to > this.length) { + to = this.length; + } + let walker = this.head; + let i = 0; + for (i = 0; !!walker && i < from; i++) { + walker = walker.next; + } + for (; !!walker && i < to; i++, walker = walker.next) { + ret.push(walker.value); + } + return ret; + } + sliceReverse(from = 0, to = this.length) { + if (to < 0) { + to += this.length; + } + if (from < 0) { + from += this.length; + } + const ret = new Yallist(); + if (to < from || to < 0) { + return ret; + } + if (from < 0) { + from = 0; + } + if (to > this.length) { + to = this.length; + } + let i = this.length; + let walker = this.tail; + for (; !!walker && i > to; i--) { + walker = walker.prev; + } + for (; !!walker && i > from; i--, walker = walker.prev) { + ret.push(walker.value); + } + return ret; + } + splice(start, deleteCount = 0, ...nodes) { + if (start > this.length) { + start = this.length - 1; + } + if (start < 0) { + start = this.length + start; + } + let walker = this.head; + for (let i = 0; !!walker && i < start; i++) { + walker = walker.next; + } + const ret = []; + for (let i = 0; !!walker && i < deleteCount; i++) { + ret.push(walker.value); + walker = this.removeNode(walker); + } + if (!walker) { + walker = this.tail; + } + else if (walker !== this.tail) { + walker = walker.prev; + } + for (const v of nodes) { + walker = insertAfter(this, walker, v); + } + return ret; + } + reverse() { + const head = this.head; + const tail = this.tail; + for (let walker = head; !!walker; walker = walker.prev) { + const p = walker.prev; + walker.prev = walker.next; + walker.next = p; + } + this.head = tail; + this.tail = head; + return this; + } +} +// insertAfter undefined means "make the node the new head of list" +function insertAfter(self, node, value) { + const prev = node; + const next = node ? 
node.next : self.head; + const inserted = new Node(value, prev, next, self); + if (inserted.next === undefined) { + self.tail = inserted; + } + if (inserted.prev === undefined) { + self.head = inserted; + } + self.length++; + return inserted; +} +function push(self, item) { + self.tail = new Node(item, self.tail, undefined, self); + if (!self.head) { + self.head = self.tail; + } + self.length++; +} +function unshift(self, item) { + self.head = new Node(item, undefined, self.head, self); + if (!self.tail) { + self.tail = self.head; + } + self.length++; +} +export class Node { + list; + next; + prev; + value; + constructor(value, prev, next, list) { + this.list = list; + this.value = value; + if (prev) { + prev.next = this; + this.prev = prev; + } + else { + this.prev = undefined; + } + if (next) { + next.prev = this; + this.next = next; + } + else { + this.next = undefined; + } + } +} +//# sourceMappingURL=index.js.map \ No newline at end of file diff --git a/deps/npm/node_modules/node-gyp/node_modules/yallist/dist/esm/package.json b/deps/npm/node_modules/node-gyp/node_modules/yallist/dist/esm/package.json new file mode 100644 index 00000000000000..3dbc1ca591c055 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/yallist/dist/esm/package.json @@ -0,0 +1,3 @@ +{ + "type": "module" +} diff --git a/deps/npm/node_modules/node-gyp/node_modules/yallist/package.json b/deps/npm/node_modules/node-gyp/node_modules/yallist/package.json new file mode 100644 index 00000000000000..2f5247808bbea8 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/yallist/package.json @@ -0,0 +1,68 @@ +{ + "name": "yallist", + "version": "5.0.0", + "description": "Yet Another Linked List", + "files": [ + "dist" + ], + "devDependencies": { + "prettier": "^3.2.5", + "tap": "^18.7.2", + "tshy": "^1.13.1", + "typedoc": "^0.25.13" + }, + "scripts": { + "preversion": "npm test", + "postversion": "npm publish", + "prepublishOnly": "git push origin --follow-tags", + "prepare": "tshy", + "pretest": "npm run prepare", + "presnap": "npm run prepare", + "test": "tap", + "snap": "tap", + "format": "prettier --write . --loglevel warn --ignore-path ../../.prettierignore --cache", + "typedoc": "typedoc" + }, + "repository": { + "type": "git", + "url": "git+https://github.com/isaacs/yallist.git" + }, + "author": "Isaac Z. 
Schlueter (http://blog.izs.me/)", + "license": "BlueOak-1.0.0", + "tshy": { + "exports": { + "./package.json": "./package.json", + ".": "./src/index.ts" + } + }, + "exports": { + "./package.json": "./package.json", + ".": { + "import": { + "types": "./dist/esm/index.d.ts", + "default": "./dist/esm/index.js" + }, + "require": { + "types": "./dist/commonjs/index.d.ts", + "default": "./dist/commonjs/index.js" + } + } + }, + "main": "./dist/commonjs/index.js", + "types": "./dist/commonjs/index.d.ts", + "type": "module", + "prettier": { + "semi": false, + "printWidth": 70, + "tabWidth": 2, + "useTabs": false, + "singleQuote": true, + "jsxSingleQuote": false, + "bracketSameLine": true, + "arrowParens": "avoid", + "endOfLine": "lf" + }, + "engines": { + "node": ">=18" + } +} diff --git a/deps/npm/node_modules/node-gyp/package.json b/deps/npm/node_modules/node-gyp/package.json index 8e2ea42510dd14..4a1cfb0eb1a283 100644 --- a/deps/npm/node_modules/node-gyp/package.json +++ b/deps/npm/node_modules/node-gyp/package.json @@ -11,7 +11,7 @@ "bindings", "gyp" ], - "version": "10.2.0", + "version": "11.0.0", "installVersion": 11, "author": "Nathan Rajlich (http://tootallnate.net)", "repository": { @@ -26,26 +26,27 @@ "exponential-backoff": "^3.1.1", "glob": "^10.3.10", "graceful-fs": "^4.2.6", - "make-fetch-happen": "^13.0.0", - "nopt": "^7.0.0", - "proc-log": "^4.1.0", + "make-fetch-happen": "^14.0.3", + "nopt": "^8.0.0", + "proc-log": "^5.0.0", "semver": "^7.3.5", - "tar": "^6.2.1", - "which": "^4.0.0" + "tar": "^7.4.3", + "which": "^5.0.0" }, "engines": { - "node": "^16.14.0 || >=18.0.0" + "node": "^18.17.0 || >=20.5.0" }, "devDependencies": { "bindings": "^1.5.0", "cross-env": "^7.0.3", - "mocha": "^10.2.0", + "eslint": "^9.16.0", + "mocha": "^11.0.1", "nan": "^2.14.2", - "require-inject": "^1.4.4", - "standard": "^17.0.0" + "neostandard": "^0.11.9", + "require-inject": "^1.4.4" }, "scripts": { - "lint": "standard \"*/*.js\" \"test/**/*.js\" \".github/**/*.js\"", + "lint": "eslint \"*/*.js\" \"test/**/*.js\" \".github/**/*.js\"", "test": "cross-env NODE_GYP_NULL_LOGGER=true mocha --timeout 15000 test/test-download.js test/test-*" } } diff --git a/deps/npm/node_modules/npm-install-checks/lib/current-env.js b/deps/npm/node_modules/npm-install-checks/lib/current-env.js index 9babde1f277ff1..31f154aac59b32 100644 --- a/deps/npm/node_modules/npm-install-checks/lib/current-env.js +++ b/deps/npm/node_modules/npm-install-checks/lib/current-env.js @@ -1,5 +1,6 @@ const process = require('node:process') const nodeOs = require('node:os') +const fs = require('node:fs') function isMusl (file) { return file.includes('libc.musl-') || file.includes('ld-musl-') @@ -13,12 +14,23 @@ function cpu () { return process.arch } -function libc (osName) { - // this is to make it faster on non linux machines - if (osName !== 'linux') { +const LDD_PATH = '/usr/bin/ldd' +function getFamilyFromFilesystem () { + try { + const content = fs.readFileSync(LDD_PATH, 'utf-8') + if (content.includes('musl')) { + return 'musl' + } + if (content.includes('GNU C Library')) { + return 'glibc' + } + return null + } catch { return undefined } - let family +} + +function getFamilyFromReport () { const originalExclude = process.report.excludeNetwork process.report.excludeNetwork = true const report = process.report.getReport() @@ -27,6 +39,22 @@ function libc (osName) { family = 'glibc' } else if (Array.isArray(report.sharedObjects) && report.sharedObjects.some(isMusl)) { family = 'musl' + } else { + family = null + } + return family +} + +let 
family +function libc (osName) { + if (osName !== 'linux') { + return undefined + } + if (family === undefined) { + family = getFamilyFromFilesystem() + if (family === undefined) { + family = getFamilyFromReport() + } } return family } diff --git a/deps/npm/node_modules/npm-install-checks/package.json b/deps/npm/node_modules/npm-install-checks/package.json index e9e69575a6dc6d..967f5f659b2fac 100644 --- a/deps/npm/node_modules/npm-install-checks/package.json +++ b/deps/npm/node_modules/npm-install-checks/package.json @@ -1,6 +1,6 @@ { "name": "npm-install-checks", - "version": "7.1.0", + "version": "7.1.1", "description": "Check the engines and platform fields in package.json", "main": "lib/index.js", "dependencies": { @@ -8,7 +8,7 @@ }, "devDependencies": { "@npmcli/eslint-config": "^5.0.0", - "@npmcli/template-oss": "4.23.3", + "@npmcli/template-oss": "4.23.4", "tap": "^16.0.1" }, "scripts": { @@ -40,7 +40,7 @@ "author": "GitHub Inc.", "templateOSS": { "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. Edits may be overwritten.", - "version": "4.23.3", + "version": "4.23.4", "publish": "true" }, "tap": { diff --git a/deps/npm/node_modules/npm-registry-fetch/lib/check-response.js b/deps/npm/node_modules/npm-registry-fetch/lib/check-response.js index 65eea2963b0b4c..2f183082ab2ce2 100644 --- a/deps/npm/node_modules/npm-registry-fetch/lib/check-response.js +++ b/deps/npm/node_modules/npm-registry-fetch/lib/check-response.js @@ -48,10 +48,18 @@ function logRequest (method, res, startTime) { const cacheStr = cacheStatus ? ` (cache ${cacheStatus})` : '' const urlStr = cleanUrl(res.url) - log.http( - 'fetch', - `${method.toUpperCase()} ${res.status} ${urlStr} ${elapsedTime}ms${attemptStr}${cacheStr}` - ) + // If make-fetch-happen reports a cache hit, then there was no fetch + if (cacheStatus === 'hit') { + log.http( + 'cache', + `${urlStr} ${elapsedTime}ms${attemptStr}${cacheStr}` + ) + } else { + log.http( + 'fetch', + `${method.toUpperCase()} ${res.status} ${urlStr} ${elapsedTime}ms${attemptStr}${cacheStr}` + ) + } } function checkErrors (method, res, startTime, opts) { diff --git a/deps/npm/node_modules/npm-registry-fetch/package.json b/deps/npm/node_modules/npm-registry-fetch/package.json index 559473b964aaa5..bd7a79d35e26ac 100644 --- a/deps/npm/node_modules/npm-registry-fetch/package.json +++ b/deps/npm/node_modules/npm-registry-fetch/package.json @@ -1,6 +1,6 @@ { "name": "npm-registry-fetch", - "version": "18.0.1", + "version": "18.0.2", "description": "Fetch-based http client for use with npm registry APIs", "main": "lib", "files": [ @@ -42,8 +42,8 @@ }, "devDependencies": { "@npmcli/eslint-config": "^5.0.0", - "@npmcli/template-oss": "4.23.3", - "cacache": "^18.0.0", + "@npmcli/template-oss": "4.23.4", + "cacache": "^19.0.1", "nock": "^13.2.4", "require-inject": "^1.4.4", "ssri": "^12.0.0", @@ -62,7 +62,7 @@ }, "templateOSS": { "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. 
Edits may be overwritten.", - "version": "4.23.3", + "version": "4.23.4", "publish": "true" } } diff --git a/deps/npm/node_modules/package-json-from-dist/dist/commonjs/index.js b/deps/npm/node_modules/package-json-from-dist/dist/commonjs/index.js index 5cff210d855cb0..b966ac9fef535b 100644 --- a/deps/npm/node_modules/package-json-from-dist/dist/commonjs/index.js +++ b/deps/npm/node_modules/package-json-from-dist/dist/commonjs/index.js @@ -5,6 +5,8 @@ const node_fs_1 = require("node:fs"); const node_path_1 = require("node:path"); const node_url_1 = require("node:url"); const NM = `${node_path_1.sep}node_modules${node_path_1.sep}`; +const STORE = `.store${node_path_1.sep}`; +const PKG = `${node_path_1.sep}package${node_path_1.sep}`; const DIST = `${node_path_1.sep}dist${node_path_1.sep}`; /** * Find the package.json file, either from a TypeScript file somewhere not @@ -59,8 +61,16 @@ const findPackageJson = (from, pathFromSrc = '../package.json') => { // inside of node_modules. find the dist directly under package name. const nm = __dirname.substring(0, nms + NM.length); const pkgDir = __dirname.substring(nms + NM.length); + // affordance for yarn berry, which puts package contents in + // '.../node_modules/.store/${id}-${hash}/package/...' + if (pkgDir.startsWith(STORE)) { + const pkg = pkgDir.indexOf(PKG, STORE.length); + if (pkg) { + return (0, node_path_1.resolve)(nm, pkgDir.substring(0, pkg + PKG.length), 'package.json'); + } + } const pkgName = pkgDir.startsWith('@') ? - pkgDir.split(node_path_1.sep).slice(0, 2).join(node_path_1.sep) + pkgDir.split(node_path_1.sep, 2).join(node_path_1.sep) : String(pkgDir.split(node_path_1.sep)[0]); return (0, node_path_1.resolve)(nm, pkgName, 'package.json'); } diff --git a/deps/npm/node_modules/package-json-from-dist/dist/esm/index.js b/deps/npm/node_modules/package-json-from-dist/dist/esm/index.js index 0627645f9c35a4..426ad3c2d18597 100644 --- a/deps/npm/node_modules/package-json-from-dist/dist/esm/index.js +++ b/deps/npm/node_modules/package-json-from-dist/dist/esm/index.js @@ -2,6 +2,8 @@ import { readFileSync } from 'node:fs'; import { dirname, resolve, sep } from 'node:path'; import { fileURLToPath } from 'node:url'; const NM = `${sep}node_modules${sep}`; +const STORE = `.store${sep}`; +const PKG = `${sep}package${sep}`; const DIST = `${sep}dist${sep}`; /** * Find the package.json file, either from a TypeScript file somewhere not @@ -56,8 +58,16 @@ export const findPackageJson = (from, pathFromSrc = '../package.json') => { // inside of node_modules. find the dist directly under package name. const nm = __dirname.substring(0, nms + NM.length); const pkgDir = __dirname.substring(nms + NM.length); + // affordance for yarn berry, which puts package contents in + // '.../node_modules/.store/${id}-${hash}/package/...' + if (pkgDir.startsWith(STORE)) { + const pkg = pkgDir.indexOf(PKG, STORE.length); + if (pkg) { + return resolve(nm, pkgDir.substring(0, pkg + PKG.length), 'package.json'); + } + } const pkgName = pkgDir.startsWith('@') ? 
- pkgDir.split(sep).slice(0, 2).join(sep) + pkgDir.split(sep, 2).join(sep) : String(pkgDir.split(sep)[0]); return resolve(nm, pkgName, 'package.json'); } diff --git a/deps/npm/node_modules/package-json-from-dist/package.json b/deps/npm/node_modules/package-json-from-dist/package.json index 2d5526e87b7fa0..a2d03c3269d72d 100644 --- a/deps/npm/node_modules/package-json-from-dist/package.json +++ b/deps/npm/node_modules/package-json-from-dist/package.json @@ -1,6 +1,6 @@ { "name": "package-json-from-dist", - "version": "1.0.0", + "version": "1.0.1", "description": "Load the local package.json from either src or dist folder", "main": "./dist/commonjs/index.js", "exports": { @@ -28,7 +28,7 @@ "presnap": "npm run prepare", "test": "tap", "snap": "tap", - "format": "prettier --write . --loglevel warn --ignore-path ../../.prettierignore --cache", + "format": "prettier --write . --log-level warn", "typedoc": "typedoc" }, "author": "Isaac Z. Schlueter (https://izs.me)", diff --git a/deps/npm/node_modules/pacote/lib/dir.js b/deps/npm/node_modules/pacote/lib/dir.js index f3229b34e463ab..4ae97c216fe64f 100644 --- a/deps/npm/node_modules/pacote/lib/dir.js +++ b/deps/npm/node_modules/pacote/lib/dir.js @@ -39,6 +39,8 @@ class DirFetcher extends Fetcher { const stdio = this.opts.foregroundScripts ? 'inherit' : 'pipe' return runScript({ + // this || undefined is because runScript will be unhappy with the default null value + scriptShell: this.opts.scriptShell || undefined, pkg: mani, event: 'prepare', path: this.resolved, diff --git a/deps/npm/node_modules/pacote/lib/fetcher.js b/deps/npm/node_modules/pacote/lib/fetcher.js index cc2c2db70c697d..f2ac97619d3af1 100644 --- a/deps/npm/node_modules/pacote/lib/fetcher.js +++ b/deps/npm/node_modules/pacote/lib/fetcher.js @@ -188,7 +188,15 @@ class FetcherBase { // private // Note: cacache will raise a EINTEGRITY error if the integrity doesn't match #tarballFromCache () { - return cacache.get.stream.byDigest(this.cache, this.integrity, this.opts) + const startTime = Date.now() + const stream = cacache.get.stream.byDigest(this.cache, this.integrity, this.opts) + const elapsedTime = Date.now() - startTime + // cache is good, so log it as a hit in particular since there was no fetch logged + log.http( + 'cache', + `${this.spec} ${elapsedTime}ms (cache hit)` + ) + return stream } get [_.cacheFetches] () { diff --git a/deps/npm/node_modules/pacote/package.json b/deps/npm/node_modules/pacote/package.json index 0eb8261af96e0c..71c9aa1ce32572 100644 --- a/deps/npm/node_modules/pacote/package.json +++ b/deps/npm/node_modules/pacote/package.json @@ -1,6 +1,6 @@ { "name": "pacote", - "version": "19.0.0", + "version": "19.0.1", "description": "JavaScript package downloader", "author": "GitHub Inc.", "bin": { @@ -59,7 +59,7 @@ "npm-registry-fetch": "^18.0.0", "proc-log": "^5.0.0", "promise-retry": "^2.0.1", - "sigstore": "^2.2.0", + "sigstore": "^3.0.0", "ssri": "^12.0.0", "tar": "^6.1.11" }, diff --git a/deps/npm/node_modules/promise-call-limit/dist/commonjs/index.js b/deps/npm/node_modules/promise-call-limit/dist/commonjs/index.js index 6ce5cfcef9559e..b32a85bb11aa39 100644 --- a/deps/npm/node_modules/promise-call-limit/dist/commonjs/index.js +++ b/deps/npm/node_modules/promise-call-limit/dist/commonjs/index.js @@ -29,8 +29,8 @@ const os = __importStar(require("node:os")); // cpus() cpus() can return an empty list if /proc is not mounted, use 1 in // this case /* c8 ignore start */ -const defLimit = 'availableParallelism' in os - ? 
Math.max(1, os.availableParallelism() - 1) +const defLimit = 'availableParallelism' in os ? + Math.max(1, os.availableParallelism() - 1) : Math.max(1, os.cpus().length - 1); const callLimit = (queue, { limit = defLimit, rejectLate } = {}) => new Promise((res, rej) => { let active = 0; diff --git a/deps/npm/node_modules/promise-call-limit/dist/esm/index.js b/deps/npm/node_modules/promise-call-limit/dist/esm/index.js index 030099929b3483..fe709db7fc04cc 100644 --- a/deps/npm/node_modules/promise-call-limit/dist/esm/index.js +++ b/deps/npm/node_modules/promise-call-limit/dist/esm/index.js @@ -3,8 +3,8 @@ import * as os from 'node:os'; // cpus() cpus() can return an empty list if /proc is not mounted, use 1 in // this case /* c8 ignore start */ -const defLimit = 'availableParallelism' in os - ? Math.max(1, os.availableParallelism() - 1) +const defLimit = 'availableParallelism' in os ? + Math.max(1, os.availableParallelism() - 1) : Math.max(1, os.cpus().length - 1); export const callLimit = (queue, { limit = defLimit, rejectLate } = {}) => new Promise((res, rej) => { let active = 0; diff --git a/deps/npm/node_modules/promise-call-limit/package.json b/deps/npm/node_modules/promise-call-limit/package.json index a3aa548d6538ac..ab14595366e223 100644 --- a/deps/npm/node_modules/promise-call-limit/package.json +++ b/deps/npm/node_modules/promise-call-limit/package.json @@ -1,6 +1,6 @@ { "name": "promise-call-limit", - "version": "3.0.1", + "version": "3.0.2", "files": [ "dist" ], @@ -18,16 +18,17 @@ "test": "tap", "preversion": "npm test", "postversion": "npm publish", - "prepublishOnly": "git push origin --follow-tags" + "prepublishOnly": "git push origin --follow-tags", + "format": "prettier --write . --log-level warn --cache" }, "devDependencies": { - "prettier": "^3.2.1", - "tap": "^18.6.1", - "tshy": "^1.8.2", - "format": "prettier --write . --loglevel warn --ignore-path ../../.prettierignore --cache", - "typedoc": "typedoc" + "prettier": "^3.3.3", + "tap": "^21.0.1", + "tshy": "^3.0.2", + "typedoc": "^0.26.6" }, "prettier": { + "experimentalTernaries": true, "semi": false, "printWidth": 70, "tabWidth": 2, @@ -62,5 +63,6 @@ }, "main": "./dist/commonjs/index.js", "types": "./dist/commonjs/index.d.ts", - "type": "module" + "type": "module", + "module": "./dist/esm/index.js" } diff --git a/deps/npm/node_modules/sigstore/dist/config.js b/deps/npm/node_modules/sigstore/dist/config.js index b4f0eea74fa4b4..e8b2392f97f236 100644 --- a/deps/npm/node_modules/sigstore/dist/config.js +++ b/deps/npm/node_modules/sigstore/dist/config.js @@ -1,6 +1,9 @@ "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); -exports.createVerificationPolicy = exports.createKeyFinder = exports.createBundleBuilder = exports.DEFAULT_TIMEOUT = exports.DEFAULT_RETRY = void 0; +exports.DEFAULT_TIMEOUT = exports.DEFAULT_RETRY = void 0; +exports.createBundleBuilder = createBundleBuilder; +exports.createKeyFinder = createKeyFinder; +exports.createVerificationPolicy = createVerificationPolicy; /* Copyright 2023 The Sigstore Authors. 
@@ -30,10 +33,12 @@ function createBundleBuilder(bundleType, options) { case 'messageSignature': return new sign_1.MessageSignatureBundleBuilder(bundlerOptions); case 'dsseEnvelope': - return new sign_1.DSSEBundleBuilder(bundlerOptions); + return new sign_1.DSSEBundleBuilder({ + ...bundlerOptions, + certificateChain: options.legacyCompatibility, + }); } } -exports.createBundleBuilder = createBundleBuilder; // Translates the public KeySelector type into the KeyFinderFunc type needed by // the verifier. function createKeyFinder(keySelector) { @@ -51,7 +56,6 @@ function createKeyFinder(keySelector) { }; }; } -exports.createKeyFinder = createKeyFinder; function createVerificationPolicy(options) { const policy = {}; const san = options.certificateIdentityEmail || options.certificateIdentityURI; @@ -63,7 +67,6 @@ function createVerificationPolicy(options) { } return policy; } -exports.createVerificationPolicy = createVerificationPolicy; // Instantiate the FulcioSigner based on the supplied options. function initSigner(options) { return new sign_1.FulcioSigner({ @@ -92,6 +95,7 @@ function initWitnesses(options) { if (isRekorEnabled(options)) { witnesses.push(new sign_1.RekorWitness({ rekorBaseURL: options.rekorURL, + entryType: options.legacyCompatibility ? 'intoto' : 'dsse', fetchOnConflict: false, retry: options.retry ?? exports.DEFAULT_RETRY, timeout: options.timeout ?? exports.DEFAULT_TIMEOUT, diff --git a/deps/npm/node_modules/sigstore/dist/sigstore.js b/deps/npm/node_modules/sigstore/dist/sigstore.js index 79d3440670cd50..c45524bbe21c22 100644 --- a/deps/npm/node_modules/sigstore/dist/sigstore.js +++ b/deps/npm/node_modules/sigstore/dist/sigstore.js @@ -23,7 +23,10 @@ var __importStar = (this && this.__importStar) || function (mod) { return result; }; Object.defineProperty(exports, "__esModule", { value: true }); -exports.createVerifier = exports.verify = exports.attest = exports.sign = void 0; +exports.sign = sign; +exports.attest = attest; +exports.verify = verify; +exports.createVerifier = createVerifier; /* Copyright 2023 The Sigstore Authors. 
@@ -50,7 +53,6 @@ options = {}) { const bundle = await bundler.create({ data: payload }); return (0, bundle_1.bundleToJSON)(bundle); } -exports.sign = sign; async function attest(payload, payloadType, /* istanbul ignore next */ options = {}) { @@ -58,7 +60,6 @@ options = {}) { const bundle = await bundler.create({ data: payload, type: payloadType }); return (0, bundle_1.bundleToJSON)(bundle); } -exports.attest = attest; async function verify(bundle, dataOrOptions, options) { let data; if (Buffer.isBuffer(dataOrOptions)) { @@ -69,7 +70,6 @@ async function verify(bundle, dataOrOptions, options) { } return createVerifier(options).then((verifier) => verifier.verify(bundle, data)); } -exports.verify = verify; async function createVerifier( /* istanbul ignore next */ options = {}) { @@ -100,4 +100,3 @@ options = {}) { }, }; } -exports.createVerifier = createVerifier; diff --git a/deps/npm/node_modules/@sigstore/bundle/LICENSE b/deps/npm/node_modules/sigstore/node_modules/@sigstore/bundle/LICENSE similarity index 100% rename from deps/npm/node_modules/@sigstore/bundle/LICENSE rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/bundle/LICENSE diff --git a/deps/npm/node_modules/@sigstore/bundle/dist/build.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/bundle/dist/build.js similarity index 87% rename from deps/npm/node_modules/@sigstore/bundle/dist/build.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/bundle/dist/build.js index 65c71b100ad58f..ade736407554c6 100644 --- a/deps/npm/node_modules/@sigstore/bundle/dist/build.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/bundle/dist/build.js @@ -1,6 +1,7 @@ "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); -exports.toDSSEBundle = exports.toMessageSignatureBundle = void 0; +exports.toMessageSignatureBundle = toMessageSignatureBundle; +exports.toDSSEBundle = toDSSEBundle; /* Copyright 2023 The Sigstore Authors. @@ -21,9 +22,9 @@ const bundle_1 = require("./bundle"); // Message signature bundle - $case: 'messageSignature' function toMessageSignatureBundle(options) { return { - mediaType: options.singleCertificate - ? bundle_1.BUNDLE_V03_MEDIA_TYPE - : bundle_1.BUNDLE_V02_MEDIA_TYPE, + mediaType: options.certificateChain + ? bundle_1.BUNDLE_V02_MEDIA_TYPE + : bundle_1.BUNDLE_V03_MEDIA_TYPE, content: { $case: 'messageSignature', messageSignature: { @@ -37,13 +38,12 @@ function toMessageSignatureBundle(options) { verificationMaterial: toVerificationMaterial(options), }; } -exports.toMessageSignatureBundle = toMessageSignatureBundle; // DSSE envelope bundle - $case: 'dsseEnvelope' function toDSSEBundle(options) { return { - mediaType: options.singleCertificate - ? bundle_1.BUNDLE_V03_MEDIA_TYPE - : bundle_1.BUNDLE_V02_MEDIA_TYPE, + mediaType: options.certificateChain + ? 
bundle_1.BUNDLE_V02_MEDIA_TYPE + : bundle_1.BUNDLE_V03_MEDIA_TYPE, content: { $case: 'dsseEnvelope', dsseEnvelope: toEnvelope(options), @@ -51,7 +51,6 @@ function toDSSEBundle(options) { verificationMaterial: toVerificationMaterial(options), }; } -exports.toDSSEBundle = toDSSEBundle; function toEnvelope(options) { return { payloadType: options.artifactType, @@ -75,13 +74,7 @@ function toVerificationMaterial(options) { } function toKeyContent(options) { if (options.certificate) { - if (options.singleCertificate) { - return { - $case: 'certificate', - certificate: { rawBytes: options.certificate }, - }; - } - else { + if (options.certificateChain) { return { $case: 'x509CertificateChain', x509CertificateChain: { @@ -89,6 +82,12 @@ function toKeyContent(options) { }, }; } + else { + return { + $case: 'certificate', + certificate: { rawBytes: options.certificate }, + }; + } } else { return { diff --git a/deps/npm/node_modules/@sigstore/bundle/dist/bundle.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/bundle/dist/bundle.js similarity index 79% rename from deps/npm/node_modules/@sigstore/bundle/dist/bundle.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/bundle/dist/bundle.js index dbd35df2ca2bb3..eb67a0ddc17bbb 100644 --- a/deps/npm/node_modules/@sigstore/bundle/dist/bundle.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/bundle/dist/bundle.js @@ -1,6 +1,10 @@ "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); -exports.isBundleWithDsseEnvelope = exports.isBundleWithMessageSignature = exports.isBundleWithPublicKey = exports.isBundleWithCertificateChain = exports.BUNDLE_V03_MEDIA_TYPE = exports.BUNDLE_V03_LEGACY_MEDIA_TYPE = exports.BUNDLE_V02_MEDIA_TYPE = exports.BUNDLE_V01_MEDIA_TYPE = void 0; +exports.BUNDLE_V03_MEDIA_TYPE = exports.BUNDLE_V03_LEGACY_MEDIA_TYPE = exports.BUNDLE_V02_MEDIA_TYPE = exports.BUNDLE_V01_MEDIA_TYPE = void 0; +exports.isBundleWithCertificateChain = isBundleWithCertificateChain; +exports.isBundleWithPublicKey = isBundleWithPublicKey; +exports.isBundleWithMessageSignature = isBundleWithMessageSignature; +exports.isBundleWithDsseEnvelope = isBundleWithDsseEnvelope; exports.BUNDLE_V01_MEDIA_TYPE = 'application/vnd.dev.sigstore.bundle+json;version=0.1'; exports.BUNDLE_V02_MEDIA_TYPE = 'application/vnd.dev.sigstore.bundle+json;version=0.2'; exports.BUNDLE_V03_LEGACY_MEDIA_TYPE = 'application/vnd.dev.sigstore.bundle+json;version=0.3'; @@ -9,16 +13,12 @@ exports.BUNDLE_V03_MEDIA_TYPE = 'application/vnd.dev.sigstore.bundle.v0.3+json'; function isBundleWithCertificateChain(b) { return b.verificationMaterial.content.$case === 'x509CertificateChain'; } -exports.isBundleWithCertificateChain = isBundleWithCertificateChain; function isBundleWithPublicKey(b) { return b.verificationMaterial.content.$case === 'publicKey'; } -exports.isBundleWithPublicKey = isBundleWithPublicKey; function isBundleWithMessageSignature(b) { return b.content.$case === 'messageSignature'; } -exports.isBundleWithMessageSignature = isBundleWithMessageSignature; function isBundleWithDsseEnvelope(b) { return b.content.$case === 'dsseEnvelope'; } -exports.isBundleWithDsseEnvelope = isBundleWithDsseEnvelope; diff --git a/deps/npm/node_modules/@sigstore/bundle/dist/error.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/bundle/dist/error.js similarity index 100% rename from deps/npm/node_modules/@sigstore/bundle/dist/error.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/bundle/dist/error.js diff --git 
a/deps/npm/node_modules/@sigstore/bundle/dist/index.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/bundle/dist/index.js similarity index 100% rename from deps/npm/node_modules/@sigstore/bundle/dist/index.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/bundle/dist/index.js diff --git a/deps/npm/node_modules/@sigstore/bundle/dist/serialized.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/bundle/dist/serialized.js similarity index 100% rename from deps/npm/node_modules/@sigstore/bundle/dist/serialized.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/bundle/dist/serialized.js diff --git a/deps/npm/node_modules/@sigstore/bundle/dist/utility.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/bundle/dist/utility.js similarity index 100% rename from deps/npm/node_modules/@sigstore/bundle/dist/utility.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/bundle/dist/utility.js diff --git a/deps/npm/node_modules/@sigstore/bundle/dist/validate.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/bundle/dist/validate.js similarity index 98% rename from deps/npm/node_modules/@sigstore/bundle/dist/validate.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/bundle/dist/validate.js index 67079cd1f680a9..21b8b5ee293ba1 100644 --- a/deps/npm/node_modules/@sigstore/bundle/dist/validate.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/bundle/dist/validate.js @@ -1,6 +1,10 @@ "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); -exports.assertBundleLatest = exports.assertBundleV02 = exports.isBundleV01 = exports.assertBundleV01 = exports.assertBundle = void 0; +exports.assertBundle = assertBundle; +exports.assertBundleV01 = assertBundleV01; +exports.isBundleV01 = isBundleV01; +exports.assertBundleV02 = assertBundleV02; +exports.assertBundleLatest = assertBundleLatest; /* Copyright 2023 The Sigstore Authors. @@ -27,7 +31,6 @@ function assertBundle(b) { throw new error_1.ValidationError('invalid bundle', invalidValues); } } -exports.assertBundle = assertBundle; // Asserts that the given bundle conforms to the v0.1 bundle format. function assertBundleV01(b) { const invalidValues = []; @@ -37,7 +40,6 @@ function assertBundleV01(b) { throw new error_1.ValidationError('invalid v0.1 bundle', invalidValues); } } -exports.assertBundleV01 = assertBundleV01; // Type guard to determine if Bundle is a v0.1 bundle. function isBundleV01(b) { try { @@ -48,7 +50,6 @@ function isBundleV01(b) { return false; } } -exports.isBundleV01 = isBundleV01; // Asserts that the given bundle conforms to the v0.2 bundle format. function assertBundleV02(b) { const invalidValues = []; @@ -58,7 +59,6 @@ function assertBundleV02(b) { throw new error_1.ValidationError('invalid v0.2 bundle', invalidValues); } } -exports.assertBundleV02 = assertBundleV02; // Asserts that the given bundle conforms to the newest (0.3) bundle format. 
function assertBundleLatest(b) { const invalidValues = []; @@ -69,7 +69,6 @@ function assertBundleLatest(b) { throw new error_1.ValidationError('invalid bundle', invalidValues); } } -exports.assertBundleLatest = assertBundleLatest; function validateBundleBase(b) { const invalidValues = []; // Media type validation @@ -192,6 +191,7 @@ function validateInclusionProof(b) { // Necessary for V03 and later bundles function validateNoCertificateChain(b) { const invalidValues = []; + /* istanbul ignore next */ if (b.verificationMaterial?.content?.$case === 'x509CertificateChain') { invalidValues.push('verificationMaterial.content.$case'); } diff --git a/deps/npm/node_modules/@sigstore/bundle/package.json b/deps/npm/node_modules/sigstore/node_modules/@sigstore/bundle/package.json similarity index 92% rename from deps/npm/node_modules/@sigstore/bundle/package.json rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/bundle/package.json index dd853897226d2f..ee5d2b92b801a5 100644 --- a/deps/npm/node_modules/@sigstore/bundle/package.json +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/bundle/package.json @@ -1,6 +1,6 @@ { "name": "@sigstore/bundle", - "version": "2.3.2", + "version": "3.0.0", "description": "Sigstore bundle type", "main": "dist/index.js", "types": "dist/index.d.ts", @@ -30,6 +30,6 @@ "@sigstore/protobuf-specs": "^0.3.2" }, "engines": { - "node": "^16.14.0 || >=18.0.0" + "node": "^18.17.0 || >=20.5.0" } } diff --git a/deps/npm/node_modules/@sigstore/core/LICENSE b/deps/npm/node_modules/sigstore/node_modules/@sigstore/core/LICENSE similarity index 100% rename from deps/npm/node_modules/@sigstore/core/LICENSE rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/core/LICENSE diff --git a/deps/npm/node_modules/@sigstore/core/dist/asn1/error.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/asn1/error.js similarity index 100% rename from deps/npm/node_modules/@sigstore/core/dist/asn1/error.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/asn1/error.js diff --git a/deps/npm/node_modules/@sigstore/core/dist/asn1/index.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/asn1/index.js similarity index 100% rename from deps/npm/node_modules/@sigstore/core/dist/asn1/index.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/asn1/index.js diff --git a/deps/npm/node_modules/@sigstore/core/dist/asn1/length.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/asn1/length.js similarity index 97% rename from deps/npm/node_modules/@sigstore/core/dist/asn1/length.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/asn1/length.js index 36fdaf5b9777fd..cb7ebf09dbefa4 100644 --- a/deps/npm/node_modules/@sigstore/core/dist/asn1/length.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/asn1/length.js @@ -15,7 +15,8 @@ See the License for the specific language governing permissions and limitations under the License. */ Object.defineProperty(exports, "__esModule", { value: true }); -exports.encodeLength = exports.decodeLength = void 0; +exports.decodeLength = decodeLength; +exports.encodeLength = encodeLength; const error_1 = require("./error"); // Decodes the length of a DER-encoded ANS.1 element from the supplied stream. 
// https://learn.microsoft.com/en-us/windows/win32/seccertenroll/about-encoded-length-and-value-bytes @@ -44,7 +45,6 @@ function decodeLength(stream) { } return len; } -exports.decodeLength = decodeLength; // Translates the supplied value to a DER-encoded length. function encodeLength(len) { if (len < 128) { @@ -60,4 +60,3 @@ function encodeLength(len) { } return Buffer.from([0x80 | bytes.length, ...bytes]); } -exports.encodeLength = encodeLength; diff --git a/deps/npm/node_modules/@sigstore/core/dist/asn1/obj.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/asn1/obj.js similarity index 100% rename from deps/npm/node_modules/@sigstore/core/dist/asn1/obj.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/asn1/obj.js diff --git a/deps/npm/node_modules/@sigstore/core/dist/asn1/parse.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/asn1/parse.js similarity index 96% rename from deps/npm/node_modules/@sigstore/core/dist/asn1/parse.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/asn1/parse.js index 482c7239e83162..7fbb42632c60e8 100644 --- a/deps/npm/node_modules/@sigstore/core/dist/asn1/parse.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/asn1/parse.js @@ -1,6 +1,11 @@ "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); -exports.parseBitString = exports.parseBoolean = exports.parseOID = exports.parseTime = exports.parseStringASCII = exports.parseInteger = void 0; +exports.parseInteger = parseInteger; +exports.parseStringASCII = parseStringASCII; +exports.parseTime = parseTime; +exports.parseOID = parseOID; +exports.parseBoolean = parseBoolean; +exports.parseBitString = parseBitString; /* Copyright 2023 The Sigstore Authors. 
@@ -43,13 +48,11 @@ function parseInteger(buf) { } return n; } -exports.parseInteger = parseInteger; // Parse an ASCII string from the DER-encoded buffer // https://learn.microsoft.com/en-us/windows/win32/seccertenroll/about-basic-types#boolean function parseStringASCII(buf) { return buf.toString('ascii'); } -exports.parseStringASCII = parseStringASCII; // Parse a Date from the DER-encoded buffer // https://www.rfc-editor.org/rfc/rfc5280#section-4.1.2.5.1 function parseTime(buf, shortYear) { @@ -70,7 +73,6 @@ function parseTime(buf, shortYear) { // Translate to ISO8601 format and parse return new Date(`${m[1]}-${m[2]}-${m[3]}T${m[4]}:${m[5]}:${m[6]}Z`); } -exports.parseTime = parseTime; // Parse an OID from the DER-encoded buffer // https://learn.microsoft.com/en-us/windows/win32/seccertenroll/about-object-identifier function parseOID(buf) { @@ -95,13 +97,11 @@ function parseOID(buf) { } return oid; } -exports.parseOID = parseOID; // Parse a boolean from the DER-encoded buffer // https://learn.microsoft.com/en-us/windows/win32/seccertenroll/about-basic-types#boolean function parseBoolean(buf) { return buf[0] !== 0; } -exports.parseBoolean = parseBoolean; // Parse a bit string from the DER-encoded buffer // https://learn.microsoft.com/en-us/windows/win32/seccertenroll/about-bit-string function parseBitString(buf) { @@ -122,4 +122,3 @@ function parseBitString(buf) { } return bits; } -exports.parseBitString = parseBitString; diff --git a/deps/npm/node_modules/@sigstore/core/dist/asn1/tag.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/asn1/tag.js similarity index 100% rename from deps/npm/node_modules/@sigstore/core/dist/asn1/tag.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/asn1/tag.js diff --git a/deps/npm/node_modules/@sigstore/core/dist/crypto.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/crypto.js similarity index 83% rename from deps/npm/node_modules/@sigstore/core/dist/crypto.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/crypto.js index dbe65b165d3574..296b5ba43e86a0 100644 --- a/deps/npm/node_modules/@sigstore/core/dist/crypto.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/crypto.js @@ -3,7 +3,10 @@ var __importDefault = (this && this.__importDefault) || function (mod) { return (mod && mod.__esModule) ? mod : { "default": mod }; }; Object.defineProperty(exports, "__esModule", { value: true }); -exports.bufferEqual = exports.verify = exports.hash = exports.digest = exports.createPublicKey = void 0; +exports.createPublicKey = createPublicKey; +exports.digest = digest; +exports.verify = verify; +exports.bufferEqual = bufferEqual; /* Copyright 2023 The Sigstore Authors. @@ -20,7 +23,6 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ const crypto_1 = __importDefault(require("crypto")); -const SHA256_ALGORITHM = 'sha256'; function createPublicKey(key, type = 'spki') { if (typeof key === 'string') { return crypto_1.default.createPublicKey(key); @@ -29,7 +31,6 @@ function createPublicKey(key, type = 'spki') { return crypto_1.default.createPublicKey({ key, format: 'der', type: type }); } } -exports.createPublicKey = createPublicKey; function digest(algorithm, ...data) { const hash = crypto_1.default.createHash(algorithm); for (const d of data) { @@ -37,16 +38,6 @@ function digest(algorithm, ...data) { } return hash.digest(); } -exports.digest = digest; -// TODO: deprecate this in favor of digest() -function hash(...data) { - const hash = crypto_1.default.createHash(SHA256_ALGORITHM); - for (const d of data) { - hash.update(d); - } - return hash.digest(); -} -exports.hash = hash; function verify(data, key, signature, algorithm) { // The try/catch is to work around an issue in Node 14.x where verify throws // an error in some scenarios if the signature is invalid. @@ -58,7 +49,6 @@ function verify(data, key, signature, algorithm) { return false; } } -exports.verify = verify; function bufferEqual(a, b) { try { return crypto_1.default.timingSafeEqual(a, b); @@ -68,4 +58,3 @@ function bufferEqual(a, b) { return false; } } -exports.bufferEqual = bufferEqual; diff --git a/deps/npm/node_modules/@sigstore/core/dist/dsse.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/dsse.js similarity index 96% rename from deps/npm/node_modules/@sigstore/core/dist/dsse.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/dsse.js index a78783c919a256..ca7b63630e2ba9 100644 --- a/deps/npm/node_modules/@sigstore/core/dist/dsse.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/dsse.js @@ -1,6 +1,6 @@ "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); -exports.preAuthEncoding = void 0; +exports.preAuthEncoding = preAuthEncoding; /* Copyright 2023 The Sigstore Authors. @@ -28,4 +28,3 @@ function preAuthEncoding(payloadType, payload) { ].join(' '); return Buffer.concat([Buffer.from(prefix, 'ascii'), payload]); } -exports.preAuthEncoding = preAuthEncoding; diff --git a/deps/npm/node_modules/@sigstore/core/dist/encoding.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/encoding.js similarity index 94% rename from deps/npm/node_modules/@sigstore/core/dist/encoding.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/encoding.js index b020ac4d6ecd42..7113af66db4c2d 100644 --- a/deps/npm/node_modules/@sigstore/core/dist/encoding.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/encoding.js @@ -1,6 +1,7 @@ "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); -exports.base64Decode = exports.base64Encode = void 0; +exports.base64Encode = base64Encode; +exports.base64Decode = base64Decode; /* Copyright 2023 The Sigstore Authors. 
@@ -21,8 +22,6 @@ const UTF8_ENCODING = 'utf-8'; function base64Encode(str) { return Buffer.from(str, UTF8_ENCODING).toString(BASE64_ENCODING); } -exports.base64Encode = base64Encode; function base64Decode(str) { return Buffer.from(str, BASE64_ENCODING).toString(UTF8_ENCODING); } -exports.base64Decode = base64Decode; diff --git a/deps/npm/node_modules/@sigstore/core/dist/index.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/index.js similarity index 100% rename from deps/npm/node_modules/@sigstore/core/dist/index.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/index.js diff --git a/deps/npm/node_modules/@sigstore/core/dist/json.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/json.js similarity index 98% rename from deps/npm/node_modules/@sigstore/core/dist/json.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/json.js index a50df7233c7c58..7808d033b98cc9 100644 --- a/deps/npm/node_modules/@sigstore/core/dist/json.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/json.js @@ -15,7 +15,7 @@ See the License for the specific language governing permissions and limitations under the License. */ Object.defineProperty(exports, "__esModule", { value: true }); -exports.canonicalize = void 0; +exports.canonicalize = canonicalize; // JSON canonicalization per https://github.com/cyberphone/json-canonicalization // eslint-disable-next-line @typescript-eslint/no-explicit-any function canonicalize(object) { @@ -58,4 +58,3 @@ function canonicalize(object) { } return buffer; } -exports.canonicalize = canonicalize; diff --git a/deps/npm/node_modules/@sigstore/core/dist/oid.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/oid.js similarity index 100% rename from deps/npm/node_modules/@sigstore/core/dist/oid.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/oid.js diff --git a/deps/npm/node_modules/@sigstore/core/dist/pem.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/pem.js similarity index 97% rename from deps/npm/node_modules/@sigstore/core/dist/pem.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/pem.js index f35bc3835bbd10..f1241d28d586ec 100644 --- a/deps/npm/node_modules/@sigstore/core/dist/pem.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/pem.js @@ -1,6 +1,7 @@ "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); -exports.fromDER = exports.toDER = void 0; +exports.toDER = toDER; +exports.fromDER = fromDER; /* Copyright 2023 The Sigstore Authors. @@ -28,7 +29,6 @@ function toDER(certificate) { }); return Buffer.from(der, 'base64'); } -exports.toDER = toDER; // Translates a DER-encoded buffer into a PEM-encoded string. Standard PEM // encoding dictates that each certificate should have a trailing newline after // the footer. 
@@ -41,4 +41,3 @@ function fromDER(certificate, type = 'CERTIFICATE') { .join('\n') .concat('\n'); } -exports.fromDER = fromDER; diff --git a/deps/npm/node_modules/@sigstore/core/dist/rfc3161/error.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/rfc3161/error.js similarity index 100% rename from deps/npm/node_modules/@sigstore/core/dist/rfc3161/error.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/rfc3161/error.js diff --git a/deps/npm/node_modules/@sigstore/core/dist/rfc3161/index.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/rfc3161/index.js similarity index 100% rename from deps/npm/node_modules/@sigstore/core/dist/rfc3161/index.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/rfc3161/index.js diff --git a/deps/npm/node_modules/@sigstore/core/dist/rfc3161/timestamp.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/rfc3161/timestamp.js similarity index 100% rename from deps/npm/node_modules/@sigstore/core/dist/rfc3161/timestamp.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/rfc3161/timestamp.js diff --git a/deps/npm/node_modules/@sigstore/core/dist/rfc3161/tstinfo.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/rfc3161/tstinfo.js similarity index 100% rename from deps/npm/node_modules/@sigstore/core/dist/rfc3161/tstinfo.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/rfc3161/tstinfo.js diff --git a/deps/npm/node_modules/@sigstore/core/dist/stream.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/stream.js similarity index 100% rename from deps/npm/node_modules/@sigstore/core/dist/stream.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/stream.js diff --git a/deps/npm/node_modules/@sigstore/core/dist/x509/cert.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/x509/cert.js similarity index 96% rename from deps/npm/node_modules/@sigstore/core/dist/x509/cert.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/x509/cert.js index 16c0c40d858d8a..72ea8e0738bc83 100644 --- a/deps/npm/node_modules/@sigstore/core/dist/x509/cert.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/x509/cert.js @@ -97,13 +97,15 @@ class X509Certificate { } get subjectAltName() { const ext = this.extSubjectAltName; - return ext?.uri || ext?.rfc822Name; + return ext?.uri || /* istanbul ignore next */ ext?.rfc822Name; } get extensions() { // The extension list is the first (and only) element of the extensions // context specific tag + /* istanbul ignore next */ const extSeq = this.extensionsObj?.subs[0]; - return extSeq?.subs || /* istanbul ignore next */ []; + /* istanbul ignore next */ + return extSeq?.subs || []; } get extKeyUsage() { const ext = this.findExtension(EXTENSION_OID_KEY_USAGE); @@ -135,8 +137,10 @@ class X509Certificate { const ca = this.extBasicConstraints?.isCA || false; // If the KeyUsage extension is present, keyCertSign must be set if (this.extKeyUsage) { - ca && this.extKeyUsage.keyCertSign; + return ca && this.extKeyUsage.keyCertSign; } + // TODO: test coverage for this case + /* istanbul ignore next */ return ca; } extension(oid) { diff --git a/deps/npm/node_modules/@sigstore/core/dist/x509/ext.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/x509/ext.js similarity index 100% rename from deps/npm/node_modules/@sigstore/core/dist/x509/ext.js rename to 
deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/x509/ext.js diff --git a/deps/npm/node_modules/@sigstore/core/dist/x509/index.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/x509/index.js similarity index 100% rename from deps/npm/node_modules/@sigstore/core/dist/x509/index.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/x509/index.js diff --git a/deps/npm/node_modules/@sigstore/core/dist/x509/sct.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/x509/sct.js similarity index 100% rename from deps/npm/node_modules/@sigstore/core/dist/x509/sct.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/core/dist/x509/sct.js diff --git a/deps/npm/node_modules/@sigstore/core/package.json b/deps/npm/node_modules/sigstore/node_modules/@sigstore/core/package.json similarity index 92% rename from deps/npm/node_modules/@sigstore/core/package.json rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/core/package.json index 621ff1715bcd1c..af5dd281ac90e4 100644 --- a/deps/npm/node_modules/@sigstore/core/package.json +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/core/package.json @@ -1,6 +1,6 @@ { "name": "@sigstore/core", - "version": "1.1.0", + "version": "2.0.0", "description": "Base library for Sigstore", "main": "dist/index.js", "types": "dist/index.d.ts", @@ -26,6 +26,6 @@ "provenance": true }, "engines": { - "node": "^16.14.0 || >=18.0.0" + "node": "^18.17.0 || >=20.5.0" } } diff --git a/deps/npm/node_modules/@sigstore/sign/LICENSE b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/LICENSE similarity index 100% rename from deps/npm/node_modules/@sigstore/sign/LICENSE rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/LICENSE diff --git a/deps/npm/node_modules/@sigstore/sign/dist/bundler/base.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/bundler/base.js similarity index 100% rename from deps/npm/node_modules/@sigstore/sign/dist/bundler/base.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/bundler/base.js diff --git a/deps/npm/node_modules/@sigstore/sign/dist/bundler/bundle.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/bundler/bundle.js similarity index 93% rename from deps/npm/node_modules/@sigstore/sign/dist/bundler/bundle.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/bundler/bundle.js index 7c2ca9164f0dfe..ed32286ad88efd 100644 --- a/deps/npm/node_modules/@sigstore/sign/dist/bundler/bundle.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/bundler/bundle.js @@ -23,7 +23,8 @@ var __importStar = (this && this.__importStar) || function (mod) { return result; }; Object.defineProperty(exports, "__esModule", { value: true }); -exports.toDSSEBundle = exports.toMessageSignatureBundle = void 0; +exports.toMessageSignatureBundle = toMessageSignatureBundle; +exports.toDSSEBundle = toDSSEBundle; /* Copyright 2023 The Sigstore Authors. @@ -44,7 +45,7 @@ const util_1 = require("../util"); // Helper functions for assembling the parts of a Sigstore bundle // Message signature bundle - $case: 'messageSignature' function toMessageSignatureBundle(artifact, signature) { - const digest = util_1.crypto.hash(artifact.data); + const digest = util_1.crypto.digest('sha256', artifact.data); return sigstore.toMessageSignatureBundle({ digest, signature: signature.signature, @@ -52,11 +53,11 @@ function toMessageSignatureBundle(artifact, signature) { ? 
util_1.pem.toDER(signature.key.certificate) : undefined, keyHint: signature.key.$case === 'publicKey' ? signature.key.hint : undefined, + certificateChain: true, }); } -exports.toMessageSignatureBundle = toMessageSignatureBundle; // DSSE envelope bundle - $case: 'dsseEnvelope' -function toDSSEBundle(artifact, signature, singleCertificate) { +function toDSSEBundle(artifact, signature, certificateChain) { return sigstore.toDSSEBundle({ artifact: artifact.data, artifactType: artifact.type, @@ -65,7 +66,6 @@ function toDSSEBundle(artifact, signature, singleCertificate) { ? util_1.pem.toDER(signature.key.certificate) : undefined, keyHint: signature.key.$case === 'publicKey' ? signature.key.hint : undefined, - singleCertificate, + certificateChain, }); } -exports.toDSSEBundle = toDSSEBundle; diff --git a/deps/npm/node_modules/@sigstore/sign/dist/bundler/dsse.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/bundler/dsse.js similarity index 93% rename from deps/npm/node_modules/@sigstore/sign/dist/bundler/dsse.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/bundler/dsse.js index 621700df93842a..86046ba8f3013b 100644 --- a/deps/npm/node_modules/@sigstore/sign/dist/bundler/dsse.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/bundler/dsse.js @@ -23,7 +23,7 @@ const bundle_1 = require("./bundle"); class DSSEBundleBuilder extends base_1.BaseBundleBuilder { constructor(options) { super(options); - this.singleCertificate = options.singleCertificate ?? false; + this.certificateChain = options.certificateChain ?? false; } // DSSE requires the artifact to be pre-encoded with the payload type // before the signature is generated. @@ -33,7 +33,7 @@ class DSSEBundleBuilder extends base_1.BaseBundleBuilder { } // Packages the artifact and signature into a DSSE bundle async package(artifact, signature) { - return (0, bundle_1.toDSSEBundle)(artifactDefaults(artifact), signature, this.singleCertificate); + return (0, bundle_1.toDSSEBundle)(artifactDefaults(artifact), signature, this.certificateChain); } } exports.DSSEBundleBuilder = DSSEBundleBuilder; diff --git a/deps/npm/node_modules/@sigstore/sign/dist/bundler/index.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/bundler/index.js similarity index 100% rename from deps/npm/node_modules/@sigstore/sign/dist/bundler/index.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/bundler/index.js diff --git a/deps/npm/node_modules/@sigstore/sign/dist/bundler/message.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/bundler/message.js similarity index 100% rename from deps/npm/node_modules/@sigstore/sign/dist/bundler/message.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/bundler/message.js diff --git a/deps/npm/node_modules/@sigstore/sign/dist/error.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/error.js similarity index 95% rename from deps/npm/node_modules/@sigstore/sign/dist/error.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/error.js index d57e4567fb89ee..d28f1913cc77e9 100644 --- a/deps/npm/node_modules/@sigstore/sign/dist/error.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/error.js @@ -15,7 +15,8 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ Object.defineProperty(exports, "__esModule", { value: true }); -exports.internalError = exports.InternalError = void 0; +exports.InternalError = void 0; +exports.internalError = internalError; const error_1 = require("./external/error"); class InternalError extends Error { constructor({ code, message, cause, }) { @@ -36,4 +37,3 @@ function internalError(err, code, message) { cause: err, }); } -exports.internalError = internalError; diff --git a/deps/npm/node_modules/@sigstore/sign/dist/external/error.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/external/error.js similarity index 100% rename from deps/npm/node_modules/@sigstore/sign/dist/external/error.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/external/error.js diff --git a/deps/npm/node_modules/@sigstore/sign/dist/external/fetch.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/external/fetch.js similarity index 95% rename from deps/npm/node_modules/@sigstore/sign/dist/external/fetch.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/external/fetch.js index b2d81bde7be16f..116090f3c641ef 100644 --- a/deps/npm/node_modules/@sigstore/sign/dist/external/fetch.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/external/fetch.js @@ -3,7 +3,7 @@ var __importDefault = (this && this.__importDefault) || function (mod) { return (mod && mod.__esModule) ? mod : { "default": mod }; }; Object.defineProperty(exports, "__esModule", { value: true }); -exports.fetchWithRetry = void 0; +exports.fetchWithRetry = fetchWithRetry; /* Copyright 2023 The Sigstore Authors. @@ -58,14 +58,13 @@ async function fetchWithRetry(url, options) { } }, retryOpts(options.retry)); } -exports.fetchWithRetry = fetchWithRetry; // Translate a Response into an HTTPError instance. This will attempt to parse // the response body for a message, but will default to the statusText if none // is found. 
const errorFromResponse = async (response) => { let message = response.statusText; - const location = response.headers?.get(HTTP2_HEADER_LOCATION) || undefined; - const contentType = response.headers?.get(HTTP2_HEADER_CONTENT_TYPE); + const location = response.headers.get(HTTP2_HEADER_LOCATION) || undefined; + const contentType = response.headers.get(HTTP2_HEADER_CONTENT_TYPE); // If response type is JSON, try to parse the body for a message if (contentType?.includes('application/json')) { try { diff --git a/deps/npm/node_modules/@sigstore/sign/dist/external/fulcio.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/external/fulcio.js similarity index 100% rename from deps/npm/node_modules/@sigstore/sign/dist/external/fulcio.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/external/fulcio.js diff --git a/deps/npm/node_modules/@sigstore/sign/dist/external/rekor.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/external/rekor.js similarity index 100% rename from deps/npm/node_modules/@sigstore/sign/dist/external/rekor.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/external/rekor.js diff --git a/deps/npm/node_modules/@sigstore/sign/dist/external/tsa.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/external/tsa.js similarity index 100% rename from deps/npm/node_modules/@sigstore/sign/dist/external/tsa.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/external/tsa.js diff --git a/deps/npm/node_modules/@sigstore/sign/dist/identity/ci.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/identity/ci.js similarity index 100% rename from deps/npm/node_modules/@sigstore/sign/dist/identity/ci.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/identity/ci.js diff --git a/deps/npm/node_modules/@sigstore/sign/dist/identity/index.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/identity/index.js similarity index 100% rename from deps/npm/node_modules/@sigstore/sign/dist/identity/index.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/identity/index.js diff --git a/deps/npm/node_modules/@sigstore/sign/dist/identity/provider.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/identity/provider.js similarity index 100% rename from deps/npm/node_modules/@sigstore/sign/dist/identity/provider.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/identity/provider.js diff --git a/deps/npm/node_modules/@sigstore/sign/dist/index.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/index.js similarity index 100% rename from deps/npm/node_modules/@sigstore/sign/dist/index.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/index.js diff --git a/deps/npm/node_modules/@sigstore/sign/dist/signer/fulcio/ca.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/signer/fulcio/ca.js similarity index 96% rename from deps/npm/node_modules/@sigstore/sign/dist/signer/fulcio/ca.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/signer/fulcio/ca.js index 81b421eabadb2e..f01703cfab5645 100644 --- a/deps/npm/node_modules/@sigstore/sign/dist/signer/fulcio/ca.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/signer/fulcio/ca.js @@ -35,7 +35,6 @@ class CAClient { const cert = resp.signedCertificateEmbeddedSct ? 
resp.signedCertificateEmbeddedSct : resp.signedCertificateDetachedSct; - // eslint-disable-next-line @typescript-eslint/no-non-null-assertion return cert.chain.certificates; } catch (err) { diff --git a/deps/npm/node_modules/@sigstore/sign/dist/signer/fulcio/ephemeral.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/signer/fulcio/ephemeral.js similarity index 100% rename from deps/npm/node_modules/@sigstore/sign/dist/signer/fulcio/ephemeral.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/signer/fulcio/ephemeral.js diff --git a/deps/npm/node_modules/@sigstore/sign/dist/signer/fulcio/index.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/signer/fulcio/index.js similarity index 100% rename from deps/npm/node_modules/@sigstore/sign/dist/signer/fulcio/index.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/signer/fulcio/index.js diff --git a/deps/npm/node_modules/@sigstore/sign/dist/signer/index.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/signer/index.js similarity index 100% rename from deps/npm/node_modules/@sigstore/sign/dist/signer/index.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/signer/index.js diff --git a/deps/npm/node_modules/@sigstore/sign/dist/signer/signer.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/signer/signer.js similarity index 100% rename from deps/npm/node_modules/@sigstore/sign/dist/signer/signer.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/signer/signer.js diff --git a/deps/npm/node_modules/@sigstore/sign/dist/types/fetch.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/types/fetch.js similarity index 100% rename from deps/npm/node_modules/@sigstore/sign/dist/types/fetch.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/types/fetch.js diff --git a/deps/npm/node_modules/@sigstore/sign/dist/util/index.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/util/index.js similarity index 100% rename from deps/npm/node_modules/@sigstore/sign/dist/util/index.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/util/index.js diff --git a/deps/npm/node_modules/@sigstore/sign/dist/util/oidc.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/util/oidc.js similarity index 96% rename from deps/npm/node_modules/@sigstore/sign/dist/util/oidc.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/util/oidc.js index 2f5947d7b6b878..37c5b168ee12e6 100644 --- a/deps/npm/node_modules/@sigstore/sign/dist/util/oidc.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/util/oidc.js @@ -1,6 +1,6 @@ "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); -exports.extractJWTSubject = void 0; +exports.extractJWTSubject = extractJWTSubject; /* Copyright 2023 The Sigstore Authors. 
@@ -28,4 +28,3 @@ function extractJWTSubject(jwt) { return payload.sub; } } -exports.extractJWTSubject = extractJWTSubject; diff --git a/deps/npm/node_modules/@sigstore/sign/dist/util/ua.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/util/ua.js similarity index 95% rename from deps/npm/node_modules/@sigstore/sign/dist/util/ua.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/util/ua.js index c142330eb8338c..b15ff2070fb9fc 100644 --- a/deps/npm/node_modules/@sigstore/sign/dist/util/ua.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/util/ua.js @@ -23,7 +23,6 @@ const os_1 = __importDefault(require("os")); // Format User-Agent: / () // source: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/User-Agent const getUserAgent = () => { - // eslint-disable-next-line @typescript-eslint/no-var-requires const packageVersion = require('../../package.json').version; const nodeVersion = process.version; const platformName = os_1.default.platform(); diff --git a/deps/npm/node_modules/@sigstore/sign/dist/witness/index.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/witness/index.js similarity index 100% rename from deps/npm/node_modules/@sigstore/sign/dist/witness/index.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/witness/index.js diff --git a/deps/npm/node_modules/@sigstore/sign/dist/witness/tlog/client.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/witness/tlog/client.js similarity index 100% rename from deps/npm/node_modules/@sigstore/sign/dist/witness/tlog/client.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/witness/tlog/client.js diff --git a/deps/npm/node_modules/@sigstore/sign/dist/witness/tlog/entry.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/witness/tlog/entry.js similarity index 87% rename from deps/npm/node_modules/@sigstore/sign/dist/witness/tlog/entry.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/witness/tlog/entry.js index f6c165380ba45d..69a3b477e54429 100644 --- a/deps/npm/node_modules/@sigstore/sign/dist/witness/tlog/entry.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/witness/tlog/entry.js @@ -1,6 +1,6 @@ "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); -exports.toProposedEntry = void 0; +exports.toProposedEntry = toProposedEntry; /* Copyright 2023 The Sigstore Authors. @@ -18,21 +18,21 @@ limitations under the License. 
*/ const bundle_1 = require("@sigstore/bundle"); const util_1 = require("../../util"); +const SHA256_ALGORITHM = 'sha256'; function toProposedEntry(content, publicKey, // TODO: Remove this parameter once have completely switched to 'dsse' entries -entryType = 'intoto') { +entryType = 'dsse') { switch (content.$case) { case 'dsseEnvelope': - // TODO: Remove this conditional once have completely switched to 'dsse' entries - if (entryType === 'dsse') { - return toProposedDSSEEntry(content.dsseEnvelope, publicKey); + // TODO: Remove this conditional once have completely ditched "intoto" entries + if (entryType === 'intoto') { + return toProposedIntotoEntry(content.dsseEnvelope, publicKey); } - return toProposedIntotoEntry(content.dsseEnvelope, publicKey); + return toProposedDSSEEntry(content.dsseEnvelope, publicKey); case 'messageSignature': return toProposedHashedRekordEntry(content.messageSignature, publicKey); } } -exports.toProposedEntry = toProposedEntry; // Returns a properly formatted Rekor "hashedrekord" entry for the given digest // and signature function toProposedHashedRekordEntry(messageSignature, publicKey) { @@ -45,7 +45,7 @@ function toProposedHashedRekordEntry(messageSignature, publicKey) { spec: { data: { hash: { - algorithm: 'sha256', + algorithm: SHA256_ALGORITHM, value: hexDigest, }, }, @@ -78,7 +78,9 @@ function toProposedDSSEEntry(envelope, publicKey) { // envelope and signature function toProposedIntotoEntry(envelope, publicKey) { // Calculate the value for the payloadHash field in the Rekor entry - const payloadHash = util_1.crypto.hash(envelope.payload).toString('hex'); + const payloadHash = util_1.crypto + .digest(SHA256_ALGORITHM, envelope.payload) + .toString('hex'); // Calculate the value for the hash field in the Rekor entry const envelopeHash = calculateDSSEHash(envelope, publicKey); // Collect values for re-creating the DSSE envelope. 
@@ -107,8 +109,8 @@ function toProposedIntotoEntry(envelope, publicKey) { spec: { content: { envelope: dsse, - hash: { algorithm: 'sha256', value: envelopeHash }, - payloadHash: { algorithm: 'sha256', value: payloadHash }, + hash: { algorithm: SHA256_ALGORITHM, value: envelopeHash }, + payloadHash: { algorithm: SHA256_ALGORITHM, value: payloadHash }, }, }, }; @@ -132,5 +134,7 @@ function calculateDSSEHash(envelope, publicKey) { if (envelope.signatures[0].keyid.length > 0) { dsse.signatures[0].keyid = envelope.signatures[0].keyid; } - return util_1.crypto.hash(util_1.json.canonicalize(dsse)).toString('hex'); + return util_1.crypto + .digest(SHA256_ALGORITHM, util_1.json.canonicalize(dsse)) + .toString('hex'); } diff --git a/deps/npm/node_modules/@sigstore/sign/dist/witness/tlog/index.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/witness/tlog/index.js similarity index 100% rename from deps/npm/node_modules/@sigstore/sign/dist/witness/tlog/index.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/witness/tlog/index.js diff --git a/deps/npm/node_modules/@sigstore/sign/dist/witness/tsa/client.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/witness/tsa/client.js similarity index 86% rename from deps/npm/node_modules/@sigstore/sign/dist/witness/tsa/client.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/witness/tsa/client.js index a334deb00b7756..754de3748dbb36 100644 --- a/deps/npm/node_modules/@sigstore/sign/dist/witness/tsa/client.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/witness/tsa/client.js @@ -19,6 +19,7 @@ limitations under the License. const error_1 = require("../../error"); const tsa_1 = require("../../external/tsa"); const util_1 = require("../../util"); +const SHA256_ALGORITHM = 'sha256'; class TSAClient { constructor(options) { this.tsa = new tsa_1.TimestampAuthority({ @@ -29,8 +30,10 @@ class TSAClient { } async createTimestamp(signature) { const request = { - artifactHash: util_1.crypto.hash(signature).toString('base64'), - hashAlgorithm: 'sha256', + artifactHash: util_1.crypto + .digest(SHA256_ALGORITHM, signature) + .toString('base64'), + hashAlgorithm: SHA256_ALGORITHM, }; try { return await this.tsa.createTimestamp(request); diff --git a/deps/npm/node_modules/@sigstore/sign/dist/witness/tsa/index.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/witness/tsa/index.js similarity index 100% rename from deps/npm/node_modules/@sigstore/sign/dist/witness/tsa/index.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/witness/tsa/index.js diff --git a/deps/npm/node_modules/@sigstore/sign/dist/witness/witness.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/witness/witness.js similarity index 100% rename from deps/npm/node_modules/@sigstore/sign/dist/witness/witness.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/dist/witness/witness.js diff --git a/deps/npm/node_modules/@sigstore/sign/package.json b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/package.json similarity index 78% rename from deps/npm/node_modules/@sigstore/sign/package.json rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/package.json index 4adb3d24c6fa68..fe05e8dc2d73ad 100644 --- a/deps/npm/node_modules/@sigstore/sign/package.json +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/sign/package.json @@ -1,6 +1,6 @@ { "name": "@sigstore/sign", - "version": "2.3.2", + 
"version": "3.0.0", "description": "Sigstore signing library", "main": "dist/index.js", "types": "dist/index.d.ts", @@ -27,20 +27,20 @@ }, "devDependencies": { "@sigstore/jest": "^0.0.0", - "@sigstore/mock": "^0.7.4", - "@sigstore/rekor-types": "^2.0.0", + "@sigstore/mock": "^0.8.0", + "@sigstore/rekor-types": "^3.0.0", "@types/make-fetch-happen": "^10.0.4", "@types/promise-retry": "^1.1.6" }, "dependencies": { - "@sigstore/bundle": "^2.3.2", - "@sigstore/core": "^1.0.0", + "@sigstore/bundle": "^3.0.0", + "@sigstore/core": "^2.0.0", "@sigstore/protobuf-specs": "^0.3.2", - "make-fetch-happen": "^13.0.1", - "proc-log": "^4.2.0", + "make-fetch-happen": "^14.0.1", + "proc-log": "^5.0.0", "promise-retry": "^2.0.1" }, "engines": { - "node": "^16.14.0 || >=18.0.0" + "node": "^18.17.0 || >=20.5.0" } } diff --git a/deps/npm/node_modules/@sigstore/verify/dist/bundle/dsse.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/bundle/dsse.js similarity index 93% rename from deps/npm/node_modules/@sigstore/verify/dist/bundle/dsse.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/bundle/dsse.js index 193f875fd1014e..1033fc422aba09 100644 --- a/deps/npm/node_modules/@sigstore/verify/dist/bundle/dsse.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/bundle/dsse.js @@ -22,7 +22,7 @@ class DSSESignatureContent { this.env = env; } compareDigest(digest) { - return core_1.crypto.bufferEqual(digest, core_1.crypto.hash(this.env.payload)); + return core_1.crypto.bufferEqual(digest, core_1.crypto.digest('sha256', this.env.payload)); } compareSignature(signature) { return core_1.crypto.bufferEqual(signature, this.signature); diff --git a/deps/npm/node_modules/@sigstore/verify/dist/bundle/index.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/bundle/index.js similarity index 97% rename from deps/npm/node_modules/@sigstore/verify/dist/bundle/index.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/bundle/index.js index 63f8d4c4998811..4287d8032b75f0 100644 --- a/deps/npm/node_modules/@sigstore/verify/dist/bundle/index.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/bundle/index.js @@ -1,6 +1,7 @@ "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); -exports.signatureContent = exports.toSignedEntity = void 0; +exports.toSignedEntity = toSignedEntity; +exports.signatureContent = signatureContent; const core_1 = require("@sigstore/core"); const dsse_1 = require("./dsse"); const message_1 = require("./message"); @@ -26,7 +27,6 @@ function toSignedEntity(bundle, artifact) { timestamps, }; } -exports.toSignedEntity = toSignedEntity; function signatureContent(bundle, artifact) { switch (bundle.content.$case) { case 'dsseEnvelope': @@ -35,7 +35,6 @@ function signatureContent(bundle, artifact) { return new message_1.MessageSignatureContent(bundle.content.messageSignature, artifact); } } -exports.signatureContent = signatureContent; function key(bundle) { switch (bundle.verificationMaterial.content.$case) { case 'publicKey': diff --git a/deps/npm/node_modules/@sigstore/verify/dist/bundle/message.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/bundle/message.js similarity index 100% rename from deps/npm/node_modules/@sigstore/verify/dist/bundle/message.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/bundle/message.js diff --git a/deps/npm/node_modules/@sigstore/verify/dist/error.js 
b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/error.js similarity index 100% rename from deps/npm/node_modules/@sigstore/verify/dist/error.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/error.js diff --git a/deps/npm/node_modules/@sigstore/verify/dist/index.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/index.js similarity index 100% rename from deps/npm/node_modules/@sigstore/verify/dist/index.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/index.js diff --git a/deps/npm/node_modules/@sigstore/verify/dist/key/certificate.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/key/certificate.js similarity index 99% rename from deps/npm/node_modules/@sigstore/verify/dist/key/certificate.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/key/certificate.js index c9140dd98d58a6..a916de0e51e712 100644 --- a/deps/npm/node_modules/@sigstore/verify/dist/key/certificate.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/key/certificate.js @@ -1,6 +1,7 @@ "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); -exports.CertificateChainVerifier = exports.verifyCertificateChain = void 0; +exports.CertificateChainVerifier = void 0; +exports.verifyCertificateChain = verifyCertificateChain; const error_1 = require("../error"); const trust_1 = require("../trust"); function verifyCertificateChain(leaf, certificateAuthorities) { @@ -32,7 +33,6 @@ function verifyCertificateChain(leaf, certificateAuthorities) { cause: error, }); } -exports.verifyCertificateChain = verifyCertificateChain; class CertificateChainVerifier { constructor(opts) { this.untrustedCert = opts.untrustedCert; diff --git a/deps/npm/node_modules/@sigstore/verify/dist/key/index.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/key/index.js similarity index 97% rename from deps/npm/node_modules/@sigstore/verify/dist/key/index.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/key/index.js index 682a306803a991..cc894aab95a5d5 100644 --- a/deps/npm/node_modules/@sigstore/verify/dist/key/index.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/key/index.js @@ -1,6 +1,7 @@ "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); -exports.verifyCertificate = exports.verifyPublicKey = void 0; +exports.verifyPublicKey = verifyPublicKey; +exports.verifyCertificate = verifyCertificate; /* Copyright 2023 The Sigstore Authors. 
@@ -34,7 +35,6 @@ function verifyPublicKey(hint, timestamps, trustMaterial) { }); return { key: key.publicKey }; } -exports.verifyPublicKey = verifyPublicKey; function verifyCertificate(leaf, timestamps, trustMaterial) { // Check that leaf certificate chains to a trusted CA const path = (0, certificate_1.verifyCertificateChain)(leaf, trustMaterial.certificateAuthorities); @@ -51,10 +51,10 @@ function verifyCertificate(leaf, timestamps, trustMaterial) { signer: getSigner(path[0]), }; } -exports.verifyCertificate = verifyCertificate; function getSigner(cert) { let issuer; const issuerExtension = cert.extension(OID_FULCIO_ISSUER_V2); + /* istanbul ignore next */ if (issuerExtension) { issuer = issuerExtension.valueObj.subs?.[0]?.value.toString('ascii'); } diff --git a/deps/npm/node_modules/@sigstore/verify/dist/key/sct.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/key/sct.js similarity index 97% rename from deps/npm/node_modules/@sigstore/verify/dist/key/sct.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/key/sct.js index aea412840e1039..8eca48738096ee 100644 --- a/deps/npm/node_modules/@sigstore/verify/dist/key/sct.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/key/sct.js @@ -1,6 +1,6 @@ "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); -exports.verifySCTs = void 0; +exports.verifySCTs = verifySCTs; /* Copyright 2023 The Sigstore Authors. @@ -52,7 +52,7 @@ function verifySCTs(cert, issuer, ctlogs) { // https://www.rfc-editor.org/rfc/rfc6962#section-3.2 const preCert = new core_1.ByteStream(); // Calculate hash of the issuer's public key - const issuerId = core_1.crypto.hash(issuer.publicKey); + const issuerId = core_1.crypto.digest('sha256', issuer.publicKey); preCert.appendView(issuerId); // Re-encodes the certificate to DER after removing the SCT extension const tbs = clone.tbsCertificate.toDER(); @@ -76,4 +76,3 @@ function verifySCTs(cert, issuer, ctlogs) { return sct.logID; }); } -exports.verifySCTs = verifySCTs; diff --git a/deps/npm/node_modules/@sigstore/verify/dist/policy.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/policy.js similarity index 93% rename from deps/npm/node_modules/@sigstore/verify/dist/policy.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/policy.js index 731e5c83328475..f5960cf047b84b 100644 --- a/deps/npm/node_modules/@sigstore/verify/dist/policy.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/policy.js @@ -1,6 +1,7 @@ "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); -exports.verifyExtensions = exports.verifySubjectAlternativeName = void 0; +exports.verifySubjectAlternativeName = verifySubjectAlternativeName; +exports.verifyExtensions = verifyExtensions; const error_1 = require("./error"); function verifySubjectAlternativeName(policyIdentity, signerIdentity) { if (signerIdentity === undefined || !signerIdentity.match(policyIdentity)) { @@ -10,7 +11,6 @@ function verifySubjectAlternativeName(policyIdentity, signerIdentity) { }); } } -exports.verifySubjectAlternativeName = verifySubjectAlternativeName; function verifyExtensions(policyExtensions, signerExtensions = {}) { let key; for (key in policyExtensions) { @@ -22,4 +22,3 @@ function verifyExtensions(policyExtensions, signerExtensions = {}) { } } } -exports.verifyExtensions = verifyExtensions; diff --git a/deps/npm/node_modules/@sigstore/verify/dist/shared.types.js 
b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/shared.types.js similarity index 100% rename from deps/npm/node_modules/@sigstore/verify/dist/shared.types.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/shared.types.js diff --git a/deps/npm/node_modules/@sigstore/verify/dist/timestamp/checkpoint.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/timestamp/checkpoint.js similarity index 99% rename from deps/npm/node_modules/@sigstore/verify/dist/timestamp/checkpoint.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/timestamp/checkpoint.js index 04a87383f0fd17..46619b675f8863 100644 --- a/deps/npm/node_modules/@sigstore/verify/dist/timestamp/checkpoint.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/timestamp/checkpoint.js @@ -1,6 +1,6 @@ "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); -exports.verifyCheckpoint = void 0; +exports.verifyCheckpoint = verifyCheckpoint; /* Copyright 2023 The Sigstore Authors. @@ -61,7 +61,6 @@ function verifyCheckpoint(entry, tlogs) { }); } } -exports.verifyCheckpoint = verifyCheckpoint; // Verifies the signatures in the SignedNote. For each signature, the // corresponding transparency log is looked up by the key hint and the // signature is verified against the public key in the transparency log. diff --git a/deps/npm/node_modules/@sigstore/verify/dist/timestamp/index.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/timestamp/index.js similarity index 96% rename from deps/npm/node_modules/@sigstore/verify/dist/timestamp/index.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/timestamp/index.js index 0da554f648d25e..56e948de19338d 100644 --- a/deps/npm/node_modules/@sigstore/verify/dist/timestamp/index.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/timestamp/index.js @@ -1,6 +1,7 @@ "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); -exports.verifyTLogTimestamp = exports.verifyTSATimestamp = void 0; +exports.verifyTSATimestamp = verifyTSATimestamp; +exports.verifyTLogTimestamp = verifyTLogTimestamp; const error_1 = require("../error"); const checkpoint_1 = require("./checkpoint"); const merkle_1 = require("./merkle"); @@ -14,7 +15,6 @@ function verifyTSATimestamp(timestamp, data, timestampAuthorities) { timestamp: timestamp.signingTime, }; } -exports.verifyTSATimestamp = verifyTSATimestamp; function verifyTLogTimestamp(entry, tlogAuthorities) { let inclusionVerified = false; if (isTLogEntryWithInclusionPromise(entry)) { @@ -38,7 +38,6 @@ function verifyTLogTimestamp(entry, tlogAuthorities) { timestamp: new Date(Number(entry.integratedTime) * 1000), }; } -exports.verifyTLogTimestamp = verifyTLogTimestamp; function isTLogEntryWithInclusionPromise(entry) { return entry.inclusionPromise !== undefined; } diff --git a/deps/npm/node_modules/@sigstore/verify/dist/timestamp/merkle.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/timestamp/merkle.js similarity index 95% rename from deps/npm/node_modules/@sigstore/verify/dist/timestamp/merkle.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/timestamp/merkle.js index 9895d01b7abc03..f57cae42002bd0 100644 --- a/deps/npm/node_modules/@sigstore/verify/dist/timestamp/merkle.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/timestamp/merkle.js @@ -1,6 +1,6 @@ "use strict"; 
Object.defineProperty(exports, "__esModule", { value: true }); -exports.verifyMerkleInclusion = void 0; +exports.verifyMerkleInclusion = verifyMerkleInclusion; /* Copyright 2023 The Sigstore Authors. @@ -53,7 +53,6 @@ function verifyMerkleInclusion(entry) { }); } } -exports.verifyMerkleInclusion = verifyMerkleInclusion; // Breaks down inclusion proof for a leaf at the specified index in a tree of // the specified size. The split point is where paths to the index leaf and // the (size - 1) leaf diverge. Returns lengths of the bottom and upper proof @@ -98,8 +97,8 @@ function bitLength(n) { // Hashing logic according to RFC6962. // https://datatracker.ietf.org/doc/html/rfc6962#section-2 function hashChildren(left, right) { - return core_1.crypto.hash(RFC6962_NODE_HASH_PREFIX, left, right); + return core_1.crypto.digest('sha256', RFC6962_NODE_HASH_PREFIX, left, right); } function hashLeaf(leaf) { - return core_1.crypto.hash(RFC6962_LEAF_HASH_PREFIX, leaf); + return core_1.crypto.digest('sha256', RFC6962_LEAF_HASH_PREFIX, leaf); } diff --git a/deps/npm/node_modules/@sigstore/verify/dist/timestamp/set.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/timestamp/set.js similarity index 98% rename from deps/npm/node_modules/@sigstore/verify/dist/timestamp/set.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/timestamp/set.js index a6357c06999cba..5d3f47bb88746a 100644 --- a/deps/npm/node_modules/@sigstore/verify/dist/timestamp/set.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/timestamp/set.js @@ -1,6 +1,6 @@ "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); -exports.verifyTLogSET = void 0; +exports.verifyTLogSET = verifyTLogSET; /* Copyright 2023 The Sigstore Authors. @@ -46,7 +46,6 @@ function verifyTLogSET(entry, tlogs) { }); } } -exports.verifyTLogSET = verifyTLogSET; // Returns a properly formatted "VerificationPayload" for one of the // transaction log entires in the given bundle which can be used for SET // verification. 
diff --git a/deps/npm/node_modules/@sigstore/verify/dist/timestamp/tsa.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/timestamp/tsa.js similarity index 98% rename from deps/npm/node_modules/@sigstore/verify/dist/timestamp/tsa.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/timestamp/tsa.js index 7b095bc3a7f908..70388cd06c52d6 100644 --- a/deps/npm/node_modules/@sigstore/verify/dist/timestamp/tsa.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/timestamp/tsa.js @@ -1,6 +1,6 @@ "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); -exports.verifyRFC3161Timestamp = void 0; +exports.verifyRFC3161Timestamp = verifyRFC3161Timestamp; const core_1 = require("@sigstore/core"); const error_1 = require("../error"); const certificate_1 = require("../key/certificate"); @@ -35,7 +35,6 @@ function verifyRFC3161Timestamp(timestamp, data, timestampAuthorities) { }); } } -exports.verifyRFC3161Timestamp = verifyRFC3161Timestamp; function verifyTimestampForCA(timestamp, data, ca) { const [leaf, ...cas] = ca.certChain; const signingKey = core_1.crypto.createPublicKey(leaf.publicKey); diff --git a/deps/npm/node_modules/@sigstore/verify/dist/tlog/dsse.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/tlog/dsse.js similarity index 98% rename from deps/npm/node_modules/@sigstore/verify/dist/tlog/dsse.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/tlog/dsse.js index bf430e61dde563..d71ed8c6e7ad9a 100644 --- a/deps/npm/node_modules/@sigstore/verify/dist/tlog/dsse.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/tlog/dsse.js @@ -1,6 +1,6 @@ "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); -exports.verifyDSSETLogBody = void 0; +exports.verifyDSSETLogBody = verifyDSSETLogBody; /* Copyright 2023 The Sigstore Authors. @@ -29,7 +29,6 @@ function verifyDSSETLogBody(tlogEntry, content) { }); } } -exports.verifyDSSETLogBody = verifyDSSETLogBody; // Compare the given dsse v0.0.1 tlog entry to the given DSSE envelope. function verifyDSSE001TLogBody(tlogEntry, content) { // Ensure the bundle's DSSE only contains a single signature diff --git a/deps/npm/node_modules/@sigstore/verify/dist/tlog/hashedrekord.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/tlog/hashedrekord.js similarity index 97% rename from deps/npm/node_modules/@sigstore/verify/dist/tlog/hashedrekord.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/tlog/hashedrekord.js index d1758858f030d8..c4aa345b57ba7a 100644 --- a/deps/npm/node_modules/@sigstore/verify/dist/tlog/hashedrekord.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/tlog/hashedrekord.js @@ -1,6 +1,6 @@ "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); -exports.verifyHashedRekordTLogBody = void 0; +exports.verifyHashedRekordTLogBody = verifyHashedRekordTLogBody; /* Copyright 2023 The Sigstore Authors. 
@@ -29,7 +29,6 @@ function verifyHashedRekordTLogBody(tlogEntry, content) { }); } } -exports.verifyHashedRekordTLogBody = verifyHashedRekordTLogBody; // Compare the given hashedrekord v0.0.1 tlog entry to the given message // signature function verifyHashedrekord001TLogBody(tlogEntry, content) { diff --git a/deps/npm/node_modules/@sigstore/verify/dist/tlog/index.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/tlog/index.js similarity index 98% rename from deps/npm/node_modules/@sigstore/verify/dist/tlog/index.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/tlog/index.js index adfc70ed51ad05..da235360c594a8 100644 --- a/deps/npm/node_modules/@sigstore/verify/dist/tlog/index.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/tlog/index.js @@ -1,6 +1,6 @@ "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); -exports.verifyTLogBody = void 0; +exports.verifyTLogBody = verifyTLogBody; /* Copyright 2023 The Sigstore Authors. @@ -45,4 +45,3 @@ function verifyTLogBody(entry, sigContent) { }); } } -exports.verifyTLogBody = verifyTLogBody; diff --git a/deps/npm/node_modules/@sigstore/verify/dist/tlog/intoto.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/tlog/intoto.js similarity index 98% rename from deps/npm/node_modules/@sigstore/verify/dist/tlog/intoto.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/tlog/intoto.js index 74c7f50d763e1d..9096ae9418cc30 100644 --- a/deps/npm/node_modules/@sigstore/verify/dist/tlog/intoto.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/tlog/intoto.js @@ -1,6 +1,6 @@ "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); -exports.verifyIntotoTLogBody = void 0; +exports.verifyIntotoTLogBody = verifyIntotoTLogBody; /* Copyright 2023 The Sigstore Authors. @@ -29,7 +29,6 @@ function verifyIntotoTLogBody(tlogEntry, content) { }); } } -exports.verifyIntotoTLogBody = verifyIntotoTLogBody; // Compare the given intoto v0.0.2 tlog entry to the given DSSE envelope. function verifyIntoto002TLogBody(tlogEntry, content) { // Ensure the bundle's DSSE contains a single signature diff --git a/deps/npm/node_modules/@sigstore/verify/dist/trust/filter.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/trust/filter.js similarity index 93% rename from deps/npm/node_modules/@sigstore/verify/dist/trust/filter.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/trust/filter.js index c09d055913c4c7..880a16cf1940ea 100644 --- a/deps/npm/node_modules/@sigstore/verify/dist/trust/filter.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/trust/filter.js @@ -1,12 +1,12 @@ "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); -exports.filterTLogAuthorities = exports.filterCertAuthorities = void 0; +exports.filterCertAuthorities = filterCertAuthorities; +exports.filterTLogAuthorities = filterTLogAuthorities; function filterCertAuthorities(certAuthorities, criteria) { return certAuthorities.filter((ca) => { return (ca.validFor.start <= criteria.start && ca.validFor.end >= criteria.end); }); } -exports.filterCertAuthorities = filterCertAuthorities; // Filter the list of tlog instances to only those which match the given log // ID and have public keys which are valid for the given integrated time. 
function filterTLogAuthorities(tlogAuthorities, criteria) { @@ -21,4 +21,3 @@ function filterTLogAuthorities(tlogAuthorities, criteria) { criteria.targetDate <= tlog.validFor.end); }); } -exports.filterTLogAuthorities = filterTLogAuthorities; diff --git a/deps/npm/node_modules/@sigstore/verify/dist/trust/index.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/trust/index.js similarity index 95% rename from deps/npm/node_modules/@sigstore/verify/dist/trust/index.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/trust/index.js index 954de558415902..bfab2eb4f9975a 100644 --- a/deps/npm/node_modules/@sigstore/verify/dist/trust/index.js +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/trust/index.js @@ -1,6 +1,7 @@ "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); -exports.toTrustMaterial = exports.filterTLogAuthorities = exports.filterCertAuthorities = void 0; +exports.filterTLogAuthorities = exports.filterCertAuthorities = void 0; +exports.toTrustMaterial = toTrustMaterial; /* Copyright 2023 The Sigstore Authors. @@ -34,7 +35,6 @@ function toTrustMaterial(root, keys) { publicKey: keyFinder, }; } -exports.toTrustMaterial = toTrustMaterial; function createTLogAuthority(tlogInstance) { const keyDetails = tlogInstance.publicKey.keyDetails; const keyType = keyDetails === protobuf_specs_1.PublicKeyDetails.PKCS1_RSA_PKCS1V5 || @@ -54,6 +54,7 @@ function createTLogAuthority(tlogInstance) { }; } function createCertAuthority(ca) { + /* istanbul ignore next */ return { certChain: ca.certChain.certificates.map((cert) => { return core_1.X509Certificate.parse(cert.rawBytes); @@ -76,6 +77,7 @@ function keyLocator(keys) { return { publicKey: core_1.crypto.createPublicKey(key.rawBytes), validFor: (date) => { + /* istanbul ignore next */ return ((key.validFor?.start || BEGINNING_OF_TIME) <= date && (key.validFor?.end || END_OF_TIME) >= date); }, diff --git a/deps/npm/node_modules/@sigstore/verify/dist/trust/trust.types.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/trust/trust.types.js similarity index 100% rename from deps/npm/node_modules/@sigstore/verify/dist/trust/trust.types.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/trust/trust.types.js diff --git a/deps/npm/node_modules/@sigstore/verify/dist/verifier.js b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/verifier.js similarity index 100% rename from deps/npm/node_modules/@sigstore/verify/dist/verifier.js rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/dist/verifier.js diff --git a/deps/npm/node_modules/@sigstore/verify/package.json b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/package.json similarity index 86% rename from deps/npm/node_modules/@sigstore/verify/package.json rename to deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/package.json index cd0c845a797e47..edf72b8bfd9680 100644 --- a/deps/npm/node_modules/@sigstore/verify/package.json +++ b/deps/npm/node_modules/sigstore/node_modules/@sigstore/verify/package.json @@ -1,6 +1,6 @@ { "name": "@sigstore/verify", - "version": "1.2.1", + "version": "2.0.0", "description": "Verification of Sigstore signatures", "main": "dist/index.js", "types": "dist/index.d.ts", @@ -27,10 +27,10 @@ }, "dependencies": { "@sigstore/protobuf-specs": "^0.3.2", - "@sigstore/bundle": "^2.3.2", - "@sigstore/core": "^1.1.0" + "@sigstore/bundle": "^3.0.0", + "@sigstore/core": "^2.0.0" }, "engines": { - 
"node": "^16.14.0 || >=18.0.0" + "node": "^18.17.0 || >=20.5.0" } } diff --git a/deps/npm/node_modules/sigstore/package.json b/deps/npm/node_modules/sigstore/package.json index fa8744bf304a3f..0f798a263657b4 100644 --- a/deps/npm/node_modules/sigstore/package.json +++ b/deps/npm/node_modules/sigstore/package.json @@ -1,6 +1,6 @@ { "name": "sigstore", - "version": "2.3.1", + "version": "3.0.0", "description": "code-signing for npm packages", "main": "dist/index.js", "types": "dist/index.d.ts", @@ -27,21 +27,21 @@ "provenance": true }, "devDependencies": { - "@sigstore/rekor-types": "^2.0.0", + "@sigstore/rekor-types": "^3.0.0", "@sigstore/jest": "^0.0.0", - "@sigstore/mock": "^0.7.4", - "@tufjs/repo-mock": "^2.0.1", + "@sigstore/mock": "^0.8.0", + "@tufjs/repo-mock": "^3.0.1", "@types/make-fetch-happen": "^10.0.4" }, "dependencies": { - "@sigstore/bundle": "^2.3.2", - "@sigstore/core": "^1.0.0", + "@sigstore/bundle": "^3.0.0", + "@sigstore/core": "^2.0.0", "@sigstore/protobuf-specs": "^0.3.2", - "@sigstore/sign": "^2.3.2", - "@sigstore/tuf": "^2.3.4", - "@sigstore/verify": "^1.2.1" + "@sigstore/sign": "^3.0.0", + "@sigstore/tuf": "^3.0.0", + "@sigstore/verify": "^2.0.0" }, "engines": { - "node": "^16.14.0 || >=18.0.0" + "node": "^18.17.0 || >=20.5.0" } } diff --git a/deps/npm/node_modules/spdx-license-ids/deprecated.json b/deps/npm/node_modules/spdx-license-ids/deprecated.json index 278531e40c613d..4f70a14c7469da 100644 --- a/deps/npm/node_modules/spdx-license-ids/deprecated.json +++ b/deps/npm/node_modules/spdx-license-ids/deprecated.json @@ -19,6 +19,7 @@ "LGPL-2.0", "LGPL-2.1", "LGPL-3.0", + "Net-SNMP", "Nunit", "StandardML-NJ", "bzip2-1.0.5", diff --git a/deps/npm/node_modules/spdx-license-ids/index.json b/deps/npm/node_modules/spdx-license-ids/index.json index c7686a710d61d1..f43d5016bd95ab 100644 --- a/deps/npm/node_modules/spdx-license-ids/index.json +++ b/deps/npm/node_modules/spdx-license-ids/index.json @@ -197,6 +197,8 @@ "DRL-1.0", "DRL-1.1", "DSDP", + "DocBook-Schema", + "DocBook-XML", "Dotseqn", "ECL-1.0", "ECL-2.0", @@ -260,6 +262,7 @@ "Glulxe", "Graphics-Gems", "Gutmann", + "HIDAPI", "HP-1986", "HP-1989", "HPND", @@ -270,6 +273,7 @@ "HPND-Kevlin-Henney", "HPND-MIT-disclaimer", "HPND-Markus-Kuhn", + "HPND-Netrek", "HPND-Pbmplus", "HPND-UC", "HPND-UC-export-US", @@ -403,7 +407,6 @@ "NTP", "NTP-0", "Naumen", - "Net-SNMP", "NetCDF", "Newsletr", "Nokia", @@ -485,6 +488,7 @@ "RSCPL", "Rdisc", "Ruby", + "Ruby-pty", "SAX-PD", "SAX-PD-2.0", "SCEA", @@ -541,6 +545,7 @@ "UMich-Merit", "UPL-1.0", "URT-RLE", + "Ubuntu-font-1.0", "Unicode-3.0", "Unicode-DFS-2015", "Unicode-DFS-2016", @@ -559,6 +564,7 @@ "Wsuipa", "X11", "X11-distribute-modifications-variant", + "X11-swapped", "XFree86-1.1", "XSkat", "Xdebug-1.03", diff --git a/deps/npm/node_modules/spdx-license-ids/package.json b/deps/npm/node_modules/spdx-license-ids/package.json index 5f5ed9554f2579..7ab34aab6b8b1d 100644 --- a/deps/npm/node_modules/spdx-license-ids/package.json +++ b/deps/npm/node_modules/spdx-license-ids/package.json @@ -1,6 +1,6 @@ { "name": "spdx-license-ids", - "version": "3.0.18", + "version": "3.0.20", "description": "A list of SPDX license identifiers", "repository": "jslicense/spdx-license-ids", "author": "Shinnosuke Watanabe (https://github.com/shinnn)", diff --git a/deps/npm/node_modules/tuf-js/dist/config.js b/deps/npm/node_modules/tuf-js/dist/config.js index 6845679942fec5..c66d76af86b98c 100644 --- a/deps/npm/node_modules/tuf-js/dist/config.js +++ b/deps/npm/node_modules/tuf-js/dist/config.js @@ -2,7 +2,7 
@@ -2,7 +2,7 @@ Object.defineProperty(exports, "__esModule", { value: true });
 exports.defaultConfig = void 0;
 exports.defaultConfig = {
-    maxRootRotations: 32,
+    maxRootRotations: 256,
     maxDelegations: 32,
     rootMaxLength: 512000, //bytes
     timestampMaxLength: 16384, // bytes
diff --git a/deps/npm/node_modules/tuf-js/dist/updater.js b/deps/npm/node_modules/tuf-js/dist/updater.js
index 5317f7e14659ac..8d5eb4428f044a 100644
--- a/deps/npm/node_modules/tuf-js/dist/updater.js
+++ b/deps/npm/node_modules/tuf-js/dist/updater.js
@@ -144,7 +144,7 @@ class Updater {
         const rootVersion = this.trustedSet.root.signed.version;
         const lowerBound = rootVersion + 1;
         const upperBound = lowerBound + this.config.maxRootRotations;
-        for (let version = lowerBound; version <= upperBound; version++) {
+        for (let version = lowerBound; version < upperBound; version++) {
             const rootUrl = url.join(this.metadataBaseUrl, `${version}.root.json`);
             try {
                 // Client workflow 5.3.3: download new root metadata file
@@ -155,7 +155,13 @@
                 this.persistMetadata(models_1.MetadataKind.Root, bytesData);
             }
             catch (error) {
-                break;
+                if (error instanceof error_1.DownloadHTTPError) {
+                    // 404/403 means current root is newest available
+                    if ([403, 404].includes(error.statusCode)) {
+                        break;
+                    }
+                }
+                throw error;
             }
         }
     }
@@ -247,7 +253,8 @@ class Updater {
         const version = this.trustedSet.root.signed.consistentSnapshot
             ? metaInfo.version
            : undefined;
-        const metadataUrl = url.join(this.metadataBaseUrl, version ? `${version}.${role}.json` : `${role}.json`);
+        const encodedRole = encodeURIComponent(role);
+        const metadataUrl = url.join(this.metadataBaseUrl, version ? `${version}.${encodedRole}.json` : `${encodedRole}.json`);
         try {
             // Client workflow 5.6.1: download targets metadata file
             const bytesData = await this.fetcher.downloadBytes(metadataUrl, maxLength);
@@ -280,7 +287,6 @@ class Updater {
         while (visitedRoleNames.size <= this.config.maxDelegations &&
             delegationsToVisit.length > 0) {
             // Pop the role name from the top of the stack.
-            // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
             const { roleName, parentRoleName } = delegationsToVisit.pop();
             // Skip any visited current role to prevent cycles.
             // Client workflow 5.6.7.1: skip already-visited roles
@@ -330,13 +336,14 @@ class Updater {
         return path.join(this.targetDir, filePath);
     }
     persistMetadata(metaDataName, bytesData) {
+        const encodedName = encodeURIComponent(metaDataName);
         try {
-            const filePath = path.join(this.dir, `${metaDataName}.json`);
+            const filePath = path.join(this.dir, `${encodedName}.json`);
             log('WRITE %s', filePath);
             fs.writeFileSync(filePath, bytesData.toString('utf8'));
         }
         catch (error) {
-            throw new error_1.PersistError(`Failed to persist metadata ${metaDataName} error: ${error}`);
+            throw new error_1.PersistError(`Failed to persist metadata ${encodedName} error: ${error}`);
         }
     }
 }
diff --git a/deps/npm/node_modules/tuf-js/dist/utils/url.js b/deps/npm/node_modules/tuf-js/dist/utils/url.js
index ce67fe2c230535..359d1f3ef385b7 100644
--- a/deps/npm/node_modules/tuf-js/dist/utils/url.js
+++ b/deps/npm/node_modules/tuf-js/dist/utils/url.js
@@ -1,11 +1,10 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.join = void 0;
+exports.join = join;
 const url_1 = require("url");
 function join(base, path) {
     return new url_1.URL(ensureTrailingSlash(base) + removeLeadingSlash(path)).toString();
 }
-exports.join = join;
 function ensureTrailingSlash(path) {
     return path.endsWith('/') ?
path : path + '/'; } diff --git a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/agent/lib/agents.js b/deps/npm/node_modules/tuf-js/node_modules/@npmcli/agent/lib/agents.js deleted file mode 100644 index c541b93001517e..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/agent/lib/agents.js +++ /dev/null @@ -1,206 +0,0 @@ -'use strict' - -const net = require('net') -const tls = require('tls') -const { once } = require('events') -const timers = require('timers/promises') -const { normalizeOptions, cacheOptions } = require('./options') -const { getProxy, getProxyAgent, proxyCache } = require('./proxy.js') -const Errors = require('./errors.js') -const { Agent: AgentBase } = require('agent-base') - -module.exports = class Agent extends AgentBase { - #options - #timeouts - #proxy - #noProxy - #ProxyAgent - - constructor (options = {}) { - const { timeouts, proxy, noProxy, ...normalizedOptions } = normalizeOptions(options) - - super(normalizedOptions) - - this.#options = normalizedOptions - this.#timeouts = timeouts - - if (proxy) { - this.#proxy = new URL(proxy) - this.#noProxy = noProxy - this.#ProxyAgent = getProxyAgent(proxy) - } - } - - get proxy () { - return this.#proxy ? { url: this.#proxy } : {} - } - - #getProxy (options) { - if (!this.#proxy) { - return - } - - const proxy = getProxy(`${options.protocol}//${options.host}:${options.port}`, { - proxy: this.#proxy, - noProxy: this.#noProxy, - }) - - if (!proxy) { - return - } - - const cacheKey = cacheOptions({ - ...options, - ...this.#options, - timeouts: this.#timeouts, - proxy, - }) - - if (proxyCache.has(cacheKey)) { - return proxyCache.get(cacheKey) - } - - let ProxyAgent = this.#ProxyAgent - if (Array.isArray(ProxyAgent)) { - ProxyAgent = this.isSecureEndpoint(options) ? ProxyAgent[1] : ProxyAgent[0] - } - - const proxyAgent = new ProxyAgent(proxy, { - ...this.#options, - socketOptions: { family: this.#options.family }, - }) - proxyCache.set(cacheKey, proxyAgent) - - return proxyAgent - } - - // takes an array of promises and races them against the connection timeout - // which will throw the necessary error if it is hit. This will return the - // result of the promise race. - async #timeoutConnection ({ promises, options, timeout }, ac = new AbortController()) { - if (timeout) { - const connectionTimeout = timers.setTimeout(timeout, null, { signal: ac.signal }) - .then(() => { - throw new Errors.ConnectionTimeoutError(`${options.host}:${options.port}`) - }).catch((err) => { - if (err.name === 'AbortError') { - return - } - throw err - }) - promises.push(connectionTimeout) - } - - let result - try { - result = await Promise.race(promises) - ac.abort() - } catch (err) { - ac.abort() - throw err - } - return result - } - - async connect (request, options) { - // if the connection does not have its own lookup function - // set, then use the one from our options - options.lookup ??= this.#options.lookup - - let socket - let timeout = this.#timeouts.connection - const isSecureEndpoint = this.isSecureEndpoint(options) - - const proxy = this.#getProxy(options) - if (proxy) { - // some of the proxies will wait for the socket to fully connect before - // returning so we have to await this while also racing it against the - // connection timeout. 
- const start = Date.now() - socket = await this.#timeoutConnection({ - options, - timeout, - promises: [proxy.connect(request, options)], - }) - // see how much time proxy.connect took and subtract it from - // the timeout - if (timeout) { - timeout = timeout - (Date.now() - start) - } - } else { - socket = (isSecureEndpoint ? tls : net).connect(options) - } - - socket.setKeepAlive(this.keepAlive, this.keepAliveMsecs) - socket.setNoDelay(this.keepAlive) - - const abortController = new AbortController() - const { signal } = abortController - - const connectPromise = socket[isSecureEndpoint ? 'secureConnecting' : 'connecting'] - ? once(socket, isSecureEndpoint ? 'secureConnect' : 'connect', { signal }) - : Promise.resolve() - - await this.#timeoutConnection({ - options, - timeout, - promises: [ - connectPromise, - once(socket, 'error', { signal }).then((err) => { - throw err[0] - }), - ], - }, abortController) - - if (this.#timeouts.idle) { - socket.setTimeout(this.#timeouts.idle, () => { - socket.destroy(new Errors.IdleTimeoutError(`${options.host}:${options.port}`)) - }) - } - - return socket - } - - addRequest (request, options) { - const proxy = this.#getProxy(options) - // it would be better to call proxy.addRequest here but this causes the - // http-proxy-agent to call its super.addRequest which causes the request - // to be added to the agent twice. since we only support 3 agents - // currently (see the required agents in proxy.js) we have manually - // checked that the only public methods we need to call are called in the - // next block. this could change in the future and presumably we would get - // failing tests until we have properly called the necessary methods on - // each of our proxy agents - if (proxy?.setRequestProps) { - proxy.setRequestProps(request, options) - } - - request.setHeader('connection', this.keepAlive ? 'keep-alive' : 'close') - - if (this.#timeouts.response) { - let responseTimeout - request.once('finish', () => { - setTimeout(() => { - request.destroy(new Errors.ResponseTimeoutError(request, this.#proxy)) - }, this.#timeouts.response) - }) - request.once('response', () => { - clearTimeout(responseTimeout) - }) - } - - if (this.#timeouts.transfer) { - let transferTimeout - request.once('response', (res) => { - setTimeout(() => { - res.destroy(new Errors.TransferTimeoutError(request, this.#proxy)) - }, this.#timeouts.transfer) - res.once('close', () => { - clearTimeout(transferTimeout) - }) - }) - } - - return super.addRequest(request, options) - } -} diff --git a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/agent/lib/dns.js b/deps/npm/node_modules/tuf-js/node_modules/@npmcli/agent/lib/dns.js deleted file mode 100644 index 3c6946c566d736..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/agent/lib/dns.js +++ /dev/null @@ -1,53 +0,0 @@ -'use strict' - -const { LRUCache } = require('lru-cache') -const dns = require('dns') - -// this is a factory so that each request can have its own opts (i.e. ttl) -// while still sharing the cache across all requests -const cache = new LRUCache({ max: 50 }) - -const getOptions = ({ - family = 0, - hints = dns.ADDRCONFIG, - all = false, - verbatim = undefined, - ttl = 5 * 60 * 1000, - lookup = dns.lookup, -}) => ({ - // hints and lookup are returned since both are top level properties to (net|tls).connect - hints, - lookup: (hostname, ...args) => { - const callback = args.pop() // callback is always last arg - const lookupOptions = args[0] ?? 
{} - - const options = { - family, - hints, - all, - verbatim, - ...(typeof lookupOptions === 'number' ? { family: lookupOptions } : lookupOptions), - } - - const key = JSON.stringify({ hostname, ...options }) - - if (cache.has(key)) { - const cached = cache.get(key) - return process.nextTick(callback, null, ...cached) - } - - lookup(hostname, options, (err, ...result) => { - if (err) { - return callback(err) - } - - cache.set(key, result, { ttl }) - return callback(null, ...result) - }) - }, -}) - -module.exports = { - cache, - getOptions, -} diff --git a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/agent/lib/errors.js b/deps/npm/node_modules/tuf-js/node_modules/@npmcli/agent/lib/errors.js deleted file mode 100644 index 70475aec8eb357..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/agent/lib/errors.js +++ /dev/null @@ -1,61 +0,0 @@ -'use strict' - -class InvalidProxyProtocolError extends Error { - constructor (url) { - super(`Invalid protocol \`${url.protocol}\` connecting to proxy \`${url.host}\``) - this.code = 'EINVALIDPROXY' - this.proxy = url - } -} - -class ConnectionTimeoutError extends Error { - constructor (host) { - super(`Timeout connecting to host \`${host}\``) - this.code = 'ECONNECTIONTIMEOUT' - this.host = host - } -} - -class IdleTimeoutError extends Error { - constructor (host) { - super(`Idle timeout reached for host \`${host}\``) - this.code = 'EIDLETIMEOUT' - this.host = host - } -} - -class ResponseTimeoutError extends Error { - constructor (request, proxy) { - let msg = 'Response timeout ' - if (proxy) { - msg += `from proxy \`${proxy.host}\` ` - } - msg += `connecting to host \`${request.host}\`` - super(msg) - this.code = 'ERESPONSETIMEOUT' - this.proxy = proxy - this.request = request - } -} - -class TransferTimeoutError extends Error { - constructor (request, proxy) { - let msg = 'Transfer timeout ' - if (proxy) { - msg += `from proxy \`${proxy.host}\` ` - } - msg += `for \`${request.host}\`` - super(msg) - this.code = 'ETRANSFERTIMEOUT' - this.proxy = proxy - this.request = request - } -} - -module.exports = { - InvalidProxyProtocolError, - ConnectionTimeoutError, - IdleTimeoutError, - ResponseTimeoutError, - TransferTimeoutError, -} diff --git a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/agent/lib/index.js b/deps/npm/node_modules/tuf-js/node_modules/@npmcli/agent/lib/index.js deleted file mode 100644 index b33d6eaef07a21..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/agent/lib/index.js +++ /dev/null @@ -1,56 +0,0 @@ -'use strict' - -const { LRUCache } = require('lru-cache') -const { normalizeOptions, cacheOptions } = require('./options') -const { getProxy, proxyCache } = require('./proxy.js') -const dns = require('./dns.js') -const Agent = require('./agents.js') - -const agentCache = new LRUCache({ max: 20 }) - -const getAgent = (url, { agent, proxy, noProxy, ...options } = {}) => { - // false has meaning so this can't be a simple truthiness check - if (agent != null) { - return agent - } - - url = new URL(url) - - const proxyForUrl = getProxy(url, { proxy, noProxy }) - const normalizedOptions = { - ...normalizeOptions(options), - proxy: proxyForUrl, - } - - const cacheKey = cacheOptions({ - ...normalizedOptions, - secureEndpoint: url.protocol === 'https:', - }) - - if (agentCache.has(cacheKey)) { - return agentCache.get(cacheKey) - } - - const newAgent = new Agent(normalizedOptions) - agentCache.set(cacheKey, newAgent) - - return newAgent -} - -module.exports = { - getAgent, - Agent, - // these are 
exported for backwards compatability - HttpAgent: Agent, - HttpsAgent: Agent, - cache: { - proxy: proxyCache, - agent: agentCache, - dns: dns.cache, - clear: () => { - proxyCache.clear() - agentCache.clear() - dns.cache.clear() - }, - }, -} diff --git a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/agent/lib/options.js b/deps/npm/node_modules/tuf-js/node_modules/@npmcli/agent/lib/options.js deleted file mode 100644 index 0bf53f725f0846..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/agent/lib/options.js +++ /dev/null @@ -1,86 +0,0 @@ -'use strict' - -const dns = require('./dns') - -const normalizeOptions = (opts) => { - const family = parseInt(opts.family ?? '0', 10) - const keepAlive = opts.keepAlive ?? true - - const normalized = { - // nodejs http agent options. these are all the defaults - // but kept here to increase the likelihood of cache hits - // https://nodejs.org/api/http.html#new-agentoptions - keepAliveMsecs: keepAlive ? 1000 : undefined, - maxSockets: opts.maxSockets ?? 15, - maxTotalSockets: Infinity, - maxFreeSockets: keepAlive ? 256 : undefined, - scheduling: 'fifo', - // then spread the rest of the options - ...opts, - // we already set these to their defaults that we want - family, - keepAlive, - // our custom timeout options - timeouts: { - // the standard timeout option is mapped to our idle timeout - // and then deleted below - idle: opts.timeout ?? 0, - connection: 0, - response: 0, - transfer: 0, - ...opts.timeouts, - }, - // get the dns options that go at the top level of socket connection - ...dns.getOptions({ family, ...opts.dns }), - } - - // remove timeout since we already used it to set our own idle timeout - delete normalized.timeout - - return normalized -} - -const createKey = (obj) => { - let key = '' - const sorted = Object.entries(obj).sort((a, b) => a[0] - b[0]) - for (let [k, v] of sorted) { - if (v == null) { - v = 'null' - } else if (v instanceof URL) { - v = v.toString() - } else if (typeof v === 'object') { - v = createKey(v) - } - key += `${k}:${v}:` - } - return key -} - -const cacheOptions = ({ secureEndpoint, ...options }) => createKey({ - secureEndpoint: !!secureEndpoint, - // socket connect options - family: options.family, - hints: options.hints, - localAddress: options.localAddress, - // tls specific connect options - strictSsl: secureEndpoint ? !!options.rejectUnauthorized : false, - ca: secureEndpoint ? options.ca : null, - cert: secureEndpoint ? options.cert : null, - key: secureEndpoint ? 
options.key : null, - // http agent options - keepAlive: options.keepAlive, - keepAliveMsecs: options.keepAliveMsecs, - maxSockets: options.maxSockets, - maxTotalSockets: options.maxTotalSockets, - maxFreeSockets: options.maxFreeSockets, - scheduling: options.scheduling, - // timeout options - timeouts: options.timeouts, - // proxy - proxy: options.proxy, -}) - -module.exports = { - normalizeOptions, - cacheOptions, -} diff --git a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/agent/lib/proxy.js b/deps/npm/node_modules/tuf-js/node_modules/@npmcli/agent/lib/proxy.js deleted file mode 100644 index 6272e929e57bcf..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/agent/lib/proxy.js +++ /dev/null @@ -1,88 +0,0 @@ -'use strict' - -const { HttpProxyAgent } = require('http-proxy-agent') -const { HttpsProxyAgent } = require('https-proxy-agent') -const { SocksProxyAgent } = require('socks-proxy-agent') -const { LRUCache } = require('lru-cache') -const { InvalidProxyProtocolError } = require('./errors.js') - -const PROXY_CACHE = new LRUCache({ max: 20 }) - -const SOCKS_PROTOCOLS = new Set(SocksProxyAgent.protocols) - -const PROXY_ENV_KEYS = new Set(['https_proxy', 'http_proxy', 'proxy', 'no_proxy']) - -const PROXY_ENV = Object.entries(process.env).reduce((acc, [key, value]) => { - key = key.toLowerCase() - if (PROXY_ENV_KEYS.has(key)) { - acc[key] = value - } - return acc -}, {}) - -const getProxyAgent = (url) => { - url = new URL(url) - - const protocol = url.protocol.slice(0, -1) - if (SOCKS_PROTOCOLS.has(protocol)) { - return SocksProxyAgent - } - if (protocol === 'https' || protocol === 'http') { - return [HttpProxyAgent, HttpsProxyAgent] - } - - throw new InvalidProxyProtocolError(url) -} - -const isNoProxy = (url, noProxy) => { - if (typeof noProxy === 'string') { - noProxy = noProxy.split(',').map((p) => p.trim()).filter(Boolean) - } - - if (!noProxy || !noProxy.length) { - return false - } - - const hostSegments = url.hostname.split('.').reverse() - - return noProxy.some((no) => { - const noSegments = no.split('.').filter(Boolean).reverse() - if (!noSegments.length) { - return false - } - - for (let i = 0; i < noSegments.length; i++) { - if (hostSegments[i] !== noSegments[i]) { - return false - } - } - - return true - }) -} - -const getProxy = (url, { proxy, noProxy }) => { - url = new URL(url) - - if (!proxy) { - proxy = url.protocol === 'https:' - ? 
PROXY_ENV.https_proxy - : PROXY_ENV.https_proxy || PROXY_ENV.http_proxy || PROXY_ENV.proxy - } - - if (!noProxy) { - noProxy = PROXY_ENV.no_proxy - } - - if (!proxy || isNoProxy(url, noProxy)) { - return null - } - - return new URL(proxy) -} - -module.exports = { - getProxyAgent, - getProxy, - proxyCache: PROXY_CACHE, -} diff --git a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/agent/package.json b/deps/npm/node_modules/tuf-js/node_modules/@npmcli/agent/package.json deleted file mode 100644 index ef5b4e3228cc46..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/agent/package.json +++ /dev/null @@ -1,60 +0,0 @@ -{ - "name": "@npmcli/agent", - "version": "2.2.2", - "description": "the http/https agent used by the npm cli", - "main": "lib/index.js", - "scripts": { - "gencerts": "bash scripts/create-cert.sh", - "test": "tap", - "lint": "eslint \"**/*.{js,cjs,ts,mjs,jsx,tsx}\"", - "postlint": "template-oss-check", - "template-oss-apply": "template-oss-apply --force", - "lintfix": "npm run lint -- --fix", - "snap": "tap", - "posttest": "npm run lint" - }, - "author": "GitHub Inc.", - "license": "ISC", - "bugs": { - "url": "https://github.com/npm/agent/issues" - }, - "homepage": "https://github.com/npm/agent#readme", - "files": [ - "bin/", - "lib/" - ], - "engines": { - "node": "^16.14.0 || >=18.0.0" - }, - "templateOSS": { - "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. Edits may be overwritten.", - "version": "4.21.3", - "publish": "true" - }, - "dependencies": { - "agent-base": "^7.1.0", - "http-proxy-agent": "^7.0.0", - "https-proxy-agent": "^7.0.1", - "lru-cache": "^10.0.1", - "socks-proxy-agent": "^8.0.3" - }, - "devDependencies": { - "@npmcli/eslint-config": "^4.0.0", - "@npmcli/template-oss": "4.21.3", - "minipass-fetch": "^3.0.3", - "nock": "^13.2.7", - "semver": "^7.5.4", - "simple-socks": "^3.1.0", - "tap": "^16.3.0" - }, - "repository": { - "type": "git", - "url": "https://github.com/npm/agent.git" - }, - "tap": { - "nyc-arg": [ - "--exclude", - "tap-snapshots/**" - ] - } -} diff --git a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/LICENSE.md b/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/LICENSE.md deleted file mode 100644 index 5fc208ff122e08..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/LICENSE.md +++ /dev/null @@ -1,20 +0,0 @@ - - -ISC License - -Copyright npm, Inc. - -Permission to use, copy, modify, and/or distribute this -software for any purpose with or without fee is hereby -granted, provided that the above copyright notice and this -permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND NPM DISCLAIMS ALL -WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO -EVENT SHALL NPM BE LIABLE FOR ANY SPECIAL, DIRECT, -INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, -WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER -TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE -USE OR PERFORMANCE OF THIS SOFTWARE. 
diff --git a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/common/get-options.js b/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/common/get-options.js deleted file mode 100644 index cb5982f79077ac..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/common/get-options.js +++ /dev/null @@ -1,20 +0,0 @@ -// given an input that may or may not be an object, return an object that has -// a copy of every defined property listed in 'copy'. if the input is not an -// object, assign it to the property named by 'wrap' -const getOptions = (input, { copy, wrap }) => { - const result = {} - - if (input && typeof input === 'object') { - for (const prop of copy) { - if (input[prop] !== undefined) { - result[prop] = input[prop] - } - } - } else { - result[wrap] = input - } - - return result -} - -module.exports = getOptions diff --git a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/common/node.js b/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/common/node.js deleted file mode 100644 index 4d13bc037359d7..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/common/node.js +++ /dev/null @@ -1,9 +0,0 @@ -const semver = require('semver') - -const satisfies = (range) => { - return semver.satisfies(process.version, range, { includePrerelease: true }) -} - -module.exports = { - satisfies, -} diff --git a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/cp/LICENSE b/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/cp/LICENSE deleted file mode 100644 index 93546dfb7655bf..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/cp/LICENSE +++ /dev/null @@ -1,15 +0,0 @@ -(The MIT License) - -Copyright (c) 2011-2017 JP Richardson - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files -(the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, - merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE -WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS -OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/cp/errors.js b/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/cp/errors.js deleted file mode 100644 index 1cd1e05d0c533d..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/cp/errors.js +++ /dev/null @@ -1,129 +0,0 @@ -'use strict' -const { inspect } = require('util') - -// adapted from node's internal/errors -// https://github.com/nodejs/node/blob/c8a04049/lib/internal/errors.js - -// close copy of node's internal SystemError class. 
-class SystemError { - constructor (code, prefix, context) { - // XXX context.code is undefined in all constructors used in cp/polyfill - // that may be a bug copied from node, maybe the constructor should use - // `code` not `errno`? nodejs/node#41104 - let message = `${prefix}: ${context.syscall} returned ` + - `${context.code} (${context.message})` - - if (context.path !== undefined) { - message += ` ${context.path}` - } - if (context.dest !== undefined) { - message += ` => ${context.dest}` - } - - this.code = code - Object.defineProperties(this, { - name: { - value: 'SystemError', - enumerable: false, - writable: true, - configurable: true, - }, - message: { - value: message, - enumerable: false, - writable: true, - configurable: true, - }, - info: { - value: context, - enumerable: true, - configurable: true, - writable: false, - }, - errno: { - get () { - return context.errno - }, - set (value) { - context.errno = value - }, - enumerable: true, - configurable: true, - }, - syscall: { - get () { - return context.syscall - }, - set (value) { - context.syscall = value - }, - enumerable: true, - configurable: true, - }, - }) - - if (context.path !== undefined) { - Object.defineProperty(this, 'path', { - get () { - return context.path - }, - set (value) { - context.path = value - }, - enumerable: true, - configurable: true, - }) - } - - if (context.dest !== undefined) { - Object.defineProperty(this, 'dest', { - get () { - return context.dest - }, - set (value) { - context.dest = value - }, - enumerable: true, - configurable: true, - }) - } - } - - toString () { - return `${this.name} [${this.code}]: ${this.message}` - } - - [Symbol.for('nodejs.util.inspect.custom')] (_recurseTimes, ctx) { - return inspect(this, { - ...ctx, - getters: true, - customInspect: false, - }) - } -} - -function E (code, message) { - module.exports[code] = class NodeError extends SystemError { - constructor (ctx) { - super(code, message, ctx) - } - } -} - -E('ERR_FS_CP_DIR_TO_NON_DIR', 'Cannot overwrite directory with non-directory') -E('ERR_FS_CP_EEXIST', 'Target already exists') -E('ERR_FS_CP_EINVAL', 'Invalid src or dest') -E('ERR_FS_CP_FIFO_PIPE', 'Cannot copy a FIFO pipe') -E('ERR_FS_CP_NON_DIR_TO_DIR', 'Cannot overwrite non-directory with directory') -E('ERR_FS_CP_SOCKET', 'Cannot copy a socket file') -E('ERR_FS_CP_SYMLINK_TO_SUBDIRECTORY', 'Cannot overwrite symlink in subdirectory of self') -E('ERR_FS_CP_UNKNOWN', 'Cannot copy an unknown file type') -E('ERR_FS_EISDIR', 'Path is a directory') - -module.exports.ERR_INVALID_ARG_TYPE = class ERR_INVALID_ARG_TYPE extends Error { - constructor (name, expected, actual) { - super() - this.code = 'ERR_INVALID_ARG_TYPE' - this.message = `The ${name} argument must be ${expected}. 
Received ${typeof actual}` - } -} diff --git a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/cp/index.js b/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/cp/index.js deleted file mode 100644 index 972ce7aa12abef..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/cp/index.js +++ /dev/null @@ -1,22 +0,0 @@ -const fs = require('fs/promises') -const getOptions = require('../common/get-options.js') -const node = require('../common/node.js') -const polyfill = require('./polyfill.js') - -// node 16.7.0 added fs.cp -const useNative = node.satisfies('>=16.7.0') - -const cp = async (src, dest, opts) => { - const options = getOptions(opts, { - copy: ['dereference', 'errorOnExist', 'filter', 'force', 'preserveTimestamps', 'recursive'], - }) - - // the polyfill is tested separately from this module, no need to hack - // process.version to try to trigger it just for coverage - // istanbul ignore next - return useNative - ? fs.cp(src, dest, options) - : polyfill(src, dest, options) -} - -module.exports = cp diff --git a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/cp/polyfill.js b/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/cp/polyfill.js deleted file mode 100644 index 80eb10de971918..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/cp/polyfill.js +++ /dev/null @@ -1,428 +0,0 @@ -// this file is a modified version of the code in node 17.2.0 -// which is, in turn, a modified version of the fs-extra module on npm -// node core changes: -// - Use of the assert module has been replaced with core's error system. -// - All code related to the glob dependency has been removed. -// - Bring your own custom fs module is not currently supported. -// - Some basic code cleanup. -// changes here: -// - remove all callback related code -// - drop sync support -// - change assertions back to non-internal methods (see options.js) -// - throws ENOTDIR when rmdir gets an ENOENT for a path that exists in Windows -'use strict' - -const { - ERR_FS_CP_DIR_TO_NON_DIR, - ERR_FS_CP_EEXIST, - ERR_FS_CP_EINVAL, - ERR_FS_CP_FIFO_PIPE, - ERR_FS_CP_NON_DIR_TO_DIR, - ERR_FS_CP_SOCKET, - ERR_FS_CP_SYMLINK_TO_SUBDIRECTORY, - ERR_FS_CP_UNKNOWN, - ERR_FS_EISDIR, - ERR_INVALID_ARG_TYPE, -} = require('./errors.js') -const { - constants: { - errno: { - EEXIST, - EISDIR, - EINVAL, - ENOTDIR, - }, - }, -} = require('os') -const { - chmod, - copyFile, - lstat, - mkdir, - readdir, - readlink, - stat, - symlink, - unlink, - utimes, -} = require('fs/promises') -const { - dirname, - isAbsolute, - join, - parse, - resolve, - sep, - toNamespacedPath, -} = require('path') -const { fileURLToPath } = require('url') - -const defaultOptions = { - dereference: false, - errorOnExist: false, - filter: undefined, - force: true, - preserveTimestamps: false, - recursive: false, -} - -async function cp (src, dest, opts) { - if (opts != null && typeof opts !== 'object') { - throw new ERR_INVALID_ARG_TYPE('options', ['Object'], opts) - } - return cpFn( - toNamespacedPath(getValidatedPath(src)), - toNamespacedPath(getValidatedPath(dest)), - { ...defaultOptions, ...opts }) -} - -function getValidatedPath (fileURLOrPath) { - const path = fileURLOrPath != null && fileURLOrPath.href - && fileURLOrPath.origin - ? 
fileURLToPath(fileURLOrPath) - : fileURLOrPath - return path -} - -async function cpFn (src, dest, opts) { - // Warn about using preserveTimestamps on 32-bit node - // istanbul ignore next - if (opts.preserveTimestamps && process.arch === 'ia32') { - const warning = 'Using the preserveTimestamps option in 32-bit ' + - 'node is not recommended' - process.emitWarning(warning, 'TimestampPrecisionWarning') - } - const stats = await checkPaths(src, dest, opts) - const { srcStat, destStat } = stats - await checkParentPaths(src, srcStat, dest) - if (opts.filter) { - return handleFilter(checkParentDir, destStat, src, dest, opts) - } - return checkParentDir(destStat, src, dest, opts) -} - -async function checkPaths (src, dest, opts) { - const { 0: srcStat, 1: destStat } = await getStats(src, dest, opts) - if (destStat) { - if (areIdentical(srcStat, destStat)) { - throw new ERR_FS_CP_EINVAL({ - message: 'src and dest cannot be the same', - path: dest, - syscall: 'cp', - errno: EINVAL, - }) - } - if (srcStat.isDirectory() && !destStat.isDirectory()) { - throw new ERR_FS_CP_DIR_TO_NON_DIR({ - message: `cannot overwrite directory ${src} ` + - `with non-directory ${dest}`, - path: dest, - syscall: 'cp', - errno: EISDIR, - }) - } - if (!srcStat.isDirectory() && destStat.isDirectory()) { - throw new ERR_FS_CP_NON_DIR_TO_DIR({ - message: `cannot overwrite non-directory ${src} ` + - `with directory ${dest}`, - path: dest, - syscall: 'cp', - errno: ENOTDIR, - }) - } - } - - if (srcStat.isDirectory() && isSrcSubdir(src, dest)) { - throw new ERR_FS_CP_EINVAL({ - message: `cannot copy ${src} to a subdirectory of self ${dest}`, - path: dest, - syscall: 'cp', - errno: EINVAL, - }) - } - return { srcStat, destStat } -} - -function areIdentical (srcStat, destStat) { - return destStat.ino && destStat.dev && destStat.ino === srcStat.ino && - destStat.dev === srcStat.dev -} - -function getStats (src, dest, opts) { - const statFunc = opts.dereference ? - (file) => stat(file, { bigint: true }) : - (file) => lstat(file, { bigint: true }) - return Promise.all([ - statFunc(src), - statFunc(dest).catch((err) => { - // istanbul ignore next: unsure how to cover. - if (err.code === 'ENOENT') { - return null - } - // istanbul ignore next: unsure how to cover. - throw err - }), - ]) -} - -async function checkParentDir (destStat, src, dest, opts) { - const destParent = dirname(dest) - const dirExists = await pathExists(destParent) - if (dirExists) { - return getStatsForCopy(destStat, src, dest, opts) - } - await mkdir(destParent, { recursive: true }) - return getStatsForCopy(destStat, src, dest, opts) -} - -function pathExists (dest) { - return stat(dest).then( - () => true, - // istanbul ignore next: not sure when this would occur - (err) => (err.code === 'ENOENT' ? false : Promise.reject(err))) -} - -// Recursively check if dest parent is a subdirectory of src. -// It works for all file types including symlinks since it -// checks the src and dest inodes. It starts from the deepest -// parent and stops once it reaches the src parent or the root path. 
-async function checkParentPaths (src, srcStat, dest) { - const srcParent = resolve(dirname(src)) - const destParent = resolve(dirname(dest)) - if (destParent === srcParent || destParent === parse(destParent).root) { - return - } - let destStat - try { - destStat = await stat(destParent, { bigint: true }) - } catch (err) { - // istanbul ignore else: not sure when this would occur - if (err.code === 'ENOENT') { - return - } - // istanbul ignore next: not sure when this would occur - throw err - } - if (areIdentical(srcStat, destStat)) { - throw new ERR_FS_CP_EINVAL({ - message: `cannot copy ${src} to a subdirectory of self ${dest}`, - path: dest, - syscall: 'cp', - errno: EINVAL, - }) - } - return checkParentPaths(src, srcStat, destParent) -} - -const normalizePathToArray = (path) => - resolve(path).split(sep).filter(Boolean) - -// Return true if dest is a subdir of src, otherwise false. -// It only checks the path strings. -function isSrcSubdir (src, dest) { - const srcArr = normalizePathToArray(src) - const destArr = normalizePathToArray(dest) - return srcArr.every((cur, i) => destArr[i] === cur) -} - -async function handleFilter (onInclude, destStat, src, dest, opts, cb) { - const include = await opts.filter(src, dest) - if (include) { - return onInclude(destStat, src, dest, opts, cb) - } -} - -function startCopy (destStat, src, dest, opts) { - if (opts.filter) { - return handleFilter(getStatsForCopy, destStat, src, dest, opts) - } - return getStatsForCopy(destStat, src, dest, opts) -} - -async function getStatsForCopy (destStat, src, dest, opts) { - const statFn = opts.dereference ? stat : lstat - const srcStat = await statFn(src) - // istanbul ignore else: can't portably test FIFO - if (srcStat.isDirectory() && opts.recursive) { - return onDir(srcStat, destStat, src, dest, opts) - } else if (srcStat.isDirectory()) { - throw new ERR_FS_EISDIR({ - message: `${src} is a directory (not copied)`, - path: src, - syscall: 'cp', - errno: EINVAL, - }) - } else if (srcStat.isFile() || - srcStat.isCharacterDevice() || - srcStat.isBlockDevice()) { - return onFile(srcStat, destStat, src, dest, opts) - } else if (srcStat.isSymbolicLink()) { - return onLink(destStat, src, dest) - } else if (srcStat.isSocket()) { - throw new ERR_FS_CP_SOCKET({ - message: `cannot copy a socket file: ${dest}`, - path: dest, - syscall: 'cp', - errno: EINVAL, - }) - } else if (srcStat.isFIFO()) { - throw new ERR_FS_CP_FIFO_PIPE({ - message: `cannot copy a FIFO pipe: ${dest}`, - path: dest, - syscall: 'cp', - errno: EINVAL, - }) - } - // istanbul ignore next: should be unreachable - throw new ERR_FS_CP_UNKNOWN({ - message: `cannot copy an unknown file type: ${dest}`, - path: dest, - syscall: 'cp', - errno: EINVAL, - }) -} - -function onFile (srcStat, destStat, src, dest, opts) { - if (!destStat) { - return _copyFile(srcStat, src, dest, opts) - } - return mayCopyFile(srcStat, src, dest, opts) -} - -async function mayCopyFile (srcStat, src, dest, opts) { - if (opts.force) { - await unlink(dest) - return _copyFile(srcStat, src, dest, opts) - } else if (opts.errorOnExist) { - throw new ERR_FS_CP_EEXIST({ - message: `${dest} already exists`, - path: dest, - syscall: 'cp', - errno: EEXIST, - }) - } -} - -async function _copyFile (srcStat, src, dest, opts) { - await copyFile(src, dest) - if (opts.preserveTimestamps) { - return handleTimestampsAndMode(srcStat.mode, src, dest) - } - return setDestMode(dest, srcStat.mode) -} - -async function handleTimestampsAndMode (srcMode, src, dest) { - // Make sure the file is writable before 
setting the timestamp - // otherwise open fails with EPERM when invoked with 'r+' - // (through utimes call) - if (fileIsNotWritable(srcMode)) { - await makeFileWritable(dest, srcMode) - return setDestTimestampsAndMode(srcMode, src, dest) - } - return setDestTimestampsAndMode(srcMode, src, dest) -} - -function fileIsNotWritable (srcMode) { - return (srcMode & 0o200) === 0 -} - -function makeFileWritable (dest, srcMode) { - return setDestMode(dest, srcMode | 0o200) -} - -async function setDestTimestampsAndMode (srcMode, src, dest) { - await setDestTimestamps(src, dest) - return setDestMode(dest, srcMode) -} - -function setDestMode (dest, srcMode) { - return chmod(dest, srcMode) -} - -async function setDestTimestamps (src, dest) { - // The initial srcStat.atime cannot be trusted - // because it is modified by the read(2) system call - // (See https://nodejs.org/api/fs.html#fs_stat_time_values) - const updatedSrcStat = await stat(src) - return utimes(dest, updatedSrcStat.atime, updatedSrcStat.mtime) -} - -function onDir (srcStat, destStat, src, dest, opts) { - if (!destStat) { - return mkDirAndCopy(srcStat.mode, src, dest, opts) - } - return copyDir(src, dest, opts) -} - -async function mkDirAndCopy (srcMode, src, dest, opts) { - await mkdir(dest) - await copyDir(src, dest, opts) - return setDestMode(dest, srcMode) -} - -async function copyDir (src, dest, opts) { - const dir = await readdir(src) - for (let i = 0; i < dir.length; i++) { - const item = dir[i] - const srcItem = join(src, item) - const destItem = join(dest, item) - const { destStat } = await checkPaths(srcItem, destItem, opts) - await startCopy(destStat, srcItem, destItem, opts) - } -} - -async function onLink (destStat, src, dest) { - let resolvedSrc = await readlink(src) - if (!isAbsolute(resolvedSrc)) { - resolvedSrc = resolve(dirname(src), resolvedSrc) - } - if (!destStat) { - return symlink(resolvedSrc, dest) - } - let resolvedDest - try { - resolvedDest = await readlink(dest) - } catch (err) { - // Dest exists and is a regular file or directory, - // Windows may throw UNKNOWN error. If dest already exists, - // fs throws error anyway, so no need to guard against it here. - // istanbul ignore next: can only test on windows - if (err.code === 'EINVAL' || err.code === 'UNKNOWN') { - return symlink(resolvedSrc, dest) - } - // istanbul ignore next: should not be possible - throw err - } - if (!isAbsolute(resolvedDest)) { - resolvedDest = resolve(dirname(dest), resolvedDest) - } - if (isSrcSubdir(resolvedSrc, resolvedDest)) { - throw new ERR_FS_CP_EINVAL({ - message: `cannot copy ${resolvedSrc} to a subdirectory of self ` + - `${resolvedDest}`, - path: dest, - syscall: 'cp', - errno: EINVAL, - }) - } - // Do not copy if src is a subdir of dest since unlinking - // dest in this case would result in removing src contents - // and therefore a broken symlink would be created. 
- const srcStat = await stat(src) - if (srcStat.isDirectory() && isSrcSubdir(resolvedDest, resolvedSrc)) { - throw new ERR_FS_CP_SYMLINK_TO_SUBDIRECTORY({ - message: `cannot overwrite ${resolvedDest} with ${resolvedSrc}`, - path: dest, - syscall: 'cp', - errno: EINVAL, - }) - } - return copyLink(resolvedSrc, dest) -} - -async function copyLink (resolvedSrc, dest) { - await unlink(dest) - return symlink(resolvedSrc, dest) -} - -module.exports = cp diff --git a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/index.js b/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/index.js deleted file mode 100644 index 81c746304cc428..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/index.js +++ /dev/null @@ -1,13 +0,0 @@ -'use strict' - -const cp = require('./cp/index.js') -const withTempDir = require('./with-temp-dir.js') -const readdirScoped = require('./readdir-scoped.js') -const moveFile = require('./move-file.js') - -module.exports = { - cp, - withTempDir, - readdirScoped, - moveFile, -} diff --git a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/move-file.js b/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/move-file.js deleted file mode 100644 index d56e06d384659a..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/move-file.js +++ /dev/null @@ -1,78 +0,0 @@ -const { dirname, join, resolve, relative, isAbsolute } = require('path') -const fs = require('fs/promises') - -const pathExists = async path => { - try { - await fs.access(path) - return true - } catch (er) { - return er.code !== 'ENOENT' - } -} - -const moveFile = async (source, destination, options = {}, root = true, symlinks = []) => { - if (!source || !destination) { - throw new TypeError('`source` and `destination` file required') - } - - options = { - overwrite: true, - ...options, - } - - if (!options.overwrite && await pathExists(destination)) { - throw new Error(`The destination file exists: ${destination}`) - } - - await fs.mkdir(dirname(destination), { recursive: true }) - - try { - await fs.rename(source, destination) - } catch (error) { - if (error.code === 'EXDEV' || error.code === 'EPERM') { - const sourceStat = await fs.lstat(source) - if (sourceStat.isDirectory()) { - const files = await fs.readdir(source) - await Promise.all(files.map((file) => - moveFile(join(source, file), join(destination, file), options, false, symlinks) - )) - } else if (sourceStat.isSymbolicLink()) { - symlinks.push({ source, destination }) - } else { - await fs.copyFile(source, destination) - } - } else { - throw error - } - } - - if (root) { - await Promise.all(symlinks.map(async ({ source: symSource, destination: symDestination }) => { - let target = await fs.readlink(symSource) - // junction symlinks in windows will be absolute paths, so we need to - // make sure they point to the symlink destination - if (isAbsolute(target)) { - target = resolve(symDestination, relative(symSource, target)) - } - // try to determine what the actual file is so we can create the correct - // type of symlink in windows - let targetStat = 'file' - try { - targetStat = await fs.stat(resolve(dirname(symSource), target)) - if (targetStat.isDirectory()) { - targetStat = 'junction' - } - } catch { - // targetStat remains 'file' - } - await fs.symlink( - target, - symDestination, - targetStat - ) - })) - await fs.rm(source, { recursive: true, force: true }) - } -} - -module.exports = moveFile diff --git a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/readdir-scoped.js 
b/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/readdir-scoped.js deleted file mode 100644 index cd601dfbe7486b..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/readdir-scoped.js +++ /dev/null @@ -1,20 +0,0 @@ -const { readdir } = require('fs/promises') -const { join } = require('path') - -const readdirScoped = async (dir) => { - const results = [] - - for (const item of await readdir(dir)) { - if (item.startsWith('@')) { - for (const scopedItem of await readdir(join(dir, item))) { - results.push(join(item, scopedItem)) - } - } else { - results.push(item) - } - } - - return results -} - -module.exports = readdirScoped diff --git a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/with-temp-dir.js b/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/with-temp-dir.js deleted file mode 100644 index 0738ac4f29e1be..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/lib/with-temp-dir.js +++ /dev/null @@ -1,39 +0,0 @@ -const { join, sep } = require('path') - -const getOptions = require('./common/get-options.js') -const { mkdir, mkdtemp, rm } = require('fs/promises') - -// create a temp directory, ensure its permissions match its parent, then call -// the supplied function passing it the path to the directory. clean up after -// the function finishes, whether it throws or not -const withTempDir = async (root, fn, opts) => { - const options = getOptions(opts, { - copy: ['tmpPrefix'], - }) - // create the directory - await mkdir(root, { recursive: true }) - - const target = await mkdtemp(join(`${root}${sep}`, options.tmpPrefix || '')) - let err - let result - - try { - result = await fn(target) - } catch (_err) { - err = _err - } - - try { - await rm(target, { force: true, recursive: true }) - } catch { - // ignore errors - } - - if (err) { - throw err - } - - return result -} - -module.exports = withTempDir diff --git a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/package.json b/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/package.json deleted file mode 100644 index 5261a11b78000e..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/@npmcli/fs/package.json +++ /dev/null @@ -1,52 +0,0 @@ -{ - "name": "@npmcli/fs", - "version": "3.1.1", - "description": "filesystem utilities for the npm cli", - "main": "lib/index.js", - "files": [ - "bin/", - "lib/" - ], - "scripts": { - "snap": "tap", - "test": "tap", - "npmclilint": "npmcli-lint", - "lint": "eslint \"**/*.{js,cjs,ts,mjs,jsx,tsx}\"", - "lintfix": "npm run lint -- --fix", - "posttest": "npm run lint", - "postsnap": "npm run lintfix --", - "postlint": "template-oss-check", - "template-oss-apply": "template-oss-apply --force" - }, - "repository": { - "type": "git", - "url": "git+https://github.com/npm/fs.git" - }, - "keywords": [ - "npm", - "oss" - ], - "author": "GitHub Inc.", - "license": "ISC", - "devDependencies": { - "@npmcli/eslint-config": "^4.0.0", - "@npmcli/template-oss": "4.22.0", - "tap": "^16.0.1" - }, - "dependencies": { - "semver": "^7.3.5" - }, - "engines": { - "node": "^14.17.0 || ^16.13.0 || >=18.0.0" - }, - "templateOSS": { - "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. 
Edits may be overwritten.", - "version": "4.22.0" - }, - "tap": { - "nyc-arg": [ - "--exclude", - "tap-snapshots/**" - ] - } -} diff --git a/deps/npm/node_modules/@tufjs/models/LICENSE b/deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/LICENSE similarity index 100% rename from deps/npm/node_modules/@tufjs/models/LICENSE rename to deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/LICENSE diff --git a/deps/npm/node_modules/@tufjs/models/dist/base.js b/deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/base.js similarity index 80% rename from deps/npm/node_modules/@tufjs/models/dist/base.js rename to deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/base.js index 259f6799c13a0d..85e45d8fc1151e 100644 --- a/deps/npm/node_modules/@tufjs/models/dist/base.js +++ b/deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/base.js @@ -3,7 +3,8 @@ var __importDefault = (this && this.__importDefault) || function (mod) { return (mod && mod.__esModule) ? mod : { "default": mod }; }; Object.defineProperty(exports, "__esModule", { value: true }); -exports.Signed = exports.isMetadataKind = exports.MetadataKind = void 0; +exports.Signed = exports.MetadataKind = void 0; +exports.isMetadataKind = isMetadataKind; const util_1 = __importDefault(require("util")); const error_1 = require("./error"); const utils_1 = require("./utils"); @@ -19,7 +20,6 @@ function isMetadataKind(value) { return (typeof value === 'string' && Object.values(MetadataKind).includes(value)); } -exports.isMetadataKind = isMetadataKind; /*** * A base class for the signed part of TUF metadata. * @@ -39,8 +39,8 @@ class Signed { if (specList[0] != SPECIFICATION_VERSION[0]) { throw new error_1.ValueError('Unsupported specVersion'); } - this.expires = options.expires || new Date().toISOString(); - this.version = options.version || 1; + this.expires = options.expires; + this.version = options.version; this.unrecognizedFields = options.unrecognizedFields || {}; } equals(other) { @@ -60,13 +60,22 @@ class Signed { } static commonFieldsFromJSON(data) { const { spec_version, expires, version, ...rest } = data; - if (utils_1.guard.isDefined(spec_version) && !(typeof spec_version === 'string')) { + if (!utils_1.guard.isDefined(spec_version)) { + throw new error_1.ValueError('spec_version is not defined'); + } + else if (typeof spec_version !== 'string') { throw new TypeError('spec_version must be a string'); } - if (utils_1.guard.isDefined(expires) && !(typeof expires === 'string')) { + if (!utils_1.guard.isDefined(expires)) { + throw new error_1.ValueError('expires is not defined'); + } + else if (!(typeof expires === 'string')) { throw new TypeError('expires must be a string'); } - if (utils_1.guard.isDefined(version) && !(typeof version === 'number')) { + if (!utils_1.guard.isDefined(version)) { + throw new error_1.ValueError('version is not defined'); + } + else if (!(typeof version === 'number')) { throw new TypeError('version must be a number'); } return { diff --git a/deps/npm/node_modules/@tufjs/models/dist/delegations.js b/deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/delegations.js similarity index 100% rename from deps/npm/node_modules/@tufjs/models/dist/delegations.js rename to deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/delegations.js diff --git a/deps/npm/node_modules/@tufjs/models/dist/error.js b/deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/error.js similarity index 100% rename from deps/npm/node_modules/@tufjs/models/dist/error.js rename to 
deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/error.js diff --git a/deps/npm/node_modules/@tufjs/models/dist/file.js b/deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/file.js similarity index 100% rename from deps/npm/node_modules/@tufjs/models/dist/file.js rename to deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/file.js diff --git a/deps/npm/node_modules/@tufjs/models/dist/index.js b/deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/index.js similarity index 100% rename from deps/npm/node_modules/@tufjs/models/dist/index.js rename to deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/index.js diff --git a/deps/npm/node_modules/@tufjs/models/dist/key.js b/deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/key.js similarity index 100% rename from deps/npm/node_modules/@tufjs/models/dist/key.js rename to deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/key.js diff --git a/deps/npm/node_modules/@tufjs/models/dist/metadata.js b/deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/metadata.js similarity index 91% rename from deps/npm/node_modules/@tufjs/models/dist/metadata.js rename to deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/metadata.js index 9668b6f14fa701..389d2504e0b53d 100644 --- a/deps/npm/node_modules/@tufjs/models/dist/metadata.js +++ b/deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/metadata.js @@ -125,6 +125,9 @@ class Metadata { if (type !== signed._type) { throw new error_1.ValueError(`expected '${type}', got ${signed['_type']}`); } + if (!utils_1.guard.isObjectArray(signatures)) { + throw new TypeError('signatures is not an array'); + } let signedObj; switch (type) { case base_1.MetadataKind.Root: @@ -142,17 +145,16 @@ class Metadata { default: throw new TypeError('invalid metadata type'); } - const sigMap = signaturesFromJSON(signatures); + const sigMap = {}; + // Ensure that each signature is unique + signatures.forEach((sigData) => { + const sig = signature_1.Signature.fromJSON(sigData); + if (sigMap[sig.keyID]) { + throw new error_1.ValueError(`multiple signatures found for keyid: ${sig.keyID}`); + } + sigMap[sig.keyID] = sig; + }); return new Metadata(signedObj, sigMap, rest); } } exports.Metadata = Metadata; -function signaturesFromJSON(data) { - if (!utils_1.guard.isObjectArray(data)) { - throw new TypeError('signatures is not an array'); - } - return data.reduce((acc, sigData) => { - const signature = signature_1.Signature.fromJSON(sigData); - return { ...acc, [signature.keyID]: signature }; - }, {}); -} diff --git a/deps/npm/node_modules/@tufjs/models/dist/role.js b/deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/role.js similarity index 100% rename from deps/npm/node_modules/@tufjs/models/dist/role.js rename to deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/role.js diff --git a/deps/npm/node_modules/@tufjs/models/dist/root.js b/deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/root.js similarity index 100% rename from deps/npm/node_modules/@tufjs/models/dist/root.js rename to deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/root.js diff --git a/deps/npm/node_modules/@tufjs/models/dist/signature.js b/deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/signature.js similarity index 100% rename from deps/npm/node_modules/@tufjs/models/dist/signature.js rename to deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/signature.js diff --git 
a/deps/npm/node_modules/@tufjs/models/dist/snapshot.js b/deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/snapshot.js similarity index 100% rename from deps/npm/node_modules/@tufjs/models/dist/snapshot.js rename to deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/snapshot.js diff --git a/deps/npm/node_modules/@tufjs/models/dist/targets.js b/deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/targets.js similarity index 100% rename from deps/npm/node_modules/@tufjs/models/dist/targets.js rename to deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/targets.js diff --git a/deps/npm/node_modules/@tufjs/models/dist/timestamp.js b/deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/timestamp.js similarity index 100% rename from deps/npm/node_modules/@tufjs/models/dist/timestamp.js rename to deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/timestamp.js diff --git a/deps/npm/node_modules/@tufjs/models/dist/utils/guard.js b/deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/utils/guard.js similarity index 88% rename from deps/npm/node_modules/@tufjs/models/dist/utils/guard.js rename to deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/utils/guard.js index efe558852303ce..911e8475986bbc 100644 --- a/deps/npm/node_modules/@tufjs/models/dist/utils/guard.js +++ b/deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/utils/guard.js @@ -1,33 +1,32 @@ "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); -exports.isObjectRecord = exports.isStringRecord = exports.isObjectArray = exports.isStringArray = exports.isObject = exports.isDefined = void 0; +exports.isDefined = isDefined; +exports.isObject = isObject; +exports.isStringArray = isStringArray; +exports.isObjectArray = isObjectArray; +exports.isStringRecord = isStringRecord; +exports.isObjectRecord = isObjectRecord; function isDefined(val) { return val !== undefined; } -exports.isDefined = isDefined; function isObject(value) { return typeof value === 'object' && value !== null; } -exports.isObject = isObject; function isStringArray(value) { return Array.isArray(value) && value.every((v) => typeof v === 'string'); } -exports.isStringArray = isStringArray; function isObjectArray(value) { return Array.isArray(value) && value.every(isObject); } -exports.isObjectArray = isObjectArray; function isStringRecord(value) { return (typeof value === 'object' && value !== null && Object.keys(value).every((k) => typeof k === 'string') && Object.values(value).every((v) => typeof v === 'string')); } -exports.isStringRecord = isStringRecord; function isObjectRecord(value) { return (typeof value === 'object' && value !== null && Object.keys(value).every((k) => typeof k === 'string') && Object.values(value).every((v) => typeof v === 'object' && v !== null)); } -exports.isObjectRecord = isObjectRecord; diff --git a/deps/npm/node_modules/@tufjs/models/dist/utils/index.js b/deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/utils/index.js similarity index 100% rename from deps/npm/node_modules/@tufjs/models/dist/utils/index.js rename to deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/utils/index.js diff --git a/deps/npm/node_modules/@tufjs/models/dist/utils/key.js b/deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/utils/key.js similarity index 99% rename from deps/npm/node_modules/@tufjs/models/dist/utils/key.js rename to deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/utils/key.js index 
1f795ba1a2733f..3c3ec07f1425a7 100644 --- a/deps/npm/node_modules/@tufjs/models/dist/utils/key.js +++ b/deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/utils/key.js @@ -3,7 +3,7 @@ var __importDefault = (this && this.__importDefault) || function (mod) { return (mod && mod.__esModule) ? mod : { "default": mod }; }; Object.defineProperty(exports, "__esModule", { value: true }); -exports.getPublicKey = void 0; +exports.getPublicKey = getPublicKey; const crypto_1 = __importDefault(require("crypto")); const error_1 = require("../error"); const oid_1 = require("./oid"); @@ -28,7 +28,6 @@ function getPublicKey(keyInfo) { throw new error_1.UnsupportedAlgorithmError(`Unsupported key type: ${keyInfo.keyType}`); } } -exports.getPublicKey = getPublicKey; function getRSAPublicKey(keyInfo) { // Only support PEM-encoded RSA keys if (!keyInfo.keyVal.startsWith(PEM_HEADER)) { diff --git a/deps/npm/node_modules/@tufjs/models/dist/utils/oid.js b/deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/utils/oid.js similarity index 96% rename from deps/npm/node_modules/@tufjs/models/dist/utils/oid.js rename to deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/utils/oid.js index e1bb7af5e54fbf..00b29c3030d1ec 100644 --- a/deps/npm/node_modules/@tufjs/models/dist/utils/oid.js +++ b/deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/utils/oid.js @@ -1,6 +1,6 @@ "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); -exports.encodeOIDString = void 0; +exports.encodeOIDString = encodeOIDString; const ANS1_TAG_OID = 0x06; function encodeOIDString(oid) { const parts = oid.split('.'); @@ -14,7 +14,6 @@ function encodeOIDString(oid) { const der = Buffer.from([first, ...rest]); return Buffer.from([ANS1_TAG_OID, der.length, ...der]); } -exports.encodeOIDString = encodeOIDString; function encodeVariableLengthInteger(value) { const bytes = []; let mask = 0x00; diff --git a/deps/npm/node_modules/@tufjs/models/dist/utils/types.js b/deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/utils/types.js similarity index 100% rename from deps/npm/node_modules/@tufjs/models/dist/utils/types.js rename to deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/utils/types.js diff --git a/deps/npm/node_modules/@tufjs/models/dist/utils/verify.js b/deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/utils/verify.js similarity index 100% rename from deps/npm/node_modules/@tufjs/models/dist/utils/verify.js rename to deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/dist/utils/verify.js diff --git a/deps/npm/node_modules/@tufjs/models/package.json b/deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/package.json similarity index 90% rename from deps/npm/node_modules/@tufjs/models/package.json rename to deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/package.json index be581591a0f3a3..8e5132ddf1079c 100644 --- a/deps/npm/node_modules/@tufjs/models/package.json +++ b/deps/npm/node_modules/tuf-js/node_modules/@tufjs/models/package.json @@ -1,6 +1,6 @@ { "name": "@tufjs/models", - "version": "2.0.1", + "version": "3.0.1", "description": "TUF metadata models", "main": "dist/index.js", "types": "dist/index.d.ts", @@ -29,9 +29,9 @@ "homepage": "https://github.com/theupdateframework/tuf-js/tree/main/packages/models#readme", "dependencies": { "@tufjs/canonical-json": "2.0.0", - "minimatch": "^9.0.4" + "minimatch": "^9.0.5" }, "engines": { - "node": "^16.14.0 || >=18.0.0" + "node": "^18.17.0 || >=20.5.0" } } diff --git 
a/deps/npm/node_modules/tuf-js/node_modules/cacache/LICENSE.md b/deps/npm/node_modules/tuf-js/node_modules/cacache/LICENSE.md deleted file mode 100644 index 8d28acf866d932..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/cacache/LICENSE.md +++ /dev/null @@ -1,16 +0,0 @@ -ISC License - -Copyright (c) npm, Inc. - -Permission to use, copy, modify, and/or distribute this software for -any purpose with or without fee is hereby granted, provided that the -above copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE COPYRIGHT HOLDER DISCLAIMS -ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE -COPYRIGHT HOLDER BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR -CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS -OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE -OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE -USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/content/path.js b/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/content/path.js deleted file mode 100644 index ad5a76a4f73f26..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/content/path.js +++ /dev/null @@ -1,29 +0,0 @@ -'use strict' - -const contentVer = require('../../package.json')['cache-version'].content -const hashToSegments = require('../util/hash-to-segments') -const path = require('path') -const ssri = require('ssri') - -// Current format of content file path: -// -// sha512-BaSE64Hex= -> -// ~/.my-cache/content-v2/sha512/ba/da/55deadbeefc0ffee -// -module.exports = contentPath - -function contentPath (cache, integrity) { - const sri = ssri.parse(integrity, { single: true }) - // contentPath is the *strongest* algo given - return path.join( - contentDir(cache), - sri.algorithm, - ...hashToSegments(sri.hexDigest()) - ) -} - -module.exports.contentDir = contentDir - -function contentDir (cache) { - return path.join(cache, `content-v${contentVer}`) -} diff --git a/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/content/read.js b/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/content/read.js deleted file mode 100644 index 5f6192c3cec566..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/content/read.js +++ /dev/null @@ -1,165 +0,0 @@ -'use strict' - -const fs = require('fs/promises') -const fsm = require('fs-minipass') -const ssri = require('ssri') -const contentPath = require('./path') -const Pipeline = require('minipass-pipeline') - -module.exports = read - -const MAX_SINGLE_READ_SIZE = 64 * 1024 * 1024 -async function read (cache, integrity, opts = {}) { - const { size } = opts - const { stat, cpath, sri } = await withContentSri(cache, integrity, async (cpath, sri) => { - // get size - const stat = size ? 
{ size } : await fs.stat(cpath) - return { stat, cpath, sri } - }) - - if (stat.size > MAX_SINGLE_READ_SIZE) { - return readPipeline(cpath, stat.size, sri, new Pipeline()).concat() - } - - const data = await fs.readFile(cpath, { encoding: null }) - - if (stat.size !== data.length) { - throw sizeError(stat.size, data.length) - } - - if (!ssri.checkData(data, sri)) { - throw integrityError(sri, cpath) - } - - return data -} - -const readPipeline = (cpath, size, sri, stream) => { - stream.push( - new fsm.ReadStream(cpath, { - size, - readSize: MAX_SINGLE_READ_SIZE, - }), - ssri.integrityStream({ - integrity: sri, - size, - }) - ) - return stream -} - -module.exports.stream = readStream -module.exports.readStream = readStream - -function readStream (cache, integrity, opts = {}) { - const { size } = opts - const stream = new Pipeline() - // Set all this up to run on the stream and then just return the stream - Promise.resolve().then(async () => { - const { stat, cpath, sri } = await withContentSri(cache, integrity, async (cpath, sri) => { - // get size - const stat = size ? { size } : await fs.stat(cpath) - return { stat, cpath, sri } - }) - - return readPipeline(cpath, stat.size, sri, stream) - }).catch(err => stream.emit('error', err)) - - return stream -} - -module.exports.copy = copy - -function copy (cache, integrity, dest) { - return withContentSri(cache, integrity, (cpath) => { - return fs.copyFile(cpath, dest) - }) -} - -module.exports.hasContent = hasContent - -async function hasContent (cache, integrity) { - if (!integrity) { - return false - } - - try { - return await withContentSri(cache, integrity, async (cpath, sri) => { - const stat = await fs.stat(cpath) - return { size: stat.size, sri, stat } - }) - } catch (err) { - if (err.code === 'ENOENT') { - return false - } - - if (err.code === 'EPERM') { - /* istanbul ignore else */ - if (process.platform !== 'win32') { - throw err - } else { - return false - } - } - } -} - -async function withContentSri (cache, integrity, fn) { - const sri = ssri.parse(integrity) - // If `integrity` has multiple entries, pick the first digest - // with available local data. 
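As an aside, the multi-digest fallback above rests on ssri's Integrity object. A minimal runnable sketch of the calls involved, using a computed integrity value rather than real cache content:

const ssri = require('ssri')

// Build an integrity value carrying two algorithms, as a registry might.
const sri = ssri.fromData(Buffer.from('hello'), {
  algorithms: ['sha256', 'sha512'],
})

// pickAlgorithm() prefers the strongest algorithm present ('sha512' here);
// each hash under it exposes the hex digest that contentPath() later
// splits into on-disk directory segments.
const algo = sri.pickAlgorithm()
for (const hash of sri[algo]) {
  console.log(algo, hash.hexDigest())
}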
- const algo = sri.pickAlgorithm() - const digests = sri[algo] - - if (digests.length <= 1) { - const cpath = contentPath(cache, digests[0]) - return fn(cpath, digests[0]) - } else { - // Can't use race here because a generic error can happen before - // a ENOENT error, and can happen before a valid result - const results = await Promise.all(digests.map(async (meta) => { - try { - return await withContentSri(cache, meta, fn) - } catch (err) { - if (err.code === 'ENOENT') { - return Object.assign( - new Error('No matching content found for ' + sri.toString()), - { code: 'ENOENT' } - ) - } - return err - } - })) - // Return the first non error if it is found - const result = results.find((r) => !(r instanceof Error)) - if (result) { - return result - } - - // Throw the No matching content found error - const enoentError = results.find((r) => r.code === 'ENOENT') - if (enoentError) { - throw enoentError - } - - // Throw generic error - throw results.find((r) => r instanceof Error) - } -} - -function sizeError (expected, found) { - /* eslint-disable-next-line max-len */ - const err = new Error(`Bad data size: expected inserted data to be ${expected} bytes, but got ${found} instead`) - err.expected = expected - err.found = found - err.code = 'EBADSIZE' - return err -} - -function integrityError (sri, path) { - const err = new Error(`Integrity verification failed for ${sri} (${path})`) - err.code = 'EINTEGRITY' - err.sri = sri - err.path = path - return err -} diff --git a/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/content/rm.js b/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/content/rm.js deleted file mode 100644 index ce58d679e4cb25..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/content/rm.js +++ /dev/null @@ -1,18 +0,0 @@ -'use strict' - -const fs = require('fs/promises') -const contentPath = require('./path') -const { hasContent } = require('./read') - -module.exports = rm - -async function rm (cache, integrity) { - const content = await hasContent(cache, integrity) - // ~pretty~ sure we can't end up with a content lacking sri, but be safe - if (content && content.sri) { - await fs.rm(contentPath(cache, content.sri), { recursive: true, force: true }) - return true - } else { - return false - } -} diff --git a/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/content/write.js b/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/content/write.js deleted file mode 100644 index e7187abca8788a..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/content/write.js +++ /dev/null @@ -1,206 +0,0 @@ -'use strict' - -const events = require('events') - -const contentPath = require('./path') -const fs = require('fs/promises') -const { moveFile } = require('@npmcli/fs') -const { Minipass } = require('minipass') -const Pipeline = require('minipass-pipeline') -const Flush = require('minipass-flush') -const path = require('path') -const ssri = require('ssri') -const uniqueFilename = require('unique-filename') -const fsm = require('fs-minipass') - -module.exports = write - -// Cache of move operations in process so we don't duplicate -const moveOperations = new Map() - -async function write (cache, data, opts = {}) { - const { algorithms, size, integrity } = opts - - if (typeof size === 'number' && data.length !== size) { - throw sizeError(size, data.length) - } - - const sri = ssri.fromData(data, algorithms ? 
{ algorithms } : {}) - if (integrity && !ssri.checkData(data, integrity, opts)) { - throw checksumError(integrity, sri) - } - - for (const algo in sri) { - const tmp = await makeTmp(cache, opts) - const hash = sri[algo].toString() - try { - await fs.writeFile(tmp.target, data, { flag: 'wx' }) - await moveToDestination(tmp, cache, hash, opts) - } finally { - if (!tmp.moved) { - await fs.rm(tmp.target, { recursive: true, force: true }) - } - } - } - return { integrity: sri, size: data.length } -} - -module.exports.stream = writeStream - -// writes proxied to the 'inputStream' that is passed to the Promise -// 'end' is deferred until content is handled. -class CacacheWriteStream extends Flush { - constructor (cache, opts) { - super() - this.opts = opts - this.cache = cache - this.inputStream = new Minipass() - this.inputStream.on('error', er => this.emit('error', er)) - this.inputStream.on('drain', () => this.emit('drain')) - this.handleContentP = null - } - - write (chunk, encoding, cb) { - if (!this.handleContentP) { - this.handleContentP = handleContent( - this.inputStream, - this.cache, - this.opts - ) - this.handleContentP.catch(error => this.emit('error', error)) - } - return this.inputStream.write(chunk, encoding, cb) - } - - flush (cb) { - this.inputStream.end(() => { - if (!this.handleContentP) { - const e = new Error('Cache input stream was empty') - e.code = 'ENODATA' - // empty streams are probably emitting end right away. - // defer this one tick by rejecting a promise on it. - return Promise.reject(e).catch(cb) - } - // eslint-disable-next-line promise/catch-or-return - this.handleContentP.then( - (res) => { - res.integrity && this.emit('integrity', res.integrity) - // eslint-disable-next-line promise/always-return - res.size !== null && this.emit('size', res.size) - cb() - }, - (er) => cb(er) - ) - }) - } -} - -function writeStream (cache, opts = {}) { - return new CacacheWriteStream(cache, opts) -} - -async function handleContent (inputStream, cache, opts) { - const tmp = await makeTmp(cache, opts) - try { - const res = await pipeToTmp(inputStream, cache, tmp.target, opts) - await moveToDestination( - tmp, - cache, - res.integrity, - opts - ) - return res - } finally { - if (!tmp.moved) { - await fs.rm(tmp.target, { recursive: true, force: true }) - } - } -} - -async function pipeToTmp (inputStream, cache, tmpTarget, opts) { - const outStream = new fsm.WriteStream(tmpTarget, { - flags: 'wx', - }) - - if (opts.integrityEmitter) { - // we need to create these all simultaneously since they can fire in any order - const [integrity, size] = await Promise.all([ - events.once(opts.integrityEmitter, 'integrity').then(res => res[0]), - events.once(opts.integrityEmitter, 'size').then(res => res[0]), - new Pipeline(inputStream, outStream).promise(), - ]) - return { integrity, size } - } - - let integrity - let size - const hashStream = ssri.integrityStream({ - integrity: opts.integrity, - algorithms: opts.algorithms, - size: opts.size, - }) - hashStream.on('integrity', i => { - integrity = i - }) - hashStream.on('size', s => { - size = s - }) - - const pipeline = new Pipeline(inputStream, hashStream, outStream) - await pipeline.promise() - return { integrity, size } -} - -async function makeTmp (cache, opts) { - const tmpTarget = uniqueFilename(path.join(cache, 'tmp'), opts.tmpPrefix) - await fs.mkdir(path.dirname(tmpTarget), { recursive: true }) - return { - target: tmpTarget, - moved: false, - } -} - -async function moveToDestination (tmp, cache, sri) { - const destination = 
contentPath(cache, sri) - const destDir = path.dirname(destination) - if (moveOperations.has(destination)) { - return moveOperations.get(destination) - } - moveOperations.set( - destination, - fs.mkdir(destDir, { recursive: true }) - .then(async () => { - await moveFile(tmp.target, destination, { overwrite: false }) - tmp.moved = true - return tmp.moved - }) - .catch(err => { - if (!err.message.startsWith('The destination file exists')) { - throw Object.assign(err, { code: 'EEXIST' }) - } - }).finally(() => { - moveOperations.delete(destination) - }) - - ) - return moveOperations.get(destination) -} - -function sizeError (expected, found) { - /* eslint-disable-next-line max-len */ - const err = new Error(`Bad data size: expected inserted data to be ${expected} bytes, but got ${found} instead`) - err.expected = expected - err.found = found - err.code = 'EBADSIZE' - return err -} - -function checksumError (expected, found) { - const err = new Error(`Integrity check failed: - Wanted: ${expected} - Found: ${found}`) - err.code = 'EINTEGRITY' - err.expected = expected - err.found = found - return err -} diff --git a/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/entry-index.js b/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/entry-index.js deleted file mode 100644 index 89c28f2f257d48..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/entry-index.js +++ /dev/null @@ -1,336 +0,0 @@ -'use strict' - -const crypto = require('crypto') -const { - appendFile, - mkdir, - readFile, - readdir, - rm, - writeFile, -} = require('fs/promises') -const { Minipass } = require('minipass') -const path = require('path') -const ssri = require('ssri') -const uniqueFilename = require('unique-filename') - -const contentPath = require('./content/path') -const hashToSegments = require('./util/hash-to-segments') -const indexV = require('../package.json')['cache-version'].index -const { moveFile } = require('@npmcli/fs') - -const pMap = require('p-map') -const lsStreamConcurrency = 5 - -module.exports.NotFoundError = class NotFoundError extends Error { - constructor (cache, key) { - super(`No cache entry for ${key} found in ${cache}`) - this.code = 'ENOENT' - this.cache = cache - this.key = key - } -} - -module.exports.compact = compact - -async function compact (cache, key, matchFn, opts = {}) { - const bucket = bucketPath(cache, key) - const entries = await bucketEntries(bucket) - const newEntries = [] - // we loop backwards because the bottom-most result is the newest - // since we add new entries with appendFile - for (let i = entries.length - 1; i >= 0; --i) { - const entry = entries[i] - // a null integrity could mean either a delete was appended - // or the user has simply stored an index that does not map - // to any content. we determine if the user wants to keep the - // null integrity based on the validateEntry function passed in options. - // if the integrity is null and no validateEntry is provided, we break - // as we consider the null integrity to be a deletion of everything - // that came before it. 
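The tombstone rule described in that comment is easy to sketch in isolation; the integrity strings below are hypothetical placeholders, and the loop mirrors the backwards walk in compact():

// Newest entries are appended last; a null integrity (with no
// validateEntry option) hides everything older than it.
const entries = [
  { key: 'k', integrity: 'sha512-old' }, // hypothetical digest
  { key: 'k', integrity: null },         // deletion marker
  { key: 'k', integrity: 'sha512-new' }, // hypothetical digest
]

const kept = []
for (let i = entries.length - 1; i >= 0; --i) {
  const entry = entries[i]
  if (entry.integrity === null) break // tombstone: drop the rest
  kept.unshift(entry)
}
console.log(kept) // [ { key: 'k', integrity: 'sha512-new' } ]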
- if (entry.integrity === null && !opts.validateEntry) { - break - } - - // if this entry is valid, and it is either the first entry or - // the newEntries array doesn't already include an entry that - // matches this one based on the provided matchFn, then we add - // it to the beginning of our list - if ((!opts.validateEntry || opts.validateEntry(entry) === true) && - (newEntries.length === 0 || - !newEntries.find((oldEntry) => matchFn(oldEntry, entry)))) { - newEntries.unshift(entry) - } - } - - const newIndex = '\n' + newEntries.map((entry) => { - const stringified = JSON.stringify(entry) - const hash = hashEntry(stringified) - return `${hash}\t${stringified}` - }).join('\n') - - const setup = async () => { - const target = uniqueFilename(path.join(cache, 'tmp'), opts.tmpPrefix) - await mkdir(path.dirname(target), { recursive: true }) - return { - target, - moved: false, - } - } - - const teardown = async (tmp) => { - if (!tmp.moved) { - return rm(tmp.target, { recursive: true, force: true }) - } - } - - const write = async (tmp) => { - await writeFile(tmp.target, newIndex, { flag: 'wx' }) - await mkdir(path.dirname(bucket), { recursive: true }) - // we use @npmcli/move-file directly here because we - // want to overwrite the existing file - await moveFile(tmp.target, bucket) - tmp.moved = true - } - - // write the file atomically - const tmp = await setup() - try { - await write(tmp) - } finally { - await teardown(tmp) - } - - // we reverse the list we generated such that the newest - // entries come first in order to make looping through them easier - // the true passed to formatEntry tells it to keep null - // integrity values, if they made it this far it's because - // validateEntry returned true, and as such we should return it - return newEntries.reverse().map((entry) => formatEntry(cache, entry, true)) -} - -module.exports.insert = insert - -async function insert (cache, key, integrity, opts = {}) { - const { metadata, size, time } = opts - const bucket = bucketPath(cache, key) - const entry = { - key, - integrity: integrity && ssri.stringify(integrity), - time: time || Date.now(), - size, - metadata, - } - try { - await mkdir(path.dirname(bucket), { recursive: true }) - const stringified = JSON.stringify(entry) - // NOTE - Cleverness ahoy! - // - // This works because it's tremendously unlikely for an entry to corrupt - // another while still preserving the string length of the JSON in - // question. So, we just slap the length in there and verify it on read. - // - // Thanks to @isaacs for the whiteboarding session that ended up with - // this. 
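The self-verifying line format is worth seeing end to end. A sketch with a toy entry; hashEntry() here mirrors the sha1 helper defined further down in this file:

const crypto = require('crypto')

// sha1 hex over the JSON text, as in hashEntry() below.
const hashEntry = (str) =>
  crypto.createHash('sha1').update(str).digest('hex')

const entry = { key: 'k', integrity: 'sha512-abc', time: 0 } // toy values
const stringified = JSON.stringify(entry)
const line = `\n${hashEntry(stringified)}\t${stringified}`

// On read, any line whose hash does not match its JSON is discarded,
// which is how torn or interleaved appends are detected.
const [sum, json] = line.trim().split('\t')
console.log(hashEntry(json) === sum) // true for an intact line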
- await appendFile(bucket, `\n${hashEntry(stringified)}\t${stringified}`) - } catch (err) { - if (err.code === 'ENOENT') { - return undefined - } - - throw err - } - return formatEntry(cache, entry) -} - -module.exports.find = find - -async function find (cache, key) { - const bucket = bucketPath(cache, key) - try { - const entries = await bucketEntries(bucket) - return entries.reduce((latest, next) => { - if (next && next.key === key) { - return formatEntry(cache, next) - } else { - return latest - } - }, null) - } catch (err) { - if (err.code === 'ENOENT') { - return null - } else { - throw err - } - } -} - -module.exports.delete = del - -function del (cache, key, opts = {}) { - if (!opts.removeFully) { - return insert(cache, key, null, opts) - } - - const bucket = bucketPath(cache, key) - return rm(bucket, { recursive: true, force: true }) -} - -module.exports.lsStream = lsStream - -function lsStream (cache) { - const indexDir = bucketDir(cache) - const stream = new Minipass({ objectMode: true }) - - // Set all this up to run on the stream and then just return the stream - Promise.resolve().then(async () => { - const buckets = await readdirOrEmpty(indexDir) - await pMap(buckets, async (bucket) => { - const bucketPath = path.join(indexDir, bucket) - const subbuckets = await readdirOrEmpty(bucketPath) - await pMap(subbuckets, async (subbucket) => { - const subbucketPath = path.join(bucketPath, subbucket) - - // "/cachename//./*" - const subbucketEntries = await readdirOrEmpty(subbucketPath) - await pMap(subbucketEntries, async (entry) => { - const entryPath = path.join(subbucketPath, entry) - try { - const entries = await bucketEntries(entryPath) - // using a Map here prevents duplicate keys from showing up - // twice, I guess? - const reduced = entries.reduce((acc, entry) => { - acc.set(entry.key, entry) - return acc - }, new Map()) - // reduced is a map of key => entry - for (const entry of reduced.values()) { - const formatted = formatEntry(cache, entry) - if (formatted) { - stream.write(formatted) - } - } - } catch (err) { - if (err.code === 'ENOENT') { - return undefined - } - throw err - } - }, - { concurrency: lsStreamConcurrency }) - }, - { concurrency: lsStreamConcurrency }) - }, - { concurrency: lsStreamConcurrency }) - stream.end() - return stream - }).catch(err => stream.emit('error', err)) - - return stream -} - -module.exports.ls = ls - -async function ls (cache) { - const entries = await lsStream(cache).collect() - return entries.reduce((acc, xs) => { - acc[xs.key] = xs - return acc - }, {}) -} - -module.exports.bucketEntries = bucketEntries - -async function bucketEntries (bucket, filter) { - const data = await readFile(bucket, 'utf8') - return _bucketEntries(data, filter) -} - -function _bucketEntries (data) { - const entries = [] - data.split('\n').forEach((entry) => { - if (!entry) { - return - } - - const pieces = entry.split('\t') - if (!pieces[1] || hashEntry(pieces[1]) !== pieces[0]) { - // Hash is no good! Corruption or malice? Doesn't matter! 
- // EJECT EJECT - return - } - let obj - try { - obj = JSON.parse(pieces[1]) - } catch (_) { - // eslint-ignore-next-line no-empty-block - } - // coverage disabled here, no need to test with an entry that parses to something falsey - // istanbul ignore else - if (obj) { - entries.push(obj) - } - }) - return entries -} - -module.exports.bucketDir = bucketDir - -function bucketDir (cache) { - return path.join(cache, `index-v${indexV}`) -} - -module.exports.bucketPath = bucketPath - -function bucketPath (cache, key) { - const hashed = hashKey(key) - return path.join.apply( - path, - [bucketDir(cache)].concat(hashToSegments(hashed)) - ) -} - -module.exports.hashKey = hashKey - -function hashKey (key) { - return hash(key, 'sha256') -} - -module.exports.hashEntry = hashEntry - -function hashEntry (str) { - return hash(str, 'sha1') -} - -function hash (str, digest) { - return crypto - .createHash(digest) - .update(str) - .digest('hex') -} - -function formatEntry (cache, entry, keepAll) { - // Treat null digests as deletions. They'll shadow any previous entries. - if (!entry.integrity && !keepAll) { - return null - } - - return { - key: entry.key, - integrity: entry.integrity, - path: entry.integrity ? contentPath(cache, entry.integrity) : undefined, - size: entry.size, - time: entry.time, - metadata: entry.metadata, - } -} - -function readdirOrEmpty (dir) { - return readdir(dir).catch((err) => { - if (err.code === 'ENOENT' || err.code === 'ENOTDIR') { - return [] - } - - throw err - }) -} diff --git a/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/get.js b/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/get.js deleted file mode 100644 index 80ec206c7ecaaa..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/get.js +++ /dev/null @@ -1,170 +0,0 @@ -'use strict' - -const Collect = require('minipass-collect') -const { Minipass } = require('minipass') -const Pipeline = require('minipass-pipeline') - -const index = require('./entry-index') -const memo = require('./memoization') -const read = require('./content/read') - -async function getData (cache, key, opts = {}) { - const { integrity, memoize, size } = opts - const memoized = memo.get(cache, key, opts) - if (memoized && memoize !== false) { - return { - metadata: memoized.entry.metadata, - data: memoized.data, - integrity: memoized.entry.integrity, - size: memoized.entry.size, - } - } - - const entry = await index.find(cache, key, opts) - if (!entry) { - throw new index.NotFoundError(cache, key) - } - const data = await read(cache, entry.integrity, { integrity, size }) - if (memoize) { - memo.put(cache, entry, data, opts) - } - - return { - data, - metadata: entry.metadata, - size: entry.size, - integrity: entry.integrity, - } -} -module.exports = getData - -async function getDataByDigest (cache, key, opts = {}) { - const { integrity, memoize, size } = opts - const memoized = memo.get.byDigest(cache, key, opts) - if (memoized && memoize !== false) { - return memoized - } - - const res = await read(cache, key, { integrity, size }) - if (memoize) { - memo.put.byDigest(cache, key, res, opts) - } - return res -} -module.exports.byDigest = getDataByDigest - -const getMemoizedStream = (memoized) => { - const stream = new Minipass() - stream.on('newListener', function (ev, cb) { - ev === 'metadata' && cb(memoized.entry.metadata) - ev === 'integrity' && cb(memoized.entry.integrity) - ev === 'size' && cb(memoized.entry.size) - }) - stream.end(memoized.data) - return stream -} - -function getStream (cache, key, opts = {}) 
{ - const { memoize, size } = opts - const memoized = memo.get(cache, key, opts) - if (memoized && memoize !== false) { - return getMemoizedStream(memoized) - } - - const stream = new Pipeline() - // Set all this up to run on the stream and then just return the stream - Promise.resolve().then(async () => { - const entry = await index.find(cache, key) - if (!entry) { - throw new index.NotFoundError(cache, key) - } - - stream.emit('metadata', entry.metadata) - stream.emit('integrity', entry.integrity) - stream.emit('size', entry.size) - stream.on('newListener', function (ev, cb) { - ev === 'metadata' && cb(entry.metadata) - ev === 'integrity' && cb(entry.integrity) - ev === 'size' && cb(entry.size) - }) - - const src = read.readStream( - cache, - entry.integrity, - { ...opts, size: typeof size !== 'number' ? entry.size : size } - ) - - if (memoize) { - const memoStream = new Collect.PassThrough() - memoStream.on('collect', data => memo.put(cache, entry, data, opts)) - stream.unshift(memoStream) - } - stream.unshift(src) - return stream - }).catch((err) => stream.emit('error', err)) - - return stream -} - -module.exports.stream = getStream - -function getStreamDigest (cache, integrity, opts = {}) { - const { memoize } = opts - const memoized = memo.get.byDigest(cache, integrity, opts) - if (memoized && memoize !== false) { - const stream = new Minipass() - stream.end(memoized) - return stream - } else { - const stream = read.readStream(cache, integrity, opts) - if (!memoize) { - return stream - } - - const memoStream = new Collect.PassThrough() - memoStream.on('collect', data => memo.put.byDigest( - cache, - integrity, - data, - opts - )) - return new Pipeline(stream, memoStream) - } -} - -module.exports.stream.byDigest = getStreamDigest - -function info (cache, key, opts = {}) { - const { memoize } = opts - const memoized = memo.get(cache, key, opts) - if (memoized && memoize !== false) { - return Promise.resolve(memoized.entry) - } else { - return index.find(cache, key) - } -} -module.exports.info = info - -async function copy (cache, key, dest, opts = {}) { - const entry = await index.find(cache, key, opts) - if (!entry) { - throw new index.NotFoundError(cache, key) - } - await read.copy(cache, entry.integrity, dest, opts) - return { - metadata: entry.metadata, - size: entry.size, - integrity: entry.integrity, - } -} - -module.exports.copy = copy - -async function copyByDigest (cache, key, dest, opts = {}) { - await read.copy(cache, key, dest, opts) - return key -} - -module.exports.copy.byDigest = copyByDigest - -module.exports.hasContent = read.hasContent diff --git a/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/index.js b/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/index.js deleted file mode 100644 index c9b0da5f3a271b..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/index.js +++ /dev/null @@ -1,42 +0,0 @@ -'use strict' - -const get = require('./get.js') -const put = require('./put.js') -const rm = require('./rm.js') -const verify = require('./verify.js') -const { clearMemoized } = require('./memoization.js') -const tmp = require('./util/tmp.js') -const index = require('./entry-index.js') - -module.exports.index = {} -module.exports.index.compact = index.compact -module.exports.index.insert = index.insert - -module.exports.ls = index.ls -module.exports.ls.stream = index.lsStream - -module.exports.get = get -module.exports.get.byDigest = get.byDigest -module.exports.get.stream = get.stream -module.exports.get.stream.byDigest = 
get.stream.byDigest -module.exports.get.copy = get.copy -module.exports.get.copy.byDigest = get.copy.byDigest -module.exports.get.info = get.info -module.exports.get.hasContent = get.hasContent - -module.exports.put = put -module.exports.put.stream = put.stream - -module.exports.rm = rm.entry -module.exports.rm.all = rm.all -module.exports.rm.entry = module.exports.rm -module.exports.rm.content = rm.content - -module.exports.clearMemoized = clearMemoized - -module.exports.tmp = {} -module.exports.tmp.mkdir = tmp.mkdir -module.exports.tmp.withTmp = tmp.withTmp - -module.exports.verify = verify -module.exports.verify.lastRun = verify.lastRun diff --git a/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/memoization.js b/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/memoization.js deleted file mode 100644 index 2ecc60912e4563..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/memoization.js +++ /dev/null @@ -1,72 +0,0 @@ -'use strict' - -const { LRUCache } = require('lru-cache') - -const MEMOIZED = new LRUCache({ - max: 500, - maxSize: 50 * 1024 * 1024, // 50MB - ttl: 3 * 60 * 1000, // 3 minutes - sizeCalculation: (entry, key) => key.startsWith('key:') ? entry.data.length : entry.length, -}) - -module.exports.clearMemoized = clearMemoized - -function clearMemoized () { - const old = {} - MEMOIZED.forEach((v, k) => { - old[k] = v - }) - MEMOIZED.clear() - return old -} - -module.exports.put = put - -function put (cache, entry, data, opts) { - pickMem(opts).set(`key:${cache}:${entry.key}`, { entry, data }) - putDigest(cache, entry.integrity, data, opts) -} - -module.exports.put.byDigest = putDigest - -function putDigest (cache, integrity, data, opts) { - pickMem(opts).set(`digest:${cache}:${integrity}`, data) -} - -module.exports.get = get - -function get (cache, key, opts) { - return pickMem(opts).get(`key:${cache}:${key}`) -} - -module.exports.get.byDigest = getDigest - -function getDigest (cache, integrity, opts) { - return pickMem(opts).get(`digest:${cache}:${integrity}`) -} - -class ObjProxy { - constructor (obj) { - this.obj = obj - } - - get (key) { - return this.obj[key] - } - - set (key, val) { - this.obj[key] = val - } -} - -function pickMem (opts) { - if (!opts || !opts.memoize) { - return MEMOIZED - } else if (opts.memoize.get && opts.memoize.set) { - return opts.memoize - } else if (typeof opts.memoize === 'object') { - return new ObjProxy(opts.memoize) - } else { - return MEMOIZED - } -} diff --git a/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/put.js b/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/put.js deleted file mode 100644 index 9fc932d5f6dec5..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/put.js +++ /dev/null @@ -1,80 +0,0 @@ -'use strict' - -const index = require('./entry-index') -const memo = require('./memoization') -const write = require('./content/write') -const Flush = require('minipass-flush') -const { PassThrough } = require('minipass-collect') -const Pipeline = require('minipass-pipeline') - -const putOpts = (opts) => ({ - algorithms: ['sha512'], - ...opts, -}) - -module.exports = putData - -async function putData (cache, key, data, opts = {}) { - const { memoize } = opts - opts = putOpts(opts) - const res = await write(cache, data, opts) - const entry = await index.insert(cache, key, res.integrity, { ...opts, size: res.size }) - if (memoize) { - memo.put(cache, entry, data, opts) - } - - return res.integrity -} - -module.exports.stream = putStream - -function putStream (cache, key, 
opts = {}) { - const { memoize } = opts - opts = putOpts(opts) - let integrity - let size - let error - - let memoData - const pipeline = new Pipeline() - // first item in the pipeline is the memoizer, because we need - // that to end first and get the collected data. - if (memoize) { - const memoizer = new PassThrough().on('collect', data => { - memoData = data - }) - pipeline.push(memoizer) - } - - // contentStream is a write-only, not a passthrough - // no data comes out of it. - const contentStream = write.stream(cache, opts) - .on('integrity', (int) => { - integrity = int - }) - .on('size', (s) => { - size = s - }) - .on('error', (err) => { - error = err - }) - - pipeline.push(contentStream) - - // last but not least, we write the index and emit hash and size, - // and memoize if we're doing that - pipeline.push(new Flush({ - async flush () { - if (!error) { - const entry = await index.insert(cache, key, integrity, { ...opts, size }) - if (memoize && memoData) { - memo.put(cache, entry, memoData, opts) - } - pipeline.emit('integrity', integrity) - pipeline.emit('size', size) - } - }, - })) - - return pipeline -} diff --git a/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/rm.js b/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/rm.js deleted file mode 100644 index a94760c7cf2430..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/rm.js +++ /dev/null @@ -1,31 +0,0 @@ -'use strict' - -const { rm } = require('fs/promises') -const glob = require('./util/glob.js') -const index = require('./entry-index') -const memo = require('./memoization') -const path = require('path') -const rmContent = require('./content/rm') - -module.exports = entry -module.exports.entry = entry - -function entry (cache, key, opts) { - memo.clearMemoized() - return index.delete(cache, key, opts) -} - -module.exports.content = content - -function content (cache, integrity) { - memo.clearMemoized() - return rmContent(cache, integrity) -} - -module.exports.all = all - -async function all (cache) { - memo.clearMemoized() - const paths = await glob(path.join(cache, '*(content-*|index-*)'), { silent: true, nosort: true }) - return Promise.all(paths.map((p) => rm(p, { recursive: true, force: true }))) -} diff --git a/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/util/glob.js b/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/util/glob.js deleted file mode 100644 index 8500c1c16a429f..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/util/glob.js +++ /dev/null @@ -1,7 +0,0 @@ -'use strict' - -const { glob } = require('glob') -const path = require('path') - -const globify = (pattern) => pattern.split(path.win32.sep).join(path.posix.sep) -module.exports = (path, options) => glob(globify(path), options) diff --git a/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/util/hash-to-segments.js b/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/util/hash-to-segments.js deleted file mode 100644 index 445599b5038088..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/util/hash-to-segments.js +++ /dev/null @@ -1,7 +0,0 @@ -'use strict' - -module.exports = hashToSegments - -function hashToSegments (hash) { - return [hash.slice(0, 2), hash.slice(2, 4), hash.slice(4)] -} diff --git a/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/util/tmp.js b/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/util/tmp.js deleted file mode 100644 index 0bf5302136ebeb..00000000000000 --- 
a/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/util/tmp.js +++ /dev/null @@ -1,26 +0,0 @@ -'use strict' - -const { withTempDir } = require('@npmcli/fs') -const fs = require('fs/promises') -const path = require('path') - -module.exports.mkdir = mktmpdir - -async function mktmpdir (cache, opts = {}) { - const { tmpPrefix } = opts - const tmpDir = path.join(cache, 'tmp') - await fs.mkdir(tmpDir, { recursive: true, owner: 'inherit' }) - // do not use path.join(), it drops the trailing / if tmpPrefix is unset - const target = `${tmpDir}${path.sep}${tmpPrefix || ''}` - return fs.mkdtemp(target, { owner: 'inherit' }) -} - -module.exports.withTmp = withTmp - -function withTmp (cache, opts, cb) { - if (!cb) { - cb = opts - opts = {} - } - return withTempDir(path.join(cache, 'tmp'), cb, opts) -} diff --git a/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/verify.js b/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/verify.js deleted file mode 100644 index d7423da1295b68..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/cacache/lib/verify.js +++ /dev/null @@ -1,257 +0,0 @@ -'use strict' - -const { - mkdir, - readFile, - rm, - stat, - truncate, - writeFile, -} = require('fs/promises') -const pMap = require('p-map') -const contentPath = require('./content/path') -const fsm = require('fs-minipass') -const glob = require('./util/glob.js') -const index = require('./entry-index') -const path = require('path') -const ssri = require('ssri') - -const hasOwnProperty = (obj, key) => - Object.prototype.hasOwnProperty.call(obj, key) - -const verifyOpts = (opts) => ({ - concurrency: 20, - log: { silly () {} }, - ...opts, -}) - -module.exports = verify - -async function verify (cache, opts) { - opts = verifyOpts(opts) - opts.log.silly('verify', 'verifying cache at', cache) - - const steps = [ - markStartTime, - fixPerms, - garbageCollect, - rebuildIndex, - cleanTmp, - writeVerifile, - markEndTime, - ] - - const stats = {} - for (const step of steps) { - const label = step.name - const start = new Date() - const s = await step(cache, opts) - if (s) { - Object.keys(s).forEach((k) => { - stats[k] = s[k] - }) - } - const end = new Date() - if (!stats.runTime) { - stats.runTime = {} - } - stats.runTime[label] = end - start - } - stats.runTime.total = stats.endTime - stats.startTime - opts.log.silly( - 'verify', - 'verification finished for', - cache, - 'in', - `${stats.runTime.total}ms` - ) - return stats -} - -async function markStartTime () { - return { startTime: new Date() } -} - -async function markEndTime () { - return { endTime: new Date() } -} - -async function fixPerms (cache, opts) { - opts.log.silly('verify', 'fixing cache permissions') - await mkdir(cache, { recursive: true }) - return null -} - -// Implements a naive mark-and-sweep tracing garbage collector. -// -// The algorithm is basically as follows: -// 1. Read (and filter) all index entries ("pointers") -// 2. Mark each integrity value as "live" -// 3. Read entire filesystem tree in `content-vX/` dir -// 4. If content is live, verify its checksum and delete it if it fails -// 5. If content is not marked as live, rm it. 
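Step 3's sweep recovers the algorithm and digest directly from each content file path. A sketch against the content-v2 layout documented in content/path.js above; the path itself is hypothetical:

const ssri = require('ssri')

// Layout: <cache>/content-v2/<algo>/<2 hex>/<2 hex>/<remaining hex>
const f = '/cache/content-v2/sha512/ba/da/55deadbeef' // hypothetical
const split = f.split(/[/\\]/)
const digest = split.slice(split.length - 3).join('') // 'bada55deadbeef'
const algo = split[split.length - 4] // 'sha512'

// fromHex() rebuilds the canonical integrity string compared against
// the live set collected in the mark phase.
console.log(ssri.fromHex(digest, algo).toString())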
-// -async function garbageCollect (cache, opts) { - opts.log.silly('verify', 'garbage collecting content') - const indexStream = index.lsStream(cache) - const liveContent = new Set() - indexStream.on('data', (entry) => { - if (opts.filter && !opts.filter(entry)) { - return - } - - // integrity is stringified, re-parse it so we can get each hash - const integrity = ssri.parse(entry.integrity) - for (const algo in integrity) { - liveContent.add(integrity[algo].toString()) - } - }) - await new Promise((resolve, reject) => { - indexStream.on('end', resolve).on('error', reject) - }) - const contentDir = contentPath.contentDir(cache) - const files = await glob(path.join(contentDir, '**'), { - follow: false, - nodir: true, - nosort: true, - }) - const stats = { - verifiedContent: 0, - reclaimedCount: 0, - reclaimedSize: 0, - badContentCount: 0, - keptSize: 0, - } - await pMap( - files, - async (f) => { - const split = f.split(/[/\\]/) - const digest = split.slice(split.length - 3).join('') - const algo = split[split.length - 4] - const integrity = ssri.fromHex(digest, algo) - if (liveContent.has(integrity.toString())) { - const info = await verifyContent(f, integrity) - if (!info.valid) { - stats.reclaimedCount++ - stats.badContentCount++ - stats.reclaimedSize += info.size - } else { - stats.verifiedContent++ - stats.keptSize += info.size - } - } else { - // No entries refer to this content. We can delete. - stats.reclaimedCount++ - const s = await stat(f) - await rm(f, { recursive: true, force: true }) - stats.reclaimedSize += s.size - } - return stats - }, - { concurrency: opts.concurrency } - ) - return stats -} - -async function verifyContent (filepath, sri) { - const contentInfo = {} - try { - const { size } = await stat(filepath) - contentInfo.size = size - contentInfo.valid = true - await ssri.checkStream(new fsm.ReadStream(filepath), sri) - } catch (err) { - if (err.code === 'ENOENT') { - return { size: 0, valid: false } - } - if (err.code !== 'EINTEGRITY') { - throw err - } - - await rm(filepath, { recursive: true, force: true }) - contentInfo.valid = false - } - return contentInfo -} - -async function rebuildIndex (cache, opts) { - opts.log.silly('verify', 'rebuilding index') - const entries = await index.ls(cache) - const stats = { - missingContent: 0, - rejectedEntries: 0, - totalEntries: 0, - } - const buckets = {} - for (const k in entries) { - /* istanbul ignore else */ - if (hasOwnProperty(entries, k)) { - const hashed = index.hashKey(k) - const entry = entries[k] - const excluded = opts.filter && !opts.filter(entry) - excluded && stats.rejectedEntries++ - if (buckets[hashed] && !excluded) { - buckets[hashed].push(entry) - } else if (buckets[hashed] && excluded) { - // skip - } else if (excluded) { - buckets[hashed] = [] - buckets[hashed]._path = index.bucketPath(cache, k) - } else { - buckets[hashed] = [entry] - buckets[hashed]._path = index.bucketPath(cache, k) - } - } - } - await pMap( - Object.keys(buckets), - (key) => { - return rebuildBucket(cache, buckets[key], stats, opts) - }, - { concurrency: opts.concurrency } - ) - return stats -} - -async function rebuildBucket (cache, bucket, stats) { - await truncate(bucket._path) - // This needs to be serialized because cacache explicitly - // lets very racy bucket conflicts clobber each other. 
- for (const entry of bucket) { - const content = contentPath(cache, entry.integrity) - try { - await stat(content) - await index.insert(cache, entry.key, entry.integrity, { - metadata: entry.metadata, - size: entry.size, - time: entry.time, - }) - stats.totalEntries++ - } catch (err) { - if (err.code === 'ENOENT') { - stats.rejectedEntries++ - stats.missingContent++ - } else { - throw err - } - } - } -} - -function cleanTmp (cache, opts) { - opts.log.silly('verify', 'cleaning tmp directory') - return rm(path.join(cache, 'tmp'), { recursive: true, force: true }) -} - -async function writeVerifile (cache, opts) { - const verifile = path.join(cache, '_lastverified') - opts.log.silly('verify', 'writing verifile to ' + verifile) - return writeFile(verifile, `${Date.now()}`) -} - -module.exports.lastRun = lastRun - -async function lastRun (cache) { - const data = await readFile(path.join(cache, '_lastverified'), { encoding: 'utf8' }) - return new Date(+data) -} diff --git a/deps/npm/node_modules/tuf-js/node_modules/cacache/package.json b/deps/npm/node_modules/tuf-js/node_modules/cacache/package.json deleted file mode 100644 index 6e6219158ed759..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/cacache/package.json +++ /dev/null @@ -1,82 +0,0 @@ -{ - "name": "cacache", - "version": "18.0.4", - "cache-version": { - "content": "2", - "index": "5" - }, - "description": "Fast, fault-tolerant, cross-platform, disk-based, data-agnostic, content-addressable cache.", - "main": "lib/index.js", - "files": [ - "bin/", - "lib/" - ], - "scripts": { - "test": "tap", - "snap": "tap", - "coverage": "tap", - "test-docker": "docker run -it --rm --name pacotest -v \"$PWD\":/tmp -w /tmp node:latest npm test", - "lint": "eslint \"**/*.{js,cjs,ts,mjs,jsx,tsx}\"", - "npmclilint": "npmcli-lint", - "lintfix": "npm run lint -- --fix", - "postsnap": "npm run lintfix --", - "postlint": "template-oss-check", - "posttest": "npm run lint", - "template-oss-apply": "template-oss-apply --force" - }, - "repository": { - "type": "git", - "url": "git+https://github.com/npm/cacache.git" - }, - "keywords": [ - "cache", - "caching", - "content-addressable", - "sri", - "sri hash", - "subresource integrity", - "cache", - "storage", - "store", - "file store", - "filesystem", - "disk cache", - "disk storage" - ], - "license": "ISC", - "dependencies": { - "@npmcli/fs": "^3.1.0", - "fs-minipass": "^3.0.0", - "glob": "^10.2.2", - "lru-cache": "^10.0.1", - "minipass": "^7.0.3", - "minipass-collect": "^2.0.1", - "minipass-flush": "^1.0.5", - "minipass-pipeline": "^1.2.4", - "p-map": "^4.0.0", - "ssri": "^10.0.0", - "tar": "^6.1.11", - "unique-filename": "^3.0.0" - }, - "devDependencies": { - "@npmcli/eslint-config": "^4.0.0", - "@npmcli/template-oss": "4.22.0", - "tap": "^16.0.0" - }, - "engines": { - "node": "^16.14.0 || >=18.0.0" - }, - "templateOSS": { - "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. Edits may be overwritten.", - "windowsCI": false, - "version": "4.22.0", - "publish": "true" - }, - "author": "GitHub Inc.", - "tap": { - "nyc-arg": [ - "--exclude", - "tap-snapshots/**" - ] - } -} diff --git a/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/LICENSE b/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/LICENSE deleted file mode 100644 index 1808eb2844231c..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/LICENSE +++ /dev/null @@ -1,16 +0,0 @@ -ISC License - -Copyright 2017-2022 (c) npm, Inc. 
- -Permission to use, copy, modify, and/or distribute this software for -any purpose with or without fee is hereby granted, provided that the -above copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE COPYRIGHT HOLDER DISCLAIMS -ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE -COPYRIGHT HOLDER BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR -CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS -OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE -OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE -USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/cache/entry.js b/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/cache/entry.js deleted file mode 100644 index bfcfacbcc95e18..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/cache/entry.js +++ /dev/null @@ -1,471 +0,0 @@ -const { Request, Response } = require('minipass-fetch') -const { Minipass } = require('minipass') -const MinipassFlush = require('minipass-flush') -const cacache = require('cacache') -const url = require('url') - -const CachingMinipassPipeline = require('../pipeline.js') -const CachePolicy = require('./policy.js') -const cacheKey = require('./key.js') -const remote = require('../remote.js') - -const hasOwnProperty = (obj, prop) => Object.prototype.hasOwnProperty.call(obj, prop) - -// allow list for request headers that will be written to the cache index -// note: we will also store any request headers -// that are named in a response's vary header -const KEEP_REQUEST_HEADERS = [ - 'accept-charset', - 'accept-encoding', - 'accept-language', - 'accept', - 'cache-control', -] - -// allow list for response headers that will be written to the cache index -// note: we must not store the real response's age header, or when we load -// a cache policy based on the metadata it will think the cached response -// is always stale -const KEEP_RESPONSE_HEADERS = [ - 'cache-control', - 'content-encoding', - 'content-language', - 'content-type', - 'date', - 'etag', - 'expires', - 'last-modified', - 'link', - 'location', - 'pragma', - 'vary', -] - -// return an object containing all metadata to be written to the index -const getMetadata = (request, response, options) => { - const metadata = { - time: Date.now(), - url: request.url, - reqHeaders: {}, - resHeaders: {}, - - // options on which we must match the request and vary the response - options: { - compress: options.compress != null ? 
options.compress : request.compress, - }, - } - - // only save the status if it's not a 200 or 304 - if (response.status !== 200 && response.status !== 304) { - metadata.status = response.status - } - - for (const name of KEEP_REQUEST_HEADERS) { - if (request.headers.has(name)) { - metadata.reqHeaders[name] = request.headers.get(name) - } - } - - // if the request's host header differs from the host in the url - // we need to keep it, otherwise it's just noise and we ignore it - const host = request.headers.get('host') - const parsedUrl = new url.URL(request.url) - if (host && parsedUrl.host !== host) { - metadata.reqHeaders.host = host - } - - // if the response has a vary header, make sure - // we store the relevant request headers too - if (response.headers.has('vary')) { - const vary = response.headers.get('vary') - // a vary of "*" means every header causes a different response. - // in that scenario, we do not include any additional headers - // as the freshness check will always fail anyway and we don't - // want to bloat the cache indexes - if (vary !== '*') { - // copy any other request headers that will vary the response - const varyHeaders = vary.trim().toLowerCase().split(/\s*,\s*/) - for (const name of varyHeaders) { - if (request.headers.has(name)) { - metadata.reqHeaders[name] = request.headers.get(name) - } - } - } - } - - for (const name of KEEP_RESPONSE_HEADERS) { - if (response.headers.has(name)) { - metadata.resHeaders[name] = response.headers.get(name) - } - } - - for (const name of options.cacheAdditionalHeaders) { - if (response.headers.has(name)) { - metadata.resHeaders[name] = response.headers.get(name) - } - } - - return metadata -} - -// symbols used to hide objects that may be lazily evaluated in a getter -const _request = Symbol('request') -const _response = Symbol('response') -const _policy = Symbol('policy') - -class CacheEntry { - constructor ({ entry, request, response, options }) { - if (entry) { - this.key = entry.key - this.entry = entry - // previous versions of this module didn't write an explicit timestamp in - // the metadata, so fall back to the entry's timestamp. 
we can't use the - // entry timestamp to determine staleness because cacache will update it - // when it verifies its data - this.entry.metadata.time = this.entry.metadata.time || this.entry.time - } else { - this.key = cacheKey(request) - } - - this.options = options - - // these properties are behind getters that lazily evaluate - this[_request] = request - this[_response] = response - this[_policy] = null - } - - // returns a CacheEntry instance that satisfies the given request - // or undefined if no existing entry satisfies - static async find (request, options) { - try { - // compacts the index and returns an array of unique entries - var matches = await cacache.index.compact(options.cachePath, cacheKey(request), (A, B) => { - const entryA = new CacheEntry({ entry: A, options }) - const entryB = new CacheEntry({ entry: B, options }) - return entryA.policy.satisfies(entryB.request) - }, { - validateEntry: (entry) => { - // clean out entries with a buggy content-encoding value - if (entry.metadata && - entry.metadata.resHeaders && - entry.metadata.resHeaders['content-encoding'] === null) { - return false - } - - // if an integrity is null, it needs to have a status specified - if (entry.integrity === null) { - return !!(entry.metadata && entry.metadata.status) - } - - return true - }, - }) - } catch (err) { - // if the compact request fails, ignore the error and return - return - } - - // a cache mode of 'reload' means to behave as though we have no cache - // on the way to the network. return undefined to allow cacheFetch to - // create a brand new request no matter what. - if (options.cache === 'reload') { - return - } - - // find the specific entry that satisfies the request - let match - for (const entry of matches) { - const _entry = new CacheEntry({ - entry, - options, - }) - - if (_entry.policy.satisfies(request)) { - match = _entry - break - } - } - - return match - } - - // if the user made a PUT/POST/PATCH then we invalidate our - // cache for the same url by deleting the index entirely - static async invalidate (request, options) { - const key = cacheKey(request) - try { - await cacache.rm.entry(options.cachePath, key, { removeFully: true }) - } catch (err) { - // ignore errors - } - } - - get request () { - if (!this[_request]) { - this[_request] = new Request(this.entry.metadata.url, { - method: 'GET', - headers: this.entry.metadata.reqHeaders, - ...this.entry.metadata.options, - }) - } - - return this[_request] - } - - get response () { - if (!this[_response]) { - this[_response] = new Response(null, { - url: this.entry.metadata.url, - counter: this.options.counter, - status: this.entry.metadata.status || 200, - headers: { - ...this.entry.metadata.resHeaders, - 'content-length': this.entry.size, - }, - }) - } - - return this[_response] - } - - get policy () { - if (!this[_policy]) { - this[_policy] = new CachePolicy({ - entry: this.entry, - request: this.request, - response: this.response, - options: this.options, - }) - } - - return this[_policy] - } - - // wraps the response in a pipeline that stores the data - // in the cache while the user consumes it - async store (status) { - // if we got a status other than 200, 301, or 308, - // or the CachePolicy forbid storage, append the - // cache status header and return it untouched - if ( - this.request.method !== 'GET' || - ![200, 301, 308].includes(this.response.status) || - !this.policy.storable() - ) { - this.response.headers.set('x-local-cache-status', 'skip') - return this.response - } - - const size = 
this.response.headers.get('content-length') - const cacheOpts = { - algorithms: this.options.algorithms, - metadata: getMetadata(this.request, this.response, this.options), - size, - integrity: this.options.integrity, - integrityEmitter: this.response.body.hasIntegrityEmitter && this.response.body, - } - - let body = null - // we only set a body if the status is a 200, redirects are - // stored as metadata only - if (this.response.status === 200) { - let cacheWriteResolve, cacheWriteReject - const cacheWritePromise = new Promise((resolve, reject) => { - cacheWriteResolve = resolve - cacheWriteReject = reject - }).catch((err) => { - body.emit('error', err) - }) - - body = new CachingMinipassPipeline({ events: ['integrity', 'size'] }, new MinipassFlush({ - flush () { - return cacheWritePromise - }, - })) - // this is always true since if we aren't reusing the one from the remote fetch, we - // are using the one from cacache - body.hasIntegrityEmitter = true - - const onResume = () => { - const tee = new Minipass() - const cacheStream = cacache.put.stream(this.options.cachePath, this.key, cacheOpts) - // re-emit the integrity and size events on our new response body so they can be reused - cacheStream.on('integrity', i => body.emit('integrity', i)) - cacheStream.on('size', s => body.emit('size', s)) - // stick a flag on here so downstream users will know if they can expect integrity events - tee.pipe(cacheStream) - // TODO if the cache write fails, log a warning but return the response anyway - // eslint-disable-next-line promise/catch-or-return - cacheStream.promise().then(cacheWriteResolve, cacheWriteReject) - body.unshift(tee) - body.unshift(this.response.body) - } - - body.once('resume', onResume) - body.once('end', () => body.removeListener('resume', onResume)) - } else { - await cacache.index.insert(this.options.cachePath, this.key, null, cacheOpts) - } - - // note: we do not set the x-local-cache-hash header because we do not know - // the hash value until after the write to the cache completes, which doesn't - // happen until after the response has been sent and it's too late to write - // the header anyway - this.response.headers.set('x-local-cache', encodeURIComponent(this.options.cachePath)) - this.response.headers.set('x-local-cache-key', encodeURIComponent(this.key)) - this.response.headers.set('x-local-cache-mode', 'stream') - this.response.headers.set('x-local-cache-status', status) - this.response.headers.set('x-local-cache-time', new Date().toISOString()) - const newResponse = new Response(body, { - url: this.response.url, - status: this.response.status, - headers: this.response.headers, - counter: this.options.counter, - }) - return newResponse - } - - // use the cached data to create a response and return it - async respond (method, options, status) { - let response - if (method === 'HEAD' || [301, 308].includes(this.response.status)) { - // if the request is a HEAD, or the response is a redirect, - // then the metadata in the entry already includes everything - // we need to build a response - response = this.response - } else { - // we're responding with a full cached response, so create a body - // that reads from cacache and attach it to a new Response - const body = new Minipass() - const headers = { ...this.policy.responseHeaders() } - - const onResume = () => { - const cacheStream = cacache.get.stream.byDigest( - this.options.cachePath, this.entry.integrity, { memoize: this.options.memoize } - ) - cacheStream.on('error', async (err) => { - cacheStream.pause() - if 
(err.code === 'EINTEGRITY') { - await cacache.rm.content( - this.options.cachePath, this.entry.integrity, { memoize: this.options.memoize } - ) - } - if (err.code === 'ENOENT' || err.code === 'EINTEGRITY') { - await CacheEntry.invalidate(this.request, this.options) - } - body.emit('error', err) - cacheStream.resume() - }) - // emit the integrity and size events based on our metadata so we're consistent - body.emit('integrity', this.entry.integrity) - body.emit('size', Number(headers['content-length'])) - cacheStream.pipe(body) - } - - body.once('resume', onResume) - body.once('end', () => body.removeListener('resume', onResume)) - response = new Response(body, { - url: this.entry.metadata.url, - counter: options.counter, - status: 200, - headers, - }) - } - - response.headers.set('x-local-cache', encodeURIComponent(this.options.cachePath)) - response.headers.set('x-local-cache-hash', encodeURIComponent(this.entry.integrity)) - response.headers.set('x-local-cache-key', encodeURIComponent(this.key)) - response.headers.set('x-local-cache-mode', 'stream') - response.headers.set('x-local-cache-status', status) - response.headers.set('x-local-cache-time', new Date(this.entry.metadata.time).toUTCString()) - return response - } - - // use the provided request along with this cache entry to - // revalidate the stored response. returns a response, either - // from the cache or from the update - async revalidate (request, options) { - const revalidateRequest = new Request(request, { - headers: this.policy.revalidationHeaders(request), - }) - - try { - // NOTE: be sure to remove the headers property from the - // user supplied options, since we have already defined - // them on the new request object. if they're still in the - // options then those will overwrite the ones from the policy - var response = await remote(revalidateRequest, { - ...options, - headers: undefined, - }) - } catch (err) { - // if the network fetch fails, return the stale - // cached response unless it has a cache-control - // of 'must-revalidate' - if (!this.policy.mustRevalidate) { - return this.respond(request.method, options, 'stale') - } - - throw err - } - - if (this.policy.revalidated(revalidateRequest, response)) { - // we got a 304, write a new index to the cache and respond from cache - const metadata = getMetadata(request, response, options) - // 304 responses do not include headers that are specific to the response data - // since they do not include a body, so we copy values for headers that were - // in the old cache entry to the new one, if the new metadata does not already - // include that header - for (const name of KEEP_RESPONSE_HEADERS) { - if ( - !hasOwnProperty(metadata.resHeaders, name) && - hasOwnProperty(this.entry.metadata.resHeaders, name) - ) { - metadata.resHeaders[name] = this.entry.metadata.resHeaders[name] - } - } - - for (const name of options.cacheAdditionalHeaders) { - const inMeta = hasOwnProperty(metadata.resHeaders, name) - const inEntry = hasOwnProperty(this.entry.metadata.resHeaders, name) - const inPolicy = hasOwnProperty(this.policy.response.headers, name) - - // if the header is in the existing entry, but it is not in the metadata - // then we need to write it to the metadata as this will refresh the on-disk cache - if (!inMeta && inEntry) { - metadata.resHeaders[name] = this.entry.metadata.resHeaders[name] - } - // if the header is in the metadata, but not in the policy, then we need to set - // it in the policy so that it's included in the immediate response. 
future - // responses will load a new cache entry, so we don't need to change that - if (!inPolicy && inMeta) { - this.policy.response.headers[name] = metadata.resHeaders[name] - } - } - - try { - await cacache.index.insert(options.cachePath, this.key, this.entry.integrity, { - size: this.entry.size, - metadata, - }) - } catch (err) { - // if updating the cache index fails, we ignore it and - // respond anyway - } - return this.respond(request.method, options, 'revalidated') - } - - // if we got a modified response, create a new entry based on it - const newEntry = new CacheEntry({ - request, - response, - options, - }) - - // respond with the new entry while writing it to the cache - return newEntry.store('updated') - } -} - -module.exports = CacheEntry diff --git a/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/cache/errors.js b/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/cache/errors.js deleted file mode 100644 index 67a66573bebe66..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/cache/errors.js +++ /dev/null @@ -1,11 +0,0 @@ -class NotCachedError extends Error { - constructor (url) { - /* eslint-disable-next-line max-len */ - super(`request to ${url} failed: cache mode is 'only-if-cached' but no cached response is available.`) - this.code = 'ENOTCACHED' - } -} - -module.exports = { - NotCachedError, -} diff --git a/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/cache/index.js b/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/cache/index.js deleted file mode 100644 index 0de49d23fb9336..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/cache/index.js +++ /dev/null @@ -1,49 +0,0 @@ -const { NotCachedError } = require('./errors.js') -const CacheEntry = require('./entry.js') -const remote = require('../remote.js') - -// do whatever is necessary to get a Response and return it -const cacheFetch = async (request, options) => { - // try to find a cached entry that satisfies this request - const entry = await CacheEntry.find(request, options) - if (!entry) { - // no cached result, if the cache mode is 'only-if-cached' that's a failure - if (options.cache === 'only-if-cached') { - throw new NotCachedError(request.url) - } - - // otherwise, we make a request, store it and return it - const response = await remote(request, options) - const newEntry = new CacheEntry({ request, response, options }) - return newEntry.store('miss') - } - - // we have a cached response that satisfies this request, however if the cache - // mode is 'no-cache' then we send the revalidation request no matter what - if (options.cache === 'no-cache') { - return entry.revalidate(request, options) - } - - // if the cached entry is not stale, or if the cache mode is 'force-cache' or - // 'only-if-cached' we can respond with the cached entry. set the status - // based on the result of needsRevalidation and respond - const _needsRevalidation = entry.policy.needsRevalidation(request) - if (options.cache === 'force-cache' || - options.cache === 'only-if-cached' || - !_needsRevalidation) { - return entry.respond(request.method, options, _needsRevalidation ? 
'stale' : 'hit') - } - - // if we got here, the cache entry is stale so revalidate it - return entry.revalidate(request, options) -} - -cacheFetch.invalidate = async (request, options) => { - if (!options.cachePath) { - return - } - - return CacheEntry.invalidate(request, options) -} - -module.exports = cacheFetch diff --git a/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/cache/key.js b/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/cache/key.js deleted file mode 100644 index f7684d562b7fae..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/cache/key.js +++ /dev/null @@ -1,17 +0,0 @@ -const { URL, format } = require('url') - -// options passed to url.format() when generating a key -const formatOptions = { - auth: false, - fragment: false, - search: true, - unicode: false, -} - -// returns a string to be used as the cache key for the Request -const cacheKey = (request) => { - const parsed = new URL(request.url) - return `make-fetch-happen:request-cache:${format(parsed, formatOptions)}` -} - -module.exports = cacheKey diff --git a/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/cache/policy.js b/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/cache/policy.js deleted file mode 100644 index ada3c8600dae92..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/cache/policy.js +++ /dev/null @@ -1,161 +0,0 @@ -const CacheSemantics = require('http-cache-semantics') -const Negotiator = require('negotiator') -const ssri = require('ssri') - -// options passed to http-cache-semantics constructor -const policyOptions = { - shared: false, - ignoreCargoCult: true, -} - -// a fake empty response, used when only testing the -// request for storability -const emptyResponse = { status: 200, headers: {} } - -// returns a plain object representation of the Request -const requestObject = (request) => { - const _obj = { - method: request.method, - url: request.url, - headers: {}, - compress: request.compress, - } - - request.headers.forEach((value, key) => { - _obj.headers[key] = value - }) - - return _obj -} - -// returns a plain object representation of the Response -const responseObject = (response) => { - const _obj = { - status: response.status, - headers: {}, - } - - response.headers.forEach((value, key) => { - _obj.headers[key] = value - }) - - return _obj -} - -class CachePolicy { - constructor ({ entry, request, response, options }) { - this.entry = entry - this.request = requestObject(request) - this.response = responseObject(response) - this.options = options - this.policy = new CacheSemantics(this.request, this.response, policyOptions) - - if (this.entry) { - // if we have an entry, copy the timestamp to the _responseTime - // this is necessary because the CacheSemantics constructor forces - // the value to Date.now() which means a policy created from a - // cache entry is likely to always identify itself as stale - this.policy._responseTime = this.entry.metadata.time - } - } - - // static method to quickly determine if a request alone is storable - static storable (request, options) { - // no cachePath means no caching - if (!options.cachePath) { - return false - } - - // user explicitly asked not to cache - if (options.cache === 'no-store') { - return false - } - - // we only cache GET and HEAD requests - if (!['GET', 'HEAD'].includes(request.method)) { - return false - } - - // otherwise, let http-cache-semantics make the decision - // based on the request's headers 
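// --- illustration (not part of the removed sources) ---
// A minimal usage sketch of the storability gate above and the cache-status
// flow from cacheFetch() earlier, via the package's public API. The URL and
// cachePath values are placeholders, not taken from this diff.
const makeFetchHappen = require('make-fetch-happen')
const cachedFetch = makeFetchHappen.defaults({ cachePath: '/tmp/_cacache' })

async function demo () {
  // GET + a cachePath + any cache mode but 'no-store' passes storable()
  const res = await cachedFetch('https://registry.npmjs.org/abbrev')
  // 'miss' on a cold cache; 'hit' or 'stale' on later runs, depending on freshness
  console.log(res.headers.get('x-local-cache-status'))
  // non-GET/HEAD requests skip the cache, and a 2xx/3xx response to one
  // invalidates any cached entry for the same key
}
// --- end illustration ---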
- const policy = new CacheSemantics(requestObject(request), emptyResponse, policyOptions) - return policy.storable() - } - - // returns true if the policy satisfies the request - satisfies (request) { - const _req = requestObject(request) - if (this.request.headers.host !== _req.headers.host) { - return false - } - - if (this.request.compress !== _req.compress) { - return false - } - - const negotiatorA = new Negotiator(this.request) - const negotiatorB = new Negotiator(_req) - - if (JSON.stringify(negotiatorA.mediaTypes()) !== JSON.stringify(negotiatorB.mediaTypes())) { - return false - } - - if (JSON.stringify(negotiatorA.languages()) !== JSON.stringify(negotiatorB.languages())) { - return false - } - - if (JSON.stringify(negotiatorA.encodings()) !== JSON.stringify(negotiatorB.encodings())) { - return false - } - - if (this.options.integrity) { - return ssri.parse(this.options.integrity).match(this.entry.integrity) - } - - return true - } - - // returns true if the request and response allow caching - storable () { - return this.policy.storable() - } - - // NOTE: this is a hack to avoid parsing the cache-control - // header ourselves, it returns true if the response's - // cache-control contains must-revalidate - get mustRevalidate () { - return !!this.policy._rescc['must-revalidate'] - } - - // returns true if the cached response requires revalidation - // for the given request - needsRevalidation (request) { - const _req = requestObject(request) - // force method to GET because we only cache GETs - // but can serve a HEAD from a cached GET - _req.method = 'GET' - return !this.policy.satisfiesWithoutRevalidation(_req) - } - - responseHeaders () { - return this.policy.responseHeaders() - } - - // returns a new object containing the appropriate headers - // to send a revalidation request - revalidationHeaders (request) { - const _req = requestObject(request) - return this.policy.revalidationHeaders(_req) - } - - // returns true if the request/response was revalidated - // successfully. returns false if a new response was received - revalidated (request, response) { - const _req = requestObject(request) - const _res = responseObject(response) - const policy = this.policy.revalidatedPolicy(_req, _res) - return !policy.modified - } -} - -module.exports = CachePolicy diff --git a/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/fetch.js b/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/fetch.js deleted file mode 100644 index 233ba67e165502..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/fetch.js +++ /dev/null @@ -1,118 +0,0 @@ -'use strict' - -const { FetchError, Request, isRedirect } = require('minipass-fetch') -const url = require('url') - -const CachePolicy = require('./cache/policy.js') -const cache = require('./cache/index.js') -const remote = require('./remote.js') - -// given a Request, a Response and user options -// return true if the response is a redirect that -// can be followed. 
we throw errors that will result -// in the fetch being rejected if the redirect is -// possible but invalid for some reason -const canFollowRedirect = (request, response, options) => { - if (!isRedirect(response.status)) { - return false - } - - if (options.redirect === 'manual') { - return false - } - - if (options.redirect === 'error') { - throw new FetchError(`redirect mode is set to error: ${request.url}`, - 'no-redirect', { code: 'ENOREDIRECT' }) - } - - if (!response.headers.has('location')) { - throw new FetchError(`redirect location header missing for: ${request.url}`, - 'no-location', { code: 'EINVALIDREDIRECT' }) - } - - if (request.counter >= request.follow) { - throw new FetchError(`maximum redirect reached at: ${request.url}`, - 'max-redirect', { code: 'EMAXREDIRECT' }) - } - - return true -} - -// given a Request, a Response, and the user's options return an object -// with a new Request and a new options object that will be used for -// following the redirect -const getRedirect = (request, response, options) => { - const _opts = { ...options } - const location = response.headers.get('location') - const redirectUrl = new url.URL(location, /^https?:/.test(location) ? undefined : request.url) - // Comment below is used under the following license: - /** - * @license - * Copyright (c) 2010-2012 Mikeal Rogers - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * http://www.apache.org/licenses/LICENSE-2.0 - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an "AS - * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language - * governing permissions and limitations under the License. - */ - - // Remove authorization if changing hostnames (but not if just - // changing ports or protocols). This matches the behavior of request: - // https://github.com/request/request/blob/b12a6245/lib/redirect.js#L134-L138 - if (new url.URL(request.url).hostname !== redirectUrl.hostname) { - request.headers.delete('authorization') - request.headers.delete('cookie') - } - - // for POST request with 301/302 response, or any request with 303 response, - // use GET when following redirect - if ( - response.status === 303 || - (request.method === 'POST' && [301, 302].includes(response.status)) - ) { - _opts.method = 'GET' - _opts.body = null - request.headers.delete('content-length') - } - - _opts.headers = {} - request.headers.forEach((value, key) => { - _opts.headers[key] = value - }) - - _opts.counter = ++request.counter - const redirectReq = new Request(url.format(redirectUrl), _opts) - return { - request: redirectReq, - options: _opts, - } -} - -const fetch = async (request, options) => { - const response = CachePolicy.storable(request, options) - ? 
await cache(request, options) - : await remote(request, options) - - // if the request wasn't a GET or HEAD, and the response - // status is between 200 and 399 inclusive, invalidate the - // request url - if (!['GET', 'HEAD'].includes(request.method) && - response.status >= 200 && - response.status <= 399) { - await cache.invalidate(request, options) - } - - if (!canFollowRedirect(request, response, options)) { - return response - } - - const redirect = getRedirect(request, response, options) - return fetch(redirect.request, redirect.options) -} - -module.exports = fetch diff --git a/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/index.js b/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/index.js deleted file mode 100644 index 2f12e8e1b61131..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/index.js +++ /dev/null @@ -1,41 +0,0 @@ -const { FetchError, Headers, Request, Response } = require('minipass-fetch') - -const configureOptions = require('./options.js') -const fetch = require('./fetch.js') - -const makeFetchHappen = (url, opts) => { - const options = configureOptions(opts) - - const request = new Request(url, options) - return fetch(request, options) -} - -makeFetchHappen.defaults = (defaultUrl, defaultOptions = {}, wrappedFetch = makeFetchHappen) => { - if (typeof defaultUrl === 'object') { - defaultOptions = defaultUrl - defaultUrl = null - } - - const defaultedFetch = (url, options = {}) => { - const finalUrl = url || defaultUrl - const finalOptions = { - ...defaultOptions, - ...options, - headers: { - ...defaultOptions.headers, - ...options.headers, - }, - } - return wrappedFetch(finalUrl, finalOptions) - } - - defaultedFetch.defaults = (defaultUrl1, defaultOptions1 = {}) => - makeFetchHappen.defaults(defaultUrl1, defaultOptions1, defaultedFetch) - return defaultedFetch -} - -module.exports = makeFetchHappen -module.exports.FetchError = FetchError -module.exports.Headers = Headers -module.exports.Request = Request -module.exports.Response = Response diff --git a/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/options.js b/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/options.js deleted file mode 100644 index f77511279f831d..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/options.js +++ /dev/null @@ -1,54 +0,0 @@ -const dns = require('dns') - -const conditionalHeaders = [ - 'if-modified-since', - 'if-none-match', - 'if-unmodified-since', - 'if-match', - 'if-range', -] - -const configureOptions = (opts) => { - const { strictSSL, ...options } = { ...opts } - options.method = options.method ? 
options.method.toUpperCase() : 'GET' - options.rejectUnauthorized = strictSSL !== false - - if (!options.retry) { - options.retry = { retries: 0 } - } else if (typeof options.retry === 'string') { - const retries = parseInt(options.retry, 10) - if (isFinite(retries)) { - options.retry = { retries } - } else { - options.retry = { retries: 0 } - } - } else if (typeof options.retry === 'number') { - options.retry = { retries: options.retry } - } else { - options.retry = { retries: 0, ...options.retry } - } - - options.dns = { ttl: 5 * 60 * 1000, lookup: dns.lookup, ...options.dns } - - options.cache = options.cache || 'default' - if (options.cache === 'default') { - const hasConditionalHeader = Object.keys(options.headers || {}).some((name) => { - return conditionalHeaders.includes(name.toLowerCase()) - }) - if (hasConditionalHeader) { - options.cache = 'no-store' - } - } - - options.cacheAdditionalHeaders = options.cacheAdditionalHeaders || [] - - // cacheManager is deprecated, but if it's set and - // cachePath is not we should copy it to the new field - if (options.cacheManager && !options.cachePath) { - options.cachePath = options.cacheManager - } - - return options -} - -module.exports = configureOptions diff --git a/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/pipeline.js b/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/pipeline.js deleted file mode 100644 index b1d221b2d0ce31..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/pipeline.js +++ /dev/null @@ -1,41 +0,0 @@ -'use strict' - -const MinipassPipeline = require('minipass-pipeline') - -class CachingMinipassPipeline extends MinipassPipeline { - #events = [] - #data = new Map() - - constructor (opts, ...streams) { - // CRITICAL: do NOT pass the streams to the call to super(), this will start - // the flow of data and potentially cause the events we need to catch to emit - // before we've finished our own setup. 
instead we call super() with no args, - // finish our setup, and then push the streams into ourselves to start the - // data flow - super() - this.#events = opts.events - - /* istanbul ignore next - coverage disabled because this is pointless to test here */ - if (streams.length) { - this.push(...streams) - } - } - - on (event, handler) { - if (this.#events.includes(event) && this.#data.has(event)) { - return handler(...this.#data.get(event)) - } - - return super.on(event, handler) - } - - emit (event, ...data) { - if (this.#events.includes(event)) { - this.#data.set(event, data) - } - - return super.emit(event, ...data) - } -} - -module.exports = CachingMinipassPipeline diff --git a/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/remote.js b/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/remote.js deleted file mode 100644 index 8554564074de6e..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/lib/remote.js +++ /dev/null @@ -1,131 +0,0 @@ -const { Minipass } = require('minipass') -const fetch = require('minipass-fetch') -const promiseRetry = require('promise-retry') -const ssri = require('ssri') -const { log } = require('proc-log') - -const CachingMinipassPipeline = require('./pipeline.js') -const { getAgent } = require('@npmcli/agent') -const pkg = require('../package.json') - -const USER_AGENT = `${pkg.name}/${pkg.version} (+https://npm.im/${pkg.name})` - -const RETRY_ERRORS = [ - 'ECONNRESET', // remote socket closed on us - 'ECONNREFUSED', // remote host refused to open connection - 'EADDRINUSE', // failed to bind to a local port (proxy?) - 'ETIMEDOUT', // someone in the transaction is WAY TOO SLOW - // from @npmcli/agent - 'ECONNECTIONTIMEOUT', - 'EIDLETIMEOUT', - 'ERESPONSETIMEOUT', - 'ETRANSFERTIMEOUT', - // Known codes we do NOT retry on: - // ENOTFOUND (getaddrinfo failure. Either bad hostname, or offline) - // EINVALIDPROXY // invalid protocol from @npmcli/agent - // EINVALIDRESPONSE // invalid status code from @npmcli/agent -] - -const RETRY_TYPES = [ - 'request-timeout', -] - -// make a request directly to the remote source, -// retrying certain classes of errors as well as -// following redirects (through the cache if necessary) -// and verifying response integrity -const remoteFetch = (request, options) => { - const agent = getAgent(request.url, options) - if (!request.headers.has('connection')) { - request.headers.set('connection', agent ? 
'keep-alive' : 'close') - } - - if (!request.headers.has('user-agent')) { - request.headers.set('user-agent', USER_AGENT) - } - - // keep our own options since we're overriding the agent - // and the redirect mode - const _opts = { - ...options, - agent, - redirect: 'manual', - } - - return promiseRetry(async (retryHandler, attemptNum) => { - const req = new fetch.Request(request, _opts) - try { - let res = await fetch(req, _opts) - if (_opts.integrity && res.status === 200) { - // we got a 200 response and the user has specified an expected - // integrity value, so wrap the response in an ssri stream to verify it - const integrityStream = ssri.integrityStream({ - algorithms: _opts.algorithms, - integrity: _opts.integrity, - size: _opts.size, - }) - const pipeline = new CachingMinipassPipeline({ - events: ['integrity', 'size'], - }, res.body, integrityStream) - // we also propagate the integrity and size events out to the pipeline so we can use - // this new response body as an integrityEmitter for cacache - integrityStream.on('integrity', i => pipeline.emit('integrity', i)) - integrityStream.on('size', s => pipeline.emit('size', s)) - res = new fetch.Response(pipeline, res) - // set an explicit flag so we know if our response body will emit integrity and size - res.body.hasIntegrityEmitter = true - } - - res.headers.set('x-fetch-attempts', attemptNum) - - // do not retry POST requests, or requests with a streaming body - // do retry requests with a 408, 420, 429 or 500+ status in the response - const isStream = Minipass.isStream(req.body) - const isRetriable = req.method !== 'POST' && - !isStream && - ([408, 420, 429].includes(res.status) || res.status >= 500) - - if (isRetriable) { - if (typeof options.onRetry === 'function') { - options.onRetry(res) - } - - /* eslint-disable-next-line max-len */ - log.http('fetch', `${req.method} ${req.url} attempt ${attemptNum} failed with ${res.status}`) - return retryHandler(res) - } - - return res - } catch (err) { - const code = (err.code === 'EPROMISERETRY') - ? 
err.retried.code - : err.code - - // err.retried will be the thing that was thrown from above - // if it's a response, we just got a bad status code and we - // can re-throw to allow the retry - const isRetryError = err.retried instanceof fetch.Response || - (RETRY_ERRORS.includes(code) && RETRY_TYPES.includes(err.type)) - - if (req.method === 'POST' || isRetryError) { - throw err - } - - if (typeof options.onRetry === 'function') { - options.onRetry(err) - } - - log.http('fetch', `${req.method} ${req.url} attempt ${attemptNum} failed with ${err.code}`) - return retryHandler(err) - } - }, options.retry).catch((err) => { - // don't reject for http errors, just return them - if (err.status >= 400 && err.type !== 'system') { - return err - } - - throw err - }) -} - -module.exports = remoteFetch diff --git a/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/package.json b/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/package.json deleted file mode 100644 index 7adb4d1e7f9719..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/make-fetch-happen/package.json +++ /dev/null @@ -1,75 +0,0 @@ -{ - "name": "make-fetch-happen", - "version": "13.0.1", - "description": "Opinionated, caching, retrying fetch client", - "main": "lib/index.js", - "files": [ - "bin/", - "lib/" - ], - "scripts": { - "test": "tap", - "posttest": "npm run lint", - "eslint": "eslint", - "lint": "eslint \"**/*.{js,cjs,ts,mjs,jsx,tsx}\"", - "lintfix": "npm run lint -- --fix", - "postlint": "template-oss-check", - "snap": "tap", - "template-oss-apply": "template-oss-apply --force" - }, - "repository": { - "type": "git", - "url": "https://github.com/npm/make-fetch-happen.git" - }, - "keywords": [ - "http", - "request", - "fetch", - "mean girls", - "caching", - "cache", - "subresource integrity" - ], - "author": "GitHub Inc.", - "license": "ISC", - "dependencies": { - "@npmcli/agent": "^2.0.0", - "cacache": "^18.0.0", - "http-cache-semantics": "^4.1.1", - "is-lambda": "^1.0.1", - "minipass": "^7.0.2", - "minipass-fetch": "^3.0.0", - "minipass-flush": "^1.0.5", - "minipass-pipeline": "^1.2.4", - "negotiator": "^0.6.3", - "proc-log": "^4.2.0", - "promise-retry": "^2.0.1", - "ssri": "^10.0.0" - }, - "devDependencies": { - "@npmcli/eslint-config": "^4.0.0", - "@npmcli/template-oss": "4.21.4", - "nock": "^13.2.4", - "safe-buffer": "^5.2.1", - "standard-version": "^9.3.2", - "tap": "^16.0.0" - }, - "engines": { - "node": "^16.14.0 || >=18.0.0" - }, - "tap": { - "color": 1, - "files": "test/*.js", - "check-coverage": true, - "timeout": 60, - "nyc-arg": [ - "--exclude", - "tap-snapshots/**" - ] - }, - "templateOSS": { - "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. Edits may be overwritten.", - "version": "4.21.4", - "publish": "true" - } -} diff --git a/deps/npm/node_modules/tuf-js/node_modules/minipass-fetch/LICENSE b/deps/npm/node_modules/tuf-js/node_modules/minipass-fetch/LICENSE deleted file mode 100644 index 3c3410cdc12ee3..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/minipass-fetch/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -The MIT License (MIT) - -Copyright (c) Isaac Z. 
Schlueter and Contributors -Copyright (c) 2016 David Frank - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ---- - -Note: This is a derivative work based on "node-fetch" by David Frank, -modified and distributed under the terms of the MIT license above. -https://github.com/bitinn/node-fetch diff --git a/deps/npm/node_modules/tuf-js/node_modules/minipass-fetch/lib/abort-error.js b/deps/npm/node_modules/tuf-js/node_modules/minipass-fetch/lib/abort-error.js deleted file mode 100644 index b18f643269e375..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/minipass-fetch/lib/abort-error.js +++ /dev/null @@ -1,17 +0,0 @@ -'use strict' -class AbortError extends Error { - constructor (message) { - super(message) - this.code = 'FETCH_ABORTED' - this.type = 'aborted' - Error.captureStackTrace(this, this.constructor) - } - - get name () { - return 'AbortError' - } - - // don't allow name to be overridden, but don't throw either - set name (s) {} -} -module.exports = AbortError diff --git a/deps/npm/node_modules/tuf-js/node_modules/minipass-fetch/lib/blob.js b/deps/npm/node_modules/tuf-js/node_modules/minipass-fetch/lib/blob.js deleted file mode 100644 index 121b1730102e72..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/minipass-fetch/lib/blob.js +++ /dev/null @@ -1,97 +0,0 @@ -'use strict' -const { Minipass } = require('minipass') -const TYPE = Symbol('type') -const BUFFER = Symbol('buffer') - -class Blob { - constructor (blobParts, options) { - this[TYPE] = '' - - const buffers = [] - let size = 0 - - if (blobParts) { - const a = blobParts - const length = Number(a.length) - for (let i = 0; i < length; i++) { - const element = a[i] - const buffer = element instanceof Buffer ? element - : ArrayBuffer.isView(element) - ? Buffer.from(element.buffer, element.byteOffset, element.byteLength) - : element instanceof ArrayBuffer ? Buffer.from(element) - : element instanceof Blob ? element[BUFFER] - : typeof element === 'string' ? 
Buffer.from(element) - : Buffer.from(String(element)) - size += buffer.length - buffers.push(buffer) - } - } - - this[BUFFER] = Buffer.concat(buffers, size) - - const type = options && options.type !== undefined - && String(options.type).toLowerCase() - if (type && !/[^\u0020-\u007E]/.test(type)) { - this[TYPE] = type - } - } - - get size () { - return this[BUFFER].length - } - - get type () { - return this[TYPE] - } - - text () { - return Promise.resolve(this[BUFFER].toString()) - } - - arrayBuffer () { - const buf = this[BUFFER] - const off = buf.byteOffset - const len = buf.byteLength - const ab = buf.buffer.slice(off, off + len) - return Promise.resolve(ab) - } - - stream () { - return new Minipass().end(this[BUFFER]) - } - - slice (start, end, type) { - const size = this.size - const relativeStart = start === undefined ? 0 - : start < 0 ? Math.max(size + start, 0) - : Math.min(start, size) - const relativeEnd = end === undefined ? size - : end < 0 ? Math.max(size + end, 0) - : Math.min(end, size) - const span = Math.max(relativeEnd - relativeStart, 0) - - const buffer = this[BUFFER] - const slicedBuffer = buffer.slice( - relativeStart, - relativeStart + span - ) - const blob = new Blob([], { type }) - blob[BUFFER] = slicedBuffer - return blob - } - - get [Symbol.toStringTag] () { - return 'Blob' - } - - static get BUFFER () { - return BUFFER - } -} - -Object.defineProperties(Blob.prototype, { - size: { enumerable: true }, - type: { enumerable: true }, -}) - -module.exports = Blob diff --git a/deps/npm/node_modules/tuf-js/node_modules/minipass-fetch/lib/body.js b/deps/npm/node_modules/tuf-js/node_modules/minipass-fetch/lib/body.js deleted file mode 100644 index 62286bd1de0d91..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/minipass-fetch/lib/body.js +++ /dev/null @@ -1,350 +0,0 @@ -'use strict' -const { Minipass } = require('minipass') -const MinipassSized = require('minipass-sized') - -const Blob = require('./blob.js') -const { BUFFER } = Blob -const FetchError = require('./fetch-error.js') - -// optional dependency on 'encoding' -let convert -try { - convert = require('encoding').convert -} catch (e) { - // defer error until textConverted is called -} - -const INTERNALS = Symbol('Body internals') -const CONSUME_BODY = Symbol('consumeBody') - -class Body { - constructor (bodyArg, options = {}) { - const { size = 0, timeout = 0 } = options - const body = bodyArg === undefined || bodyArg === null ? null - : isURLSearchParams(bodyArg) ? Buffer.from(bodyArg.toString()) - : isBlob(bodyArg) ? bodyArg - : Buffer.isBuffer(bodyArg) ? bodyArg - : Object.prototype.toString.call(bodyArg) === '[object ArrayBuffer]' - ? Buffer.from(bodyArg) - : ArrayBuffer.isView(bodyArg) - ? Buffer.from(bodyArg.buffer, bodyArg.byteOffset, bodyArg.byteLength) - : Minipass.isStream(bodyArg) ? bodyArg - : Buffer.from(String(bodyArg)) - - this[INTERNALS] = { - body, - disturbed: false, - error: null, - } - - this.size = size - this.timeout = timeout - - if (Minipass.isStream(body)) { - body.on('error', er => { - const error = er.name === 'AbortError' ? 
er - : new FetchError(`Invalid response while trying to fetch ${ - this.url}: ${er.message}`, 'system', er) - this[INTERNALS].error = error - }) - } - } - - get body () { - return this[INTERNALS].body - } - - get bodyUsed () { - return this[INTERNALS].disturbed - } - - arrayBuffer () { - return this[CONSUME_BODY]().then(buf => - buf.buffer.slice(buf.byteOffset, buf.byteOffset + buf.byteLength)) - } - - blob () { - const ct = this.headers && this.headers.get('content-type') || '' - return this[CONSUME_BODY]().then(buf => Object.assign( - new Blob([], { type: ct.toLowerCase() }), - { [BUFFER]: buf } - )) - } - - async json () { - const buf = await this[CONSUME_BODY]() - try { - return JSON.parse(buf.toString()) - } catch (er) { - throw new FetchError( - `invalid json response body at ${this.url} reason: ${er.message}`, - 'invalid-json' - ) - } - } - - text () { - return this[CONSUME_BODY]().then(buf => buf.toString()) - } - - buffer () { - return this[CONSUME_BODY]() - } - - textConverted () { - return this[CONSUME_BODY]().then(buf => convertBody(buf, this.headers)) - } - - [CONSUME_BODY] () { - if (this[INTERNALS].disturbed) { - return Promise.reject(new TypeError(`body used already for: ${ - this.url}`)) - } - - this[INTERNALS].disturbed = true - - if (this[INTERNALS].error) { - return Promise.reject(this[INTERNALS].error) - } - - // body is null - if (this.body === null) { - return Promise.resolve(Buffer.alloc(0)) - } - - if (Buffer.isBuffer(this.body)) { - return Promise.resolve(this.body) - } - - const upstream = isBlob(this.body) ? this.body.stream() : this.body - - /* istanbul ignore if: should never happen */ - if (!Minipass.isStream(upstream)) { - return Promise.resolve(Buffer.alloc(0)) - } - - const stream = this.size && upstream instanceof MinipassSized ? upstream - : !this.size && upstream instanceof Minipass && - !(upstream instanceof MinipassSized) ? upstream - : this.size ? new MinipassSized({ size: this.size }) - : new Minipass() - - // allow timeout on slow response body, but only if the stream is still writable. this - // makes the timeout center on the socket stream from lib/index.js rather than the - // intermediary minipass stream we create to receive the data - const resTimeout = this.timeout && stream.writable ? setTimeout(() => { - stream.emit('error', new FetchError( - `Response timeout while trying to fetch ${ - this.url} (over ${this.timeout}ms)`, 'body-timeout')) - }, this.timeout) : null - - // do not keep the process open just for this timeout, even - // though we expect it'll get cleared eventually. - if (resTimeout && resTimeout.unref) { - resTimeout.unref() - } - - // do the pipe in the promise, because the pipe() can send too much - // data through right away and upset the MP Sized object - return new Promise((resolve) => { - // if the stream is some other kind of stream, then pipe through a MP - // so we can collect it more easily. 
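// --- illustration (not part of the removed sources) ---
// Sketch of the single-consumption rule enforced above: reading a body flips
// the internal "disturbed" flag, so a second read rejects. This is why
// Body.clone() (just below) tees the stream before either copy is consumed.
const { Response } = require('minipass-fetch')

async function readTwice () {
  const res = new Response('{"ok":true}')
  console.log(await res.json()) // { ok: true }
  await res.text()              // rejects: body used already
}
// --- end illustration ---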
- if (stream !== upstream) { - upstream.on('error', er => stream.emit('error', er)) - upstream.pipe(stream) - } - resolve() - }).then(() => stream.concat()).then(buf => { - clearTimeout(resTimeout) - return buf - }).catch(er => { - clearTimeout(resTimeout) - // request was aborted, reject with this Error - if (er.name === 'AbortError' || er.name === 'FetchError') { - throw er - } else if (er.name === 'RangeError') { - throw new FetchError(`Could not create Buffer from response body for ${ - this.url}: ${er.message}`, 'system', er) - } else { - // other errors, such as incorrect content-encoding or content-length - throw new FetchError(`Invalid response body while trying to fetch ${ - this.url}: ${er.message}`, 'system', er) - } - }) - } - - static clone (instance) { - if (instance.bodyUsed) { - throw new Error('cannot clone body after it is used') - } - - const body = instance.body - - // check that body is a stream and not form-data object - // NB: can't clone the form-data object without having it as a dependency - if (Minipass.isStream(body) && typeof body.getBoundary !== 'function') { - // create a dedicated tee stream so that we don't lose data - // potentially sitting in the body stream's buffer by writing it - // immediately to p1 and not having it for p2. - const tee = new Minipass() - const p1 = new Minipass() - const p2 = new Minipass() - tee.on('error', er => { - p1.emit('error', er) - p2.emit('error', er) - }) - body.on('error', er => tee.emit('error', er)) - tee.pipe(p1) - tee.pipe(p2) - body.pipe(tee) - // set instance body to one fork, return the other - instance[INTERNALS].body = p1 - return p2 - } else { - return instance.body - } - } - - static extractContentType (body) { - return body === null || body === undefined ? null - : typeof body === 'string' ? 'text/plain;charset=UTF-8' - : isURLSearchParams(body) - ? 'application/x-www-form-urlencoded;charset=UTF-8' - : isBlob(body) ? body.type || null - : Buffer.isBuffer(body) ? null - : Object.prototype.toString.call(body) === '[object ArrayBuffer]' ? null - : ArrayBuffer.isView(body) ? null - : typeof body.getBoundary === 'function' - ? `multipart/form-data;boundary=${body.getBoundary()}` - : Minipass.isStream(body) ? null - : 'text/plain;charset=UTF-8' - } - - static getTotalBytes (instance) { - const { body } = instance - return (body === null || body === undefined) ? 0 - : isBlob(body) ? body.size - : Buffer.isBuffer(body) ? body.length - : body && typeof body.getLengthSync === 'function' && ( - // detect form data input from form-data module - body._lengthRetrievers && - /* istanbul ignore next */ body._lengthRetrievers.length === 0 || // 1.x - body.hasKnownLength && body.hasKnownLength()) // 2.x - ? body.getLengthSync() - : null - } - - static writeToStream (dest, instance) { - const { body } = instance - - if (body === null || body === undefined) { - dest.end() - } else if (Buffer.isBuffer(body) || typeof body === 'string') { - dest.end(body) - } else { - // body is stream or blob - const stream = isBlob(body) ? body.stream() : body - stream.on('error', er => dest.emit('error', er)).pipe(dest) - } - - return dest - } -} - -Object.defineProperties(Body.prototype, { - body: { enumerable: true }, - bodyUsed: { enumerable: true }, - arrayBuffer: { enumerable: true }, - blob: { enumerable: true }, - json: { enumerable: true }, - text: { enumerable: true }, -}) - -const isURLSearchParams = obj => - // Duck-typing as a necessary condition. 
- (typeof obj !== 'object' || - typeof obj.append !== 'function' || - typeof obj.delete !== 'function' || - typeof obj.get !== 'function' || - typeof obj.getAll !== 'function' || - typeof obj.has !== 'function' || - typeof obj.set !== 'function') ? false - // Brand-checking and more duck-typing as optional condition. - : obj.constructor.name === 'URLSearchParams' || - Object.prototype.toString.call(obj) === '[object URLSearchParams]' || - typeof obj.sort === 'function' - -const isBlob = obj => - typeof obj === 'object' && - typeof obj.arrayBuffer === 'function' && - typeof obj.type === 'string' && - typeof obj.stream === 'function' && - typeof obj.constructor === 'function' && - typeof obj.constructor.name === 'string' && - /^(Blob|File)$/.test(obj.constructor.name) && - /^(Blob|File)$/.test(obj[Symbol.toStringTag]) - -const convertBody = (buffer, headers) => { - /* istanbul ignore if */ - if (typeof convert !== 'function') { - throw new Error('The package `encoding` must be installed to use the textConverted() function') - } - - const ct = headers && headers.get('content-type') - let charset = 'utf-8' - let res - - // header - if (ct) { - res = /charset=([^;]*)/i.exec(ct) - } - - // no charset in content type, peek at response body for at most 1024 bytes - const str = buffer.slice(0, 1024).toString() - - // html5 - if (!res && str) { - res = / this.expect - ? 'max-size' : type - this.message = message - Error.captureStackTrace(this, this.constructor) - } - - get name () { - return 'FetchError' - } - - // don't allow name to be overwritten - set name (n) {} - - get [Symbol.toStringTag] () { - return 'FetchError' - } -} -module.exports = FetchError diff --git a/deps/npm/node_modules/tuf-js/node_modules/minipass-fetch/lib/headers.js b/deps/npm/node_modules/tuf-js/node_modules/minipass-fetch/lib/headers.js deleted file mode 100644 index dd6e854d5ba399..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/minipass-fetch/lib/headers.js +++ /dev/null @@ -1,267 +0,0 @@ -'use strict' -const invalidTokenRegex = /[^^_`a-zA-Z\-0-9!#$%&'*+.|~]/ -const invalidHeaderCharRegex = /[^\t\x20-\x7e\x80-\xff]/ - -const validateName = name => { - name = `${name}` - if (invalidTokenRegex.test(name) || name === '') { - throw new TypeError(`${name} is not a legal HTTP header name`) - } -} - -const validateValue = value => { - value = `${value}` - if (invalidHeaderCharRegex.test(value)) { - throw new TypeError(`${value} is not a legal HTTP header value`) - } -} - -const find = (map, name) => { - name = name.toLowerCase() - for (const key in map) { - if (key.toLowerCase() === name) { - return key - } - } - return undefined -} - -const MAP = Symbol('map') -class Headers { - constructor (init = undefined) { - this[MAP] = Object.create(null) - if (init instanceof Headers) { - const rawHeaders = init.raw() - const headerNames = Object.keys(rawHeaders) - for (const headerName of headerNames) { - for (const value of rawHeaders[headerName]) { - this.append(headerName, value) - } - } - return - } - - // no-op - if (init === undefined || init === null) { - return - } - - if (typeof init === 'object') { - const method = init[Symbol.iterator] - if (method !== null && method !== undefined) { - if (typeof method !== 'function') { - throw new TypeError('Header pairs must be iterable') - } - - // sequence> - // Note: per spec we have to first exhaust the lists then process them - const pairs = [] - for (const pair of init) { - if (typeof pair !== 'object' || - typeof pair[Symbol.iterator] !== 'function') { - throw new 
TypeError('Each header pair must be iterable') - } - const arrPair = Array.from(pair) - if (arrPair.length !== 2) { - throw new TypeError('Each header pair must be a name/value tuple') - } - pairs.push(arrPair) - } - - for (const pair of pairs) { - this.append(pair[0], pair[1]) - } - } else { - // record - for (const key of Object.keys(init)) { - this.append(key, init[key]) - } - } - } else { - throw new TypeError('Provided initializer must be an object') - } - } - - get (name) { - name = `${name}` - validateName(name) - const key = find(this[MAP], name) - if (key === undefined) { - return null - } - - return this[MAP][key].join(', ') - } - - forEach (callback, thisArg = undefined) { - let pairs = getHeaders(this) - for (let i = 0; i < pairs.length; i++) { - const [name, value] = pairs[i] - callback.call(thisArg, value, name, this) - // refresh in case the callback added more headers - pairs = getHeaders(this) - } - } - - set (name, value) { - name = `${name}` - value = `${value}` - validateName(name) - validateValue(value) - const key = find(this[MAP], name) - this[MAP][key !== undefined ? key : name] = [value] - } - - append (name, value) { - name = `${name}` - value = `${value}` - validateName(name) - validateValue(value) - const key = find(this[MAP], name) - if (key !== undefined) { - this[MAP][key].push(value) - } else { - this[MAP][name] = [value] - } - } - - has (name) { - name = `${name}` - validateName(name) - return find(this[MAP], name) !== undefined - } - - delete (name) { - name = `${name}` - validateName(name) - const key = find(this[MAP], name) - if (key !== undefined) { - delete this[MAP][key] - } - } - - raw () { - return this[MAP] - } - - keys () { - return new HeadersIterator(this, 'key') - } - - values () { - return new HeadersIterator(this, 'value') - } - - [Symbol.iterator] () { - return new HeadersIterator(this, 'key+value') - } - - entries () { - return new HeadersIterator(this, 'key+value') - } - - get [Symbol.toStringTag] () { - return 'Headers' - } - - static exportNodeCompatibleHeaders (headers) { - const obj = Object.assign(Object.create(null), headers[MAP]) - - // http.request() only supports string as Host header. This hack makes - // specifying custom Host header possible. - const hostHeaderKey = find(headers[MAP], 'Host') - if (hostHeaderKey !== undefined) { - obj[hostHeaderKey] = obj[hostHeaderKey][0] - } - - return obj - } - - static createHeadersLenient (obj) { - const headers = new Headers() - for (const name of Object.keys(obj)) { - if (invalidTokenRegex.test(name)) { - continue - } - - if (Array.isArray(obj[name])) { - for (const val of obj[name]) { - if (invalidHeaderCharRegex.test(val)) { - continue - } - - if (headers[MAP][name] === undefined) { - headers[MAP][name] = [val] - } else { - headers[MAP][name].push(val) - } - } - } else if (!invalidHeaderCharRegex.test(obj[name])) { - headers[MAP][name] = [obj[name]] - } - } - return headers - } -} - -Object.defineProperties(Headers.prototype, { - get: { enumerable: true }, - forEach: { enumerable: true }, - set: { enumerable: true }, - append: { enumerable: true }, - has: { enumerable: true }, - delete: { enumerable: true }, - keys: { enumerable: true }, - values: { enumerable: true }, - entries: { enumerable: true }, -}) - -const getHeaders = (headers, kind = 'key+value') => - Object.keys(headers[MAP]).sort().map( - kind === 'key' ? k => k.toLowerCase() - : kind === 'value' ? 
k => headers[MAP][k].join(', ') - : k => [k.toLowerCase(), headers[MAP][k].join(', ')] - ) - -const INTERNAL = Symbol('internal') - -class HeadersIterator { - constructor (target, kind) { - this[INTERNAL] = { - target, - kind, - index: 0, - } - } - - get [Symbol.toStringTag] () { - return 'HeadersIterator' - } - - next () { - /* istanbul ignore if: should be impossible */ - if (!this || Object.getPrototypeOf(this) !== HeadersIterator.prototype) { - throw new TypeError('Value of `this` is not a HeadersIterator') - } - - const { target, kind, index } = this[INTERNAL] - const values = getHeaders(target, kind) - const len = values.length - if (index >= len) { - return { - value: undefined, - done: true, - } - } - - this[INTERNAL].index++ - - return { value: values[index], done: false } - } -} - -// manually extend because 'extends' requires a ctor -Object.setPrototypeOf(HeadersIterator.prototype, - Object.getPrototypeOf(Object.getPrototypeOf([][Symbol.iterator]()))) - -module.exports = Headers diff --git a/deps/npm/node_modules/tuf-js/node_modules/minipass-fetch/lib/index.js b/deps/npm/node_modules/tuf-js/node_modules/minipass-fetch/lib/index.js deleted file mode 100644 index da402161670e65..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/minipass-fetch/lib/index.js +++ /dev/null @@ -1,377 +0,0 @@ -'use strict' -const { URL } = require('url') -const http = require('http') -const https = require('https') -const zlib = require('minizlib') -const { Minipass } = require('minipass') - -const Body = require('./body.js') -const { writeToStream, getTotalBytes } = Body -const Response = require('./response.js') -const Headers = require('./headers.js') -const { createHeadersLenient } = Headers -const Request = require('./request.js') -const { getNodeRequestOptions } = Request -const FetchError = require('./fetch-error.js') -const AbortError = require('./abort-error.js') - -// XXX this should really be split up and unit-ized for easier testing -// and better DRY implementation of data/http request aborting -const fetch = async (url, opts) => { - if (/^data:/.test(url)) { - const request = new Request(url, opts) - // delay 1 promise tick so that the consumer can abort right away - return Promise.resolve().then(() => new Promise((resolve, reject) => { - let type, data - try { - const { pathname, search } = new URL(url) - const split = pathname.split(',') - if (split.length < 2) { - throw new Error('invalid data: URI') - } - const mime = split.shift() - const base64 = /;base64$/.test(mime) - type = base64 ? mime.slice(0, -1 * ';base64'.length) : mime - const rawData = decodeURIComponent(split.join(',') + search) - data = base64 ? Buffer.from(rawData, 'base64') : Buffer.from(rawData) - } catch (er) { - return reject(new FetchError(`[${request.method}] ${ - request.url} invalid URL, ${er.message}`, 'system', er)) - } - - const { signal } = request - if (signal && signal.aborted) { - return reject(new AbortError('The user aborted a request.')) - } - - const headers = { 'Content-Length': data.length } - if (type) { - headers['Content-Type'] = type - } - return resolve(new Response(data, { headers })) - })) - } - - return new Promise((resolve, reject) => { - // build request object - const request = new Request(url, opts) - let options - try { - options = getNodeRequestOptions(request) - } catch (er) { - return reject(er) - } - - const send = (options.protocol === 'https:' ? 
https : http).request - const { signal } = request - let response = null - const abort = () => { - const error = new AbortError('The user aborted a request.') - reject(error) - if (Minipass.isStream(request.body) && - typeof request.body.destroy === 'function') { - request.body.destroy(error) - } - if (response && response.body) { - response.body.emit('error', error) - } - } - - if (signal && signal.aborted) { - return abort() - } - - const abortAndFinalize = () => { - abort() - finalize() - } - - const finalize = () => { - req.abort() - if (signal) { - signal.removeEventListener('abort', abortAndFinalize) - } - clearTimeout(reqTimeout) - } - - // send request - const req = send(options) - - if (signal) { - signal.addEventListener('abort', abortAndFinalize) - } - - let reqTimeout = null - if (request.timeout) { - req.once('socket', () => { - reqTimeout = setTimeout(() => { - reject(new FetchError(`network timeout at: ${ - request.url}`, 'request-timeout')) - finalize() - }, request.timeout) - }) - } - - req.on('error', er => { - // if a 'response' event is emitted before the 'error' event, then by the - // time this handler is run it's too late to reject the Promise for the - // response. instead, we forward the error event to the response stream - // so that the error will surface to the user when they try to consume - // the body. this is done as a side effect of aborting the request except - // for in windows, where we must forward the event manually, otherwise - // there is no longer a ref'd socket attached to the request and the - // stream never ends so the event loop runs out of work and the process - // exits without warning. - // coverage skipped here due to the difficulty in testing - // istanbul ignore next - if (req.res) { - req.res.emit('error', er) - } - reject(new FetchError(`request to ${request.url} failed, reason: ${ - er.message}`, 'system', er)) - finalize() - }) - - req.on('response', res => { - clearTimeout(reqTimeout) - - const headers = createHeadersLenient(res.headers) - - // HTTP fetch step 5 - if (fetch.isRedirect(res.statusCode)) { - // HTTP fetch step 5.2 - const location = headers.get('Location') - - // HTTP fetch step 5.3 - let locationURL = null - try { - locationURL = location === null ? null : new URL(location, request.url).toString() - } catch { - // error here can only be invalid URL in Location: header - // do not throw when options.redirect == manual - // let the user extract the errorneous redirect URL - if (request.redirect !== 'manual') { - /* eslint-disable-next-line max-len */ - reject(new FetchError(`uri requested responds with an invalid redirect URL: ${location}`, 'invalid-redirect')) - finalize() - return - } - } - - // HTTP fetch step 5.5 - if (request.redirect === 'error') { - reject(new FetchError('uri requested responds with a redirect, ' + - `redirect mode is set to error: ${request.url}`, 'no-redirect')) - finalize() - return - } else if (request.redirect === 'manual') { - // node-fetch-specific step: make manual redirect a bit easier to - // use by setting the Location header value to the resolved URL. 
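// --- illustration (not part of the removed sources) ---
// Usage sketch of 'manual' redirect handling as coded below: the 3xx response
// is handed back to the caller with its Location header rewritten to the
// resolved absolute URL (example.com is a placeholder).
const fetch = require('minipass-fetch')

async function followManually () {
  const res = await fetch('https://example.com/old', { redirect: 'manual' })
  if (fetch.isRedirect(res.status)) {
    // already absolute here, even if the server sent a relative Location
    return fetch(res.headers.get('location'))
  }
  return res
}
// --- end illustration ---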
- if (locationURL !== null) { - // handle corrupted header - try { - headers.set('Location', locationURL) - } catch (err) { - /* istanbul ignore next: nodejs server prevent invalid - response headers, we can't test this through normal - request */ - reject(err) - } - } - } else if (request.redirect === 'follow' && locationURL !== null) { - // HTTP-redirect fetch step 5 - if (request.counter >= request.follow) { - reject(new FetchError(`maximum redirect reached at: ${ - request.url}`, 'max-redirect')) - finalize() - return - } - - // HTTP-redirect fetch step 9 - if (res.statusCode !== 303 && - request.body && - getTotalBytes(request) === null) { - reject(new FetchError( - 'Cannot follow redirect with body being a readable stream', - 'unsupported-redirect' - )) - finalize() - return - } - - // Update host due to redirection - request.headers.set('host', (new URL(locationURL)).host) - - // HTTP-redirect fetch step 6 (counter increment) - // Create a new Request object. - const requestOpts = { - headers: new Headers(request.headers), - follow: request.follow, - counter: request.counter + 1, - agent: request.agent, - compress: request.compress, - method: request.method, - body: request.body, - signal: request.signal, - timeout: request.timeout, - } - - // if the redirect is to a new hostname, strip the authorization and cookie headers - const parsedOriginal = new URL(request.url) - const parsedRedirect = new URL(locationURL) - if (parsedOriginal.hostname !== parsedRedirect.hostname) { - requestOpts.headers.delete('authorization') - requestOpts.headers.delete('cookie') - } - - // HTTP-redirect fetch step 11 - if (res.statusCode === 303 || ( - (res.statusCode === 301 || res.statusCode === 302) && - request.method === 'POST' - )) { - requestOpts.method = 'GET' - requestOpts.body = undefined - requestOpts.headers.delete('content-length') - } - - // HTTP-redirect fetch step 15 - resolve(fetch(new Request(locationURL, requestOpts))) - finalize() - return - } - } // end if(isRedirect) - - // prepare response - res.once('end', () => - signal && signal.removeEventListener('abort', abortAndFinalize)) - - const body = new Minipass() - // if an error occurs, either on the response stream itself, on one of the - // decoder streams, or a response length timeout from the Body class, we - // forward the error through to our internal body stream. If we see an - // error event on that, we call finalize to abort the request and ensure - // we don't leave a socket believing a request is in flight. - // this is difficult to test, so lacks specific coverage. - body.on('error', finalize) - // exceedingly rare that the stream would have an error, - // but just in case we proxy it to the stream in use. - res.on('error', /* istanbul ignore next */ er => body.emit('error', er)) - res.on('data', (chunk) => body.write(chunk)) - res.on('end', () => body.end()) - - const responseOptions = { - url: request.url, - status: res.statusCode, - statusText: res.statusMessage, - headers: headers, - size: request.size, - timeout: request.timeout, - counter: request.counter, - trailer: new Promise(resolveTrailer => - res.on('end', () => resolveTrailer(createHeadersLenient(res.trailers)))), - } - - // HTTP-network fetch step 12.1.1.3 - const codings = headers.get('Content-Encoding') - - // HTTP-network fetch step 12.1.1.4: handle content codings - - // in following scenarios we ignore compression support - // 1. compression support is disabled - // 2. HEAD request - // 3. no Content-Encoding header - // 4. 
no content response (204) - // 5. content not modified response (304) - if (!request.compress || - request.method === 'HEAD' || - codings === null || - res.statusCode === 204 || - res.statusCode === 304) { - response = new Response(body, responseOptions) - resolve(response) - return - } - - // Be less strict when decoding compressed responses, since sometimes - // servers send slightly invalid responses that are still accepted - // by common browsers. - // Always using Z_SYNC_FLUSH is what cURL does. - const zlibOptions = { - flush: zlib.constants.Z_SYNC_FLUSH, - finishFlush: zlib.constants.Z_SYNC_FLUSH, - } - - // for gzip - if (codings === 'gzip' || codings === 'x-gzip') { - const unzip = new zlib.Gunzip(zlibOptions) - response = new Response( - // exceedingly rare that the stream would have an error, - // but just in case we proxy it to the stream in use. - body.on('error', /* istanbul ignore next */ er => unzip.emit('error', er)).pipe(unzip), - responseOptions - ) - resolve(response) - return - } - - // for deflate - if (codings === 'deflate' || codings === 'x-deflate') { - // handle the infamous raw deflate response from old servers - // a hack for old IIS and Apache servers - const raw = res.pipe(new Minipass()) - raw.once('data', chunk => { - // see http://stackoverflow.com/questions/37519828 - const decoder = (chunk[0] & 0x0F) === 0x08 - ? new zlib.Inflate() - : new zlib.InflateRaw() - // exceedingly rare that the stream would have an error, - // but just in case we proxy it to the stream in use. - body.on('error', /* istanbul ignore next */ er => decoder.emit('error', er)).pipe(decoder) - response = new Response(decoder, responseOptions) - resolve(response) - }) - return - } - - // for br - if (codings === 'br') { - // ignoring coverage so tests don't have to fake support (or lack of) for brotli - // istanbul ignore next - try { - var decoder = new zlib.BrotliDecompress() - } catch (err) { - reject(err) - finalize() - return - } - // exceedingly rare that the stream would have an error, - // but just in case we proxy it to the stream in use. 
- body.on('error', /* istanbul ignore next */ er => decoder.emit('error', er)).pipe(decoder) - response = new Response(decoder, responseOptions) - resolve(response) - return - } - - // otherwise, use response as-is - response = new Response(body, responseOptions) - resolve(response) - }) - - writeToStream(req, request) - }) -} - -module.exports = fetch - -fetch.isRedirect = code => - code === 301 || - code === 302 || - code === 303 || - code === 307 || - code === 308 - -fetch.Headers = Headers -fetch.Request = Request -fetch.Response = Response -fetch.FetchError = FetchError -fetch.AbortError = AbortError diff --git a/deps/npm/node_modules/tuf-js/node_modules/minipass-fetch/lib/request.js b/deps/npm/node_modules/tuf-js/node_modules/minipass-fetch/lib/request.js deleted file mode 100644 index 054439e6699107..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/minipass-fetch/lib/request.js +++ /dev/null @@ -1,282 +0,0 @@ -'use strict' -const { URL } = require('url') -const { Minipass } = require('minipass') -const Headers = require('./headers.js') -const { exportNodeCompatibleHeaders } = Headers -const Body = require('./body.js') -const { clone, extractContentType, getTotalBytes } = Body - -const version = require('../package.json').version -const defaultUserAgent = - `minipass-fetch/${version} (+https://github.com/isaacs/minipass-fetch)` - -const INTERNALS = Symbol('Request internals') - -const isRequest = input => - typeof input === 'object' && typeof input[INTERNALS] === 'object' - -const isAbortSignal = signal => { - const proto = ( - signal - && typeof signal === 'object' - && Object.getPrototypeOf(signal) - ) - return !!(proto && proto.constructor.name === 'AbortSignal') -} - -class Request extends Body { - constructor (input, init = {}) { - const parsedURL = isRequest(input) ? new URL(input.url) - : input && input.href ? new URL(input.href) - : new URL(`${input}`) - - if (isRequest(input)) { - init = { ...input[INTERNALS], ...init } - } else if (!input || typeof input === 'string') { - input = {} - } - - const method = (init.method || input.method || 'GET').toUpperCase() - const isGETHEAD = method === 'GET' || method === 'HEAD' - - if ((init.body !== null && init.body !== undefined || - isRequest(input) && input.body !== null) && isGETHEAD) { - throw new TypeError('Request with GET/HEAD method cannot have body') - } - - const inputBody = init.body !== null && init.body !== undefined ? init.body - : isRequest(input) && input.body !== null ? clone(input) - : null - - super(inputBody, { - timeout: init.timeout || input.timeout || 0, - size: init.size || input.size || 0, - }) - - const headers = new Headers(init.headers || input.headers || {}) - - if (inputBody !== null && inputBody !== undefined && - !headers.has('Content-Type')) { - const contentType = extractContentType(inputBody) - if (contentType) { - headers.append('Content-Type', contentType) - } - } - - const signal = 'signal' in init ? 
init.signal - : null - - if (signal !== null && signal !== undefined && !isAbortSignal(signal)) { - throw new TypeError('Expected signal must be an instanceof AbortSignal') - } - - // TLS specific options that are handled by node - const { - ca, - cert, - ciphers, - clientCertEngine, - crl, - dhparam, - ecdhCurve, - family, - honorCipherOrder, - key, - passphrase, - pfx, - rejectUnauthorized = process.env.NODE_TLS_REJECT_UNAUTHORIZED !== '0', - secureOptions, - secureProtocol, - servername, - sessionIdContext, - } = init - - this[INTERNALS] = { - method, - redirect: init.redirect || input.redirect || 'follow', - headers, - parsedURL, - signal, - ca, - cert, - ciphers, - clientCertEngine, - crl, - dhparam, - ecdhCurve, - family, - honorCipherOrder, - key, - passphrase, - pfx, - rejectUnauthorized, - secureOptions, - secureProtocol, - servername, - sessionIdContext, - } - - // node-fetch-only options - this.follow = init.follow !== undefined ? init.follow - : input.follow !== undefined ? input.follow - : 20 - this.compress = init.compress !== undefined ? init.compress - : input.compress !== undefined ? input.compress - : true - this.counter = init.counter || input.counter || 0 - this.agent = init.agent || input.agent - } - - get method () { - return this[INTERNALS].method - } - - get url () { - return this[INTERNALS].parsedURL.toString() - } - - get headers () { - return this[INTERNALS].headers - } - - get redirect () { - return this[INTERNALS].redirect - } - - get signal () { - return this[INTERNALS].signal - } - - clone () { - return new Request(this) - } - - get [Symbol.toStringTag] () { - return 'Request' - } - - static getNodeRequestOptions (request) { - const parsedURL = request[INTERNALS].parsedURL - const headers = new Headers(request[INTERNALS].headers) - - // fetch step 1.3 - if (!headers.has('Accept')) { - headers.set('Accept', '*/*') - } - - // Basic fetch - if (!/^https?:$/.test(parsedURL.protocol)) { - throw new TypeError('Only HTTP(S) protocols are supported') - } - - if (request.signal && - Minipass.isStream(request.body) && - typeof request.body.destroy !== 'function') { - throw new Error( - 'Cancellation of streamed requests with AbortSignal is not supported') - } - - // HTTP-network-or-cache fetch steps 2.4-2.7 - const contentLengthValue = - (request.body === null || request.body === undefined) && - /^(POST|PUT)$/i.test(request.method) ? '0' - : request.body !== null && request.body !== undefined - ? getTotalBytes(request) - : null - - if (contentLengthValue) { - headers.set('Content-Length', contentLengthValue + '') - } - - // HTTP-network-or-cache fetch step 2.11 - if (!headers.has('User-Agent')) { - headers.set('User-Agent', defaultUserAgent) - } - - // HTTP-network-or-cache fetch step 2.15 - if (request.compress && !headers.has('Accept-Encoding')) { - headers.set('Accept-Encoding', 'gzip,deflate') - } - - const agent = typeof request.agent === 'function' - ? 
request.agent(parsedURL) - : request.agent - - if (!headers.has('Connection') && !agent) { - headers.set('Connection', 'close') - } - - // TLS specific options that are handled by node - const { - ca, - cert, - ciphers, - clientCertEngine, - crl, - dhparam, - ecdhCurve, - family, - honorCipherOrder, - key, - passphrase, - pfx, - rejectUnauthorized, - secureOptions, - secureProtocol, - servername, - sessionIdContext, - } = request[INTERNALS] - - // HTTP-network fetch step 4.2 - // chunked encoding is handled by Node.js - - // we cannot spread parsedURL directly, so we have to read each property one-by-one - // and map them to the equivalent https?.request() method options - const urlProps = { - auth: parsedURL.username || parsedURL.password - ? `${parsedURL.username}:${parsedURL.password}` - : '', - host: parsedURL.host, - hostname: parsedURL.hostname, - path: `${parsedURL.pathname}${parsedURL.search}`, - port: parsedURL.port, - protocol: parsedURL.protocol, - } - - return { - ...urlProps, - method: request.method, - headers: exportNodeCompatibleHeaders(headers), - agent, - ca, - cert, - ciphers, - clientCertEngine, - crl, - dhparam, - ecdhCurve, - family, - honorCipherOrder, - key, - passphrase, - pfx, - rejectUnauthorized, - secureOptions, - secureProtocol, - servername, - sessionIdContext, - timeout: request.timeout, - } - } -} - -module.exports = Request - -Object.defineProperties(Request.prototype, { - method: { enumerable: true }, - url: { enumerable: true }, - headers: { enumerable: true }, - redirect: { enumerable: true }, - clone: { enumerable: true }, - signal: { enumerable: true }, -}) diff --git a/deps/npm/node_modules/tuf-js/node_modules/minipass-fetch/lib/response.js b/deps/npm/node_modules/tuf-js/node_modules/minipass-fetch/lib/response.js deleted file mode 100644 index 54cb52db3594a7..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/minipass-fetch/lib/response.js +++ /dev/null @@ -1,90 +0,0 @@ -'use strict' -const http = require('http') -const { STATUS_CODES } = http - -const Headers = require('./headers.js') -const Body = require('./body.js') -const { clone, extractContentType } = Body - -const INTERNALS = Symbol('Response internals') - -class Response extends Body { - constructor (body = null, opts = {}) { - super(body, opts) - - const status = opts.status || 200 - const headers = new Headers(opts.headers) - - if (body !== null && body !== undefined && !headers.has('Content-Type')) { - const contentType = extractContentType(body) - if (contentType) { - headers.append('Content-Type', contentType) - } - } - - this[INTERNALS] = { - url: opts.url, - status, - statusText: opts.statusText || STATUS_CODES[status], - headers, - counter: opts.counter, - trailer: Promise.resolve(opts.trailer || new Headers()), - } - } - - get trailer () { - return this[INTERNALS].trailer - } - - get url () { - return this[INTERNALS].url || '' - } - - get status () { - return this[INTERNALS].status - } - - get ok () { - return this[INTERNALS].status >= 200 && this[INTERNALS].status < 300 - } - - get redirected () { - return this[INTERNALS].counter > 0 - } - - get statusText () { - return this[INTERNALS].statusText - } - - get headers () { - return this[INTERNALS].headers - } - - clone () { - return new Response(clone(this), { - url: this.url, - status: this.status, - statusText: this.statusText, - headers: this.headers, - ok: this.ok, - redirected: this.redirected, - trailer: this.trailer, - }) - } - - get [Symbol.toStringTag] () { - return 'Response' - } -} - -module.exports = Response 
- -Object.defineProperties(Response.prototype, { - url: { enumerable: true }, - status: { enumerable: true }, - ok: { enumerable: true }, - redirected: { enumerable: true }, - statusText: { enumerable: true }, - headers: { enumerable: true }, - clone: { enumerable: true }, -}) diff --git a/deps/npm/node_modules/tuf-js/node_modules/minipass-fetch/package.json b/deps/npm/node_modules/tuf-js/node_modules/minipass-fetch/package.json deleted file mode 100644 index d491a7fba126d0..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/minipass-fetch/package.json +++ /dev/null @@ -1,69 +0,0 @@ -{ - "name": "minipass-fetch", - "version": "3.0.5", - "description": "An implementation of window.fetch in Node.js using Minipass streams", - "license": "MIT", - "main": "lib/index.js", - "scripts": { - "test:tls-fixtures": "./test/fixtures/tls/setup.sh", - "test": "tap", - "snap": "tap", - "lint": "eslint \"**/*.{js,cjs,ts,mjs,jsx,tsx}\"", - "postlint": "template-oss-check", - "lintfix": "npm run lint -- --fix", - "posttest": "npm run lint", - "template-oss-apply": "template-oss-apply --force" - }, - "tap": { - "coverage-map": "map.js", - "check-coverage": true, - "nyc-arg": [ - "--exclude", - "tap-snapshots/**" - ] - }, - "devDependencies": { - "@npmcli/eslint-config": "^4.0.0", - "@npmcli/template-oss": "4.22.0", - "@ungap/url-search-params": "^0.2.2", - "abort-controller": "^3.0.0", - "abortcontroller-polyfill": "~1.7.3", - "encoding": "^0.1.13", - "form-data": "^4.0.0", - "nock": "^13.2.4", - "parted": "^0.1.1", - "string-to-arraybuffer": "^1.0.2", - "tap": "^16.0.0" - }, - "dependencies": { - "minipass": "^7.0.3", - "minipass-sized": "^1.0.3", - "minizlib": "^2.1.2" - }, - "optionalDependencies": { - "encoding": "^0.1.13" - }, - "repository": { - "type": "git", - "url": "git+https://github.com/npm/minipass-fetch.git" - }, - "keywords": [ - "fetch", - "minipass", - "node-fetch", - "window.fetch" - ], - "files": [ - "bin/", - "lib/" - ], - "engines": { - "node": "^14.17.0 || ^16.13.0 || >=18.0.0" - }, - "author": "GitHub Inc.", - "templateOSS": { - "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. Edits may be overwritten.", - "version": "4.22.0", - "publish": "true" - } -} diff --git a/deps/npm/node_modules/tuf-js/node_modules/proc-log/LICENSE b/deps/npm/node_modules/tuf-js/node_modules/proc-log/LICENSE deleted file mode 100644 index 83837797202b70..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/proc-log/LICENSE +++ /dev/null @@ -1,15 +0,0 @@ -The ISC License - -Copyright (c) GitHub, Inc. - -Permission to use, copy, modify, and/or distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR -IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
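The next hunk removes the vendored copy of proc-log, whose whole API is a set of thin wrappers around process.emit. Below is a minimal usage sketch, assuming proc-log@4 is installed; the listener, the level filter, the message strings, and the 'install' timer name are all illustrative, not npm's actual logging wiring:

const { log, time } = require('proc-log')

// Consumer side: any interested party subscribes on the global process
// object. Nothing is printed unless someone listens.
process.on('log', (level, ...args) => {
  if (level === 'error' || level === 'warn') {
    console.error(`npm ${level}`, ...args)
  }
})

// Producer side: log.warn(...) is literally
// process.emit('log', 'warn', ...args) in the implementation removed below.
log.warn('config', 'ignoring unknown key')
log.error('fetch', 'request failed')

// time.start(name) emits a 'time'/'start' event and returns an end()
// function that emits the matching 'time'/'end' event.
const done = time.start('install')
done()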
diff --git a/deps/npm/node_modules/tuf-js/node_modules/proc-log/lib/index.js b/deps/npm/node_modules/tuf-js/node_modules/proc-log/lib/index.js deleted file mode 100644 index 86d90861078dab..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/proc-log/lib/index.js +++ /dev/null @@ -1,153 +0,0 @@ -const META = Symbol('proc-log.meta') -module.exports = { - META: META, - output: { - LEVELS: [ - 'standard', - 'error', - 'buffer', - 'flush', - ], - KEYS: { - standard: 'standard', - error: 'error', - buffer: 'buffer', - flush: 'flush', - }, - standard: function (...args) { - return process.emit('output', 'standard', ...args) - }, - error: function (...args) { - return process.emit('output', 'error', ...args) - }, - buffer: function (...args) { - return process.emit('output', 'buffer', ...args) - }, - flush: function (...args) { - return process.emit('output', 'flush', ...args) - }, - }, - log: { - LEVELS: [ - 'notice', - 'error', - 'warn', - 'info', - 'verbose', - 'http', - 'silly', - 'timing', - 'pause', - 'resume', - ], - KEYS: { - notice: 'notice', - error: 'error', - warn: 'warn', - info: 'info', - verbose: 'verbose', - http: 'http', - silly: 'silly', - timing: 'timing', - pause: 'pause', - resume: 'resume', - }, - error: function (...args) { - return process.emit('log', 'error', ...args) - }, - notice: function (...args) { - return process.emit('log', 'notice', ...args) - }, - warn: function (...args) { - return process.emit('log', 'warn', ...args) - }, - info: function (...args) { - return process.emit('log', 'info', ...args) - }, - verbose: function (...args) { - return process.emit('log', 'verbose', ...args) - }, - http: function (...args) { - return process.emit('log', 'http', ...args) - }, - silly: function (...args) { - return process.emit('log', 'silly', ...args) - }, - timing: function (...args) { - return process.emit('log', 'timing', ...args) - }, - pause: function () { - return process.emit('log', 'pause') - }, - resume: function () { - return process.emit('log', 'resume') - }, - }, - time: { - LEVELS: [ - 'start', - 'end', - ], - KEYS: { - start: 'start', - end: 'end', - }, - start: function (name, fn) { - process.emit('time', 'start', name) - function end () { - return process.emit('time', 'end', name) - } - if (typeof fn === 'function') { - const res = fn() - if (res && res.finally) { - return res.finally(end) - } - end() - return res - } - return end - }, - end: function (name) { - return process.emit('time', 'end', name) - }, - }, - input: { - LEVELS: [ - 'start', - 'end', - 'read', - ], - KEYS: { - start: 'start', - end: 'end', - read: 'read', - }, - start: function (fn) { - process.emit('input', 'start') - function end () { - return process.emit('input', 'end') - } - if (typeof fn === 'function') { - const res = fn() - if (res && res.finally) { - return res.finally(end) - } - end() - return res - } - return end - }, - end: function () { - return process.emit('input', 'end') - }, - read: function (...args) { - let resolve, reject - const promise = new Promise((_resolve, _reject) => { - resolve = _resolve - reject = _reject - }) - process.emit('input', 'read', resolve, reject, ...args) - return promise - }, - }, -} diff --git a/deps/npm/node_modules/tuf-js/node_modules/proc-log/package.json b/deps/npm/node_modules/tuf-js/node_modules/proc-log/package.json deleted file mode 100644 index 4ab89102ecc9b5..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/proc-log/package.json +++ /dev/null @@ -1,45 +0,0 @@ -{ - "name": "proc-log", - "version": "4.2.0", - 
"files": [ - "bin/", - "lib/" - ], - "main": "lib/index.js", - "description": "just emit 'log' events on the process object", - "repository": { - "type": "git", - "url": "https://github.com/npm/proc-log.git" - }, - "author": "GitHub Inc.", - "license": "ISC", - "scripts": { - "test": "tap", - "snap": "tap", - "posttest": "npm run lint", - "postsnap": "eslint index.js test/*.js --fix", - "lint": "eslint \"**/*.{js,cjs,ts,mjs,jsx,tsx}\"", - "postlint": "template-oss-check", - "lintfix": "npm run lint -- --fix", - "template-oss-apply": "template-oss-apply --force" - }, - "devDependencies": { - "@npmcli/eslint-config": "^4.0.0", - "@npmcli/template-oss": "4.21.3", - "tap": "^16.0.1" - }, - "engines": { - "node": "^14.17.0 || ^16.13.0 || >=18.0.0" - }, - "templateOSS": { - "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. Edits may be overwritten.", - "version": "4.21.3", - "publish": true - }, - "tap": { - "nyc-arg": [ - "--exclude", - "tap-snapshots/**" - ] - } -} diff --git a/deps/npm/node_modules/tuf-js/node_modules/ssri/LICENSE.md b/deps/npm/node_modules/tuf-js/node_modules/ssri/LICENSE.md deleted file mode 100644 index e335388869f50f..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/ssri/LICENSE.md +++ /dev/null @@ -1,16 +0,0 @@ -ISC License - -Copyright 2021 (c) npm, Inc. - -Permission to use, copy, modify, and/or distribute this software for -any purpose with or without fee is hereby granted, provided that the -above copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE COPYRIGHT HOLDER DISCLAIMS -ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE -COPYRIGHT HOLDER BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR -CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS -OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE -OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE -USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/deps/npm/node_modules/tuf-js/node_modules/ssri/lib/index.js b/deps/npm/node_modules/tuf-js/node_modules/ssri/lib/index.js deleted file mode 100644 index 7d749ed480fb98..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/ssri/lib/index.js +++ /dev/null @@ -1,580 +0,0 @@ -'use strict' - -const crypto = require('crypto') -const { Minipass } = require('minipass') - -const SPEC_ALGORITHMS = ['sha512', 'sha384', 'sha256'] -const DEFAULT_ALGORITHMS = ['sha512'] - -// TODO: this should really be a hardcoded list of algorithms we support, -// rather than [a-z0-9]. -const BASE64_REGEX = /^[a-z0-9+/]+(?:=?=?)$/i -const SRI_REGEX = /^([a-z0-9]+)-([^?]+)([?\S*]*)$/ -const STRICT_SRI_REGEX = /^([a-z0-9]+)-([A-Za-z0-9+/=]{44,88})(\?[\x21-\x7E]*)?$/ -const VCHAR_REGEX = /^[\x21-\x7E]+$/ - -const getOptString = options => options?.length ? `?${options.join('?')}` : '' - -class IntegrityStream extends Minipass { - #emittedIntegrity - #emittedSize - #emittedVerified - - constructor (opts) { - super() - this.size = 0 - this.opts = opts - - // may be overridden later, but set now for class consistency - this.#getOptions() - - // options used for calculating stream. can't be changed. 
- if (opts?.algorithms) { - this.algorithms = [...opts.algorithms] - } else { - this.algorithms = [...DEFAULT_ALGORITHMS] - } - if (this.algorithm !== null && !this.algorithms.includes(this.algorithm)) { - this.algorithms.push(this.algorithm) - } - - this.hashes = this.algorithms.map(crypto.createHash) - } - - #getOptions () { - // For verification - this.sri = this.opts?.integrity ? parse(this.opts?.integrity, this.opts) : null - this.expectedSize = this.opts?.size - - if (!this.sri) { - this.algorithm = null - } else if (this.sri.isHash) { - this.goodSri = true - this.algorithm = this.sri.algorithm - } else { - this.goodSri = !this.sri.isEmpty() - this.algorithm = this.sri.pickAlgorithm(this.opts) - } - - this.digests = this.goodSri ? this.sri[this.algorithm] : null - this.optString = getOptString(this.opts?.options) - } - - on (ev, handler) { - if (ev === 'size' && this.#emittedSize) { - return handler(this.#emittedSize) - } - - if (ev === 'integrity' && this.#emittedIntegrity) { - return handler(this.#emittedIntegrity) - } - - if (ev === 'verified' && this.#emittedVerified) { - return handler(this.#emittedVerified) - } - - return super.on(ev, handler) - } - - emit (ev, data) { - if (ev === 'end') { - this.#onEnd() - } - return super.emit(ev, data) - } - - write (data) { - this.size += data.length - this.hashes.forEach(h => h.update(data)) - return super.write(data) - } - - #onEnd () { - if (!this.goodSri) { - this.#getOptions() - } - const newSri = parse(this.hashes.map((h, i) => { - return `${this.algorithms[i]}-${h.digest('base64')}${this.optString}` - }).join(' '), this.opts) - // Integrity verification mode - const match = this.goodSri && newSri.match(this.sri, this.opts) - if (typeof this.expectedSize === 'number' && this.size !== this.expectedSize) { - /* eslint-disable-next-line max-len */ - const err = new Error(`stream size mismatch when checking ${this.sri}.\n Wanted: ${this.expectedSize}\n Found: ${this.size}`) - err.code = 'EBADSIZE' - err.found = this.size - err.expected = this.expectedSize - err.sri = this.sri - this.emit('error', err) - } else if (this.sri && !match) { - /* eslint-disable-next-line max-len */ - const err = new Error(`${this.sri} integrity checksum failed when using ${this.algorithm}: wanted ${this.digests} but got ${newSri}. (${this.size} bytes)`) - err.code = 'EINTEGRITY' - err.found = newSri - err.expected = this.digests - err.algorithm = this.algorithm - err.sri = this.sri - this.emit('error', err) - } else { - this.#emittedSize = this.size - this.emit('size', this.size) - this.#emittedIntegrity = newSri - this.emit('integrity', newSri) - if (match) { - this.#emittedVerified = match - this.emit('verified', match) - } - } - } -} - -class Hash { - get isHash () { - return true - } - - constructor (hash, opts) { - const strict = opts?.strict - this.source = hash.trim() - - // set default values so that we make V8 happy to - // always see a familiar object template. - this.digest = '' - this.algorithm = '' - this.options = [] - - // 3.1. Integrity metadata (called "Hash" by ssri) - // https://w3c.github.io/webappsec-subresource-integrity/#integrity-metadata-description - const match = this.source.match( - strict - ? 
STRICT_SRI_REGEX - : SRI_REGEX - ) - if (!match) { - return - } - if (strict && !SPEC_ALGORITHMS.includes(match[1])) { - return - } - this.algorithm = match[1] - this.digest = match[2] - - const rawOpts = match[3] - if (rawOpts) { - this.options = rawOpts.slice(1).split('?') - } - } - - hexDigest () { - return this.digest && Buffer.from(this.digest, 'base64').toString('hex') - } - - toJSON () { - return this.toString() - } - - match (integrity, opts) { - const other = parse(integrity, opts) - if (!other) { - return false - } - if (other.isIntegrity) { - const algo = other.pickAlgorithm(opts, [this.algorithm]) - - if (!algo) { - return false - } - - const foundHash = other[algo].find(hash => hash.digest === this.digest) - - if (foundHash) { - return foundHash - } - - return false - } - return other.digest === this.digest ? other : false - } - - toString (opts) { - if (opts?.strict) { - // Strict mode enforces the standard as close to the foot of the - // letter as it can. - if (!( - // The spec has very restricted productions for algorithms. - // https://www.w3.org/TR/CSP2/#source-list-syntax - SPEC_ALGORITHMS.includes(this.algorithm) && - // Usually, if someone insists on using a "different" base64, we - // leave it as-is, since there's multiple standards, and the - // specified is not a URL-safe variant. - // https://www.w3.org/TR/CSP2/#base64_value - this.digest.match(BASE64_REGEX) && - // Option syntax is strictly visual chars. - // https://w3c.github.io/webappsec-subresource-integrity/#grammardef-option-expression - // https://tools.ietf.org/html/rfc5234#appendix-B.1 - this.options.every(opt => opt.match(VCHAR_REGEX)) - )) { - return '' - } - } - return `${this.algorithm}-${this.digest}${getOptString(this.options)}` - } -} - -function integrityHashToString (toString, sep, opts, hashes) { - const toStringIsNotEmpty = toString !== '' - - let shouldAddFirstSep = false - let complement = '' - - const lastIndex = hashes.length - 1 - - for (let i = 0; i < lastIndex; i++) { - const hashString = Hash.prototype.toString.call(hashes[i], opts) - - if (hashString) { - shouldAddFirstSep = true - - complement += hashString - complement += sep - } - } - - const finalHashString = Hash.prototype.toString.call(hashes[lastIndex], opts) - - if (finalHashString) { - shouldAddFirstSep = true - complement += finalHashString - } - - if (toStringIsNotEmpty && shouldAddFirstSep) { - return toString + sep + complement - } - - return toString + complement -} - -class Integrity { - get isIntegrity () { - return true - } - - toJSON () { - return this.toString() - } - - isEmpty () { - return Object.keys(this).length === 0 - } - - toString (opts) { - let sep = opts?.sep || ' ' - let toString = '' - - if (opts?.strict) { - // Entries must be separated by whitespace, according to spec. - sep = sep.replace(/\S+/g, ' ') - - for (const hash of SPEC_ALGORITHMS) { - if (this[hash]) { - toString = integrityHashToString(toString, sep, opts, this[hash]) - } - } - } else { - for (const hash of Object.keys(this)) { - toString = integrityHashToString(toString, sep, opts, this[hash]) - } - } - - return toString - } - - concat (integrity, opts) { - const other = typeof integrity === 'string' - ? integrity - : stringify(integrity, opts) - return parse(`${this.toString(opts)} ${other}`, opts) - } - - hexDigest () { - return parse(this, { single: true }).hexDigest() - } - - // add additional hashes to an integrity value, but prevent - // *changing* an existing integrity hash. 
- merge (integrity, opts) { - const other = parse(integrity, opts) - for (const algo in other) { - if (this[algo]) { - if (!this[algo].find(hash => - other[algo].find(otherhash => - hash.digest === otherhash.digest))) { - throw new Error('hashes do not match, cannot update integrity') - } - } else { - this[algo] = other[algo] - } - } - } - - match (integrity, opts) { - const other = parse(integrity, opts) - if (!other) { - return false - } - const algo = other.pickAlgorithm(opts, Object.keys(this)) - return ( - !!algo && - this[algo] && - other[algo] && - this[algo].find(hash => - other[algo].find(otherhash => - hash.digest === otherhash.digest - ) - ) - ) || false - } - - // Pick the highest priority algorithm present, optionally also limited to a - // set of hashes found in another integrity. When limiting it may return - // nothing. - pickAlgorithm (opts, hashes) { - const pickAlgorithm = opts?.pickAlgorithm || getPrioritizedHash - const keys = Object.keys(this).filter(k => { - if (hashes?.length) { - return hashes.includes(k) - } - return true - }) - if (keys.length) { - return keys.reduce((acc, algo) => pickAlgorithm(acc, algo) || acc) - } - // no intersection between this and hashes, - return null - } -} - -module.exports.parse = parse -function parse (sri, opts) { - if (!sri) { - return null - } - if (typeof sri === 'string') { - return _parse(sri, opts) - } else if (sri.algorithm && sri.digest) { - const fullSri = new Integrity() - fullSri[sri.algorithm] = [sri] - return _parse(stringify(fullSri, opts), opts) - } else { - return _parse(stringify(sri, opts), opts) - } -} - -function _parse (integrity, opts) { - // 3.4.3. Parse metadata - // https://w3c.github.io/webappsec-subresource-integrity/#parse-metadata - if (opts?.single) { - return new Hash(integrity, opts) - } - const hashes = integrity.trim().split(/\s+/).reduce((acc, string) => { - const hash = new Hash(string, opts) - if (hash.algorithm && hash.digest) { - const algo = hash.algorithm - if (!acc[algo]) { - acc[algo] = [] - } - acc[algo].push(hash) - } - return acc - }, new Integrity()) - return hashes.isEmpty() ? null : hashes -} - -module.exports.stringify = stringify -function stringify (obj, opts) { - if (obj.algorithm && obj.digest) { - return Hash.prototype.toString.call(obj, opts) - } else if (typeof obj === 'string') { - return stringify(parse(obj, opts), opts) - } else { - return Integrity.prototype.toString.call(obj, opts) - } -} - -module.exports.fromHex = fromHex -function fromHex (hexDigest, algorithm, opts) { - const optString = getOptString(opts?.options) - return parse( - `${algorithm}-${ - Buffer.from(hexDigest, 'hex').toString('base64') - }${optString}`, opts - ) -} - -module.exports.fromData = fromData -function fromData (data, opts) { - const algorithms = opts?.algorithms || [...DEFAULT_ALGORITHMS] - const optString = getOptString(opts?.options) - return algorithms.reduce((acc, algo) => { - const digest = crypto.createHash(algo).update(data).digest('base64') - const hash = new Hash( - `${algo}-${digest}${optString}`, - opts - ) - /* istanbul ignore else - it would be VERY strange if the string we - * just calculated with an algo did not have an algo or digest. 
- */ - if (hash.algorithm && hash.digest) { - const hashAlgo = hash.algorithm - if (!acc[hashAlgo]) { - acc[hashAlgo] = [] - } - acc[hashAlgo].push(hash) - } - return acc - }, new Integrity()) -} - -module.exports.fromStream = fromStream -function fromStream (stream, opts) { - const istream = integrityStream(opts) - return new Promise((resolve, reject) => { - stream.pipe(istream) - stream.on('error', reject) - istream.on('error', reject) - let sri - istream.on('integrity', s => { - sri = s - }) - istream.on('end', () => resolve(sri)) - istream.resume() - }) -} - -module.exports.checkData = checkData -function checkData (data, sri, opts) { - sri = parse(sri, opts) - if (!sri || !Object.keys(sri).length) { - if (opts?.error) { - throw Object.assign( - new Error('No valid integrity hashes to check against'), { - code: 'EINTEGRITY', - } - ) - } else { - return false - } - } - const algorithm = sri.pickAlgorithm(opts) - const digest = crypto.createHash(algorithm).update(data).digest('base64') - const newSri = parse({ algorithm, digest }) - const match = newSri.match(sri, opts) - opts = opts || {} - if (match || !(opts.error)) { - return match - } else if (typeof opts.size === 'number' && (data.length !== opts.size)) { - /* eslint-disable-next-line max-len */ - const err = new Error(`data size mismatch when checking ${sri}.\n Wanted: ${opts.size}\n Found: ${data.length}`) - err.code = 'EBADSIZE' - err.found = data.length - err.expected = opts.size - err.sri = sri - throw err - } else { - /* eslint-disable-next-line max-len */ - const err = new Error(`Integrity checksum failed when using ${algorithm}: Wanted ${sri}, but got ${newSri}. (${data.length} bytes)`) - err.code = 'EINTEGRITY' - err.found = newSri - err.expected = sri - err.algorithm = algorithm - err.sri = sri - throw err - } -} - -module.exports.checkStream = checkStream -function checkStream (stream, sri, opts) { - opts = opts || Object.create(null) - opts.integrity = sri - sri = parse(sri, opts) - if (!sri || !Object.keys(sri).length) { - return Promise.reject(Object.assign( - new Error('No valid integrity hashes to check against'), { - code: 'EINTEGRITY', - } - )) - } - const checker = integrityStream(opts) - return new Promise((resolve, reject) => { - stream.pipe(checker) - stream.on('error', reject) - checker.on('error', reject) - let verified - checker.on('verified', s => { - verified = s - }) - checker.on('end', () => resolve(verified)) - checker.resume() - }) -} - -module.exports.integrityStream = integrityStream -function integrityStream (opts = Object.create(null)) { - return new IntegrityStream(opts) -} - -module.exports.create = createIntegrity -function createIntegrity (opts) { - const algorithms = opts?.algorithms || [...DEFAULT_ALGORITHMS] - const optString = getOptString(opts?.options) - - const hashes = algorithms.map(crypto.createHash) - - return { - update: function (chunk, enc) { - hashes.forEach(h => h.update(chunk, enc)) - return this - }, - digest: function () { - const integrity = algorithms.reduce((acc, algo) => { - const digest = hashes.shift().digest('base64') - const hash = new Hash( - `${algo}-${digest}${optString}`, - opts - ) - /* istanbul ignore else - it would be VERY strange if the hash we - * just calculated with an algo did not have an algo or digest. 
- */ - if (hash.algorithm && hash.digest) { - const hashAlgo = hash.algorithm - if (!acc[hashAlgo]) { - acc[hashAlgo] = [] - } - acc[hashAlgo].push(hash) - } - return acc - }, new Integrity()) - - return integrity - }, - } -} - -const NODE_HASHES = crypto.getHashes() - -// This is a Best Effort™ at a reasonable priority for hash algos -const DEFAULT_PRIORITY = [ - 'md5', 'whirlpool', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512', - // TODO - it's unclear _which_ of these Node will actually use as its name - // for the algorithm, so we guesswork it based on the OpenSSL names. - 'sha3', - 'sha3-256', 'sha3-384', 'sha3-512', - 'sha3_256', 'sha3_384', 'sha3_512', -].filter(algo => NODE_HASHES.includes(algo)) - -function getPrioritizedHash (algo1, algo2) { - /* eslint-disable-next-line max-len */ - return DEFAULT_PRIORITY.indexOf(algo1.toLowerCase()) >= DEFAULT_PRIORITY.indexOf(algo2.toLowerCase()) - ? algo1 - : algo2 -} diff --git a/deps/npm/node_modules/tuf-js/node_modules/ssri/package.json b/deps/npm/node_modules/tuf-js/node_modules/ssri/package.json deleted file mode 100644 index 28395414e4643c..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/ssri/package.json +++ /dev/null @@ -1,65 +0,0 @@ -{ - "name": "ssri", - "version": "10.0.6", - "description": "Standard Subresource Integrity library -- parses, serializes, generates, and verifies integrity metadata according to the SRI spec.", - "main": "lib/index.js", - "files": [ - "bin/", - "lib/" - ], - "scripts": { - "prerelease": "npm t", - "postrelease": "npm publish", - "posttest": "npm run lint", - "test": "tap", - "coverage": "tap", - "lint": "eslint \"**/*.{js,cjs,ts,mjs,jsx,tsx}\"", - "postlint": "template-oss-check", - "template-oss-apply": "template-oss-apply --force", - "lintfix": "npm run lint -- --fix", - "snap": "tap" - }, - "tap": { - "check-coverage": true, - "nyc-arg": [ - "--exclude", - "tap-snapshots/**" - ] - }, - "repository": { - "type": "git", - "url": "git+https://github.com/npm/ssri.git" - }, - "keywords": [ - "w3c", - "web", - "security", - "integrity", - "checksum", - "hashing", - "subresource integrity", - "sri", - "sri hash", - "sri string", - "sri generator", - "html" - ], - "author": "GitHub Inc.", - "license": "ISC", - "dependencies": { - "minipass": "^7.0.3" - }, - "devDependencies": { - "@npmcli/eslint-config": "^4.0.0", - "@npmcli/template-oss": "4.22.0", - "tap": "^16.0.1" - }, - "engines": { - "node": "^14.17.0 || ^16.13.0 || >=18.0.0" - }, - "templateOSS": { - "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. Edits may be overwritten.", - "version": "4.22.0", - "publish": "true" - } -} diff --git a/deps/npm/node_modules/tuf-js/node_modules/unique-filename/LICENSE b/deps/npm/node_modules/tuf-js/node_modules/unique-filename/LICENSE deleted file mode 100644 index 69619c125ea7ef..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/unique-filename/LICENSE +++ /dev/null @@ -1,5 +0,0 @@ -Copyright npm, Inc - -Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/deps/npm/node_modules/tuf-js/node_modules/unique-filename/lib/index.js b/deps/npm/node_modules/tuf-js/node_modules/unique-filename/lib/index.js deleted file mode 100644 index d067d2e709809a..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/unique-filename/lib/index.js +++ /dev/null @@ -1,7 +0,0 @@ -var path = require('path') - -var uniqueSlug = require('unique-slug') - -module.exports = function (filepath, prefix, uniq) { - return path.join(filepath, (prefix ? prefix + '-' : '') + uniqueSlug(uniq)) -} diff --git a/deps/npm/node_modules/tuf-js/node_modules/unique-filename/package.json b/deps/npm/node_modules/tuf-js/node_modules/unique-filename/package.json deleted file mode 100644 index b2fbf0666489a6..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/unique-filename/package.json +++ /dev/null @@ -1,51 +0,0 @@ -{ - "name": "unique-filename", - "version": "3.0.0", - "description": "Generate a unique filename for use in temporary directories or caches.", - "main": "lib/index.js", - "scripts": { - "test": "tap", - "lint": "eslint \"**/*.js\"", - "postlint": "template-oss-check", - "template-oss-apply": "template-oss-apply --force", - "lintfix": "npm run lint -- --fix", - "snap": "tap", - "posttest": "npm run lint" - }, - "repository": { - "type": "git", - "url": "https://github.com/npm/unique-filename.git" - }, - "keywords": [], - "author": "GitHub Inc.", - "license": "ISC", - "bugs": { - "url": "https://github.com/iarna/unique-filename/issues" - }, - "homepage": "https://github.com/iarna/unique-filename", - "devDependencies": { - "@npmcli/eslint-config": "^4.0.0", - "@npmcli/template-oss": "4.5.1", - "tap": "^16.3.0" - }, - "dependencies": { - "unique-slug": "^4.0.0" - }, - "files": [ - "bin/", - "lib/" - ], - "engines": { - "node": "^14.17.0 || ^16.13.0 || >=18.0.0" - }, - "templateOSS": { - "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. Edits may be overwritten.", - "version": "4.5.1" - }, - "tap": { - "nyc-arg": [ - "--exclude", - "tap-snapshots/**" - ] - } -} diff --git a/deps/npm/node_modules/tuf-js/node_modules/unique-slug/LICENSE b/deps/npm/node_modules/tuf-js/node_modules/unique-slug/LICENSE deleted file mode 100644 index 7953647e7760b8..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/unique-slug/LICENSE +++ /dev/null @@ -1,15 +0,0 @@ -The ISC License - -Copyright npm, Inc - -Permission to use, copy, modify, and/or distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR -IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
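Before the unique-slug sources are removed below, a short sketch of how the two deleted modules compose: unique-filename simply joins a directory, an optional prefix, and the 8-hex-character slug produced by unique-slug, so passing the same uniq string always yields the same path, while omitting it yields a random one. The directory, prefix, and input values here are made up for illustration:

const uniqueFilename = require('unique-filename')
const uniqueSlug = require('unique-slug')

// Random slug: 8 hex characters, different on every call.
console.log(uniqueSlug())              // e.g. '7e3091d2'

// Deterministic slug: MurmurHash3 of the input string, so the same
// input always maps to the same 8 hex characters.
console.log(uniqueSlug('/my/input'))   // stable across calls

// unique-filename prepends a directory and optional prefix:
// <dir>/<prefix>-<slug>
console.log(uniqueFilename('/tmp', 'npm-tmp', '/my/input'))
// -> '/tmp/npm-tmp-<same slug as above>'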
diff --git a/deps/npm/node_modules/tuf-js/node_modules/unique-slug/lib/index.js b/deps/npm/node_modules/tuf-js/node_modules/unique-slug/lib/index.js deleted file mode 100644 index 1bac84d95d7307..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/unique-slug/lib/index.js +++ /dev/null @@ -1,11 +0,0 @@ -'use strict' -var MurmurHash3 = require('imurmurhash') - -module.exports = function (uniq) { - if (uniq) { - var hash = new MurmurHash3(uniq) - return ('00000000' + hash.result().toString(16)).slice(-8) - } else { - return (Math.random().toString(16) + '0000000').slice(2, 10) - } -} diff --git a/deps/npm/node_modules/tuf-js/node_modules/unique-slug/package.json b/deps/npm/node_modules/tuf-js/node_modules/unique-slug/package.json deleted file mode 100644 index 33732cdbb42859..00000000000000 --- a/deps/npm/node_modules/tuf-js/node_modules/unique-slug/package.json +++ /dev/null @@ -1,47 +0,0 @@ -{ - "name": "unique-slug", - "version": "4.0.0", - "description": "Generate a unique character string suitible for use in files and URLs.", - "main": "lib/index.js", - "scripts": { - "test": "tap", - "lint": "eslint \"**/*.js\"", - "postlint": "template-oss-check", - "template-oss-apply": "template-oss-apply --force", - "lintfix": "npm run lint -- --fix", - "snap": "tap", - "posttest": "npm run lint" - }, - "keywords": [], - "author": "GitHub Inc.", - "license": "ISC", - "devDependencies": { - "@npmcli/eslint-config": "^3.1.0", - "@npmcli/template-oss": "4.5.1", - "tap": "^16.3.0" - }, - "repository": { - "type": "git", - "url": "https://github.com/npm/unique-slug.git" - }, - "dependencies": { - "imurmurhash": "^0.1.4" - }, - "files": [ - "bin/", - "lib/" - ], - "engines": { - "node": "^14.17.0 || ^16.13.0 || >=18.0.0" - }, - "templateOSS": { - "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. 
Edits may be overwritten.", - "version": "4.5.1" - }, - "tap": { - "nyc-arg": [ - "--exclude", - "tap-snapshots/**" - ] - } -} diff --git a/deps/npm/node_modules/tuf-js/package.json b/deps/npm/node_modules/tuf-js/package.json index 9280719230d9ab..e79a3d45f3f06a 100644 --- a/deps/npm/node_modules/tuf-js/package.json +++ b/deps/npm/node_modules/tuf-js/package.json @@ -1,12 +1,12 @@ { "name": "tuf-js", - "version": "2.2.1", + "version": "3.0.1", "description": "JavaScript implementation of The Update Framework (TUF)", "main": "dist/index.js", "types": "dist/index.d.ts", "scripts": { "build": "tsc --build", - "clean": "rm -rf dist", + "clean": "rm -rf dist && rm tsconfig.tsbuildinfo", "test": "jest" }, "repository": { @@ -28,16 +28,16 @@ }, "homepage": "https://github.com/theupdateframework/tuf-js/tree/main/packages/client#readme", "devDependencies": { - "@tufjs/repo-mock": "2.0.1", + "@tufjs/repo-mock": "3.0.1", "@types/debug": "^4.1.12", "@types/make-fetch-happen": "^10.0.4" }, "dependencies": { - "@tufjs/models": "2.0.1", - "debug": "^4.3.4", - "make-fetch-happen": "^13.0.1" + "@tufjs/models": "3.0.1", + "debug": "^4.3.6", + "make-fetch-happen": "^14.0.1" }, "engines": { - "node": "^16.14.0 || >=18.0.0" + "node": "^18.17.0 || >=20.5.0" } } diff --git a/deps/npm/node_modules/wrap-ansi/node_modules/ansi-regex/index.js b/deps/npm/node_modules/wrap-ansi/node_modules/ansi-regex/index.js index 130a0929b8ce8c..ddfdba39a783a4 100644 --- a/deps/npm/node_modules/wrap-ansi/node_modules/ansi-regex/index.js +++ b/deps/npm/node_modules/wrap-ansi/node_modules/ansi-regex/index.js @@ -1,7 +1,9 @@ export default function ansiRegex({onlyFirst = false} = {}) { + // Valid string terminator sequences are BEL, ESC\, and 0x9c + const ST = '(?:\\u0007|\\u001B\\u005C|\\u009C)'; const pattern = [ - '[\\u001B\\u009B][[\\]()#;?]*(?:(?:(?:(?:;[-a-zA-Z\\d\\/#&.:=?%@~_]+)*|[a-zA-Z\\d]+(?:;[-a-zA-Z\\d\\/#&.:=?%@~_]*)*)?\\u0007)', - '(?:(?:\\d{1,4}(?:;\\d{0,4})*)?[\\dA-PR-TZcf-ntqry=><~]))' + `[\\u001B\\u009B][[\\]()#;?]*(?:(?:(?:(?:;[-a-zA-Z\\d\\/#&.:=?%@~_]+)*|[a-zA-Z\\d]+(?:;[-a-zA-Z\\d\\/#&.:=?%@~_]*)*)?${ST})`, + '(?:(?:\\d{1,4}(?:;\\d{0,4})*)?[\\dA-PR-TZcf-nq-uy=><~]))', ].join('|'); return new RegExp(pattern, onlyFirst ? 
undefined : 'g'); diff --git a/deps/npm/node_modules/wrap-ansi/node_modules/ansi-regex/package.json b/deps/npm/node_modules/wrap-ansi/node_modules/ansi-regex/package.json index 7bbb563bf2a70a..49f3f61021512b 100644 --- a/deps/npm/node_modules/wrap-ansi/node_modules/ansi-regex/package.json +++ b/deps/npm/node_modules/wrap-ansi/node_modules/ansi-regex/package.json @@ -1,6 +1,6 @@ { "name": "ansi-regex", - "version": "6.0.1", + "version": "6.1.0", "description": "Regular expression for matching ANSI escape codes", "license": "MIT", "repository": "chalk/ansi-regex", @@ -12,6 +12,8 @@ }, "type": "module", "exports": "./index.js", + "types": "./index.d.ts", + "sideEffects": false, "engines": { "node": ">=12" }, @@ -51,8 +53,9 @@ "pattern" ], "devDependencies": { + "ansi-escapes": "^5.0.0", "ava": "^3.15.0", - "tsd": "^0.14.0", - "xo": "^0.38.2" + "tsd": "^0.21.0", + "xo": "^0.54.2" } } diff --git a/deps/npm/package.json b/deps/npm/package.json index c92578506b30ac..8d01af4a7ce8d0 100644 --- a/deps/npm/package.json +++ b/deps/npm/package.json @@ -1,5 +1,5 @@ { - "version": "10.9.0", + "version": "10.9.2", "name": "npm", "description": "a package manager for JavaScript", "workspaces": [ @@ -55,25 +55,25 @@ "@npmcli/arborist": "^8.0.0", "@npmcli/config": "^9.0.0", "@npmcli/fs": "^4.0.0", - "@npmcli/map-workspaces": "^4.0.1", - "@npmcli/package-json": "^6.0.1", - "@npmcli/promise-spawn": "^8.0.1", + "@npmcli/map-workspaces": "^4.0.2", + "@npmcli/package-json": "^6.1.0", + "@npmcli/promise-spawn": "^8.0.2", "@npmcli/redact": "^3.0.0", "@npmcli/run-script": "^9.0.1", - "@sigstore/tuf": "^2.3.4", + "@sigstore/tuf": "^3.0.0", "abbrev": "^3.0.0", "archy": "~1.0.0", "cacache": "^19.0.1", "chalk": "^5.3.0", - "ci-info": "^4.0.0", + "ci-info": "^4.1.0", "cli-columns": "^4.0.0", "fastest-levenshtein": "^1.0.16", "fs-minipass": "^3.0.3", "glob": "^10.4.5", "graceful-fs": "^4.2.11", - "hosted-git-info": "^8.0.0", + "hosted-git-info": "^8.0.2", "ini": "^5.0.0", - "init-package-json": "^7.0.1", + "init-package-json": "^7.0.2", "is-cidr": "^5.1.0", "json-parse-even-better-errors": "^4.0.0", "libnpmaccess": "^9.0.0", @@ -83,27 +83,27 @@ "libnpmhook": "^11.0.0", "libnpmorg": "^7.0.0", "libnpmpack": "^8.0.0", - "libnpmpublish": "^10.0.0", + "libnpmpublish": "^10.0.1", "libnpmsearch": "^8.0.0", "libnpmteam": "^7.0.0", "libnpmversion": "^7.0.0", - "make-fetch-happen": "^14.0.1", + "make-fetch-happen": "^14.0.3", "minimatch": "^9.0.5", "minipass": "^7.1.1", "minipass-pipeline": "^1.2.4", "ms": "^2.1.2", - "node-gyp": "^10.2.0", + "node-gyp": "^11.0.0", "nopt": "^8.0.0", "normalize-package-data": "^7.0.0", "npm-audit-report": "^6.0.0", - "npm-install-checks": "^7.1.0", + "npm-install-checks": "^7.1.1", "npm-package-arg": "^12.0.0", "npm-pick-manifest": "^10.0.0", "npm-profile": "^11.0.1", - "npm-registry-fetch": "^18.0.1", + "npm-registry-fetch": "^18.0.2", "npm-user-validate": "^3.0.0", "p-map": "^4.0.0", - "pacote": "^19.0.0", + "pacote": "^19.0.1", "parse-conflict-json": "^4.0.0", "proc-log": "^5.0.0", "qrcode-terminal": "^0.12.0", diff --git a/deps/simdjson/simdjson.cpp b/deps/simdjson/simdjson.cpp index b5dd7243853217..d79fc703a815ca 100644 --- a/deps/simdjson/simdjson.cpp +++ b/deps/simdjson/simdjson.cpp @@ -1,4 +1,4 @@ -/* auto-generated on 2024-08-01 09:31:50 -0400. Do not edit! */ +/* auto-generated on 2024-08-26 09:37:03 -0400. Do not edit! 
*/ /* including simdjson.cpp: */ /* begin file simdjson.cpp */ #define SIMDJSON_SRC_SIMDJSON_CPP @@ -332,6 +332,8 @@ double from_chars(const char *first, const char* end) noexcept; #define SIMDJSON_ISALIGNED_N(ptr, n) (((uintptr_t)(ptr) & ((n)-1)) == 0) #if SIMDJSON_REGULAR_VISUAL_STUDIO + // We could use [[deprecated]] but it requires C++14 + #define simdjson_deprecated __declspec(deprecated) #define simdjson_really_inline __forceinline #define simdjson_never_inline __declspec(noinline) @@ -370,6 +372,8 @@ double from_chars(const char *first, const char* end) noexcept; #define SIMDJSON_POP_DISABLE_UNUSED_WARNINGS #else // SIMDJSON_REGULAR_VISUAL_STUDIO + // We could use [[deprecated]] but it requires C++14 + #define simdjson_deprecated __attribute__((deprecated)) #define simdjson_really_inline inline __attribute__((always_inline)) #define simdjson_never_inline inline __attribute__((noinline)) diff --git a/deps/simdjson/simdjson.h b/deps/simdjson/simdjson.h index ddb6f2e4e0a6ed..f21cd9381eef59 100644 --- a/deps/simdjson/simdjson.h +++ b/deps/simdjson/simdjson.h @@ -1,4 +1,4 @@ -/* auto-generated on 2024-08-01 09:31:50 -0400. Do not edit! */ +/* auto-generated on 2024-08-26 09:37:03 -0400. Do not edit! */ /* including simdjson.h: */ /* begin file simdjson.h */ #ifndef SIMDJSON_H @@ -352,6 +352,8 @@ double from_chars(const char *first, const char* end) noexcept; #define SIMDJSON_ISALIGNED_N(ptr, n) (((uintptr_t)(ptr) & ((n)-1)) == 0) #if SIMDJSON_REGULAR_VISUAL_STUDIO + // We could use [[deprecated]] but it requires C++14 + #define simdjson_deprecated __declspec(deprecated) #define simdjson_really_inline __forceinline #define simdjson_never_inline __declspec(noinline) @@ -390,6 +392,8 @@ double from_chars(const char *first, const char* end) noexcept; #define SIMDJSON_POP_DISABLE_UNUSED_WARNINGS #else // SIMDJSON_REGULAR_VISUAL_STUDIO + // We could use [[deprecated]] but it requires C++14 + #define simdjson_deprecated __attribute__((deprecated)) #define simdjson_really_inline inline __attribute__((always_inline)) #define simdjson_never_inline inline __attribute__((noinline)) @@ -2366,7 +2370,7 @@ namespace std { #define SIMDJSON_SIMDJSON_VERSION_H /** The version of simdjson being used (major.minor.revision) */ -#define SIMDJSON_VERSION "3.10.0" +#define SIMDJSON_VERSION "3.10.1" namespace simdjson { enum { @@ -2381,7 +2385,7 @@ enum { /** * The revision (major.minor.REVISION) of simdjson being used. */ - SIMDJSON_VERSION_REVISION = 0 + SIMDJSON_VERSION_REVISION = 1 }; } // namespace simdjson @@ -34014,7 +34018,7 @@ class parser { private: /** @private [for benchmarking access] The implementation to use */ - std::unique_ptr implementation{}; + std::unique_ptr implementation{}; size_t _capacity{0}; size_t _max_capacity; size_t _max_depth{DEFAULT_MAX_DEPTH}; @@ -34557,7 +34561,7 @@ class document { " You may also add support for custom types, see our documentation."); } /** @overload template simdjson_result get() & noexcept */ - template simdjson_inline simdjson_result get() && noexcept { + template simdjson_deprecated simdjson_inline simdjson_result get() && noexcept { // Unless the simdjson library or the user provides an inline implementation, calling this method should // immediately fail. static_assert(!sizeof(T), "The get method with given type is not implemented by the simdjson library. 
" @@ -34580,7 +34584,7 @@ class document { */ template simdjson_inline error_code get(T &out) & noexcept; /** @overload template error_code get(T &out) & noexcept */ - template simdjson_inline error_code get(T &out) && noexcept; + template simdjson_deprecated simdjson_inline error_code get(T &out) && noexcept; #if SIMDJSON_EXCEPTIONS /** @@ -34593,7 +34597,10 @@ class document { * @returns An instance of type T */ template - explicit simdjson_inline operator T() noexcept(false); + explicit simdjson_inline operator T() & noexcept(false); + template + explicit simdjson_deprecated simdjson_inline operator T() && noexcept(false); + /** * Cast this JSON value to an array. * @@ -35159,7 +35166,7 @@ struct simdjson_result : public arm64::implementation simdjson_inline simdjson_result is_null() noexcept; template simdjson_inline simdjson_result get() & noexcept; - template simdjson_inline simdjson_result get() && noexcept; + template simdjson_deprecated simdjson_inline simdjson_result get() && noexcept; template simdjson_inline error_code get(T &out) & noexcept; template simdjson_inline error_code get(T &out) && noexcept; @@ -35599,7 +35606,7 @@ class document_stream { friend class document; friend class json_iterator; friend struct simdjson_result; - friend struct internal::simdjson_result_base; + friend struct simdjson::internal::simdjson_result_base; }; // document_stream } // namespace ondemand @@ -36561,16 +36568,14 @@ simdjson_inline simdjson_result &simdjson_resul /* amalgamation skipped (editor-only): #ifndef SIMDJSON_CONDITIONAL_INCLUDE */ /* amalgamation skipped (editor-only): #define SIMDJSON_GENERIC_ONDEMAND_DOCUMENT_INL_H */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/base.h" */ -/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array-inl.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array_iterator.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/document.h" */ -/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_iterator-inl.h" */ -/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_path_to_pointer_conversion.h" */ -/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_path_to_pointer_conversion-inl.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_type.h" */ -/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/object-inl.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/raw_json_string.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/value.h" */ +/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array-inl.h" */ +/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_iterator-inl.h" */ +/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/object-inl.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/value_iterator-inl.h" */ /* amalgamation skipped (editor-only): #endif // SIMDJSON_CONDITIONAL_INCLUDE */ @@ -36725,24 +36730,26 @@ template<> simdjson_inline simdjson_result document::get() & noexcept { template<> simdjson_inline simdjson_result document::get() & noexcept { return get_bool(); } template<> simdjson_inline simdjson_result document::get() & noexcept { return get_value(); } -template<> simdjson_inline simdjson_result document::get() && noexcept { return 
get_raw_json_string(); }
-template<> simdjson_inline simdjson_result<std::string_view> document::get() && noexcept { return get_string(false); }
-template<> simdjson_inline simdjson_result<double> document::get() && noexcept { return std::forward<document>(*this).get_double(); }
-template<> simdjson_inline simdjson_result<uint64_t> document::get() && noexcept { return std::forward<document>(*this).get_uint64(); }
-template<> simdjson_inline simdjson_result<int64_t> document::get() && noexcept { return std::forward<document>(*this).get_int64(); }
-template<> simdjson_inline simdjson_result<bool> document::get() && noexcept { return std::forward<document>(*this).get_bool(); }
-template<> simdjson_inline simdjson_result<value> document::get() && noexcept { return get_value(); }
+template<> simdjson_deprecated simdjson_inline simdjson_result<raw_json_string> document::get() && noexcept { return get_raw_json_string(); }
+template<> simdjson_deprecated simdjson_inline simdjson_result<std::string_view> document::get() && noexcept { return get_string(false); }
+template<> simdjson_deprecated simdjson_inline simdjson_result<double> document::get() && noexcept { return std::forward<document>(*this).get_double(); }
+template<> simdjson_deprecated simdjson_inline simdjson_result<uint64_t> document::get() && noexcept { return std::forward<document>(*this).get_uint64(); }
+template<> simdjson_deprecated simdjson_inline simdjson_result<int64_t> document::get() && noexcept { return std::forward<document>(*this).get_int64(); }
+template<> simdjson_deprecated simdjson_inline simdjson_result<bool> document::get() && noexcept { return std::forward<document>(*this).get_bool(); }
+template<> simdjson_deprecated simdjson_inline simdjson_result<value> document::get() && noexcept { return get_value(); }
 template<typename T> simdjson_inline error_code document::get(T &out) & noexcept { return get<T>().get(out); }
-template<typename T> simdjson_inline error_code document::get(T &out) && noexcept {
+template<typename T> simdjson_deprecated simdjson_inline error_code document::get(T &out) && noexcept {
   return std::forward<document>(*this).get<T>().get(out);
 }
 
 #if SIMDJSON_EXCEPTIONS
 template <class T>
-simdjson_inline document::operator T() noexcept(false) { return get<T>(); }
+simdjson_deprecated simdjson_inline document::operator T() && noexcept(false) { return get<T>(); }
+template <class T>
+simdjson_inline document::operator T() & noexcept(false) { return get<T>(); }
 simdjson_inline document::operator array() & noexcept(false) { return get_array(); }
 simdjson_inline document::operator object() & noexcept(false) { return get_object(); }
 simdjson_inline document::operator uint64_t() noexcept(false) { return get_uint64(); }
@@ -37029,7 +37036,7 @@ simdjson_inline simdjson_result simdjson_result::g
   return first.get<T>();
 }
 template<typename T>
-simdjson_inline simdjson_result<T> simdjson_result<document>::get() && noexcept {
+simdjson_deprecated simdjson_inline simdjson_result<T> simdjson_result<document>::get() && noexcept {
   if (error()) { return error(); }
   return std::forward<document>(first).get<T>();
 }
@@ -37045,7 +37052,7 @@ simdjson_inline error_code simdjson_result::get(T &ou
 }
 template<> simdjson_inline simdjson_result<document> simdjson_result<document>::get() & noexcept = delete;
-template<> simdjson_inline simdjson_result<document> simdjson_result<document>::get() && noexcept {
+template<> simdjson_deprecated simdjson_inline simdjson_result<document> simdjson_result<document>::get() && noexcept {
   if (error()) { return error(); }
   return std::forward<document>(first);
 }
@@ -40014,8 +40021,6 @@ simdjson_inline simdjson_result::simdjson_resul
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array_iterator.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_iterator.h" */
-/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_path_to_pointer_conversion.h" */
-/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_path_to_pointer_conversion-inl.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_type.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/object.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/raw_json_string.h" */
@@ -40561,9 +40566,9 @@ simdjson_inline simdjson_result simdjson_result
-  std::unique_ptr<internal::dom_parser_implementation> implementation{};
+  std::unique_ptr<simdjson::internal::dom_parser_implementation> implementation{};
   size_t _capacity{0};
   size_t _max_capacity;
   size_t _max_depth{DEFAULT_MAX_DEPTH};
@@ -45023,7 +45028,7 @@ class document {
               " You may also add support for custom types, see our documentation.");
   }
   /** @overload template<typename T> simdjson_result<T> get() & noexcept */
-  template<typename T> simdjson_inline simdjson_result<T> get() && noexcept {
+  template<typename T> simdjson_deprecated simdjson_inline simdjson_result<T> get() && noexcept {
     // Unless the simdjson library or the user provides an inline implementation, calling this method should
     // immediately fail.
     static_assert(!sizeof(T), "The get method with given type is not implemented by the simdjson library. "
@@ -45046,7 +45051,7 @@ class document {
    */
   template<typename T> simdjson_inline error_code get(T &out) & noexcept;
   /** @overload template<typename T> error_code get(T &out) & noexcept */
-  template<typename T> simdjson_inline error_code get(T &out) && noexcept;
+  template<typename T> simdjson_deprecated simdjson_inline error_code get(T &out) && noexcept;
 
 #if SIMDJSON_EXCEPTIONS
   /**
@@ -45059,7 +45064,10 @@ class document {
    * @returns An instance of type T
    */
   template <class T>
-  explicit simdjson_inline operator T() noexcept(false);
+  explicit simdjson_inline operator T() & noexcept(false);
+  template <class T>
+  explicit simdjson_deprecated simdjson_inline operator T() && noexcept(false);
+
   /**
    * Cast this JSON value to an array.
    *
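[editor's note] The hunks above — repeated once per SIMD backend throughout this amalgamated header — mark every rvalue-qualified document::get<T>() overload with simdjson_deprecated: an on-demand document must outlive the values lazily parsed from it, so extracting through a temporary is error-prone. A minimal usage sketch, assuming simdjson's documented ondemand API with exceptions enabled (the JSON payload is invented):

    #include <cstdint>
    #include <iostream>
    #include "simdjson.h"

    using namespace simdjson;

    int main() {
      padded_string json = R"({"answer": 42})"_padded;
      ondemand::parser parser;

      // Supported: bind the document to a name, then extract from the lvalue.
      ondemand::document doc = parser.iterate(json);
      uint64_t answer = doc["answer"];

      // Deprecated by this change: something like
      //   uint64_t answer = parser.iterate(json).get<uint64_t>();
      // calls get<T>() on a document rvalue and now triggers a
      // simdjson_deprecated warning.
      std::cout << answer << "\n";
    }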
@@ -45625,7 +45633,7 @@ struct simdjson_result : public fallback::implemen
   simdjson_inline simdjson_result<bool> is_null() noexcept;
   template<typename T> simdjson_inline simdjson_result<T> get() & noexcept;
-  template<typename T> simdjson_inline simdjson_result<T> get() && noexcept;
+  template<typename T> simdjson_deprecated simdjson_inline simdjson_result<T> get() && noexcept;
   template<typename T> simdjson_inline error_code get(T &out) & noexcept;
   template<typename T> simdjson_inline error_code get(T &out) && noexcept;
@@ -46065,7 +46073,7 @@ class document_stream {
   friend class document;
   friend class json_iterator;
   friend struct simdjson_result<ondemand::document_stream>;
-  friend struct internal::simdjson_result_base<ondemand::document_stream>;
+  friend struct simdjson::internal::simdjson_result_base<ondemand::document_stream>;
 }; // document_stream
 
 } // namespace ondemand
@@ -47027,16 +47035,14 @@ simdjson_inline simdjson_result &simdjson_re
 /* amalgamation skipped (editor-only): #ifndef SIMDJSON_CONDITIONAL_INCLUDE */
 /* amalgamation skipped (editor-only): #define SIMDJSON_GENERIC_ONDEMAND_DOCUMENT_INL_H */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/base.h" */
-/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array-inl.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array_iterator.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/document.h" */
-/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_iterator-inl.h" */
-/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_path_to_pointer_conversion.h" */
-/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_path_to_pointer_conversion-inl.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_type.h" */
-/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/object-inl.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/raw_json_string.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/value.h" */
+/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array-inl.h" */
+/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_iterator-inl.h" */
+/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/object-inl.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/value_iterator-inl.h" */
 /* amalgamation skipped (editor-only): #endif // SIMDJSON_CONDITIONAL_INCLUDE */
@@ -47191,24 +47197,26 @@ template<> simdjson_inline simdjson_result document::get() & noexcept {
 template<> simdjson_inline simdjson_result<bool> document::get() & noexcept { return get_bool(); }
 template<> simdjson_inline simdjson_result<value> document::get() & noexcept { return get_value(); }
-template<> simdjson_inline simdjson_result<raw_json_string> document::get() && noexcept { return get_raw_json_string(); }
-template<> simdjson_inline simdjson_result<std::string_view> document::get() && noexcept { return get_string(false); }
-template<> simdjson_inline simdjson_result<double> document::get() && noexcept { return std::forward<document>(*this).get_double(); }
-template<> simdjson_inline simdjson_result<uint64_t> document::get() && noexcept { return std::forward<document>(*this).get_uint64(); }
-template<> simdjson_inline simdjson_result<int64_t> document::get() && noexcept { return std::forward<document>(*this).get_int64(); }
-template<> simdjson_inline simdjson_result<bool> document::get() && noexcept { return std::forward<document>(*this).get_bool(); }
-template<> simdjson_inline simdjson_result<value> document::get() && noexcept { return get_value(); }
+template<> simdjson_deprecated simdjson_inline simdjson_result<raw_json_string> document::get() && noexcept { return get_raw_json_string(); }
+template<> simdjson_deprecated simdjson_inline simdjson_result<std::string_view> document::get() && noexcept { return get_string(false); }
+template<> simdjson_deprecated simdjson_inline simdjson_result<double> document::get() && noexcept { return std::forward<document>(*this).get_double(); }
+template<> simdjson_deprecated simdjson_inline simdjson_result<uint64_t> document::get() && noexcept { return std::forward<document>(*this).get_uint64(); }
+template<> simdjson_deprecated simdjson_inline simdjson_result<int64_t> document::get() && noexcept { return std::forward<document>(*this).get_int64(); }
+template<> simdjson_deprecated simdjson_inline simdjson_result<bool> document::get() && noexcept { return std::forward<document>(*this).get_bool(); }
+template<> simdjson_deprecated simdjson_inline simdjson_result<value> document::get() && noexcept { return get_value(); }
 template<typename T> simdjson_inline error_code document::get(T &out) & noexcept { return get<T>().get(out); }
-template<typename T> simdjson_inline error_code document::get(T &out) && noexcept {
+template<typename T> simdjson_deprecated simdjson_inline error_code document::get(T &out) && noexcept {
   return std::forward<document>(*this).get<T>().get(out);
 }
 
 #if SIMDJSON_EXCEPTIONS
 template <class T>
-simdjson_inline document::operator T() noexcept(false) { return get<T>(); }
+simdjson_deprecated simdjson_inline document::operator T() && noexcept(false) { return get<T>(); }
+template <class T>
+simdjson_inline document::operator T() & noexcept(false) { return get<T>(); }
 simdjson_inline document::operator array() & noexcept(false) { return get_array(); }
 simdjson_inline document::operator object() & noexcept(false) { return get_object(); }
 simdjson_inline document::operator uint64_t() noexcept(false) { return get_uint64(); }
@@ -47495,7 +47503,7 @@ simdjson_inline simdjson_result simdjson_result
   return first.get<T>();
 }
 template<typename T>
-simdjson_inline simdjson_result<T> simdjson_result<document>::get() && noexcept {
+simdjson_deprecated simdjson_inline simdjson_result<T> simdjson_result<document>::get() && noexcept {
   if (error()) { return error(); }
   return std::forward<document>(first).get<T>();
 }
@@ -47511,7 +47519,7 @@ simdjson_inline error_code simdjson_result::get(T
 }
 template<> simdjson_inline simdjson_result<document> simdjson_result<document>::get() & noexcept = delete;
-template<> simdjson_inline simdjson_result<document> simdjson_result<document>::get() && noexcept {
+template<> simdjson_deprecated simdjson_inline simdjson_result<document> simdjson_result<document>::get() && noexcept {
   if (error()) { return error(); }
   return std::forward<document>(first);
 }
@@ -50480,8 +50488,6 @@ simdjson_inline simdjson_result::simdjson_re
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array_iterator.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_iterator.h" */
-/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_path_to_pointer_conversion.h" */
-/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_path_to_pointer_conversion-inl.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_type.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/object.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/raw_json_string.h" */
@@ -51027,9 +51033,9 @@ simdjson_inline simdjson_result simdjson_result
-  std::unique_ptr<internal::dom_parser_implementation> implementation{};
+  std::unique_ptr<simdjson::internal::dom_parser_implementation> implementation{};
   size_t _capacity{0};
   size_t _max_capacity;
   size_t _max_depth{DEFAULT_MAX_DEPTH};
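[editor's note] The error_code-returning overload error_code get(T &out) && is deprecated in the same way, so exception-free callers should also extract through an lvalue document. A sketch of that pattern, assuming simdjson's documented error-code API (payload invented):

    #include <cstdint>
    #include <cstdio>
    #include "simdjson.h"

    using namespace simdjson;

    int main() {
      padded_string json = R"({"count": 7})"_padded;
      ondemand::parser parser;

      ondemand::document doc;
      auto error = parser.iterate(json).get(doc); // moves the document out
      if (error) { return 1; }

      uint64_t count = 0;
      error = doc["count"].get(count);            // lvalue get(T &): not deprecated
      if (error) { return 1; }

      std::printf("count=%llu\n", static_cast<unsigned long long>(count));
    }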
@@ -55981,7 +55987,7 @@ class document {
               " You may also add support for custom types, see our documentation.");
   }
   /** @overload template<typename T> simdjson_result<T> get() & noexcept */
-  template<typename T> simdjson_inline simdjson_result<T> get() && noexcept {
+  template<typename T> simdjson_deprecated simdjson_inline simdjson_result<T> get() && noexcept {
     // Unless the simdjson library or the user provides an inline implementation, calling this method should
     // immediately fail.
     static_assert(!sizeof(T), "The get method with given type is not implemented by the simdjson library. "
@@ -56004,7 +56010,7 @@ class document {
    */
   template<typename T> simdjson_inline error_code get(T &out) & noexcept;
   /** @overload template<typename T> error_code get(T &out) & noexcept */
-  template<typename T> simdjson_inline error_code get(T &out) && noexcept;
+  template<typename T> simdjson_deprecated simdjson_inline error_code get(T &out) && noexcept;
 
 #if SIMDJSON_EXCEPTIONS
   /**
@@ -56017,7 +56023,10 @@ class document {
    * @returns An instance of type T
    */
   template <class T>
-  explicit simdjson_inline operator T() noexcept(false);
+  explicit simdjson_inline operator T() & noexcept(false);
+  template <class T>
+  explicit simdjson_deprecated simdjson_inline operator T() && noexcept(false);
+
   /**
    * Cast this JSON value to an array.
    *
@@ -56583,7 +56592,7 @@ struct simdjson_result : public haswell::implementa
   simdjson_inline simdjson_result<bool> is_null() noexcept;
   template<typename T> simdjson_inline simdjson_result<T> get() & noexcept;
-  template<typename T> simdjson_inline simdjson_result<T> get() && noexcept;
+  template<typename T> simdjson_deprecated simdjson_inline simdjson_result<T> get() && noexcept;
   template<typename T> simdjson_inline error_code get(T &out) & noexcept;
   template<typename T> simdjson_inline error_code get(T &out) && noexcept;
@@ -57023,7 +57032,7 @@ class document_stream {
   friend class document;
   friend class json_iterator;
   friend struct simdjson_result<ondemand::document_stream>;
-  friend struct internal::simdjson_result_base<ondemand::document_stream>;
+  friend struct simdjson::internal::simdjson_result_base<ondemand::document_stream>;
 }; // document_stream
 
 } // namespace ondemand
@@ -57985,16 +57994,14 @@ simdjson_inline simdjson_result &simdjson_res
 /* amalgamation skipped (editor-only): #ifndef SIMDJSON_CONDITIONAL_INCLUDE */
 /* amalgamation skipped (editor-only): #define SIMDJSON_GENERIC_ONDEMAND_DOCUMENT_INL_H */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/base.h" */
-/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array-inl.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array_iterator.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/document.h" */
-/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_iterator-inl.h" */
-/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_path_to_pointer_conversion.h" */
-/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_path_to_pointer_conversion-inl.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_type.h" */
-/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/object-inl.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/raw_json_string.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/value.h" */
+/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array-inl.h" */
+/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_iterator-inl.h" */
+/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/object-inl.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/value_iterator-inl.h" */
 /* amalgamation skipped (editor-only): #endif // SIMDJSON_CONDITIONAL_INCLUDE */
@@ -58149,24 +58156,26 @@ template<> simdjson_inline simdjson_result document::get() & noexcept {
 template<> simdjson_inline simdjson_result<bool> document::get() & noexcept { return get_bool(); }
 template<> simdjson_inline simdjson_result<value> document::get() & noexcept { return get_value(); }
-template<> simdjson_inline simdjson_result<raw_json_string> document::get() && noexcept { return get_raw_json_string(); }
-template<> simdjson_inline simdjson_result<std::string_view> document::get() && noexcept { return get_string(false); }
-template<> simdjson_inline simdjson_result<double> document::get() && noexcept { return std::forward<document>(*this).get_double(); }
-template<> simdjson_inline simdjson_result<uint64_t> document::get() && noexcept { return std::forward<document>(*this).get_uint64(); }
-template<> simdjson_inline simdjson_result<int64_t> document::get() && noexcept { return std::forward<document>(*this).get_int64(); }
-template<> simdjson_inline simdjson_result<bool> document::get() && noexcept { return std::forward<document>(*this).get_bool(); }
-template<> simdjson_inline simdjson_result<value> document::get() && noexcept { return get_value(); }
+template<> simdjson_deprecated simdjson_inline simdjson_result<raw_json_string> document::get() && noexcept { return get_raw_json_string(); }
+template<> simdjson_deprecated simdjson_inline simdjson_result<std::string_view> document::get() && noexcept { return get_string(false); }
+template<> simdjson_deprecated simdjson_inline simdjson_result<double> document::get() && noexcept { return std::forward<document>(*this).get_double(); }
+template<> simdjson_deprecated simdjson_inline simdjson_result<uint64_t> document::get() && noexcept { return std::forward<document>(*this).get_uint64(); }
+template<> simdjson_deprecated simdjson_inline simdjson_result<int64_t> document::get() && noexcept { return std::forward<document>(*this).get_int64(); }
+template<> simdjson_deprecated simdjson_inline simdjson_result<bool> document::get() && noexcept { return std::forward<document>(*this).get_bool(); }
+template<> simdjson_deprecated simdjson_inline simdjson_result<value> document::get() && noexcept { return get_value(); }
 template<typename T> simdjson_inline error_code document::get(T &out) & noexcept { return get<T>().get(out); }
-template<typename T> simdjson_inline error_code document::get(T &out) && noexcept {
+template<typename T> simdjson_deprecated simdjson_inline error_code document::get(T &out) && noexcept {
   return std::forward<document>(*this).get<T>().get(out);
 }
 
 #if SIMDJSON_EXCEPTIONS
 template <class T>
-simdjson_inline document::operator T() noexcept(false) { return get<T>(); }
+simdjson_deprecated simdjson_inline document::operator T() && noexcept(false) { return get<T>(); }
+template <class T>
+simdjson_inline document::operator T() & noexcept(false) { return get<T>(); }
 simdjson_inline document::operator array() & noexcept(false) { return get_array(); }
 simdjson_inline document::operator object() & noexcept(false) { return get_object(); }
 simdjson_inline document::operator uint64_t() noexcept(false) { return get_uint64(); }
@@ -58453,7 +58462,7 @@ simdjson_inline simdjson_result simdjson_result:
   return first.get<T>();
 }
 template<typename T>
-simdjson_inline simdjson_result<T> simdjson_result<document>::get() && noexcept {
+simdjson_deprecated simdjson_inline simdjson_result<T> simdjson_result<document>::get() && noexcept {
   if (error()) { return error(); }
   return std::forward<document>(first).get<T>();
 }
@@ -58469,7 +58478,7 @@ simdjson_inline error_code simdjson_result::get(T &
 }
 template<> simdjson_inline simdjson_result<document> simdjson_result<document>::get() & noexcept = delete;
-template<> simdjson_inline simdjson_result<document> simdjson_result<document>::get() && noexcept {
+template<> simdjson_deprecated simdjson_inline simdjson_result<document> simdjson_result<document>::get() && noexcept {
  if (error()) { return error(); }
  return std::forward<document>(first);
 }
@@ -61438,8 +61447,6 @@ simdjson_inline simdjson_result::simdjson_res
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array_iterator.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_iterator.h" */
-/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_path_to_pointer_conversion.h" */
-/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_path_to_pointer_conversion-inl.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_type.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/object.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/raw_json_string.h" */
@@ -61985,9 +61992,9 @@ simdjson_inline simdjson_result simdjson_result
-  std::unique_ptr<internal::dom_parser_implementation> implementation{};
+  std::unique_ptr<simdjson::internal::dom_parser_implementation> implementation{};
   size_t _capacity{0};
   size_t _max_capacity;
   size_t _max_depth{DEFAULT_MAX_DEPTH};
@@ -66938,7 +66945,7 @@ class document {
               " You may also add support for custom types, see our documentation.");
   }
   /** @overload template<typename T> simdjson_result<T> get() & noexcept */
-  template<typename T> simdjson_inline simdjson_result<T> get() && noexcept {
+  template<typename T> simdjson_deprecated simdjson_inline simdjson_result<T> get() && noexcept {
     // Unless the simdjson library or the user provides an inline implementation, calling this method should
     // immediately fail.
     static_assert(!sizeof(T), "The get method with given type is not implemented by the simdjson library. "
@@ -66961,7 +66968,7 @@ class document {
    */
   template<typename T> simdjson_inline error_code get(T &out) & noexcept;
   /** @overload template<typename T> error_code get(T &out) & noexcept */
-  template<typename T> simdjson_inline error_code get(T &out) && noexcept;
+  template<typename T> simdjson_deprecated simdjson_inline error_code get(T &out) && noexcept;
 
 #if SIMDJSON_EXCEPTIONS
   /**
@@ -66974,7 +66981,10 @@ class document {
    * @returns An instance of type T
    */
   template <class T>
-  explicit simdjson_inline operator T() noexcept(false);
+  explicit simdjson_inline operator T() & noexcept(false);
+  template <class T>
+  explicit simdjson_deprecated simdjson_inline operator T() && noexcept(false);
+
   /**
    * Cast this JSON value to an array.
    *
@@ -67540,7 +67550,7 @@ struct simdjson_result : public icelake::implementa
   simdjson_inline simdjson_result<bool> is_null() noexcept;
   template<typename T> simdjson_inline simdjson_result<T> get() & noexcept;
-  template<typename T> simdjson_inline simdjson_result<T> get() && noexcept;
+  template<typename T> simdjson_deprecated simdjson_inline simdjson_result<T> get() && noexcept;
   template<typename T> simdjson_inline error_code get(T &out) & noexcept;
   template<typename T> simdjson_inline error_code get(T &out) && noexcept;
@@ -67980,7 +67990,7 @@ class document_stream {
   friend class document;
   friend class json_iterator;
   friend struct simdjson_result<ondemand::document_stream>;
-  friend struct internal::simdjson_result_base<ondemand::document_stream>;
+  friend struct simdjson::internal::simdjson_result_base<ondemand::document_stream>;
 }; // document_stream
 
 } // namespace ondemand
@@ -68942,16 +68952,14 @@ simdjson_inline simdjson_result &simdjson_res
 /* amalgamation skipped (editor-only): #ifndef SIMDJSON_CONDITIONAL_INCLUDE */
 /* amalgamation skipped (editor-only): #define SIMDJSON_GENERIC_ONDEMAND_DOCUMENT_INL_H */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/base.h" */
-/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array-inl.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array_iterator.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/document.h" */
-/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_iterator-inl.h" */
-/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_path_to_pointer_conversion.h" */
-/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_path_to_pointer_conversion-inl.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_type.h" */
-/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/object-inl.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/raw_json_string.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/value.h" */
+/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array-inl.h" */
+/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_iterator-inl.h" */
+/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/object-inl.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/value_iterator-inl.h" */
 /* amalgamation skipped (editor-only): #endif // SIMDJSON_CONDITIONAL_INCLUDE */
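[editor's note] The static_assert(!sizeof(T), ...) in the deprecated get<T>() overload above uses a condition that depends on T, so the assertion fires only if the template is actually instantiated for a type nobody implemented; a plain static_assert(false, ...) could fire even without any instantiation (C++23 later relaxed this). A generic illustration of the idiom, unrelated to simdjson's API:

    #include <string>

    template <typename T>
    T parse_as(const std::string &) {
      // Dependent condition: evaluated only on instantiation.
      static_assert(!sizeof(T), "parse_as<T> is not implemented for this type");
      // Unreachable: instantiation already failed at the static_assert above.
    }

    template <>
    int parse_as<int>(const std::string &s) { return std::stoi(s); }

    int main() {
      int v = parse_as<int>("42");         // OK: uses the specialization
      // auto d = parse_as<double>("1.5"); // would fail at compile time
      return v == 42 ? 0 : 1;
    }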
@@ -69106,24 +69114,26 @@ template<> simdjson_inline simdjson_result document::get() & noexcept {
 template<> simdjson_inline simdjson_result<bool> document::get() & noexcept { return get_bool(); }
 template<> simdjson_inline simdjson_result<value> document::get() & noexcept { return get_value(); }
-template<> simdjson_inline simdjson_result<raw_json_string> document::get() && noexcept { return get_raw_json_string(); }
-template<> simdjson_inline simdjson_result<std::string_view> document::get() && noexcept { return get_string(false); }
-template<> simdjson_inline simdjson_result<double> document::get() && noexcept { return std::forward<document>(*this).get_double(); }
-template<> simdjson_inline simdjson_result<uint64_t> document::get() && noexcept { return std::forward<document>(*this).get_uint64(); }
-template<> simdjson_inline simdjson_result<int64_t> document::get() && noexcept { return std::forward<document>(*this).get_int64(); }
-template<> simdjson_inline simdjson_result<bool> document::get() && noexcept { return std::forward<document>(*this).get_bool(); }
-template<> simdjson_inline simdjson_result<value> document::get() && noexcept { return get_value(); }
+template<> simdjson_deprecated simdjson_inline simdjson_result<raw_json_string> document::get() && noexcept { return get_raw_json_string(); }
+template<> simdjson_deprecated simdjson_inline simdjson_result<std::string_view> document::get() && noexcept { return get_string(false); }
+template<> simdjson_deprecated simdjson_inline simdjson_result<double> document::get() && noexcept { return std::forward<document>(*this).get_double(); }
+template<> simdjson_deprecated simdjson_inline simdjson_result<uint64_t> document::get() && noexcept { return std::forward<document>(*this).get_uint64(); }
+template<> simdjson_deprecated simdjson_inline simdjson_result<int64_t> document::get() && noexcept { return std::forward<document>(*this).get_int64(); }
+template<> simdjson_deprecated simdjson_inline simdjson_result<bool> document::get() && noexcept { return std::forward<document>(*this).get_bool(); }
+template<> simdjson_deprecated simdjson_inline simdjson_result<value> document::get() && noexcept { return get_value(); }
 template<typename T> simdjson_inline error_code document::get(T &out) & noexcept { return get<T>().get(out); }
-template<typename T> simdjson_inline error_code document::get(T &out) && noexcept {
+template<typename T> simdjson_deprecated simdjson_inline error_code document::get(T &out) && noexcept {
   return std::forward<document>(*this).get<T>().get(out);
 }
 
 #if SIMDJSON_EXCEPTIONS
 template <class T>
-simdjson_inline document::operator T() noexcept(false) { return get<T>(); }
+simdjson_deprecated simdjson_inline document::operator T() && noexcept(false) { return get<T>(); }
+template <class T>
+simdjson_inline document::operator T() & noexcept(false) { return get<T>(); }
 simdjson_inline document::operator array() & noexcept(false) { return get_array(); }
 simdjson_inline document::operator object() & noexcept(false) { return get_object(); }
 simdjson_inline document::operator uint64_t() noexcept(false) { return get_uint64(); }
@@ -69410,7 +69420,7 @@ simdjson_inline simdjson_result simdjson_result:
   return first.get<T>();
 }
 template<typename T>
-simdjson_inline simdjson_result<T> simdjson_result<document>::get() && noexcept {
+simdjson_deprecated simdjson_inline simdjson_result<T> simdjson_result<document>::get() && noexcept {
   if (error()) { return error(); }
   return std::forward<document>(first).get<T>();
 }
@@ -69426,7 +69436,7 @@ simdjson_inline error_code simdjson_result::get(T &
 }
 template<> simdjson_inline simdjson_result<document> simdjson_result<document>::get() & noexcept = delete;
-template<> simdjson_inline simdjson_result<document> simdjson_result<document>::get() && noexcept {
+template<> simdjson_deprecated simdjson_inline simdjson_result<document> simdjson_result<document>::get() && noexcept {
   if (error()) { return error(); }
   return std::forward<document>(first);
 }
@@ -72395,8 +72405,6 @@ simdjson_inline simdjson_result::simdjson_res
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array_iterator.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_iterator.h" */
-/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_path_to_pointer_conversion.h" */
-/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_path_to_pointer_conversion-inl.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_type.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/object.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/raw_json_string.h" */
@@ -72942,9 +72950,9 @@ simdjson_inline simdjson_result simdjson_result
-  std::unique_ptr<internal::dom_parser_implementation> implementation{};
+  std::unique_ptr<simdjson::internal::dom_parser_implementation> implementation{};
   size_t _capacity{0};
   size_t _max_capacity;
   size_t _max_depth{DEFAULT_MAX_DEPTH};
@@ -78010,7 +78018,7 @@ class document {
               " You may also add support for custom types, see our documentation.");
   }
   /** @overload template<typename T> simdjson_result<T> get() & noexcept */
-  template<typename T> simdjson_inline simdjson_result<T> get() && noexcept {
+  template<typename T> simdjson_deprecated simdjson_inline simdjson_result<T> get() && noexcept {
     // Unless the simdjson library or the user provides an inline implementation, calling this method should
     // immediately fail.
     static_assert(!sizeof(T), "The get method with given type is not implemented by the simdjson library. "
@@ -78033,7 +78041,7 @@ class document {
    */
   template<typename T> simdjson_inline error_code get(T &out) & noexcept;
   /** @overload template<typename T> error_code get(T &out) & noexcept */
-  template<typename T> simdjson_inline error_code get(T &out) && noexcept;
+  template<typename T> simdjson_deprecated simdjson_inline error_code get(T &out) && noexcept;
 
 #if SIMDJSON_EXCEPTIONS
   /**
@@ -78046,7 +78054,10 @@ class document {
    * @returns An instance of type T
    */
   template <class T>
-  explicit simdjson_inline operator T() noexcept(false);
+  explicit simdjson_inline operator T() & noexcept(false);
+  template <class T>
+  explicit simdjson_deprecated simdjson_inline operator T() && noexcept(false);
+
   /**
    * Cast this JSON value to an array.
    *
@@ -78612,7 +78623,7 @@ struct simdjson_result : public ppc64::implementation
   simdjson_inline simdjson_result<bool> is_null() noexcept;
   template<typename T> simdjson_inline simdjson_result<T> get() & noexcept;
-  template<typename T> simdjson_inline simdjson_result<T> get() && noexcept;
+  template<typename T> simdjson_deprecated simdjson_inline simdjson_result<T> get() && noexcept;
   template<typename T> simdjson_inline error_code get(T &out) & noexcept;
   template<typename T> simdjson_inline error_code get(T &out) && noexcept;
@@ -79052,7 +79063,7 @@ class document_stream {
   friend class document;
   friend class json_iterator;
   friend struct simdjson_result<ondemand::document_stream>;
-  friend struct internal::simdjson_result_base<ondemand::document_stream>;
+  friend struct simdjson::internal::simdjson_result_base<ondemand::document_stream>;
 }; // document_stream
 
 } // namespace ondemand
@@ -80014,16 +80025,14 @@ simdjson_inline simdjson_result &simdjson_resul
 /* amalgamation skipped (editor-only): #ifndef SIMDJSON_CONDITIONAL_INCLUDE */
 /* amalgamation skipped (editor-only): #define SIMDJSON_GENERIC_ONDEMAND_DOCUMENT_INL_H */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/base.h" */
-/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array-inl.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array_iterator.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/document.h" */
-/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_iterator-inl.h" */
-/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_path_to_pointer_conversion.h" */
-/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_path_to_pointer_conversion-inl.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_type.h" */
-/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/object-inl.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/raw_json_string.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/value.h" */
+/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array-inl.h" */
+/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_iterator-inl.h" */
+/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/object-inl.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/value_iterator-inl.h" */
 /* amalgamation skipped (editor-only): #endif // SIMDJSON_CONDITIONAL_INCLUDE */
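[editor's note] The operator T() split above gives the conversion operator the same lvalue/rvalue treatment as get<T>(): the & overload remains supported while the && overload is flagged. A self-contained sketch of ref-qualified conversion operators — the type and names are hypothetical, not simdjson's:

    #include <cstdio>

    struct lazy_number {
      long v;
      template <class T>
      explicit operator T() & { return static_cast<T>(v); }  // lvalue: kept
      template <class T>
      [[deprecated("bind the object to a name before converting")]]
      explicit operator T() && { return static_cast<T>(v); } // rvalue: warns
    };

    int main() {
      lazy_number n{42};
      long a = static_cast<long>(n);                 // fine: picks the & overload
      // long b = static_cast<long>(lazy_number{7}); // would hit the && overload
      std::printf("%ld\n", a);
    }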
"simdjson/generic/ondemand/object-inl.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/value_iterator-inl.h" */ /* amalgamation skipped (editor-only): #endif // SIMDJSON_CONDITIONAL_INCLUDE */ @@ -80178,24 +80187,26 @@ template<> simdjson_inline simdjson_result document::get() & noexcept { template<> simdjson_inline simdjson_result document::get() & noexcept { return get_bool(); } template<> simdjson_inline simdjson_result document::get() & noexcept { return get_value(); } -template<> simdjson_inline simdjson_result document::get() && noexcept { return get_raw_json_string(); } -template<> simdjson_inline simdjson_result document::get() && noexcept { return get_string(false); } -template<> simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_double(); } -template<> simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_uint64(); } -template<> simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_int64(); } -template<> simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_bool(); } -template<> simdjson_inline simdjson_result document::get() && noexcept { return get_value(); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return get_raw_json_string(); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return get_string(false); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_double(); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_uint64(); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_int64(); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_bool(); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return get_value(); } template simdjson_inline error_code document::get(T &out) & noexcept { return get().get(out); } -template simdjson_inline error_code document::get(T &out) && noexcept { +template simdjson_deprecated simdjson_inline error_code document::get(T &out) && noexcept { return std::forward(*this).get().get(out); } #if SIMDJSON_EXCEPTIONS template -simdjson_inline document::operator T() noexcept(false) { return get(); } +simdjson_deprecated simdjson_inline document::operator T() && noexcept(false) { return get(); } +template +simdjson_inline document::operator T() & noexcept(false) { return get(); } simdjson_inline document::operator array() & noexcept(false) { return get_array(); } simdjson_inline document::operator object() & noexcept(false) { return get_object(); } simdjson_inline document::operator uint64_t() noexcept(false) { return get_uint64(); } @@ -80482,7 +80493,7 @@ simdjson_inline simdjson_result simdjson_result::g return first.get(); } template -simdjson_inline simdjson_result simdjson_result::get() && noexcept { +simdjson_deprecated simdjson_inline simdjson_result simdjson_result::get() && noexcept { if (error()) { return error(); } return std::forward(first).get(); } @@ -80498,7 +80509,7 @@ simdjson_inline error_code simdjson_result::get(T &ou } template<> simdjson_inline simdjson_result simdjson_result::get() & noexcept = delete; 
-template<> simdjson_inline simdjson_result simdjson_result::get() && noexcept { +template<> simdjson_deprecated simdjson_inline simdjson_result simdjson_result::get() && noexcept { if (error()) { return error(); } return std::forward(first); } @@ -83467,8 +83478,6 @@ simdjson_inline simdjson_result::simdjson_resul /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array_iterator.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_iterator.h" */ -/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_path_to_pointer_conversion.h" */ -/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_path_to_pointer_conversion-inl.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_type.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/object.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/raw_json_string.h" */ @@ -84014,9 +84023,9 @@ simdjson_inline simdjson_result simdjson_result implementation{}; + std::unique_ptr implementation{}; size_t _capacity{0}; size_t _max_capacity; size_t _max_depth{DEFAULT_MAX_DEPTH}; @@ -89405,7 +89414,7 @@ class document { " You may also add support for custom types, see our documentation."); } /** @overload template simdjson_result get() & noexcept */ - template simdjson_inline simdjson_result get() && noexcept { + template simdjson_deprecated simdjson_inline simdjson_result get() && noexcept { // Unless the simdjson library or the user provides an inline implementation, calling this method should // immediately fail. static_assert(!sizeof(T), "The get method with given type is not implemented by the simdjson library. " @@ -89428,7 +89437,7 @@ class document { */ template simdjson_inline error_code get(T &out) & noexcept; /** @overload template error_code get(T &out) & noexcept */ - template simdjson_inline error_code get(T &out) && noexcept; + template simdjson_deprecated simdjson_inline error_code get(T &out) && noexcept; #if SIMDJSON_EXCEPTIONS /** @@ -89441,7 +89450,10 @@ class document { * @returns An instance of type T */ template - explicit simdjson_inline operator T() noexcept(false); + explicit simdjson_inline operator T() & noexcept(false); + template + explicit simdjson_deprecated simdjson_inline operator T() && noexcept(false); + /** * Cast this JSON value to an array. 
@@ -90007,7 +90019,7 @@ struct simdjson_result : public westmere::implemen
   simdjson_inline simdjson_result<bool> is_null() noexcept;
   template<typename T> simdjson_inline simdjson_result<T> get() & noexcept;
-  template<typename T> simdjson_inline simdjson_result<T> get() && noexcept;
+  template<typename T> simdjson_deprecated simdjson_inline simdjson_result<T> get() && noexcept;
   template<typename T> simdjson_inline error_code get(T &out) & noexcept;
   template<typename T> simdjson_inline error_code get(T &out) && noexcept;
@@ -90447,7 +90459,7 @@ class document_stream {
   friend class document;
   friend class json_iterator;
   friend struct simdjson_result<ondemand::document_stream>;
-  friend struct internal::simdjson_result_base<ondemand::document_stream>;
+  friend struct simdjson::internal::simdjson_result_base<ondemand::document_stream>;
 }; // document_stream
 
 } // namespace ondemand
@@ -91409,16 +91421,14 @@ simdjson_inline simdjson_result &simdjson_re
 /* amalgamation skipped (editor-only): #ifndef SIMDJSON_CONDITIONAL_INCLUDE */
 /* amalgamation skipped (editor-only): #define SIMDJSON_GENERIC_ONDEMAND_DOCUMENT_INL_H */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/base.h" */
-/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array-inl.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array_iterator.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/document.h" */
-/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_iterator-inl.h" */
-/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_path_to_pointer_conversion.h" */
-/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_path_to_pointer_conversion-inl.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_type.h" */
-/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/object-inl.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/raw_json_string.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/value.h" */
+/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array-inl.h" */
+/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_iterator-inl.h" */
+/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/object-inl.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/value_iterator-inl.h" */
 /* amalgamation skipped (editor-only): #endif // SIMDJSON_CONDITIONAL_INCLUDE */
@@ -91573,24 +91583,26 @@ template<> simdjson_inline simdjson_result document::get() & noexcept {
 template<> simdjson_inline simdjson_result<bool> document::get() & noexcept { return get_bool(); }
 template<> simdjson_inline simdjson_result<value> document::get() & noexcept { return get_value(); }
-template<> simdjson_inline simdjson_result<raw_json_string> document::get() && noexcept { return get_raw_json_string(); }
-template<> simdjson_inline simdjson_result<std::string_view> document::get() && noexcept { return get_string(false); }
-template<> simdjson_inline simdjson_result<double> document::get() && noexcept { return std::forward<document>(*this).get_double(); }
-template<> simdjson_inline simdjson_result<uint64_t> document::get() && noexcept { return std::forward<document>(*this).get_uint64(); }
-template<> simdjson_inline simdjson_result<int64_t> document::get() && noexcept { return std::forward<document>(*this).get_int64(); }
-template<> simdjson_inline simdjson_result<bool> document::get() && noexcept { return std::forward<document>(*this).get_bool(); }
-template<> simdjson_inline simdjson_result<value> document::get() && noexcept { return get_value(); }
+template<> simdjson_deprecated simdjson_inline simdjson_result<raw_json_string> document::get() && noexcept { return get_raw_json_string(); }
+template<> simdjson_deprecated simdjson_inline simdjson_result<std::string_view> document::get() && noexcept { return get_string(false); }
+template<> simdjson_deprecated simdjson_inline simdjson_result<double> document::get() && noexcept { return std::forward<document>(*this).get_double(); }
+template<> simdjson_deprecated simdjson_inline simdjson_result<uint64_t> document::get() && noexcept { return std::forward<document>(*this).get_uint64(); }
+template<> simdjson_deprecated simdjson_inline simdjson_result<int64_t> document::get() && noexcept { return std::forward<document>(*this).get_int64(); }
+template<> simdjson_deprecated simdjson_inline simdjson_result<bool> document::get() && noexcept { return std::forward<document>(*this).get_bool(); }
+template<> simdjson_deprecated simdjson_inline simdjson_result<value> document::get() && noexcept { return get_value(); }
 template<typename T> simdjson_inline error_code document::get(T &out) & noexcept { return get<T>().get(out); }
-template<typename T> simdjson_inline error_code document::get(T &out) && noexcept {
+template<typename T> simdjson_deprecated simdjson_inline error_code document::get(T &out) && noexcept {
   return std::forward<document>(*this).get<T>().get(out);
 }
 
 #if SIMDJSON_EXCEPTIONS
 template <class T>
-simdjson_inline document::operator T() noexcept(false) { return get<T>(); }
+simdjson_deprecated simdjson_inline document::operator T() && noexcept(false) { return get<T>(); }
+template <class T>
+simdjson_inline document::operator T() & noexcept(false) { return get<T>(); }
 simdjson_inline document::operator array() & noexcept(false) { return get_array(); }
 simdjson_inline document::operator object() & noexcept(false) { return get_object(); }
 simdjson_inline document::operator uint64_t() noexcept(false) { return get_uint64(); }
@@ -91877,7 +91889,7 @@ simdjson_inline simdjson_result simdjson_result
   return first.get<T>();
 }
 template<typename T>
-simdjson_inline simdjson_result<T> simdjson_result<document>::get() && noexcept {
+simdjson_deprecated simdjson_inline simdjson_result<T> simdjson_result<document>::get() && noexcept {
   if (error()) { return error(); }
   return std::forward<document>(first).get<T>();
 }
@@ -91893,7 +91905,7 @@ simdjson_inline error_code simdjson_result::get(T
 }
 template<> simdjson_inline simdjson_result<document> simdjson_result<document>::get() & noexcept = delete;
-template<> simdjson_inline simdjson_result<document> simdjson_result<document>::get() && noexcept {
+template<> simdjson_deprecated simdjson_inline simdjson_result<document> simdjson_result<document>::get() && noexcept {
   if (error()) { return error(); }
   return std::forward<document>(first);
 }
@@ -94862,8 +94874,6 @@ simdjson_inline simdjson_result::simdjson_re
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array_iterator.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_iterator.h" */
-/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_path_to_pointer_conversion.h" */
-/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_path_to_pointer_conversion-inl.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_type.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/object.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/raw_json_string.h" */
@@ -95409,9 +95419,9 @@ simdjson_inline simdjson_result simdjson_result
-  std::unique_ptr<internal::dom_parser_implementation> implementation{};
+  std::unique_ptr<simdjson::internal::dom_parser_implementation> implementation{};
   size_t _capacity{0};
   size_t _max_capacity;
   size_t _max_depth{DEFAULT_MAX_DEPTH};
@@ -100271,7 +100281,7 @@ class document {
               " You may also add support for custom types, see our documentation.");
   }
   /** @overload template<typename T> simdjson_result<T> get() & noexcept */
-  template<typename T> simdjson_inline simdjson_result<T> get() && noexcept {
+  template<typename T> simdjson_deprecated simdjson_inline simdjson_result<T> get() && noexcept {
     // Unless the simdjson library or the user provides an inline implementation, calling this method should
     // immediately fail.
     static_assert(!sizeof(T), "The get method with given type is not implemented by the simdjson library. "
@@ -100294,7 +100304,7 @@ class document {
    */
   template<typename T> simdjson_inline error_code get(T &out) & noexcept;
   /** @overload template<typename T> error_code get(T &out) & noexcept */
-  template<typename T> simdjson_inline error_code get(T &out) && noexcept;
+  template<typename T> simdjson_deprecated simdjson_inline error_code get(T &out) && noexcept;
 
 #if SIMDJSON_EXCEPTIONS
   /**
@@ -100307,7 +100317,10 @@ class document {
    * @returns An instance of type T
    */
   template <class T>
-  explicit simdjson_inline operator T() noexcept(false);
+  explicit simdjson_inline operator T() & noexcept(false);
+  template <class T>
+  explicit simdjson_deprecated simdjson_inline operator T() && noexcept(false);
+
   /**
    * Cast this JSON value to an array.
    *
@@ -100873,7 +100886,7 @@ struct simdjson_result : public lsx::implementation_sim
   simdjson_inline simdjson_result<bool> is_null() noexcept;
   template<typename T> simdjson_inline simdjson_result<T> get() & noexcept;
-  template<typename T> simdjson_inline simdjson_result<T> get() && noexcept;
+  template<typename T> simdjson_deprecated simdjson_inline simdjson_result<T> get() && noexcept;
   template<typename T> simdjson_inline error_code get(T &out) & noexcept;
   template<typename T> simdjson_inline error_code get(T &out) && noexcept;
@@ -101313,7 +101326,7 @@ class document_stream {
   friend class document;
   friend class json_iterator;
   friend struct simdjson_result<ondemand::document_stream>;
-  friend struct internal::simdjson_result_base<ondemand::document_stream>;
+  friend struct simdjson::internal::simdjson_result_base<ondemand::document_stream>;
 }; // document_stream
 
 } // namespace ondemand
@@ -102275,16 +102288,14 @@ simdjson_inline simdjson_result &simdjson_result<
 /* amalgamation skipped (editor-only): #ifndef SIMDJSON_CONDITIONAL_INCLUDE */
 /* amalgamation skipped (editor-only): #define SIMDJSON_GENERIC_ONDEMAND_DOCUMENT_INL_H */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/base.h" */
-/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array-inl.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array_iterator.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/document.h" */
-/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_iterator-inl.h" */
-/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_path_to_pointer_conversion.h" */
-/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_path_to_pointer_conversion-inl.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_type.h" */
-/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/object-inl.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/raw_json_string.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/value.h" */
+/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array-inl.h" */
+/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_iterator-inl.h" */
+/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/object-inl.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/value_iterator-inl.h" */
 /* amalgamation skipped (editor-only): #endif // SIMDJSON_CONDITIONAL_INCLUDE */
@@ -102439,24 +102450,26 @@ template<> simdjson_inline simdjson_result document::get() & noexcept {
 template<> simdjson_inline simdjson_result<bool> document::get() & noexcept { return get_bool(); }
 template<> simdjson_inline simdjson_result<value> document::get() & noexcept { return get_value(); }
-template<> simdjson_inline simdjson_result<raw_json_string> document::get() && noexcept { return get_raw_json_string(); }
-template<> simdjson_inline simdjson_result<std::string_view> document::get() && noexcept { return get_string(false); }
-template<> simdjson_inline simdjson_result<double> document::get() && noexcept { return std::forward<document>(*this).get_double(); }
-template<> simdjson_inline simdjson_result<uint64_t> document::get() && noexcept { return std::forward<document>(*this).get_uint64(); }
-template<> simdjson_inline simdjson_result<int64_t> document::get() && noexcept { return std::forward<document>(*this).get_int64(); }
-template<> simdjson_inline simdjson_result<bool> document::get() && noexcept { return std::forward<document>(*this).get_bool(); }
-template<> simdjson_inline simdjson_result<value> document::get() && noexcept { return get_value(); }
+template<> simdjson_deprecated simdjson_inline simdjson_result<raw_json_string> document::get() && noexcept { return get_raw_json_string(); }
+template<> simdjson_deprecated simdjson_inline simdjson_result<std::string_view> document::get() && noexcept { return get_string(false); }
+template<> simdjson_deprecated simdjson_inline simdjson_result<double> document::get() && noexcept { return std::forward<document>(*this).get_double(); }
+template<> simdjson_deprecated simdjson_inline simdjson_result<uint64_t> document::get() && noexcept { return std::forward<document>(*this).get_uint64(); }
+template<> simdjson_deprecated simdjson_inline simdjson_result<int64_t> document::get() && noexcept { return std::forward<document>(*this).get_int64(); }
+template<> simdjson_deprecated simdjson_inline simdjson_result<bool> document::get() && noexcept { return std::forward<document>(*this).get_bool(); }
+template<> simdjson_deprecated simdjson_inline simdjson_result<value> document::get() && noexcept { return get_value(); }
 template<typename T> simdjson_inline error_code document::get(T &out) & noexcept { return get<T>().get(out); }
-template<typename T> simdjson_inline error_code document::get(T &out) && noexcept {
+template<typename T> simdjson_deprecated simdjson_inline error_code document::get(T &out) && noexcept {
   return std::forward<document>(*this).get<T>().get(out);
 }
 
 #if SIMDJSON_EXCEPTIONS
 template <class T>
-simdjson_inline document::operator T() noexcept(false) { return get<T>(); }
+simdjson_deprecated simdjson_inline document::operator T() && noexcept(false) { return get<T>(); }
+template <class T>
+simdjson_inline document::operator T() & noexcept(false) { return get<T>(); }
 simdjson_inline document::operator array() & noexcept(false) { return get_array(); }
 simdjson_inline document::operator object() & noexcept(false) { return get_object(); }
 simdjson_inline document::operator uint64_t() noexcept(false) { return get_uint64(); }
@@ -102743,7 +102756,7 @@ simdjson_inline simdjson_result simdjson_result::get
   return first.get<T>();
 }
 template<typename T>
-simdjson_inline simdjson_result<T> simdjson_result<document>::get() && noexcept {
+simdjson_deprecated simdjson_inline simdjson_result<T> simdjson_result<document>::get() && noexcept {
   if (error()) { return error(); }
   return std::forward<document>(first).get<T>();
 }
@@ -102759,7 +102772,7 @@ simdjson_inline error_code simdjson_result::get(T &out)
 }
 template<> simdjson_inline simdjson_result<document> simdjson_result<document>::get() & noexcept = delete;
-template<> simdjson_inline simdjson_result<document> simdjson_result<document>::get() && noexcept {
+template<> simdjson_deprecated simdjson_inline simdjson_result<document> simdjson_result<document>::get() && noexcept {
   if (error()) { return error(); }
   return std::forward<document>(first);
 }
@@ -105728,8 +105741,6 @@ simdjson_inline simdjson_result::simdjson_result(
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array_iterator.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_iterator.h" */
-/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_path_to_pointer_conversion.h" */
-/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_path_to_pointer_conversion-inl.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_type.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/object.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/raw_json_string.h" */
@@ -106275,9 +106286,9 @@ simdjson_inline simdjson_result simdjson_result
-  std::unique_ptr<internal::dom_parser_implementation> implementation{};
+  std::unique_ptr<simdjson::internal::dom_parser_implementation> implementation{};
   size_t _capacity{0};
   size_t _max_capacity;
   size_t _max_depth{DEFAULT_MAX_DEPTH};
@@ -111150,7 +111161,7 @@ class document {
               " You may also add support for custom types, see our documentation.");
   }
   /** @overload template<typename T> simdjson_result<T> get() & noexcept */
-  template<typename T> simdjson_inline simdjson_result<T> get() && noexcept {
+  template<typename T> simdjson_deprecated simdjson_inline simdjson_result<T> get() && noexcept {
     // Unless the simdjson library or the user provides an inline implementation, calling this method should
     // immediately fail.
     static_assert(!sizeof(T), "The get method with given type is not implemented by the simdjson library. "
@@ -111173,7 +111184,7 @@ class document {
    */
   template<typename T> simdjson_inline error_code get(T &out) & noexcept;
   /** @overload template<typename T> error_code get(T &out) & noexcept */
-  template<typename T> simdjson_inline error_code get(T &out) && noexcept;
+  template<typename T> simdjson_deprecated simdjson_inline error_code get(T &out) && noexcept;
 
 #if SIMDJSON_EXCEPTIONS
   /**
@@ -111186,7 +111197,10 @@ class document {
    * @returns An instance of type T
    */
   template <class T>
-  explicit simdjson_inline operator T() noexcept(false);
+  explicit simdjson_inline operator T() & noexcept(false);
+  template <class T>
+  explicit simdjson_deprecated simdjson_inline operator T() && noexcept(false);
+
   /**
    * Cast this JSON value to an array.
    *
@@ -111752,7 +111766,7 @@ struct simdjson_result : public lasx::implementation_s
   simdjson_inline simdjson_result<bool> is_null() noexcept;
   template<typename T> simdjson_inline simdjson_result<T> get() & noexcept;
-  template<typename T> simdjson_inline simdjson_result<T> get() && noexcept;
+  template<typename T> simdjson_deprecated simdjson_inline simdjson_result<T> get() && noexcept;
   template<typename T> simdjson_inline error_code get(T &out) & noexcept;
   template<typename T> simdjson_inline error_code get(T &out) && noexcept;
@@ -112192,7 +112206,7 @@ class document_stream {
   friend class document;
   friend class json_iterator;
   friend struct simdjson_result<ondemand::document_stream>;
-  friend struct internal::simdjson_result_base<ondemand::document_stream>;
+  friend struct simdjson::internal::simdjson_result_base<ondemand::document_stream>;
 }; // document_stream
 
 } // namespace ondemand
@@ -113154,16 +113168,14 @@ simdjson_inline simdjson_result &simdjson_result
 /* amalgamation skipped (editor-only): #ifndef SIMDJSON_CONDITIONAL_INCLUDE */
 /* amalgamation skipped (editor-only): #define SIMDJSON_GENERIC_ONDEMAND_DOCUMENT_INL_H */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/base.h" */
-/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array-inl.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array_iterator.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/document.h" */
-/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_iterator-inl.h" */
-/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_path_to_pointer_conversion.h" */
-/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_path_to_pointer_conversion-inl.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_type.h" */
-/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/object-inl.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/raw_json_string.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/value.h" */
+/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array-inl.h" */
+/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_iterator-inl.h" */
+/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/object-inl.h" */
 /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/value_iterator-inl.h" */
 /* amalgamation skipped (editor-only): #endif // SIMDJSON_CONDITIONAL_INCLUDE */
@@ -113318,24 +113330,26 @@ template<> simdjson_inline simdjson_result document::get() & noexcept {
 template<> simdjson_inline simdjson_result<bool> document::get() & noexcept { return get_bool(); }
 template<> simdjson_inline simdjson_result<value> document::get() & noexcept { return get_value(); }
-template<> simdjson_inline simdjson_result<raw_json_string> document::get() && noexcept { return get_raw_json_string(); }
-template<> simdjson_inline simdjson_result<std::string_view> document::get() && noexcept { return get_string(false); }
-template<> simdjson_inline simdjson_result<double> document::get() && noexcept { return std::forward<document>(*this).get_double(); }
-template<> simdjson_inline simdjson_result<uint64_t> document::get() && noexcept { return std::forward<document>(*this).get_uint64(); }
-template<> simdjson_inline simdjson_result<int64_t> document::get() && noexcept { return std::forward<document>(*this).get_int64(); }
-template<> simdjson_inline simdjson_result<bool> document::get() && noexcept { return std::forward<document>(*this).get_bool(); }
-template<> simdjson_inline simdjson_result<value> document::get() && noexcept { return get_value(); }
+template<> simdjson_deprecated simdjson_inline simdjson_result<raw_json_string> document::get() && noexcept { return get_raw_json_string(); }
+template<> simdjson_deprecated simdjson_inline simdjson_result<std::string_view> document::get() && noexcept { return get_string(false); }
+template<> simdjson_deprecated simdjson_inline simdjson_result<double> document::get() && noexcept { return std::forward<document>(*this).get_double(); }
+template<> simdjson_deprecated simdjson_inline simdjson_result<uint64_t> document::get() && noexcept { return std::forward<document>(*this).get_uint64(); }
+template<> simdjson_deprecated simdjson_inline simdjson_result<int64_t> document::get() && noexcept { return std::forward<document>(*this).get_int64(); }
+template<> simdjson_deprecated simdjson_inline simdjson_result<bool> document::get() && noexcept { return std::forward<document>(*this).get_bool(); }
+template<> simdjson_deprecated simdjson_inline simdjson_result<value> document::get() && noexcept { return get_value(); }
 template<typename T> simdjson_inline error_code document::get(T &out) & noexcept { return get<T>().get(out); }
-template<typename T> simdjson_inline error_code document::get(T &out) && noexcept {
+template<typename T> simdjson_deprecated simdjson_inline error_code document::get(T &out) && noexcept {
  return std::forward<document>(*this).get<T>().get(out);
 }
 
 #if SIMDJSON_EXCEPTIONS
 template <class T>
-simdjson_inline document::operator T() noexcept(false) { return get<T>(); }
+simdjson_deprecated simdjson_inline document::operator T() && noexcept(false) { return get<T>(); }
+template <class T>
+simdjson_inline document::operator T() & noexcept(false) { return get<T>(); }
 simdjson_inline document::operator array() & noexcept(false) { return get_array(); }
 simdjson_inline document::operator object() & noexcept(false) { return get_object(); }
 simdjson_inline document::operator uint64_t() noexcept(false) { return get_uint64(); }
return get_value(); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return get_raw_json_string(); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return get_string(false); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_double(); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_uint64(); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_int64(); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return std::forward(*this).get_bool(); } +template<> simdjson_deprecated simdjson_inline simdjson_result document::get() && noexcept { return get_value(); } template simdjson_inline error_code document::get(T &out) & noexcept { return get().get(out); } -template simdjson_inline error_code document::get(T &out) && noexcept { +template simdjson_deprecated simdjson_inline error_code document::get(T &out) && noexcept { return std::forward(*this).get().get(out); } #if SIMDJSON_EXCEPTIONS template -simdjson_inline document::operator T() noexcept(false) { return get(); } +simdjson_deprecated simdjson_inline document::operator T() && noexcept(false) { return get(); } +template +simdjson_inline document::operator T() & noexcept(false) { return get(); } simdjson_inline document::operator array() & noexcept(false) { return get_array(); } simdjson_inline document::operator object() & noexcept(false) { return get_object(); } simdjson_inline document::operator uint64_t() noexcept(false) { return get_uint64(); } @@ -113622,7 +113636,7 @@ simdjson_inline simdjson_result simdjson_result::ge return first.get(); } template -simdjson_inline simdjson_result simdjson_result::get() && noexcept { +simdjson_deprecated simdjson_inline simdjson_result simdjson_result::get() && noexcept { if (error()) { return error(); } return std::forward(first).get(); } @@ -113638,7 +113652,7 @@ simdjson_inline error_code simdjson_result::get(T &out } template<> simdjson_inline simdjson_result simdjson_result::get() & noexcept = delete; -template<> simdjson_inline simdjson_result simdjson_result::get() && noexcept { +template<> simdjson_deprecated simdjson_inline simdjson_result simdjson_result::get() && noexcept { if (error()) { return error(); } return std::forward(first); } @@ -116607,8 +116621,6 @@ simdjson_inline simdjson_result::simdjson_result /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/array_iterator.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_iterator.h" */ -/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_path_to_pointer_conversion.h" */ -/* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_path_to_pointer_conversion-inl.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/json_type.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/object.h" */ /* amalgamation skipped (editor-only): #include "simdjson/generic/ondemand/raw_json_string.h" */ @@ -117154,9 +117166,9 @@ simdjson_inline simdjson_result simdjson_result bool is_ascii_white_space(char_type c) { return c == ' ' || c == '\t' || c == 
'\n' || c == '\r' || c == '\f'; } +template bool is_ascii_white_space_or_padding(char_type c) { + return c == ' ' || c == '\t' || c == '\n' || c == '\r' || c == '\f' || + c == '='; +} + template bool is_eight_byte(char_type c) { if (sizeof(char_type) == 1) { return true; @@ -9491,6 +9496,21 @@ simdutf_warn_unused result base64_to_binary_safe_impl( if (r.error != error_code::INVALID_BASE64_CHARACTER && r.error != error_code::BASE64_EXTRA_BITS) { outlen = r.output_count; + if (last_chunk_handling_options == stop_before_partial) { + if ((r.output_count % 3) != 0) { + bool empty_trail = true; + for (size_t i = r.input_count; i < length; i++) { + if (!scalar::base64::is_ascii_white_space_or_padding(input[i])) { + empty_trail = false; + break; + } + } + if (empty_trail) { + r.input_count = length; + } + } + return {r.error, r.input_count}; + } return {r.error, length}; } return r; @@ -9557,7 +9577,11 @@ simdutf_warn_unused result base64_to_binary_safe_impl( } if (rr.error == error_code::SUCCESS && last_chunk_handling_options == stop_before_partial) { - rr.count = tail_input - input; + if (tail_input > input + input_index) { + rr.count = tail_input - input; + } else if (r.input_count > 0) { + rr.count = r.input_count + rr.count; + } return rr; } rr.count += input_index; @@ -15891,9 +15915,9 @@ compress_decode_base64(char *dst, const char_type *src, size_t srclen, if (src < srcend + equalsigns) { full_result r = scalar::base64::base64_tail_decode( dst, src, srcend - src, equalsigns, options, last_chunk_options); + r.input_count += size_t(src - srcinit); if (r.error == error_code::INVALID_BASE64_CHARACTER || r.error == error_code::BASE64_EXTRA_BITS) { - r.input_count += size_t(src - srcinit); return r; } else { r.output_count += size_t(dst - dstinit); @@ -23471,7 +23495,7 @@ size_t encode_base64(char *dst, const char *src, size_t srclen, } template -static inline uint64_t to_base64_mask(block64 *b, bool *error) { +static inline uint64_t to_base64_mask(block64 *b, uint64_t *error) { __m512i input = b->chunks[0]; const __m512i ascii_space_tbl = _mm512_set_epi8( 0, 0, 13, 12, 0, 10, 9, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 13, 12, 0, 10, @@ -23514,7 +23538,7 @@ static inline uint64_t to_base64_mask(block64 *b, bool *error) { if (mask) { const __mmask64 spaces = _mm512_cmpeq_epi8_mask( _mm512_shuffle_epi8(ascii_space_tbl, input), input); - *error |= (mask != spaces); + *error = (mask ^ spaces); } b->chunks[0] = translated; @@ -23622,16 +23646,13 @@ compress_decode_base64(char *dst, const chartype *src, size_t srclen, block64 b; load_block(&b, src); src += 64; - bool error = false; + uint64_t error = 0; uint64_t badcharmask = to_base64_mask(&b, &error); if (error) { src -= 64; - while (src < srcend && scalar::base64::is_eight_byte(*src) && - to_base64[uint8_t(*src)] <= 64) { - src++; - } - return {error_code::INVALID_BASE64_CHARACTER, size_t(src - srcinit), - size_t(dst - dstinit)}; + size_t error_offset = _tzcnt_u64(error); + return {error_code::INVALID_BASE64_CHARACTER, + size_t(src - srcinit + error_offset), size_t(dst - dstinit)}; } if (badcharmask != 0) { // optimization opportunity: check for simple masks like those made of @@ -23716,9 +23737,9 @@ compress_decode_base64(char *dst, const chartype *src, size_t srclen, if (src < srcend + equalsigns) { full_result r = scalar::base64::base64_tail_decode( dst, src, srcend - src, equalsigns, options, last_chunk_options); + r.input_count += size_t(src - srcinit); if (r.error == error_code::INVALID_BASE64_CHARACTER || r.error == error_code::BASE64_EXTRA_BITS) 
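Note on the scalar simdutf change above: is_ascii_white_space_or_padding() adds '=' to the ignorable set so that, under stop_before_partial, a trailing chunk holding only whitespace and padding counts as fully consumed (input_count is advanced to length) rather than being reported as an unconsumed partial block. The trailing scan, restated as a standalone sketch:

    #include <cstddef>

    // True when input[from..length) contains no base64 payload, only
    // ASCII whitespace or '=' padding (the set used by the helper above).
    bool only_whitespace_or_padding(const char *input, size_t from, size_t length) {
      for (size_t i = from; i < length; i++) {
        char c = input[i];
        if (c != ' ' && c != '\t' && c != '\n' && c != '\r' &&
            c != '\f' && c != '=') {
          return false;
        }
      }
      return true;
    }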
{ - r.input_count += size_t(src - srcinit); return r; } else { r.output_count += size_t(dst - dstinit); @@ -28216,7 +28237,7 @@ struct block64 { }; template -static inline uint32_t to_base64_mask(__m256i *src, bool *error) { +static inline uint32_t to_base64_mask(__m256i *src, uint32_t *error) { const __m256i ascii_space_tbl = _mm256_setr_epi8(0x20, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x9, 0xa, 0x0, 0xc, 0xd, 0x0, 0x0, 0x20, 0x0, 0x0, 0x0, 0x0, 0x0, @@ -28300,17 +28321,19 @@ static inline uint32_t to_base64_mask(__m256i *src, bool *error) { if (mask) { __m256i ascii_space = _mm256_cmpeq_epi8(_mm256_shuffle_epi8(ascii_space_tbl, *src), *src); - *error |= (mask != _mm256_movemask_epi8(ascii_space)); + *error = (mask ^ _mm256_movemask_epi8(ascii_space)); } *src = out; return (uint32_t)mask; } template -static inline uint64_t to_base64_mask(block64 *b, bool *error) { - *error = 0; - uint64_t m0 = to_base64_mask(&b->chunks[0], error); - uint64_t m1 = to_base64_mask(&b->chunks[1], error); +static inline uint64_t to_base64_mask(block64 *b, uint64_t *error) { + uint32_t err0 = 0; + uint32_t err1 = 0; + uint64_t m0 = to_base64_mask(&b->chunks[0], &err0); + uint64_t m1 = to_base64_mask(&b->chunks[1], &err1); + *error = err0 | ((uint64_t)err1 << 32); return m0 | (m1 << 32); } @@ -28442,16 +28465,13 @@ compress_decode_base64(char *dst, const chartype *src, size_t srclen, block64 b; load_block(&b, src); src += 64; - bool error = false; + uint64_t error = 0; uint64_t badcharmask = to_base64_mask(&b, &error); if (error) { src -= 64; - while (src < srcend && scalar::base64::is_eight_byte(*src) && - to_base64[uint8_t(*src)] <= 64) { - src++; - } - return {error_code::INVALID_BASE64_CHARACTER, size_t(src - srcinit), - size_t(dst - dstinit)}; + size_t error_offset = _tzcnt_u64(error); + return {error_code::INVALID_BASE64_CHARACTER, + size_t(src - srcinit + error_offset), size_t(dst - dstinit)}; } if (badcharmask != 0) { // optimization opportunity: check for simple masks like those made of @@ -28552,9 +28572,9 @@ compress_decode_base64(char *dst, const chartype *src, size_t srclen, if (src < srcend + equalsigns) { full_result r = scalar::base64::base64_tail_decode( dst, src, srcend - src, equalsigns, options, last_chunk_options); + r.input_count += size_t(src - srcinit); if (r.error == error_code::INVALID_BASE64_CHARACTER || r.error == error_code::BASE64_EXTRA_BITS) { - r.input_count += size_t(src - srcinit); return r; } else { r.output_count += size_t(dst - dstinit); @@ -37968,7 +37988,7 @@ struct block64 { }; template -static inline uint16_t to_base64_mask(__m128i *src, bool *error) { +static inline uint16_t to_base64_mask(__m128i *src, uint32_t *error) { const __m128i ascii_space_tbl = _mm_setr_epi8(0x20, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x9, 0xa, 0x0, 0xc, 0xd, 0x0, 0x0); @@ -38035,22 +38055,42 @@ static inline uint16_t to_base64_mask(__m128i *src, bool *error) { if (mask) { __m128i ascii_space = _mm_cmpeq_epi8(_mm_shuffle_epi8(ascii_space_tbl, *src), *src); - *error |= (mask != _mm_movemask_epi8(ascii_space)); + *error = (mask ^ _mm_movemask_epi8(ascii_space)); } *src = out; return (uint16_t)mask; } template -static inline uint64_t to_base64_mask(block64 *b, bool *error) { - *error = 0; - uint64_t m0 = to_base64_mask(&b->chunks[0], error); - uint64_t m1 = to_base64_mask(&b->chunks[1], error); - uint64_t m2 = to_base64_mask(&b->chunks[2], error); - uint64_t m3 = to_base64_mask(&b->chunks[3], error); +static inline uint64_t to_base64_mask(block64 *b, uint64_t *error) { + uint32_t err0 = 0; + 
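The AVX-512 and AVX2 hunks above change to_base64_mask() to report a per-byte error bitmask (the XOR of the non-base64 lanes with the recognized-whitespace lanes) instead of a single bool. On failure the decoder no longer rescans the 64-byte block; it locates the first invalid character directly with a trailing-zero count. A scalar model of that lookup:

    #include <cstdint>
    #include <cstddef>

    // error_mask has bit i set when byte i of a 64-byte block is invalid;
    // the first invalid byte is the number of trailing zero bits, which is
    // what _tzcnt_u64 computes in one instruction.
    size_t first_invalid_offset(uint64_t error_mask) {
      size_t offset = 0;
      while (offset < 64 && ((error_mask >> offset) & 1) == 0) offset++;
      return offset;  // 64 when no bit is set
    }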
uint32_t err1 = 0; + uint32_t err2 = 0; + uint32_t err3 = 0; + uint64_t m0 = to_base64_mask(&b->chunks[0], &err0); + uint64_t m1 = to_base64_mask(&b->chunks[1], &err1); + uint64_t m2 = to_base64_mask(&b->chunks[2], &err2); + uint64_t m3 = to_base64_mask(&b->chunks[3], &err3); + *error = (err0) | ((uint64_t)err1 << 16) | ((uint64_t)err2 << 32) | + ((uint64_t)err3 << 48); return m0 | (m1 << 16) | (m2 << 32) | (m3 << 48); } +#if defined(_MSC_VER) && !defined(__clang__) +static inline size_t simdutf_tzcnt_u64(uint64_t num) { + unsigned long ret; + if (num == 0) { + return 64; + } + _BitScanForward64(&ret, num); + return ret; +} +#else // GCC or Clang +static inline size_t simdutf_tzcnt_u64(uint64_t num) { + return num ? __builtin_ctzll(num) : 64; +} +#endif + static inline void copy_block(block64 *b, char *output) { _mm_storeu_si128(reinterpret_cast<__m128i *>(output), b->chunks[0]); _mm_storeu_si128(reinterpret_cast<__m128i *>(output + 16), b->chunks[1]); @@ -38198,16 +38238,13 @@ compress_decode_base64(char *dst, const chartype *src, size_t srclen, block64 b; load_block(&b, src); src += 64; - bool error = false; + uint64_t error = 0; uint64_t badcharmask = to_base64_mask(&b, &error); if (error) { src -= 64; - while (src < srcend && scalar::base64::is_eight_byte(*src) && - to_base64[uint8_t(*src)] <= 64) { - src++; - } - return {error_code::INVALID_BASE64_CHARACTER, size_t(src - srcinit), - size_t(dst - dstinit)}; + size_t error_offset = simdutf_tzcnt_u64(error); + return {error_code::INVALID_BASE64_CHARACTER, + size_t(src - srcinit + error_offset), size_t(dst - dstinit)}; } if (badcharmask != 0) { // optimization opportunity: check for simple masks like those made of @@ -38307,9 +38344,9 @@ compress_decode_base64(char *dst, const chartype *src, size_t srclen, if (src < srcend + equalsigns) { full_result r = scalar::base64::base64_tail_decode( dst, src, srcend - src, equalsigns, options, last_chunk_options); + r.input_count += size_t(src - srcinit); if (r.error == error_code::INVALID_BASE64_CHARACTER || r.error == error_code::BASE64_EXTRA_BITS) { - r.input_count += size_t(src - srcinit); return r; } else { r.output_count += size_t(dst - dstinit); diff --git a/deps/simdutf/simdutf.h b/deps/simdutf/simdutf.h index a219e96ad9cd9f..5f82ca372ccfe3 100644 --- a/deps/simdutf/simdutf.h +++ b/deps/simdutf/simdutf.h @@ -1,4 +1,4 @@ -/* auto-generated on 2024-11-12 20:00:19 -0500. Do not edit! */ +/* auto-generated on 2024-11-21 10:33:28 -0500. Do not edit! */ /* begin file include/simdutf.h */ #ifndef SIMDUTF_H #define SIMDUTF_H @@ -155,11 +155,11 @@ // RISC-V 64-bit #define SIMDUTF_IS_RISCV64 1 - #if __clang_major__ >= 19 - // Does the compiler support target regions for RISC-V - #define SIMDUTF_HAS_RVV_TARGET_REGION 1 - #endif - + // #if __riscv_v_intrinsic >= 1000000 + // #define SIMDUTF_HAS_RVV_INTRINSICS 1 + // #define SIMDUTF_HAS_RVV_TARGET_REGION 1 + // #elif ... + // Check for special compiler versions that implement pre v1.0 intrinsics #if __riscv_v_intrinsic >= 11000 #define SIMDUTF_HAS_RVV_INTRINSICS 1 #endif @@ -670,7 +670,7 @@ SIMDUTF_DISABLE_UNDESIRED_WARNINGS #define SIMDUTF_SIMDUTF_VERSION_H /** The version of simdutf being used (major.minor.revision) */ -#define SIMDUTF_VERSION "5.6.1" +#define SIMDUTF_VERSION "5.6.3" namespace simdutf { enum { @@ -685,7 +685,7 @@ enum { /** * The revision (major.minor.REVISION) of simdutf being used. 
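The SSE path cannot assume the BMI1 _tzcnt_u64 intrinsic, so the hunk above adds a portable simdutf_tzcnt_u64 with an MSVC branch (_BitScanForward64) and a GCC/Clang branch (__builtin_ctzll). Both branches define tzcnt(0) as 64, matching the hardware instruction, whereas a bare __builtin_ctzll(0) is undefined behaviour. A small check of that contract, mirroring the non-MSVC branch (GCC/Clang builtin):

    #include <cassert>
    #include <cstdint>
    #include <cstddef>

    static inline size_t tzcnt_u64(uint64_t num) {
      return num ? __builtin_ctzll(num) : 64;  // 0 maps to 64, not UB
    }

    int main() {
      assert(tzcnt_u64(0) == 64);
      assert(tzcnt_u64(1) == 0);
      assert(tzcnt_u64(0x8000000000000000ULL) == 63);
      return 0;
    }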
*/ - SIMDUTF_VERSION_REVISION = 1 + SIMDUTF_VERSION_REVISION = 3 }; } // namespace simdutf diff --git a/deps/sqlite/sqlite3.c b/deps/sqlite/sqlite3.c index 2886d04ae5263b..099c5482f68df0 100644 --- a/deps/sqlite/sqlite3.c +++ b/deps/sqlite/sqlite3.c @@ -1,6 +1,6 @@ /****************************************************************************** ** This file is an amalgamation of many separate C source files from SQLite -** version 3.47.0. By combining all the individual C code files into this +** version 3.47.1. By combining all the individual C code files into this ** single large file, the entire code can be compiled as a single translation ** unit. This allows many compilers to do optimizations that would not be ** possible if the files were compiled separately. Performance improvements @@ -18,7 +18,7 @@ ** separate file. This file contains only code for the core SQLite library. ** ** The content in this amalgamation comes from Fossil check-in -** 03a9703e27c44437c39363d0baf82db4ebc9. +** b95d11e958643b969c47a8e5857f3793b9e6. */ #define SQLITE_CORE 1 #define SQLITE_AMALGAMATION 1 @@ -462,9 +462,9 @@ extern "C" { ** [sqlite3_libversion_number()], [sqlite3_sourceid()], ** [sqlite_version()] and [sqlite_source_id()]. */ -#define SQLITE_VERSION "3.47.0" -#define SQLITE_VERSION_NUMBER 3047000 -#define SQLITE_SOURCE_ID "2024-10-21 16:30:22 03a9703e27c44437c39363d0baf82db4ebc94538a0f28411c85dda156f82636e" +#define SQLITE_VERSION "3.47.1" +#define SQLITE_VERSION_NUMBER 3047001 +#define SQLITE_SOURCE_ID "2024-11-25 12:07:48 b95d11e958643b969c47a8e5857f3793b9e69700b8f1469371386369a26e577e" /* ** CAPI3REF: Run-Time Library Version Numbers @@ -968,6 +968,13 @@ SQLITE_API int sqlite3_exec( ** filesystem supports doing multiple write operations atomically when those ** write operations are bracketed by [SQLITE_FCNTL_BEGIN_ATOMIC_WRITE] and ** [SQLITE_FCNTL_COMMIT_ATOMIC_WRITE]. +** +** The SQLITE_IOCAP_SUBPAGE_READ property means that it is ok to read +** from the database file in amounts that are not a multiple of the +** page size and that do not begin at a page boundary. Without this +** property, SQLite is careful to only do full-page reads and write +** on aligned pages, with the one exception that it will do a sub-page +** read of the first page to access the database header. */ #define SQLITE_IOCAP_ATOMIC 0x00000001 #define SQLITE_IOCAP_ATOMIC512 0x00000002 @@ -984,6 +991,7 @@ SQLITE_API int sqlite3_exec( #define SQLITE_IOCAP_POWERSAFE_OVERWRITE 0x00001000 #define SQLITE_IOCAP_IMMUTABLE 0x00002000 #define SQLITE_IOCAP_BATCH_ATOMIC 0x00004000 +#define SQLITE_IOCAP_SUBPAGE_READ 0x00008000 /* ** CAPI3REF: File Locking Levels @@ -1130,6 +1138,7 @@ struct sqlite3_file { **
<li> [SQLITE_IOCAP_POWERSAFE_OVERWRITE] **
<li> [SQLITE_IOCAP_IMMUTABLE] **
<li> [SQLITE_IOCAP_BATCH_ATOMIC] +**
  • [SQLITE_IOCAP_SUBPAGE_READ] ** ** ** The SQLITE_IOCAP_ATOMIC property means that all writes of @@ -32298,6 +32307,7 @@ SQLITE_PRIVATE void sqlite3RecordErrorOffsetOfExpr(sqlite3 *db, const Expr *pExp pExpr = pExpr->pLeft; } if( pExpr==0 ) return; + if( ExprHasProperty(pExpr, EP_FromDDL) ) return; db->errByteOffset = pExpr->w.iOfst; } @@ -42591,6 +42601,7 @@ static void setDeviceCharacteristics(unixFile *pFd){ if( pFd->ctrlFlags & UNIXFILE_PSOW ){ pFd->deviceCharacteristics |= SQLITE_IOCAP_POWERSAFE_OVERWRITE; } + pFd->deviceCharacteristics |= SQLITE_IOCAP_SUBPAGE_READ; pFd->sectorSize = SQLITE_DEFAULT_SECTOR_SIZE; } @@ -50391,7 +50402,7 @@ static int winSectorSize(sqlite3_file *id){ */ static int winDeviceCharacteristics(sqlite3_file *id){ winFile *p = (winFile*)id; - return SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN | + return SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN | SQLITE_IOCAP_SUBPAGE_READ | ((p->ctrlFlags & WINFILE_PSOW)?SQLITE_IOCAP_POWERSAFE_OVERWRITE:0); } @@ -51779,7 +51790,7 @@ static int winOpen( int rc = SQLITE_OK; /* Function Return Code */ #if !defined(NDEBUG) || SQLITE_OS_WINCE - int eType = flags&0xFFFFFF00; /* Type of file to open */ + int eType = flags&0x0FFF00; /* Type of file to open */ #endif int isExclusive = (flags & SQLITE_OPEN_EXCLUSIVE); @@ -57999,18 +58010,26 @@ static const unsigned char aJournalMagic[] = { ** Return true if page pgno can be read directly from the database file ** by the b-tree layer. This is the case if: ** -** * the database file is open, -** * there are no dirty pages in the cache, and -** * the desired page is not currently in the wal file. +** (1) the database file is open +** (2) the VFS for the database is able to do unaligned sub-page reads +** (3) there are no dirty pages in the cache, and +** (4) the desired page is not currently in the wal file. */ SQLITE_PRIVATE int sqlite3PagerDirectReadOk(Pager *pPager, Pgno pgno){ - if( pPager->fd->pMethods==0 ) return 0; - if( sqlite3PCacheIsDirty(pPager->pPCache) ) return 0; + assert( pPager!=0 ); + assert( pPager->fd!=0 ); + if( pPager->fd->pMethods==0 ) return 0; /* Case (1) */ + assert( pPager->fd->pMethods->xDeviceCharacteristics!=0 ); + if( (pPager->fd->pMethods->xDeviceCharacteristics(pPager->fd) + & SQLITE_IOCAP_SUBPAGE_READ)==0 ){ + return 0; /* Case (2) */ + } + if( sqlite3PCacheIsDirty(pPager->pPCache) ) return 0; /* Failed (3) */ #ifndef SQLITE_OMIT_WAL if( pPager->pWal ){ u32 iRead = 0; (void)sqlite3WalFindFrame(pPager->pWal, pgno, &iRead); - return iRead==0; + return iRead==0; /* Condition (4) */ } #endif return 1; @@ -158939,6 +158958,7 @@ static Expr *removeUnindexableInClauseTerms( pNew->pLeft->x.pList = pLhs; } pSelect->pEList = pRhs; + pSelect->selId = ++pParse->nSelect; /* Req'd for SubrtnSig validity */ if( pLhs && pLhs->nExpr==1 ){ /* Take care here not to generate a TK_VECTOR containing only a ** single value. Since the parser never creates such a vector, some @@ -189798,10 +189818,15 @@ static int fts3PoslistPhraseMerge( if( *p1==POS_COLUMN ){ p1++; p1 += fts3GetVarint32(p1, &iCol1); + /* iCol1==0 indicates corruption. Column 0 does not have a POS_COLUMN + ** entry, so this is actually end-of-doclist. */ + if( iCol1==0 ) return 0; } if( *p2==POS_COLUMN ){ p2++; p2 += fts3GetVarint32(p2, &iCol2); + /* As above, iCol2==0 indicates corruption. 
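The sqlite3PagerDirectReadOk() rewrite above turns the direct-read gate into four explicit conditions, the new one being case (2): the VFS must advertise SQLITE_IOCAP_SUBPAGE_READ before the b-tree layer may read at offsets that are not page-aligned. A condensed restatement of the gate; the parameters stand in for pager state the real function reads from its Pager object:

    #include "sqlite3.h"

    static int direct_read_ok(sqlite3_file *fd, int hasDirtyPages,
                              unsigned walFrame) {
      if (fd->pMethods == 0) return 0;                 /* (1) file is open */
      if ((fd->pMethods->xDeviceCharacteristics(fd)
           & SQLITE_IOCAP_SUBPAGE_READ) == 0) {
        return 0;                                      /* (2) sub-page reads ok */
      }
      if (hasDirtyPages) return 0;                     /* (3) cache is clean */
      return walFrame == 0;                            /* (4) page not in WAL */
    }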
*/ + if( iCol2==0 ) return 0; } while( 1 ){ @@ -192972,7 +192997,7 @@ static int fts3EvalNearTest(Fts3Expr *pExpr, int *pRc){ nTmp += p->pRight->pPhrase->doclist.nList; } nTmp += p->pPhrase->doclist.nList; - aTmp = sqlite3_malloc64(nTmp*2); + aTmp = sqlite3_malloc64(nTmp*2 + FTS3_VARINT_MAX); if( !aTmp ){ *pRc = SQLITE_NOMEM; res = 0; @@ -194525,10 +194550,11 @@ static int getNextString( Fts3PhraseToken *pToken; p = fts3ReallocOrFree(p, nSpace + ii*sizeof(Fts3PhraseToken)); - if( !p ) goto no_mem; - zTemp = fts3ReallocOrFree(zTemp, nTemp + nByte); - if( !zTemp ) goto no_mem; + if( !zTemp || !p ){ + rc = SQLITE_NOMEM; + goto getnextstring_out; + } assert( nToken==ii ); pToken = &((Fts3Phrase *)(&p[1]))->aToken[ii]; @@ -194543,9 +194569,6 @@ static int getNextString( nToken = ii+1; } } - - pModule->xClose(pCursor); - pCursor = 0; } if( rc==SQLITE_DONE ){ @@ -194553,7 +194576,10 @@ static int getNextString( char *zBuf = 0; p = fts3ReallocOrFree(p, nSpace + nToken*sizeof(Fts3PhraseToken) + nTemp); - if( !p ) goto no_mem; + if( !p ){ + rc = SQLITE_NOMEM; + goto getnextstring_out; + } memset(p, 0, (char *)&(((Fts3Phrase *)&p[1])->aToken[0])-(char *)p); p->eType = FTSQUERY_PHRASE; p->pPhrase = (Fts3Phrase *)&p[1]; @@ -194561,11 +194587,9 @@ static int getNextString( p->pPhrase->nToken = nToken; zBuf = (char *)&p->pPhrase->aToken[nToken]; + assert( nTemp==0 || zTemp ); if( zTemp ){ memcpy(zBuf, zTemp, nTemp); - sqlite3_free(zTemp); - }else{ - assert( nTemp==0 ); } for(jj=0; jjpPhrase->nToken; jj++){ @@ -194575,17 +194599,17 @@ static int getNextString( rc = SQLITE_OK; } - *ppExpr = p; - return rc; -no_mem: - + getnextstring_out: if( pCursor ){ pModule->xClose(pCursor); } sqlite3_free(zTemp); - sqlite3_free(p); - *ppExpr = 0; - return SQLITE_NOMEM; + if( rc!=SQLITE_OK ){ + sqlite3_free(p); + p = 0; + } + *ppExpr = p; + return rc; } /* @@ -232806,7 +232830,27 @@ SQLITE_API int sqlite3session_config(int op, void *pArg){ /************** End of sqlite3session.c **************************************/ /************** Begin file fts5.c ********************************************/ - +/* +** This, the "fts5.c" source file, is a composite file that is itself +** assembled from the following files: +** +** fts5.h +** fts5Int.h +** fts5parse.h <--- Generated from fts5parse.y by Lemon +** fts5parse.c <--- Generated from fts5parse.y by Lemon +** fts5_aux.c +** fts5_buffer.c +** fts5_config.c +** fts5_expr.c +** fts5_hash.c +** fts5_index.c +** fts5_main.c +** fts5_storage.c +** fts5_tokenize.c +** fts5_unicode2.c +** fts5_varint.c +** fts5_vocab.c +*/ #if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS5) #if !defined(NDEBUG) && !defined(SQLITE_DEBUG) @@ -232816,6 +232860,12 @@ SQLITE_API int sqlite3session_config(int op, void *pArg){ # undef NDEBUG #endif +#ifdef HAVE_STDINT_H +/* #include */ +#endif +#ifdef HAVE_INTTYPES_H +/* #include */ +#endif /* ** 2014 May 31 ** @@ -254888,7 +254938,7 @@ static void fts5SourceIdFunc( ){ assert( nArg==0 ); UNUSED_PARAM2(nArg, apUnused); - sqlite3_result_text(pCtx, "fts5: 2024-10-21 16:30:22 03a9703e27c44437c39363d0baf82db4ebc94538a0f28411c85dda156f82636e", -1, SQLITE_TRANSIENT); + sqlite3_result_text(pCtx, "fts5: 2024-11-25 12:07:48 b95d11e958643b969c47a8e5857f3793b9e69700b8f1469371386369a26e577e", -1, SQLITE_TRANSIENT); } /* @@ -260079,7 +260129,7 @@ static int sqlite3Fts5VocabInit(Fts5Global *pGlobal, sqlite3 *db){ } - +/* Here ends the fts5.c composite file. 
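The getNextString() rework above collapses the separate no_mem error path into one getnextstring_out exit that always closes the tokenizer cursor and frees the scratch buffer, and frees the half-built expression only when rc indicates failure, so *ppExpr is always either valid or NULL. The shape of that idiom, reduced to standard-library resources (names are illustrative):

    #include <cstdio>
    #include <cstdlib>

    int build_buffer(char **pp) {
      int rc = 0;                    // 0 = success, 1 = out of memory
      std::FILE *cur = nullptr;      // stands in for the tokenizer cursor
      char *scratch = nullptr;
      char *p = nullptr;
      cur = std::tmpfile();
      if (!cur) { rc = 1; goto out; }
      scratch = (char *)std::malloc(64);
      p = (char *)std::malloc(64);
      if (!scratch || !p) { rc = 1; goto out; }
    out:
      if (cur) std::fclose(cur);               // always release the cursor
      std::free(scratch);                      // always release scratch space
      if (rc) { std::free(p); p = nullptr; }   // keep the result only on success
      *pp = p;                                 // caller never sees garbage
      return rc;
    }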
*/ #endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS5) */ /************** End of fts5.c ************************************************/ diff --git a/deps/sqlite/sqlite3.h b/deps/sqlite/sqlite3.h index eaffd1ec167ad4..dbecc3fe896cf7 100644 --- a/deps/sqlite/sqlite3.h +++ b/deps/sqlite/sqlite3.h @@ -146,9 +146,9 @@ extern "C" { ** [sqlite3_libversion_number()], [sqlite3_sourceid()], ** [sqlite_version()] and [sqlite_source_id()]. */ -#define SQLITE_VERSION "3.47.0" -#define SQLITE_VERSION_NUMBER 3047000 -#define SQLITE_SOURCE_ID "2024-10-21 16:30:22 03a9703e27c44437c39363d0baf82db4ebc94538a0f28411c85dda156f82636e" +#define SQLITE_VERSION "3.47.1" +#define SQLITE_VERSION_NUMBER 3047001 +#define SQLITE_SOURCE_ID "2024-11-25 12:07:48 b95d11e958643b969c47a8e5857f3793b9e69700b8f1469371386369a26e577e" /* ** CAPI3REF: Run-Time Library Version Numbers @@ -652,6 +652,13 @@ SQLITE_API int sqlite3_exec( ** filesystem supports doing multiple write operations atomically when those ** write operations are bracketed by [SQLITE_FCNTL_BEGIN_ATOMIC_WRITE] and ** [SQLITE_FCNTL_COMMIT_ATOMIC_WRITE]. +** +** The SQLITE_IOCAP_SUBPAGE_READ property means that it is ok to read +** from the database file in amounts that are not a multiple of the +** page size and that do not begin at a page boundary. Without this +** property, SQLite is careful to only do full-page reads and write +** on aligned pages, with the one exception that it will do a sub-page +** read of the first page to access the database header. */ #define SQLITE_IOCAP_ATOMIC 0x00000001 #define SQLITE_IOCAP_ATOMIC512 0x00000002 @@ -668,6 +675,7 @@ SQLITE_API int sqlite3_exec( #define SQLITE_IOCAP_POWERSAFE_OVERWRITE 0x00001000 #define SQLITE_IOCAP_IMMUTABLE 0x00002000 #define SQLITE_IOCAP_BATCH_ATOMIC 0x00004000 +#define SQLITE_IOCAP_SUBPAGE_READ 0x00008000 /* ** CAPI3REF: File Locking Levels @@ -814,6 +822,7 @@ struct sqlite3_file { **
<li> [SQLITE_IOCAP_POWERSAFE_OVERWRITE] **
<li> [SQLITE_IOCAP_IMMUTABLE] **
<li> [SQLITE_IOCAP_BATCH_ATOMIC] +**
  • [SQLITE_IOCAP_SUBPAGE_READ] ** ** ** The SQLITE_IOCAP_ATOMIC property means that all writes of diff --git a/deps/sqlite/sqlite3ext.h b/deps/sqlite/sqlite3ext.h new file mode 100644 index 00000000000000..ae0949baf75ae8 --- /dev/null +++ b/deps/sqlite/sqlite3ext.h @@ -0,0 +1,719 @@ +/* +** 2006 June 7 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This header file defines the SQLite interface for use by +** shared libraries that want to be imported as extensions into +** an SQLite instance. Shared libraries that intend to be loaded +** as extensions by SQLite should #include this file instead of +** sqlite3.h. +*/ +#ifndef SQLITE3EXT_H +#define SQLITE3EXT_H +#include "sqlite3.h" + +/* +** The following structure holds pointers to all of the SQLite API +** routines. +** +** WARNING: In order to maintain backwards compatibility, add new +** interfaces to the end of this structure only. If you insert new +** interfaces in the middle of this structure, then older different +** versions of SQLite will not be able to load each other's shared +** libraries! +*/ +struct sqlite3_api_routines { + void * (*aggregate_context)(sqlite3_context*,int nBytes); + int (*aggregate_count)(sqlite3_context*); + int (*bind_blob)(sqlite3_stmt*,int,const void*,int n,void(*)(void*)); + int (*bind_double)(sqlite3_stmt*,int,double); + int (*bind_int)(sqlite3_stmt*,int,int); + int (*bind_int64)(sqlite3_stmt*,int,sqlite_int64); + int (*bind_null)(sqlite3_stmt*,int); + int (*bind_parameter_count)(sqlite3_stmt*); + int (*bind_parameter_index)(sqlite3_stmt*,const char*zName); + const char * (*bind_parameter_name)(sqlite3_stmt*,int); + int (*bind_text)(sqlite3_stmt*,int,const char*,int n,void(*)(void*)); + int (*bind_text16)(sqlite3_stmt*,int,const void*,int,void(*)(void*)); + int (*bind_value)(sqlite3_stmt*,int,const sqlite3_value*); + int (*busy_handler)(sqlite3*,int(*)(void*,int),void*); + int (*busy_timeout)(sqlite3*,int ms); + int (*changes)(sqlite3*); + int (*close)(sqlite3*); + int (*collation_needed)(sqlite3*,void*,void(*)(void*,sqlite3*, + int eTextRep,const char*)); + int (*collation_needed16)(sqlite3*,void*,void(*)(void*,sqlite3*, + int eTextRep,const void*)); + const void * (*column_blob)(sqlite3_stmt*,int iCol); + int (*column_bytes)(sqlite3_stmt*,int iCol); + int (*column_bytes16)(sqlite3_stmt*,int iCol); + int (*column_count)(sqlite3_stmt*pStmt); + const char * (*column_database_name)(sqlite3_stmt*,int); + const void * (*column_database_name16)(sqlite3_stmt*,int); + const char * (*column_decltype)(sqlite3_stmt*,int i); + const void * (*column_decltype16)(sqlite3_stmt*,int); + double (*column_double)(sqlite3_stmt*,int iCol); + int (*column_int)(sqlite3_stmt*,int iCol); + sqlite_int64 (*column_int64)(sqlite3_stmt*,int iCol); + const char * (*column_name)(sqlite3_stmt*,int); + const void * (*column_name16)(sqlite3_stmt*,int); + const char * (*column_origin_name)(sqlite3_stmt*,int); + const void * (*column_origin_name16)(sqlite3_stmt*,int); + const char * (*column_table_name)(sqlite3_stmt*,int); + const void * (*column_table_name16)(sqlite3_stmt*,int); + const unsigned char * (*column_text)(sqlite3_stmt*,int iCol); + const void * (*column_text16)(sqlite3_stmt*,int iCol); + int (*column_type)(sqlite3_stmt*,int 
iCol); + sqlite3_value* (*column_value)(sqlite3_stmt*,int iCol); + void * (*commit_hook)(sqlite3*,int(*)(void*),void*); + int (*complete)(const char*sql); + int (*complete16)(const void*sql); + int (*create_collation)(sqlite3*,const char*,int,void*, + int(*)(void*,int,const void*,int,const void*)); + int (*create_collation16)(sqlite3*,const void*,int,void*, + int(*)(void*,int,const void*,int,const void*)); + int (*create_function)(sqlite3*,const char*,int,int,void*, + void (*xFunc)(sqlite3_context*,int,sqlite3_value**), + void (*xStep)(sqlite3_context*,int,sqlite3_value**), + void (*xFinal)(sqlite3_context*)); + int (*create_function16)(sqlite3*,const void*,int,int,void*, + void (*xFunc)(sqlite3_context*,int,sqlite3_value**), + void (*xStep)(sqlite3_context*,int,sqlite3_value**), + void (*xFinal)(sqlite3_context*)); + int (*create_module)(sqlite3*,const char*,const sqlite3_module*,void*); + int (*data_count)(sqlite3_stmt*pStmt); + sqlite3 * (*db_handle)(sqlite3_stmt*); + int (*declare_vtab)(sqlite3*,const char*); + int (*enable_shared_cache)(int); + int (*errcode)(sqlite3*db); + const char * (*errmsg)(sqlite3*); + const void * (*errmsg16)(sqlite3*); + int (*exec)(sqlite3*,const char*,sqlite3_callback,void*,char**); + int (*expired)(sqlite3_stmt*); + int (*finalize)(sqlite3_stmt*pStmt); + void (*free)(void*); + void (*free_table)(char**result); + int (*get_autocommit)(sqlite3*); + void * (*get_auxdata)(sqlite3_context*,int); + int (*get_table)(sqlite3*,const char*,char***,int*,int*,char**); + int (*global_recover)(void); + void (*interruptx)(sqlite3*); + sqlite_int64 (*last_insert_rowid)(sqlite3*); + const char * (*libversion)(void); + int (*libversion_number)(void); + void *(*malloc)(int); + char * (*mprintf)(const char*,...); + int (*open)(const char*,sqlite3**); + int (*open16)(const void*,sqlite3**); + int (*prepare)(sqlite3*,const char*,int,sqlite3_stmt**,const char**); + int (*prepare16)(sqlite3*,const void*,int,sqlite3_stmt**,const void**); + void * (*profile)(sqlite3*,void(*)(void*,const char*,sqlite_uint64),void*); + void (*progress_handler)(sqlite3*,int,int(*)(void*),void*); + void *(*realloc)(void*,int); + int (*reset)(sqlite3_stmt*pStmt); + void (*result_blob)(sqlite3_context*,const void*,int,void(*)(void*)); + void (*result_double)(sqlite3_context*,double); + void (*result_error)(sqlite3_context*,const char*,int); + void (*result_error16)(sqlite3_context*,const void*,int); + void (*result_int)(sqlite3_context*,int); + void (*result_int64)(sqlite3_context*,sqlite_int64); + void (*result_null)(sqlite3_context*); + void (*result_text)(sqlite3_context*,const char*,int,void(*)(void*)); + void (*result_text16)(sqlite3_context*,const void*,int,void(*)(void*)); + void (*result_text16be)(sqlite3_context*,const void*,int,void(*)(void*)); + void (*result_text16le)(sqlite3_context*,const void*,int,void(*)(void*)); + void (*result_value)(sqlite3_context*,sqlite3_value*); + void * (*rollback_hook)(sqlite3*,void(*)(void*),void*); + int (*set_authorizer)(sqlite3*,int(*)(void*,int,const char*,const char*, + const char*,const char*),void*); + void (*set_auxdata)(sqlite3_context*,int,void*,void (*)(void*)); + char * (*xsnprintf)(int,char*,const char*,...); + int (*step)(sqlite3_stmt*); + int (*table_column_metadata)(sqlite3*,const char*,const char*,const char*, + char const**,char const**,int*,int*,int*); + void (*thread_cleanup)(void); + int (*total_changes)(sqlite3*); + void * (*trace)(sqlite3*,void(*xTrace)(void*,const char*),void*); + int (*transfer_bindings)(sqlite3_stmt*,sqlite3_stmt*); + 
void * (*update_hook)(sqlite3*,void(*)(void*,int ,char const*,char const*, + sqlite_int64),void*); + void * (*user_data)(sqlite3_context*); + const void * (*value_blob)(sqlite3_value*); + int (*value_bytes)(sqlite3_value*); + int (*value_bytes16)(sqlite3_value*); + double (*value_double)(sqlite3_value*); + int (*value_int)(sqlite3_value*); + sqlite_int64 (*value_int64)(sqlite3_value*); + int (*value_numeric_type)(sqlite3_value*); + const unsigned char * (*value_text)(sqlite3_value*); + const void * (*value_text16)(sqlite3_value*); + const void * (*value_text16be)(sqlite3_value*); + const void * (*value_text16le)(sqlite3_value*); + int (*value_type)(sqlite3_value*); + char *(*vmprintf)(const char*,va_list); + /* Added ??? */ + int (*overload_function)(sqlite3*, const char *zFuncName, int nArg); + /* Added by 3.3.13 */ + int (*prepare_v2)(sqlite3*,const char*,int,sqlite3_stmt**,const char**); + int (*prepare16_v2)(sqlite3*,const void*,int,sqlite3_stmt**,const void**); + int (*clear_bindings)(sqlite3_stmt*); + /* Added by 3.4.1 */ + int (*create_module_v2)(sqlite3*,const char*,const sqlite3_module*,void*, + void (*xDestroy)(void *)); + /* Added by 3.5.0 */ + int (*bind_zeroblob)(sqlite3_stmt*,int,int); + int (*blob_bytes)(sqlite3_blob*); + int (*blob_close)(sqlite3_blob*); + int (*blob_open)(sqlite3*,const char*,const char*,const char*,sqlite3_int64, + int,sqlite3_blob**); + int (*blob_read)(sqlite3_blob*,void*,int,int); + int (*blob_write)(sqlite3_blob*,const void*,int,int); + int (*create_collation_v2)(sqlite3*,const char*,int,void*, + int(*)(void*,int,const void*,int,const void*), + void(*)(void*)); + int (*file_control)(sqlite3*,const char*,int,void*); + sqlite3_int64 (*memory_highwater)(int); + sqlite3_int64 (*memory_used)(void); + sqlite3_mutex *(*mutex_alloc)(int); + void (*mutex_enter)(sqlite3_mutex*); + void (*mutex_free)(sqlite3_mutex*); + void (*mutex_leave)(sqlite3_mutex*); + int (*mutex_try)(sqlite3_mutex*); + int (*open_v2)(const char*,sqlite3**,int,const char*); + int (*release_memory)(int); + void (*result_error_nomem)(sqlite3_context*); + void (*result_error_toobig)(sqlite3_context*); + int (*sleep)(int); + void (*soft_heap_limit)(int); + sqlite3_vfs *(*vfs_find)(const char*); + int (*vfs_register)(sqlite3_vfs*,int); + int (*vfs_unregister)(sqlite3_vfs*); + int (*xthreadsafe)(void); + void (*result_zeroblob)(sqlite3_context*,int); + void (*result_error_code)(sqlite3_context*,int); + int (*test_control)(int, ...); + void (*randomness)(int,void*); + sqlite3 *(*context_db_handle)(sqlite3_context*); + int (*extended_result_codes)(sqlite3*,int); + int (*limit)(sqlite3*,int,int); + sqlite3_stmt *(*next_stmt)(sqlite3*,sqlite3_stmt*); + const char *(*sql)(sqlite3_stmt*); + int (*status)(int,int*,int*,int); + int (*backup_finish)(sqlite3_backup*); + sqlite3_backup *(*backup_init)(sqlite3*,const char*,sqlite3*,const char*); + int (*backup_pagecount)(sqlite3_backup*); + int (*backup_remaining)(sqlite3_backup*); + int (*backup_step)(sqlite3_backup*,int); + const char *(*compileoption_get)(int); + int (*compileoption_used)(const char*); + int (*create_function_v2)(sqlite3*,const char*,int,int,void*, + void (*xFunc)(sqlite3_context*,int,sqlite3_value**), + void (*xStep)(sqlite3_context*,int,sqlite3_value**), + void (*xFinal)(sqlite3_context*), + void(*xDestroy)(void*)); + int (*db_config)(sqlite3*,int,...); + sqlite3_mutex *(*db_mutex)(sqlite3*); + int (*db_status)(sqlite3*,int,int*,int*,int); + int (*extended_errcode)(sqlite3*); + void (*log)(int,const char*,...); + sqlite3_int64 
(*soft_heap_limit64)(sqlite3_int64); + const char *(*sourceid)(void); + int (*stmt_status)(sqlite3_stmt*,int,int); + int (*strnicmp)(const char*,const char*,int); + int (*unlock_notify)(sqlite3*,void(*)(void**,int),void*); + int (*wal_autocheckpoint)(sqlite3*,int); + int (*wal_checkpoint)(sqlite3*,const char*); + void *(*wal_hook)(sqlite3*,int(*)(void*,sqlite3*,const char*,int),void*); + int (*blob_reopen)(sqlite3_blob*,sqlite3_int64); + int (*vtab_config)(sqlite3*,int op,...); + int (*vtab_on_conflict)(sqlite3*); + /* Version 3.7.16 and later */ + int (*close_v2)(sqlite3*); + const char *(*db_filename)(sqlite3*,const char*); + int (*db_readonly)(sqlite3*,const char*); + int (*db_release_memory)(sqlite3*); + const char *(*errstr)(int); + int (*stmt_busy)(sqlite3_stmt*); + int (*stmt_readonly)(sqlite3_stmt*); + int (*stricmp)(const char*,const char*); + int (*uri_boolean)(const char*,const char*,int); + sqlite3_int64 (*uri_int64)(const char*,const char*,sqlite3_int64); + const char *(*uri_parameter)(const char*,const char*); + char *(*xvsnprintf)(int,char*,const char*,va_list); + int (*wal_checkpoint_v2)(sqlite3*,const char*,int,int*,int*); + /* Version 3.8.7 and later */ + int (*auto_extension)(void(*)(void)); + int (*bind_blob64)(sqlite3_stmt*,int,const void*,sqlite3_uint64, + void(*)(void*)); + int (*bind_text64)(sqlite3_stmt*,int,const char*,sqlite3_uint64, + void(*)(void*),unsigned char); + int (*cancel_auto_extension)(void(*)(void)); + int (*load_extension)(sqlite3*,const char*,const char*,char**); + void *(*malloc64)(sqlite3_uint64); + sqlite3_uint64 (*msize)(void*); + void *(*realloc64)(void*,sqlite3_uint64); + void (*reset_auto_extension)(void); + void (*result_blob64)(sqlite3_context*,const void*,sqlite3_uint64, + void(*)(void*)); + void (*result_text64)(sqlite3_context*,const char*,sqlite3_uint64, + void(*)(void*), unsigned char); + int (*strglob)(const char*,const char*); + /* Version 3.8.11 and later */ + sqlite3_value *(*value_dup)(const sqlite3_value*); + void (*value_free)(sqlite3_value*); + int (*result_zeroblob64)(sqlite3_context*,sqlite3_uint64); + int (*bind_zeroblob64)(sqlite3_stmt*, int, sqlite3_uint64); + /* Version 3.9.0 and later */ + unsigned int (*value_subtype)(sqlite3_value*); + void (*result_subtype)(sqlite3_context*,unsigned int); + /* Version 3.10.0 and later */ + int (*status64)(int,sqlite3_int64*,sqlite3_int64*,int); + int (*strlike)(const char*,const char*,unsigned int); + int (*db_cacheflush)(sqlite3*); + /* Version 3.12.0 and later */ + int (*system_errno)(sqlite3*); + /* Version 3.14.0 and later */ + int (*trace_v2)(sqlite3*,unsigned,int(*)(unsigned,void*,void*,void*),void*); + char *(*expanded_sql)(sqlite3_stmt*); + /* Version 3.18.0 and later */ + void (*set_last_insert_rowid)(sqlite3*,sqlite3_int64); + /* Version 3.20.0 and later */ + int (*prepare_v3)(sqlite3*,const char*,int,unsigned int, + sqlite3_stmt**,const char**); + int (*prepare16_v3)(sqlite3*,const void*,int,unsigned int, + sqlite3_stmt**,const void**); + int (*bind_pointer)(sqlite3_stmt*,int,void*,const char*,void(*)(void*)); + void (*result_pointer)(sqlite3_context*,void*,const char*,void(*)(void*)); + void *(*value_pointer)(sqlite3_value*,const char*); + int (*vtab_nochange)(sqlite3_context*); + int (*value_nochange)(sqlite3_value*); + const char *(*vtab_collation)(sqlite3_index_info*,int); + /* Version 3.24.0 and later */ + int (*keyword_count)(void); + int (*keyword_name)(int,const char**,int*); + int (*keyword_check)(const char*,int); + sqlite3_str *(*str_new)(sqlite3*); + char 
*(*str_finish)(sqlite3_str*); + void (*str_appendf)(sqlite3_str*, const char *zFormat, ...); + void (*str_vappendf)(sqlite3_str*, const char *zFormat, va_list); + void (*str_append)(sqlite3_str*, const char *zIn, int N); + void (*str_appendall)(sqlite3_str*, const char *zIn); + void (*str_appendchar)(sqlite3_str*, int N, char C); + void (*str_reset)(sqlite3_str*); + int (*str_errcode)(sqlite3_str*); + int (*str_length)(sqlite3_str*); + char *(*str_value)(sqlite3_str*); + /* Version 3.25.0 and later */ + int (*create_window_function)(sqlite3*,const char*,int,int,void*, + void (*xStep)(sqlite3_context*,int,sqlite3_value**), + void (*xFinal)(sqlite3_context*), + void (*xValue)(sqlite3_context*), + void (*xInv)(sqlite3_context*,int,sqlite3_value**), + void(*xDestroy)(void*)); + /* Version 3.26.0 and later */ + const char *(*normalized_sql)(sqlite3_stmt*); + /* Version 3.28.0 and later */ + int (*stmt_isexplain)(sqlite3_stmt*); + int (*value_frombind)(sqlite3_value*); + /* Version 3.30.0 and later */ + int (*drop_modules)(sqlite3*,const char**); + /* Version 3.31.0 and later */ + sqlite3_int64 (*hard_heap_limit64)(sqlite3_int64); + const char *(*uri_key)(const char*,int); + const char *(*filename_database)(const char*); + const char *(*filename_journal)(const char*); + const char *(*filename_wal)(const char*); + /* Version 3.32.0 and later */ + const char *(*create_filename)(const char*,const char*,const char*, + int,const char**); + void (*free_filename)(const char*); + sqlite3_file *(*database_file_object)(const char*); + /* Version 3.34.0 and later */ + int (*txn_state)(sqlite3*,const char*); + /* Version 3.36.1 and later */ + sqlite3_int64 (*changes64)(sqlite3*); + sqlite3_int64 (*total_changes64)(sqlite3*); + /* Version 3.37.0 and later */ + int (*autovacuum_pages)(sqlite3*, + unsigned int(*)(void*,const char*,unsigned int,unsigned int,unsigned int), + void*, void(*)(void*)); + /* Version 3.38.0 and later */ + int (*error_offset)(sqlite3*); + int (*vtab_rhs_value)(sqlite3_index_info*,int,sqlite3_value**); + int (*vtab_distinct)(sqlite3_index_info*); + int (*vtab_in)(sqlite3_index_info*,int,int); + int (*vtab_in_first)(sqlite3_value*,sqlite3_value**); + int (*vtab_in_next)(sqlite3_value*,sqlite3_value**); + /* Version 3.39.0 and later */ + int (*deserialize)(sqlite3*,const char*,unsigned char*, + sqlite3_int64,sqlite3_int64,unsigned); + unsigned char *(*serialize)(sqlite3*,const char *,sqlite3_int64*, + unsigned int); + const char *(*db_name)(sqlite3*,int); + /* Version 3.40.0 and later */ + int (*value_encoding)(sqlite3_value*); + /* Version 3.41.0 and later */ + int (*is_interrupted)(sqlite3*); + /* Version 3.43.0 and later */ + int (*stmt_explain)(sqlite3_stmt*,int); + /* Version 3.44.0 and later */ + void *(*get_clientdata)(sqlite3*,const char*); + int (*set_clientdata)(sqlite3*, const char*, void*, void(*)(void*)); +}; + +/* +** This is the function signature used for all extension entry points. It +** is also defined in the file "loadext.c". +*/ +typedef int (*sqlite3_loadext_entry)( + sqlite3 *db, /* Handle to the database. */ + char **pzErrMsg, /* Used to set error string on failure. */ + const sqlite3_api_routines *pThunk /* Extension API function pointers. */ +); + +/* +** The following macros redefine the API routines so that they are +** redirected through the global sqlite3_api structure. 
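sqlite3ext.h, newly vendored above, is the header loadable extensions compile against: inside an extension, every sqlite3_* call is a macro that dispatches through the sqlite3_api_routines pointer handed to the entry point. A minimal extension skeleton following the documented convention; "myext" is a placeholder, and SQLite derives the expected entry-point name from the shared library's file name:

    #include "sqlite3ext.h"
    SQLITE_EXTENSION_INIT1          /* defines the global sqlite3_api pointer */

    #ifdef _WIN32
    __declspec(dllexport)
    #endif
    int sqlite3_myext_init(sqlite3 *db, char **pzErrMsg,
                           const sqlite3_api_routines *pApi) {
      SQLITE_EXTENSION_INIT2(pApi); /* route sqlite3_* macros through pApi */
      (void)db; (void)pzErrMsg;
      /* register functions, collations, or vtabs here, e.g. with
      ** sqlite3_create_function(), before returning */
      return SQLITE_OK;
    }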
+** +** This header file is also used by the loadext.c source file +** (part of the main SQLite library - not an extension) so that +** it can get access to the sqlite3_api_routines structure +** definition. But the main library does not want to redefine +** the API. So the redefinition macros are only valid if the +** SQLITE_CORE macros is undefined. +*/ +#if !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION) +#define sqlite3_aggregate_context sqlite3_api->aggregate_context +#ifndef SQLITE_OMIT_DEPRECATED +#define sqlite3_aggregate_count sqlite3_api->aggregate_count +#endif +#define sqlite3_bind_blob sqlite3_api->bind_blob +#define sqlite3_bind_double sqlite3_api->bind_double +#define sqlite3_bind_int sqlite3_api->bind_int +#define sqlite3_bind_int64 sqlite3_api->bind_int64 +#define sqlite3_bind_null sqlite3_api->bind_null +#define sqlite3_bind_parameter_count sqlite3_api->bind_parameter_count +#define sqlite3_bind_parameter_index sqlite3_api->bind_parameter_index +#define sqlite3_bind_parameter_name sqlite3_api->bind_parameter_name +#define sqlite3_bind_text sqlite3_api->bind_text +#define sqlite3_bind_text16 sqlite3_api->bind_text16 +#define sqlite3_bind_value sqlite3_api->bind_value +#define sqlite3_busy_handler sqlite3_api->busy_handler +#define sqlite3_busy_timeout sqlite3_api->busy_timeout +#define sqlite3_changes sqlite3_api->changes +#define sqlite3_close sqlite3_api->close +#define sqlite3_collation_needed sqlite3_api->collation_needed +#define sqlite3_collation_needed16 sqlite3_api->collation_needed16 +#define sqlite3_column_blob sqlite3_api->column_blob +#define sqlite3_column_bytes sqlite3_api->column_bytes +#define sqlite3_column_bytes16 sqlite3_api->column_bytes16 +#define sqlite3_column_count sqlite3_api->column_count +#define sqlite3_column_database_name sqlite3_api->column_database_name +#define sqlite3_column_database_name16 sqlite3_api->column_database_name16 +#define sqlite3_column_decltype sqlite3_api->column_decltype +#define sqlite3_column_decltype16 sqlite3_api->column_decltype16 +#define sqlite3_column_double sqlite3_api->column_double +#define sqlite3_column_int sqlite3_api->column_int +#define sqlite3_column_int64 sqlite3_api->column_int64 +#define sqlite3_column_name sqlite3_api->column_name +#define sqlite3_column_name16 sqlite3_api->column_name16 +#define sqlite3_column_origin_name sqlite3_api->column_origin_name +#define sqlite3_column_origin_name16 sqlite3_api->column_origin_name16 +#define sqlite3_column_table_name sqlite3_api->column_table_name +#define sqlite3_column_table_name16 sqlite3_api->column_table_name16 +#define sqlite3_column_text sqlite3_api->column_text +#define sqlite3_column_text16 sqlite3_api->column_text16 +#define sqlite3_column_type sqlite3_api->column_type +#define sqlite3_column_value sqlite3_api->column_value +#define sqlite3_commit_hook sqlite3_api->commit_hook +#define sqlite3_complete sqlite3_api->complete +#define sqlite3_complete16 sqlite3_api->complete16 +#define sqlite3_create_collation sqlite3_api->create_collation +#define sqlite3_create_collation16 sqlite3_api->create_collation16 +#define sqlite3_create_function sqlite3_api->create_function +#define sqlite3_create_function16 sqlite3_api->create_function16 +#define sqlite3_create_module sqlite3_api->create_module +#define sqlite3_create_module_v2 sqlite3_api->create_module_v2 +#define sqlite3_data_count sqlite3_api->data_count +#define sqlite3_db_handle sqlite3_api->db_handle +#define sqlite3_declare_vtab sqlite3_api->declare_vtab +#define 
sqlite3_enable_shared_cache sqlite3_api->enable_shared_cache +#define sqlite3_errcode sqlite3_api->errcode +#define sqlite3_errmsg sqlite3_api->errmsg +#define sqlite3_errmsg16 sqlite3_api->errmsg16 +#define sqlite3_exec sqlite3_api->exec +#ifndef SQLITE_OMIT_DEPRECATED +#define sqlite3_expired sqlite3_api->expired +#endif +#define sqlite3_finalize sqlite3_api->finalize +#define sqlite3_free sqlite3_api->free +#define sqlite3_free_table sqlite3_api->free_table +#define sqlite3_get_autocommit sqlite3_api->get_autocommit +#define sqlite3_get_auxdata sqlite3_api->get_auxdata +#define sqlite3_get_table sqlite3_api->get_table +#ifndef SQLITE_OMIT_DEPRECATED +#define sqlite3_global_recover sqlite3_api->global_recover +#endif +#define sqlite3_interrupt sqlite3_api->interruptx +#define sqlite3_last_insert_rowid sqlite3_api->last_insert_rowid +#define sqlite3_libversion sqlite3_api->libversion +#define sqlite3_libversion_number sqlite3_api->libversion_number +#define sqlite3_malloc sqlite3_api->malloc +#define sqlite3_mprintf sqlite3_api->mprintf +#define sqlite3_open sqlite3_api->open +#define sqlite3_open16 sqlite3_api->open16 +#define sqlite3_prepare sqlite3_api->prepare +#define sqlite3_prepare16 sqlite3_api->prepare16 +#define sqlite3_prepare_v2 sqlite3_api->prepare_v2 +#define sqlite3_prepare16_v2 sqlite3_api->prepare16_v2 +#define sqlite3_profile sqlite3_api->profile +#define sqlite3_progress_handler sqlite3_api->progress_handler +#define sqlite3_realloc sqlite3_api->realloc +#define sqlite3_reset sqlite3_api->reset +#define sqlite3_result_blob sqlite3_api->result_blob +#define sqlite3_result_double sqlite3_api->result_double +#define sqlite3_result_error sqlite3_api->result_error +#define sqlite3_result_error16 sqlite3_api->result_error16 +#define sqlite3_result_int sqlite3_api->result_int +#define sqlite3_result_int64 sqlite3_api->result_int64 +#define sqlite3_result_null sqlite3_api->result_null +#define sqlite3_result_text sqlite3_api->result_text +#define sqlite3_result_text16 sqlite3_api->result_text16 +#define sqlite3_result_text16be sqlite3_api->result_text16be +#define sqlite3_result_text16le sqlite3_api->result_text16le +#define sqlite3_result_value sqlite3_api->result_value +#define sqlite3_rollback_hook sqlite3_api->rollback_hook +#define sqlite3_set_authorizer sqlite3_api->set_authorizer +#define sqlite3_set_auxdata sqlite3_api->set_auxdata +#define sqlite3_snprintf sqlite3_api->xsnprintf +#define sqlite3_step sqlite3_api->step +#define sqlite3_table_column_metadata sqlite3_api->table_column_metadata +#define sqlite3_thread_cleanup sqlite3_api->thread_cleanup +#define sqlite3_total_changes sqlite3_api->total_changes +#define sqlite3_trace sqlite3_api->trace +#ifndef SQLITE_OMIT_DEPRECATED +#define sqlite3_transfer_bindings sqlite3_api->transfer_bindings +#endif +#define sqlite3_update_hook sqlite3_api->update_hook +#define sqlite3_user_data sqlite3_api->user_data +#define sqlite3_value_blob sqlite3_api->value_blob +#define sqlite3_value_bytes sqlite3_api->value_bytes +#define sqlite3_value_bytes16 sqlite3_api->value_bytes16 +#define sqlite3_value_double sqlite3_api->value_double +#define sqlite3_value_int sqlite3_api->value_int +#define sqlite3_value_int64 sqlite3_api->value_int64 +#define sqlite3_value_numeric_type sqlite3_api->value_numeric_type +#define sqlite3_value_text sqlite3_api->value_text +#define sqlite3_value_text16 sqlite3_api->value_text16 +#define sqlite3_value_text16be sqlite3_api->value_text16be +#define sqlite3_value_text16le sqlite3_api->value_text16le 
+#define sqlite3_value_type sqlite3_api->value_type +#define sqlite3_vmprintf sqlite3_api->vmprintf +#define sqlite3_vsnprintf sqlite3_api->xvsnprintf +#define sqlite3_overload_function sqlite3_api->overload_function +#define sqlite3_prepare_v2 sqlite3_api->prepare_v2 +#define sqlite3_prepare16_v2 sqlite3_api->prepare16_v2 +#define sqlite3_clear_bindings sqlite3_api->clear_bindings +#define sqlite3_bind_zeroblob sqlite3_api->bind_zeroblob +#define sqlite3_blob_bytes sqlite3_api->blob_bytes +#define sqlite3_blob_close sqlite3_api->blob_close +#define sqlite3_blob_open sqlite3_api->blob_open +#define sqlite3_blob_read sqlite3_api->blob_read +#define sqlite3_blob_write sqlite3_api->blob_write +#define sqlite3_create_collation_v2 sqlite3_api->create_collation_v2 +#define sqlite3_file_control sqlite3_api->file_control +#define sqlite3_memory_highwater sqlite3_api->memory_highwater +#define sqlite3_memory_used sqlite3_api->memory_used +#define sqlite3_mutex_alloc sqlite3_api->mutex_alloc +#define sqlite3_mutex_enter sqlite3_api->mutex_enter +#define sqlite3_mutex_free sqlite3_api->mutex_free +#define sqlite3_mutex_leave sqlite3_api->mutex_leave +#define sqlite3_mutex_try sqlite3_api->mutex_try +#define sqlite3_open_v2 sqlite3_api->open_v2 +#define sqlite3_release_memory sqlite3_api->release_memory +#define sqlite3_result_error_nomem sqlite3_api->result_error_nomem +#define sqlite3_result_error_toobig sqlite3_api->result_error_toobig +#define sqlite3_sleep sqlite3_api->sleep +#define sqlite3_soft_heap_limit sqlite3_api->soft_heap_limit +#define sqlite3_vfs_find sqlite3_api->vfs_find +#define sqlite3_vfs_register sqlite3_api->vfs_register +#define sqlite3_vfs_unregister sqlite3_api->vfs_unregister +#define sqlite3_threadsafe sqlite3_api->xthreadsafe +#define sqlite3_result_zeroblob sqlite3_api->result_zeroblob +#define sqlite3_result_error_code sqlite3_api->result_error_code +#define sqlite3_test_control sqlite3_api->test_control +#define sqlite3_randomness sqlite3_api->randomness +#define sqlite3_context_db_handle sqlite3_api->context_db_handle +#define sqlite3_extended_result_codes sqlite3_api->extended_result_codes +#define sqlite3_limit sqlite3_api->limit +#define sqlite3_next_stmt sqlite3_api->next_stmt +#define sqlite3_sql sqlite3_api->sql +#define sqlite3_status sqlite3_api->status +#define sqlite3_backup_finish sqlite3_api->backup_finish +#define sqlite3_backup_init sqlite3_api->backup_init +#define sqlite3_backup_pagecount sqlite3_api->backup_pagecount +#define sqlite3_backup_remaining sqlite3_api->backup_remaining +#define sqlite3_backup_step sqlite3_api->backup_step +#define sqlite3_compileoption_get sqlite3_api->compileoption_get +#define sqlite3_compileoption_used sqlite3_api->compileoption_used +#define sqlite3_create_function_v2 sqlite3_api->create_function_v2 +#define sqlite3_db_config sqlite3_api->db_config +#define sqlite3_db_mutex sqlite3_api->db_mutex +#define sqlite3_db_status sqlite3_api->db_status +#define sqlite3_extended_errcode sqlite3_api->extended_errcode +#define sqlite3_log sqlite3_api->log +#define sqlite3_soft_heap_limit64 sqlite3_api->soft_heap_limit64 +#define sqlite3_sourceid sqlite3_api->sourceid +#define sqlite3_stmt_status sqlite3_api->stmt_status +#define sqlite3_strnicmp sqlite3_api->strnicmp +#define sqlite3_unlock_notify sqlite3_api->unlock_notify +#define sqlite3_wal_autocheckpoint sqlite3_api->wal_autocheckpoint +#define sqlite3_wal_checkpoint sqlite3_api->wal_checkpoint +#define sqlite3_wal_hook sqlite3_api->wal_hook +#define sqlite3_blob_reopen 
sqlite3_api->blob_reopen +#define sqlite3_vtab_config sqlite3_api->vtab_config +#define sqlite3_vtab_on_conflict sqlite3_api->vtab_on_conflict +/* Version 3.7.16 and later */ +#define sqlite3_close_v2 sqlite3_api->close_v2 +#define sqlite3_db_filename sqlite3_api->db_filename +#define sqlite3_db_readonly sqlite3_api->db_readonly +#define sqlite3_db_release_memory sqlite3_api->db_release_memory +#define sqlite3_errstr sqlite3_api->errstr +#define sqlite3_stmt_busy sqlite3_api->stmt_busy +#define sqlite3_stmt_readonly sqlite3_api->stmt_readonly +#define sqlite3_stricmp sqlite3_api->stricmp +#define sqlite3_uri_boolean sqlite3_api->uri_boolean +#define sqlite3_uri_int64 sqlite3_api->uri_int64 +#define sqlite3_uri_parameter sqlite3_api->uri_parameter +#define sqlite3_uri_vsnprintf sqlite3_api->xvsnprintf +#define sqlite3_wal_checkpoint_v2 sqlite3_api->wal_checkpoint_v2 +/* Version 3.8.7 and later */ +#define sqlite3_auto_extension sqlite3_api->auto_extension +#define sqlite3_bind_blob64 sqlite3_api->bind_blob64 +#define sqlite3_bind_text64 sqlite3_api->bind_text64 +#define sqlite3_cancel_auto_extension sqlite3_api->cancel_auto_extension +#define sqlite3_load_extension sqlite3_api->load_extension +#define sqlite3_malloc64 sqlite3_api->malloc64 +#define sqlite3_msize sqlite3_api->msize +#define sqlite3_realloc64 sqlite3_api->realloc64 +#define sqlite3_reset_auto_extension sqlite3_api->reset_auto_extension +#define sqlite3_result_blob64 sqlite3_api->result_blob64 +#define sqlite3_result_text64 sqlite3_api->result_text64 +#define sqlite3_strglob sqlite3_api->strglob +/* Version 3.8.11 and later */ +#define sqlite3_value_dup sqlite3_api->value_dup +#define sqlite3_value_free sqlite3_api->value_free +#define sqlite3_result_zeroblob64 sqlite3_api->result_zeroblob64 +#define sqlite3_bind_zeroblob64 sqlite3_api->bind_zeroblob64 +/* Version 3.9.0 and later */ +#define sqlite3_value_subtype sqlite3_api->value_subtype +#define sqlite3_result_subtype sqlite3_api->result_subtype +/* Version 3.10.0 and later */ +#define sqlite3_status64 sqlite3_api->status64 +#define sqlite3_strlike sqlite3_api->strlike +#define sqlite3_db_cacheflush sqlite3_api->db_cacheflush +/* Version 3.12.0 and later */ +#define sqlite3_system_errno sqlite3_api->system_errno +/* Version 3.14.0 and later */ +#define sqlite3_trace_v2 sqlite3_api->trace_v2 +#define sqlite3_expanded_sql sqlite3_api->expanded_sql +/* Version 3.18.0 and later */ +#define sqlite3_set_last_insert_rowid sqlite3_api->set_last_insert_rowid +/* Version 3.20.0 and later */ +#define sqlite3_prepare_v3 sqlite3_api->prepare_v3 +#define sqlite3_prepare16_v3 sqlite3_api->prepare16_v3 +#define sqlite3_bind_pointer sqlite3_api->bind_pointer +#define sqlite3_result_pointer sqlite3_api->result_pointer +#define sqlite3_value_pointer sqlite3_api->value_pointer +/* Version 3.22.0 and later */ +#define sqlite3_vtab_nochange sqlite3_api->vtab_nochange +#define sqlite3_value_nochange sqlite3_api->value_nochange +#define sqlite3_vtab_collation sqlite3_api->vtab_collation +/* Version 3.24.0 and later */ +#define sqlite3_keyword_count sqlite3_api->keyword_count +#define sqlite3_keyword_name sqlite3_api->keyword_name +#define sqlite3_keyword_check sqlite3_api->keyword_check +#define sqlite3_str_new sqlite3_api->str_new +#define sqlite3_str_finish sqlite3_api->str_finish +#define sqlite3_str_appendf sqlite3_api->str_appendf +#define sqlite3_str_vappendf sqlite3_api->str_vappendf +#define sqlite3_str_append sqlite3_api->str_append +#define sqlite3_str_appendall 
sqlite3_api->str_appendall +#define sqlite3_str_appendchar sqlite3_api->str_appendchar +#define sqlite3_str_reset sqlite3_api->str_reset +#define sqlite3_str_errcode sqlite3_api->str_errcode +#define sqlite3_str_length sqlite3_api->str_length +#define sqlite3_str_value sqlite3_api->str_value +/* Version 3.25.0 and later */ +#define sqlite3_create_window_function sqlite3_api->create_window_function +/* Version 3.26.0 and later */ +#define sqlite3_normalized_sql sqlite3_api->normalized_sql +/* Version 3.28.0 and later */ +#define sqlite3_stmt_isexplain sqlite3_api->stmt_isexplain +#define sqlite3_value_frombind sqlite3_api->value_frombind +/* Version 3.30.0 and later */ +#define sqlite3_drop_modules sqlite3_api->drop_modules +/* Version 3.31.0 and later */ +#define sqlite3_hard_heap_limit64 sqlite3_api->hard_heap_limit64 +#define sqlite3_uri_key sqlite3_api->uri_key +#define sqlite3_filename_database sqlite3_api->filename_database +#define sqlite3_filename_journal sqlite3_api->filename_journal +#define sqlite3_filename_wal sqlite3_api->filename_wal +/* Version 3.32.0 and later */ +#define sqlite3_create_filename sqlite3_api->create_filename +#define sqlite3_free_filename sqlite3_api->free_filename +#define sqlite3_database_file_object sqlite3_api->database_file_object +/* Version 3.34.0 and later */ +#define sqlite3_txn_state sqlite3_api->txn_state +/* Version 3.36.1 and later */ +#define sqlite3_changes64 sqlite3_api->changes64 +#define sqlite3_total_changes64 sqlite3_api->total_changes64 +/* Version 3.37.0 and later */ +#define sqlite3_autovacuum_pages sqlite3_api->autovacuum_pages +/* Version 3.38.0 and later */ +#define sqlite3_error_offset sqlite3_api->error_offset +#define sqlite3_vtab_rhs_value sqlite3_api->vtab_rhs_value +#define sqlite3_vtab_distinct sqlite3_api->vtab_distinct +#define sqlite3_vtab_in sqlite3_api->vtab_in +#define sqlite3_vtab_in_first sqlite3_api->vtab_in_first +#define sqlite3_vtab_in_next sqlite3_api->vtab_in_next +/* Version 3.39.0 and later */ +#ifndef SQLITE_OMIT_DESERIALIZE +#define sqlite3_deserialize sqlite3_api->deserialize +#define sqlite3_serialize sqlite3_api->serialize +#endif +#define sqlite3_db_name sqlite3_api->db_name +/* Version 3.40.0 and later */ +#define sqlite3_value_encoding sqlite3_api->value_encoding +/* Version 3.41.0 and later */ +#define sqlite3_is_interrupted sqlite3_api->is_interrupted +/* Version 3.43.0 and later */ +#define sqlite3_stmt_explain sqlite3_api->stmt_explain +/* Version 3.44.0 and later */ +#define sqlite3_get_clientdata sqlite3_api->get_clientdata +#define sqlite3_set_clientdata sqlite3_api->set_clientdata +#endif /* !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION) */ + +#if !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION) + /* This case when the file really is being compiled as a loadable + ** extension */ +# define SQLITE_EXTENSION_INIT1 const sqlite3_api_routines *sqlite3_api=0; +# define SQLITE_EXTENSION_INIT2(v) sqlite3_api=v; +# define SQLITE_EXTENSION_INIT3 \ + extern const sqlite3_api_routines *sqlite3_api; +#else + /* This case when the file is being statically linked into the + ** application */ +# define SQLITE_EXTENSION_INIT1 /*no-op*/ +# define SQLITE_EXTENSION_INIT2(v) (void)v; /* unused parameter */ +# define SQLITE_EXTENSION_INIT3 /*no-op*/ +#endif + +#endif /* SQLITE3EXT_H */ diff --git a/deps/sqlite/unofficial.gni b/deps/sqlite/unofficial.gni index 6eda916ad23e07..b26e1335eac339 100644 --- a/deps/sqlite/unofficial.gni +++ b/deps/sqlite/unofficial.gni @@ -7,6 +7,10 @@ 
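The unofficial.gni hunk that follows adds SQLITE_ENABLE_SESSION plus its prerequisite SQLITE_ENABLE_PREUPDATE_HOOK to the GN build, compiling in SQLite's changeset-tracking session extension. A hedged sketch of what those defines unlock, with error handling trimmed and the table name "t" as a placeholder:

    #include "sqlite3.h"

    /* Record changes made to table "t" and hand back a binary changeset;
    ** the caller frees *ppChangeset with sqlite3_free(). */
    int capture_changeset(sqlite3 *db, int *pnChangeset, void **ppChangeset) {
      sqlite3_session *pSession = 0;
      int rc = sqlite3session_create(db, "main", &pSession);
      if (rc != SQLITE_OK) return rc;
      rc = sqlite3session_attach(pSession, "t");  /* NULL tracks every table */
      if (rc == SQLITE_OK) {
        /* ... run INSERT/UPDATE/DELETE statements against "t" here ... */
        rc = sqlite3session_changeset(pSession, pnChangeset, ppChangeset);
      }
      sqlite3session_delete(pSession);
      return rc;
    }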
template("sqlite_gn_build") { config("sqlite_config") { include_dirs = [ "." ] + defines = [ + "SQLITE_ENABLE_SESSION", + "SQLITE_ENABLE_PREUPDATE_HOOK", + ] } gypi_values = exec_script("../../tools/gypi_to_gn.py", diff --git a/deps/zlib/README.chromium b/deps/zlib/README.chromium index 31b9d55860f9e5..92c5bfd1af200e 100644 --- a/deps/zlib/README.chromium +++ b/deps/zlib/README.chromium @@ -2,6 +2,7 @@ Name: zlib Short Name: zlib URL: http://zlib.net/ Version: 1.3.0.1 +Revision: ac8f12c97d1afd9bafa9c710f827d40a407d3266 CPEPrefix: cpe:/a:zlib:zlib:1.3.0.1 Security Critical: yes Shipped: yes diff --git a/deps/zlib/google/zip_internal.cc b/deps/zlib/google/zip_internal.cc index aa49f4546caa0e..e6b5a4fc3bcb00 100644 --- a/deps/zlib/google/zip_internal.cc +++ b/deps/zlib/google/zip_internal.cc @@ -165,8 +165,7 @@ struct ZipBuffer { // writing compressed data and it returns NULL for this case.) void* OpenZipBuffer(void* opaque, const void* /*filename*/, int mode) { if ((mode & ZLIB_FILEFUNC_MODE_READWRITEFILTER) != ZLIB_FILEFUNC_MODE_READ) { - NOTREACHED_IN_MIGRATION(); - return NULL; + NOTREACHED(); } ZipBuffer* buffer = static_cast(opaque); if (!buffer || !buffer->data || !buffer->length) @@ -196,8 +195,7 @@ uLong WriteZipBuffer(void* /*opaque*/, void* /*stream*/, const void* /*buf*/, uLong /*size*/) { - NOTREACHED_IN_MIGRATION(); - return 0; + NOTREACHED(); } // Returns the offset from the beginning of the data. @@ -228,8 +226,7 @@ long SeekZipBuffer(void* opaque, buffer->offset = std::min(buffer->length, offset); return 0; } - NOTREACHED_IN_MIGRATION(); - return -1; + NOTREACHED(); } // Closes the input offset and deletes all resources used for compressing or diff --git a/deps/zlib/google/zip_reader_unittest.cc b/deps/zlib/google/zip_reader_unittest.cc index 9d1406feff9887..46c0beb1453237 100644 --- a/deps/zlib/google/zip_reader_unittest.cc +++ b/deps/zlib/google/zip_reader_unittest.cc @@ -9,6 +9,7 @@ #include #include +#include #include #include #include @@ -555,10 +556,10 @@ TEST_F(ZipReaderTest, ExtractToFileAsync_RegularFile) { const std::string md5 = base::MD5String(output); EXPECT_EQ(kQuuxExpectedMD5, md5); - int64_t file_size = 0; - ASSERT_TRUE(base::GetFileSize(target_file, &file_size)); + std::optional file_size = base::GetFileSize(target_file); + ASSERT_TRUE(file_size.has_value()); - EXPECT_EQ(file_size, listener.current_progress()); + EXPECT_EQ(file_size.value(), listener.current_progress()); } TEST_F(ZipReaderTest, ExtractToFileAsync_Encrypted_NoPassword) { diff --git a/deps/zlib/google/zip_unittest.cc b/deps/zlib/google/zip_unittest.cc index 58bafb809d6bf9..2bcfa309281fce 100644 --- a/deps/zlib/google/zip_unittest.cc +++ b/deps/zlib/google/zip_unittest.cc @@ -63,8 +63,9 @@ bool CreateFile(const std::string& content, if (!base::CreateTemporaryFile(file_path)) return false; - if (base::WriteFile(*file_path, content.data(), content.size()) == -1) + if (!base::WriteFile(*file_path, content)) { return false; + } *file = base::File( *file_path, base::File::Flags::FLAG_OPEN | base::File::Flags::FLAG_READ); @@ -350,7 +351,7 @@ class ZipTest : public PlatformTest { base::Time now_time; EXPECT_TRUE(base::Time::FromUTCExploded(now_parts, &now_time)); - EXPECT_EQ(1, base::WriteFile(src_file, "1", 1)); + EXPECT_TRUE(base::WriteFile(src_file, "1")); EXPECT_TRUE(base::TouchFile(src_file, base::Time::Now(), test_mtime)); EXPECT_TRUE(zip::Zip(src_dir, zip_file, true)); @@ -748,6 +749,8 @@ TEST_F(ZipTest, UnzipMixedPaths) { "Space→", // #else " ", // + "...", // Disappears on Windows + "....", // 
Disappears on Windows "AUX", // Disappears on Windows "COM1", // Disappears on Windows "COM2", // Disappears on Windows @@ -1113,9 +1116,9 @@ TEST_F(ZipTest, UnzipFilesWithIncorrectSize) { SCOPED_TRACE(base::StringPrintf("Processing %d.txt", i)); base::FilePath file_path = temp_dir.AppendASCII(base::StringPrintf("%d.txt", i)); - int64_t file_size = -1; - EXPECT_TRUE(base::GetFileSize(file_path, &file_size)); - EXPECT_EQ(static_cast(i), file_size); + std::optional file_size = base::GetFileSize(file_path); + EXPECT_TRUE(file_size.has_value()); + EXPECT_EQ(static_cast(i), file_size.value()); } } @@ -1306,10 +1309,10 @@ TEST_F(ZipTest, Compressed) { // Since the source files compress well, the destination ZIP file should be // smaller than the source files. - int64_t dest_file_size; - ASSERT_TRUE(base::GetFileSize(dest_file, &dest_file_size)); - EXPECT_GT(dest_file_size, 300); - EXPECT_LT(dest_file_size, 1000); + std::optional dest_file_size = base::GetFileSize(dest_file); + ASSERT_TRUE(dest_file_size.has_value()); + EXPECT_GT(dest_file_size.value(), 300); + EXPECT_LT(dest_file_size.value(), 1000); } // Tests that a ZIP put inside a ZIP is simply stored instead of being @@ -1338,10 +1341,10 @@ TEST_F(ZipTest, NestedZip) { // Since the dummy source (inner) ZIP file should simply be stored in the // destination (outer) ZIP file, the destination file should be bigger than // the source file, but not much bigger. - int64_t dest_file_size; - ASSERT_TRUE(base::GetFileSize(dest_file, &dest_file_size)); - EXPECT_GT(dest_file_size, src_size + 100); - EXPECT_LT(dest_file_size, src_size + 300); + std::optional dest_file_size = base::GetFileSize(dest_file); + ASSERT_TRUE(dest_file_size.has_value()); + EXPECT_GT(dest_file_size.value(), src_size + 100); + EXPECT_LT(dest_file_size.value(), src_size + 300); } // Tests that there is no 2GB or 4GB limits. Tests that big files can be zipped @@ -1402,10 +1405,10 @@ TEST_F(ZipTest, BigFile) { // Since the dummy source (inner) ZIP file should simply be stored in the // destination (outer) ZIP file, the destination file should be bigger than // the source file, but not much bigger. - int64_t dest_file_size; - ASSERT_TRUE(base::GetFileSize(dest_file, &dest_file_size)); - EXPECT_GT(dest_file_size, src_size + 100); - EXPECT_LT(dest_file_size, src_size + 300); + std::optional dest_file_size = base::GetFileSize(dest_file); + ASSERT_TRUE(dest_file_size.has_value()); + EXPECT_GT(dest_file_size.value(), src_size + 100); + EXPECT_LT(dest_file_size.value(), src_size + 300); LOG(INFO) << "Reading big ZIP " << dest_file; zip::ZipReader reader; diff --git a/deps/zlib/google/zip_writer.cc b/deps/zlib/google/zip_writer.cc index 31161ae86c3b7a..34ab0ad9ef2887 100644 --- a/deps/zlib/google/zip_writer.cc +++ b/deps/zlib/google/zip_writer.cc @@ -5,6 +5,7 @@ #include "third_party/zlib/google/zip_writer.h" #include +#include #include "base/files/file.h" #include "base/logging.h" @@ -193,8 +194,8 @@ bool ZipWriter::AddMixedEntries(Paths paths) { while (!paths.empty()) { // Work with chunks of 50 paths at most. const size_t n = std::min(paths.size(), 50); - const Paths relative_paths = paths.subspan(0, n); - paths = paths.subspan(n, paths.size() - n); + Paths relative_paths; + std::tie(relative_paths, paths) = paths.split_at(n); files.clear(); if (!file_accessor_->Open(relative_paths, &files) || files.size() != n) @@ -233,8 +234,8 @@ bool ZipWriter::AddFileEntries(Paths paths) { while (!paths.empty()) { // Work with chunks of 50 paths at most. 
const size_t n = std::min<size_t>(paths.size(), 50); - const Paths relative_paths = paths.subspan(0, n); - paths = paths.subspan(n, paths.size() - n); + Paths relative_paths; + std::tie(relative_paths, paths) = paths.split_at(n); DCHECK_EQ(relative_paths.size(), n); diff --git a/doc/api/assert.md b/doc/api/assert.md index 70f4ac6c6db5bd..444815e0041432 100644 --- a/doc/api/assert.md +++ b/doc/api/assert.md @@ -2548,6 +2548,96 @@ assert.throws(throwingFirst, /Second$/); Due to the confusing error-prone notation, avoid a string as the second argument. +## `assert.partialDeepStrictEqual(actual, expected[, message])` + + + +> Stability: 1.0 - Early development + +* `actual` {any} +* `expected` {any} +* `message` {string|Error} + +[`assert.partialDeepStrictEqual()`][] asserts the equivalence between the `actual` and `expected` parameters through a +deep comparison, ensuring that all properties in the `expected` parameter are +present in the `actual` parameter with equivalent values, not allowing type coercion. +The main difference with [`assert.deepStrictEqual()`][] is that [`assert.partialDeepStrictEqual()`][] does not require +all properties in the `actual` parameter to be present in the `expected` parameter. +This method should always pass the same test cases as [`assert.deepStrictEqual()`][], behaving as a superset of it. + +```mjs +import assert from 'node:assert'; + +assert.partialDeepStrictEqual({ a: 1, b: 2 }, { a: 1, b: 2 }); +// OK + +assert.partialDeepStrictEqual({ a: { b: { c: 1 } } }, { a: { b: { c: 1 } } }); +// OK + +assert.partialDeepStrictEqual({ a: 1, b: 2, c: 3 }, { a: 1, b: 2 }); +// OK + +assert.partialDeepStrictEqual(new Set(['value1', 'value2']), new Set(['value1', 'value2'])); +// OK + +assert.partialDeepStrictEqual(new Map([['key1', 'value1']]), new Map([['key1', 'value1']])); +// OK + +assert.partialDeepStrictEqual(new Uint8Array([1, 2, 3]), new Uint8Array([1, 2, 3])); +// OK + +assert.partialDeepStrictEqual(/abc/, /abc/); +// OK + +assert.partialDeepStrictEqual([{ a: 5 }, { b: 5 }], [{ a: 5 }]); +// OK + +assert.partialDeepStrictEqual(new Set([{ a: 1 }, { b: 1 }]), new Set([{ a: 1 }])); +// OK + +assert.partialDeepStrictEqual(new Date(0), new Date(0)); +// OK + +assert.partialDeepStrictEqual({ a: 1 }, { a: 1, b: 2 }); +// AssertionError + +assert.partialDeepStrictEqual({ a: 1, b: '2' }, { a: 1, b: 2 }); +// AssertionError + +assert.partialDeepStrictEqual({ a: { b: 2 } }, { a: { b: '2' } }); +// AssertionError +``` + +```cjs +const assert = require('node:assert'); + +assert.partialDeepStrictEqual({ a: 1, b: 2 }, { a: 1, b: 2 }); +// OK + +assert.partialDeepStrictEqual({ a: { b: { c: 1 } } }, { a: { b: { c: 1 } } }); +// OK + +assert.partialDeepStrictEqual({ a: 1, b: 2, c: 3 }, { a: 1, b: 2 }); +// OK + +assert.partialDeepStrictEqual([{ a: 5 }, { b: 5 }], [{ a: 5 }]); +// OK + +assert.partialDeepStrictEqual(new Set([{ a: 1 }, { b: 1 }]), new Set([{ a: 1 }])); +// OK + +assert.partialDeepStrictEqual({ a: 1 }, { a: 1, b: 2 }); +// AssertionError + +assert.partialDeepStrictEqual({ a: 1, b: '2' }, { a: 1, b: 2 }); +// AssertionError + +assert.partialDeepStrictEqual({ a: { b: 2 } }, { a: { b: '2' } }); +// AssertionError +``` + [Object wrappers]: https://developer.mozilla.org/en-US/docs/Glossary/Primitive#Primitive_wrapper_objects_in_JavaScript [Object.prototype.toString()]: https://tc39.github.io/ecma262/#sec-object.prototype.tostring [`!=` operator]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Operators/Inequality @@ -2576,6 +2666,7 @@ 
[`assert.notEqual()`]: #assertnotequalactual-expected-message [`assert.notStrictEqual()`]: #assertnotstrictequalactual-expected-message [`assert.ok()`]: #assertokvalue-message +[`assert.partialDeepStrictEqual()`]: #assertpartialdeepstrictequalactual-expected-message [`assert.strictEqual()`]: #assertstrictequalactual-expected-message [`assert.throws()`]: #assertthrowsfn-error-message [`getColorDepth()`]: tty.md#writestreamgetcolordepthenv diff --git a/doc/api/async_context.md b/doc/api/async_context.md index d1be2fb3807e17..6b018bc9d4e292 100644 --- a/doc/api/async_context.md +++ b/doc/api/async_context.md @@ -626,7 +626,6 @@ a Worker pool around it could use the following structure: ```mjs import { AsyncResource } from 'node:async_hooks'; import { EventEmitter } from 'node:events'; -import path from 'node:path'; import { Worker } from 'node:worker_threads'; const kTaskInfo = Symbol('kTaskInfo'); diff --git a/doc/api/child_process.md b/doc/api/child_process.md index 0c0c80b35dc40c..7a9345416e0545 100644 --- a/doc/api/child_process.md +++ b/doc/api/child_process.md @@ -1700,8 +1700,8 @@ may not actually terminate the process. See kill(2) for reference. On Windows, where POSIX signals do not exist, the `signal` argument will be -ignored, and the process will be killed forcefully and abruptly (similar to -`'SIGKILL'`). +ignored except for `'SIGKILL'`, `'SIGTERM'`, `'SIGINT'`, and `'SIGQUIT'`, and the +process will always be killed forcefully and abruptly (similar to `'SIGKILL'`). See [Signal Events][] for more details. On Linux, child processes of child processes will not be terminated diff --git a/doc/api/cli.md b/doc/api/cli.md index 3440fbc160b25b..8a4f376360008f 100644 --- a/doc/api/cli.md +++ b/doc/api/cli.md @@ -25,16 +25,14 @@ For more info about `node inspect`, see the [debugger][] documentation. The program entry point is a specifier-like string. If the string is not an absolute path, it's resolved as a relative path from the current working -directory. That path is then resolved by [CommonJS][] module loader, or by the -[ES module loader][Modules loaders] if [`--experimental-default-type=module`][] -is passed. If no corresponding file is found, an error is thrown. +directory. That path is then resolved by the [CommonJS][] module loader. If no +corresponding file is found, an error is thrown. If a file is found, its path will be passed to the [ES module loader][Modules loaders] under any of the following conditions: * The program was started with a command-line flag that forces the entry - point to be loaded with ECMAScript module loader, such as `--import` or - [`--experimental-default-type=module`][]. + point to be loaded with the ECMAScript module loader, such as `--import`. * The file has an `.mjs` extension. * The file does not have a `.cjs` extension, and the nearest parent `package.json` file contains a top-level [`"type"`][] field with a value of @@ -47,9 +45,8 @@ Otherwise, the file is loaded using the CommonJS module loader. See When the [ES module loader][Modules loaders] loads the program entry point, the `node` command will accept as input only files with `.js`, -`.mjs`, or `.cjs` extensions; with `.wasm` extensions when -[`--experimental-wasm-modules`][] is enabled; and with no extension when -[`--experimental-default-type=module`][] is passed. +`.mjs`, or `.cjs` extensions; and with `.wasm` extensions when +[`--experimental-wasm-modules`][] is enabled. 
## Options @@ -208,23 +205,18 @@ The valid arguments for the `--allow-fs-read` flag are: * Multiple paths can be allowed using multiple `--allow-fs-read` flags. Example `--allow-fs-read=/folder1/ --allow-fs-read=/folder1/` -Paths delimited by comma (`,`) are no longer allowed. -When passing a single flag with a comma a warning will be displayed. - Examples can be found in the [File System Permissions][] documentation. -Relative paths are NOT yet supported by the CLI flag. - The initializer module also needs to be allowed. Consider the following example: ```console -$ node --experimental-permission t.js +$ node --experimental-permission index.js Error: Access to this API has been restricted at node:internal/main/run_main_module:23:47 { code: 'ERR_ACCESS_DENIED', permission: 'FileSystemRead', - resource: '/Users/rafaelgss/repos/os/node/t.js' + resource: '/Users/rafaelgss/repos/os/node/index.js' } ``` @@ -260,8 +252,6 @@ When passing a single flag with a comma a warning will be displayed. Examples can be found in the [File System Permissions][] documentation. -Relative paths are NOT supported through the CLI flag. - ### `--allow-wasi` + +Disable the `Object.prototype.__proto__` property. If `mode` is `delete`, the +property is removed entirely. If `mode` is `throw`, accesses to the +property throw an exception with the code `ERR_PROTO_ACCESS`. + ### `--disable-warning=code-or-type` > Stability: 1.1 - Active development @@ -657,18 +659,6 @@ users can at least run WebAssembly (with less optimal performance) when the virtual memory address space available to their Node.js process is lower than what the V8 WebAssembly memory cage needs. -### `--disable-proto=mode` - - - -Disable the `Object.prototype.__proto__` property. If `mode` is `delete`, the -property is removed entirely. If `mode` is `throw`, accesses to the -property throw an exception with the code `ERR_PROTO_ACCESS`. - ### `--disallow-code-generation-from-strings` - -> Stability: 1 - Experimental. This flag is inherited from V8 and is subject to -> change upstream. - -This flag will expose the gc extension from V8. - -```js -if (globalThis.gc) { - globalThis.gc(); -} -``` - ### `--dns-result-order=order` + +Behavior is the same as [`--env-file`][], but an error is not thrown if the file +does not exist. + ### `--env-file=config` > Stability: 1.1 - Active development @@ -870,15 +850,6 @@ export USERNAME="nodejs" # will result in `nodejs` as the value. If you want to load environment variables from a file that may not exist, you can use the [`--env-file-if-exists`][] flag instead. -### `--env-file-if-exists=config` - - - -Behavior is the same as [`--env-file`][], but an error is not thrown if the file -does not exist. - ### `-e`, `--eval "script"` - -> Stability: 1.0 - Early development - -Define which module system, `module` or `commonjs`, to use for the following: - -* String input provided via `--eval` or STDIN, if `--input-type` is unspecified. - -* Files ending in `.js` or with no extension, if there is no `package.json` file - present in the same folder or any parent folder. - -* Files ending in `.js` or with no extension, if the nearest parent - `package.json` field lacks a `"type"` field; unless the `package.json` folder - or any parent folder is inside a `node_modules` folder. 
- -In other words, `--experimental-default-type=module` flips all the places where -Node.js currently defaults to CommonJS to instead default to ECMAScript modules, -with the exception of folders and subfolders below `node_modules`, for backward -compatibility. - -Under `--experimental-default-type=module` and `--experimental-wasm-modules`, -files with no extension will be treated as WebAssembly if they begin with the -WebAssembly magic number (`\0asm`); otherwise they will be treated as ES module -JavaScript. - -### `--experimental-transform-types` - - - -> Stability: 1.1 - Active development - -Enables the transformation of TypeScript-only syntax into JavaScript code. -Implies `--experimental-strip-types` and `--enable-source-maps`. - ### `--experimental-eventsource` + +If the ES module being `require()`'d contains top-level `await`, this flag +allows Node.js to evaluate the module, try to locate the +top-level awaits, and print their location to help users find them. + ### `--experimental-require-module` - -Enable the experimental [`node:sqlite`][] module. - ### `--experimental-strip-types` -> Stability: 1.0 - Early development +> Stability: 1.1 - Active development -Enable [snapshot testing][] in the test runner. +Enables the transformation of TypeScript-only syntax into JavaScript code. +Implies `--experimental-strip-types` and `--enable-source-maps`. ### `--experimental-vm-modules` @@ -1198,6 +1131,25 @@ added: v22.4.0 Enable experimental [`Web Storage`][] support. +### `--expose-gc` + + + +> Stability: 1 - Experimental. This flag is inherited from V8 and is subject to +> change upstream. + +This flag will expose the gc extension from V8. + +```js +if (globalThis.gc) { + globalThis.gc(); +} +``` + ### `--force-context-aware` - -Activate inspector on `host:port`. Default is `127.0.0.1:9229`. If port `0` is -specified, a random available port will be used. - -V8 inspector integration allows tools such as Chrome DevTools and IDEs to debug -and profile Node.js instances. The tools attach to Node.js instances via a -tcp port and communicate using the [Chrome DevTools Protocol][]. -See [V8 Inspector integration for Node.js][] for further explanation on Node.js debugger. - @@ -1558,6 +1496,20 @@ a random available port will be used. See [V8 Inspector integration for Node.js][] for further explanation on Node.js debugger. +### `--inspect[=[host:]port]` + + + +Activate inspector on `host:port`. Default is `127.0.0.1:9229`. If port `0` is +specified, a random available port will be used. + +V8 inspector integration allows tools such as Chrome DevTools and IDEs to debug +and profile Node.js instances. The tools attach to Node.js instances via a +tcp port and communicate using the [Chrome DevTools Protocol][]. +See [V8 Inspector integration for Node.js][] for further explanation on Node.js debugger. + ### `-i`, `--interactive` + +Disable the experimental [`node:sqlite`][] module. + ### `--no-experimental-websocket` - -If the ES module being `require()`'d contains top-level `await`, this flag -allows Node.js to evaluate the module, try to locate the -top-level awaits, and print their location to help users find them. - ### `--prof` + +When using `--secure-heap`, the `--secure-heap-min` flag specifies the +minimum allocation from the secure heap. The minimum value is `2`. +The maximum value is the lesser of `--secure-heap` or `2147483647`. +The value given must be a power of two. 
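For context, the configured secure heap can also be observed from within a running process via `crypto.secureHeapUsed()` (a minimal sketch; the flag values shown are illustrative):

```js
// Run with: node --secure-heap=65536 --secure-heap-min=4 app.js
const { secureHeapUsed } = require('node:crypto');

// Reports the configured total and minimum allocation sizes, along with
// the number of bytes currently allocated and the utilization ratio.
console.log(secureHeapUsed());
// e.g. { total: 65536, min: 4, used: 0, utilization: 0 }
```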
+ ### `--secure-heap=n` - -When using `--secure-heap`, the `--secure-heap-min` flag specifies the -minimum allocation from the secure heap. The minimum value is `2`. -The maximum value is the lesser of `--secure-heap` or `2147483647`. -The value given must be a power of two. - ### `--snapshot-blob=path` -> Stability: 1.0 - Early development - Regenerates the snapshot files used by the test runner for [snapshot testing][]. -Node.js must be started with the `--experimental-test-snapshots` flag in order -to use this functionality. ### `--throw-deprecation` @@ -2585,6 +2537,45 @@ added: v0.8.0 Print stack traces for deprecations. +### `--trace-env` + + + +Print information about any access to environment variables made in the current Node.js +instance to stderr, including: + +* The environment variable reads that Node.js does internally. +* Writes in the form of `process.env.KEY = "SOME VALUE"`. +* Reads in the form of `process.env.KEY`. +* Definitions in the form of `Object.defineProperty(process.env, 'KEY', {...})`. +* Queries in the form of `Object.hasOwn(process.env, 'KEY')`, + `process.env.hasOwnProperty('KEY')` or `'KEY' in process.env`. +* Deletions in the form of `delete process.env.KEY`. +* Enumerations in the form of `...process.env` or `Object.keys(process.env)`. + +Only the names of the environment variables being accessed are printed. The values are not printed. + +To print the stack trace of the access, use `--trace-env-js-stack` and/or +`--trace-env-native-stack`. + +### `--trace-env-js-stack` + + + +In addition to what `--trace-env` does, this prints the JavaScript stack trace of the access. + +### `--trace-env-native-stack` + + + +In addition to what `--trace-env` does, this prints the native stack trace of the access. + ### `--trace-event-categories` @@ -3543,6 +3526,16 @@ for MiB in 16 32 64 128; do done ``` +### `--perf-basic-prof` + +### `--perf-basic-prof-only-functions` + +### `--perf-prof` + +### `--perf-prof-unwinding-info` + +### `--prof` + ### `--security-revert` ### `--stack-trace-limit=limit` @@ -3592,7 +3585,6 @@ node --stack-trace-limit=12 -p -e "Error.stackTraceLimit" # prints 12 [`--diagnostic-dir`]: #--diagnostic-dirdirectory [`--env-file-if-exists`]: #--env-file-if-existsconfig [`--env-file`]: #--env-fileconfig -[`--experimental-default-type=module`]: #--experimental-default-typetype [`--experimental-sea-config`]: single-executable-applications.md#generating-single-executable-preparation-blobs [`--experimental-strip-types`]: #--experimental-strip-types [`--experimental-wasm-modules`]: #--experimental-wasm-modules diff --git a/doc/api/crypto.md b/doc/api/crypto.md index 788c52d8864660..ecd379f694e441 100644 --- a/doc/api/crypto.md +++ b/doc/api/crypto.md @@ -2059,6 +2059,22 @@ types are: This property is `undefined` for unrecognized `KeyObject` types and symmetric keys. +### `keyObject.equals(otherKeyObject)` + + + +* `otherKeyObject`: {KeyObject} A `KeyObject` with which to + compare `keyObject`. +* Returns: {boolean} + +Returns `true` or `false` depending on whether the keys have exactly the same +type, value, and parameters. This method is not +[constant time](https://en.wikipedia.org/wiki/Timing_attack). 
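For illustration, a minimal sketch of the comparison semantics (the key bytes used here are arbitrary):

```js
const { createSecretKey } = require('node:crypto');
const { Buffer } = require('node:buffer');

const a = createSecretKey(Buffer.from('2b7e151628aed2a6abf7158809cf4f3c', 'hex'));
const b = createSecretKey(Buffer.from('2b7e151628aed2a6abf7158809cf4f3c', 'hex'));
const c = createSecretKey(Buffer.from('000102030405060708090a0b0c0d0e0f', 'hex'));

console.log(a.equals(b)); // true: same type and key material
console.log(a.equals(c)); // false: same type, different key material
```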
- ### `keyObject.symmetricKeySize` + +* Type: {string\[]} + +An array detailing the key extended usages for this certificate. + ### `x509.fingerprint` - -* Type: {string\[]} - -An array detailing the key extended usages for this certificate. - ### `x509.publicKey` - -* {Object} - -An object containing commonly used constants for crypto and security related -operations. The specific constants currently defined are described in -[Crypto constants][]. - -### `crypto.fips` - - - -> Stability: 0 - Deprecated - -Property for checking and controlling whether a FIPS compliant crypto provider -is currently in use. Setting to true requires a FIPS build of Node.js. - -This property is deprecated. Please use `crypto.setFips()` and -`crypto.getFips()` instead. - ### `crypto.checkPrime(candidate[, options], callback)` + +* {Object} + +An object containing commonly used constants for crypto and security related +operations. The specific constants currently defined are described in +[Crypto constants][]. + ### `crypto.createCipheriv(algorithm, key, iv[, options])` -> Stability: 1.2 - Release candidate - -* `algorithm` {string|undefined} -* `data` {string|Buffer|TypedArray|DataView} When `data` is a - string, it will be encoded as UTF-8 before being hashed. If a different - input encoding is desired for a string input, user could encode the string - into a `TypedArray` using either `TextEncoder` or `Buffer.from()` and passing - the encoded `TypedArray` into this API instead. -* `outputEncoding` {string|undefined} [Encoding][encoding] used to encode the - returned digest. **Default:** `'hex'`. -* Returns: {string|Buffer} - -A utility for creating one-shot hash digests of data. It can be faster than -the object-based `crypto.createHash()` when hashing a smaller amount of data -(<= 5MB) that's readily available. If the data can be big or if it is streamed, -it's still recommended to use `crypto.createHash()` instead. - -The `algorithm` is dependent on the available algorithms supported by the -version of OpenSSL on the platform. Examples are `'sha256'`, `'sha512'`, etc. -On recent releases of OpenSSL, `openssl list -digest-algorithms` will -display the available digest algorithms. - -Example: - -```cjs -const crypto = require('node:crypto'); -const { Buffer } = require('node:buffer'); - -// Hashing a string and return the result as a hex-encoded string. -const string = 'Node.js'; -// 10b3493287f831e81a438811a1ffba01f8cec4b7 -console.log(crypto.hash('sha1', string)); - -// Encode a base64-encoded string into a Buffer, hash it and return -// the result as a buffer. -const base64 = 'Tm9kZS5qcw=='; -// -console.log(crypto.hash('sha1', Buffer.from(base64, 'base64'), 'buffer')); -``` - -```mjs -import crypto from 'node:crypto'; -import { Buffer } from 'node:buffer'; +> Stability: 0 - Deprecated -// Hashing a string and return the result as a hex-encoded string. -const string = 'Node.js'; -// 10b3493287f831e81a438811a1ffba01f8cec4b7 -console.log(crypto.hash('sha1', string)); +Property for checking and controlling whether a FIPS compliant crypto provider +is currently in use. Setting to true requires a FIPS build of Node.js. -// Encode a base64-encoded string into a Buffer, hash it and return -// the result as a buffer. -const base64 = 'Tm9kZS5qcw=='; -// -console.log(crypto.hash('sha1', Buffer.from(base64, 'base64'), 'buffer')); -``` +This property is deprecated. Please use `crypto.setFips()` and +`crypto.getFips()` instead. 
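For reference, a minimal sketch of the suggested replacements (enabling FIPS still requires a FIPS-capable build of Node.js):

```js
const crypto = require('node:crypto');

// getFips() returns 1 when a FIPS-compliant crypto provider is in use,
// and 0 otherwise.
console.log(crypto.getFips());

// setFips(true) enables FIPS mode and throws on non-FIPS builds.
// crypto.setFips(true);
```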
### `crypto.generateKey(type, options, callback)` @@ -4222,6 +4158,70 @@ A convenient alias for [`crypto.webcrypto.getRandomValues()`][]. This implementation is not compliant with the Web Crypto spec, to write web-compatible code use [`crypto.webcrypto.getRandomValues()`][] instead. +### `crypto.hash(algorithm, data[, outputEncoding])` + + + +> Stability: 1.2 - Release candidate + +* `algorithm` {string|undefined} +* `data` {string|Buffer|TypedArray|DataView} When `data` is a + string, it will be encoded as UTF-8 before being hashed. If a different + input encoding is desired for a string input, users can encode the string + into a `TypedArray` using either `TextEncoder` or `Buffer.from()` and pass + the encoded `TypedArray` into this API instead. +* `outputEncoding` {string|undefined} [Encoding][encoding] used to encode the + returned digest. **Default:** `'hex'`. +* Returns: {string|Buffer} + +A utility for creating one-shot hash digests of data. It can be faster than +the object-based `crypto.createHash()` when hashing a smaller amount of data +(<= 5MB) that's readily available. If the data can be big or if it is streamed, +it's still recommended to use `crypto.createHash()` instead. + +The `algorithm` is dependent on the available algorithms supported by the +version of OpenSSL on the platform. Examples are `'sha256'`, `'sha512'`, etc. +On recent releases of OpenSSL, `openssl list -digest-algorithms` will +display the available digest algorithms. + +Example: + +```cjs +const crypto = require('node:crypto'); +const { Buffer } = require('node:buffer'); + +// Hash a string and return the result as a hex-encoded string. +const string = 'Node.js'; +// 10b3493287f831e81a438811a1ffba01f8cec4b7 +console.log(crypto.hash('sha1', string)); + +// Encode a base64-encoded string into a Buffer, hash it and return +// the result as a buffer. +const base64 = 'Tm9kZS5qcw=='; +// +console.log(crypto.hash('sha1', Buffer.from(base64, 'base64'), 'buffer')); +``` + +```mjs +import crypto from 'node:crypto'; +import { Buffer } from 'node:buffer'; + +// Hash a string and return the result as a hex-encoded string. +const string = 'Node.js'; +// 10b3493287f831e81a438811a1ffba01f8cec4b7 +console.log(crypto.hash('sha1', string)); + +// Encode a base64-encoded string into a Buffer, hash it and return +// the result as a buffer. +const base64 = 'Tm9kZS5qcw=='; +// +console.log(crypto.hash('sha1', Buffer.from(base64, 'base64'), 'buffer')); +``` + ### `crypto.hkdf(digest, ikm, salt, info, keylen, callback)` - -* `buffer` {ArrayBuffer|Buffer|TypedArray|DataView} Must be supplied. The - size of the provided `buffer` must not be larger than `2**31 - 1`. -* `offset` {number} **Default:** `0` -* `size` {number} **Default:** `buffer.length - offset`. The `size` must - not be larger than `2**31 - 1`. -* Returns: {ArrayBuffer|Buffer|TypedArray|DataView} The object passed as - `buffer` argument. - -Synchronous version of [`crypto.randomFill()`][]. 
- -```mjs -import { Buffer } from 'node:buffer'; -const { randomFillSync } = await import('node:crypto'); - -const buf = Buffer.alloc(10); -console.log(randomFillSync(buf).toString('hex')); - -randomFillSync(buf, 5); -console.log(buf.toString('hex')); - -// The above is equivalent to the following: -randomFillSync(buf, 5, 5); -console.log(buf.toString('hex')); -``` - -```cjs -const { randomFillSync } = require('node:crypto'); -const { Buffer } = require('node:buffer'); - -const buf = Buffer.alloc(10); -console.log(randomFillSync(buf).toString('hex')); - -randomFillSync(buf, 5); -console.log(buf.toString('hex')); - -// The above is equivalent to the following: -randomFillSync(buf, 5, 5); -console.log(buf.toString('hex')); -``` - -Any `ArrayBuffer`, `TypedArray` or `DataView` instance may be passed as -`buffer`. - -```mjs -import { Buffer } from 'node:buffer'; -const { randomFillSync } = await import('node:crypto'); - -const a = new Uint32Array(10); -console.log(Buffer.from(randomFillSync(a).buffer, - a.byteOffset, a.byteLength).toString('hex')); - -const b = new DataView(new ArrayBuffer(10)); -console.log(Buffer.from(randomFillSync(b).buffer, - b.byteOffset, b.byteLength).toString('hex')); - -const c = new ArrayBuffer(10); -console.log(Buffer.from(randomFillSync(c)).toString('hex')); -``` - -```cjs -const { randomFillSync } = require('node:crypto'); -const { Buffer } = require('node:buffer'); - -const a = new Uint32Array(10); -console.log(Buffer.from(randomFillSync(a).buffer, - a.byteOffset, a.byteLength).toString('hex')); - -const b = new DataView(new ArrayBuffer(10)); -console.log(Buffer.from(randomFillSync(b).buffer, - b.byteOffset, b.byteLength).toString('hex')); - -const c = new ArrayBuffer(10); -console.log(Buffer.from(randomFillSync(c)).toString('hex')); -``` - ### `crypto.randomFill(buffer[, offset][, size], callback)` + +* `buffer` {ArrayBuffer|Buffer|TypedArray|DataView} Must be supplied. The + size of the provided `buffer` must not be larger than `2**31 - 1`. +* `offset` {number} **Default:** `0` +* `size` {number} **Default:** `buffer.length - offset`. The `size` must + not be larger than `2**31 - 1`. +* Returns: {ArrayBuffer|Buffer|TypedArray|DataView} The object passed as + `buffer` argument. + +Synchronous version of [`crypto.randomFill()`][]. + +```mjs +import { Buffer } from 'node:buffer'; +const { randomFillSync } = await import('node:crypto'); + +const buf = Buffer.alloc(10); +console.log(randomFillSync(buf).toString('hex')); + +randomFillSync(buf, 5); +console.log(buf.toString('hex')); + +// The above is equivalent to the following: +randomFillSync(buf, 5, 5); +console.log(buf.toString('hex')); +``` + +```cjs +const { randomFillSync } = require('node:crypto'); +const { Buffer } = require('node:buffer'); + +const buf = Buffer.alloc(10); +console.log(randomFillSync(buf).toString('hex')); + +randomFillSync(buf, 5); +console.log(buf.toString('hex')); + +// The above is equivalent to the following: +randomFillSync(buf, 5, 5); +console.log(buf.toString('hex')); +``` + +Any `ArrayBuffer`, `TypedArray` or `DataView` instance may be passed as +`buffer`. 
+ +```mjs +import { Buffer } from 'node:buffer'; +const { randomFillSync } = await import('node:crypto'); + +const a = new Uint32Array(10); +console.log(Buffer.from(randomFillSync(a).buffer, + a.byteOffset, a.byteLength).toString('hex')); + +const b = new DataView(new ArrayBuffer(10)); +console.log(Buffer.from(randomFillSync(b).buffer, + b.byteOffset, b.byteLength).toString('hex')); + +const c = new ArrayBuffer(10); +console.log(Buffer.from(randomFillSync(c)).toString('hex')); +``` + +```cjs +const { randomFillSync } = require('node:crypto'); +const { Buffer } = require('node:buffer'); + +const a = new Uint32Array(10); +console.log(Buffer.from(randomFillSync(a).buffer, + a.byteOffset, a.byteLength).toString('hex')); + +const b = new DataView(new ArrayBuffer(10)); +console.log(Buffer.from(randomFillSync(b).buffer, + b.byteOffset, b.byteLength).toString('hex')); + +const c = new ArrayBuffer(10); +console.log(Buffer.from(randomFillSync(c)).toString('hex')); +``` + ### `crypto.randomInt([min, ]max[, callback])` +### DEP0187: Passing invalid argument types to `fs.existsSync` + + + +Type: Documentation-only + +Passing non-supported argument types is deprecated and, instead of returning `false`, +will throw an error in a future version. + +### DEP0188: `process.features.ipv6` and `process.features.uv` + + + +Type: Documentation-only + +These properties are unconditionally `true`. Any checks based on these properties are redundant. + +### DEP0189: `process.features.tls_*` + + + +Type: Documentation-only + +`process.features.tls_alpn`, `process.features.tls_ocsp`, and `process.features.tls_sni` are +deprecated, as their values are guaranteed to be identical to that of `process.features.tls`. + [NIST SP 800-38D]: https://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication800-38d.pdf [RFC 6066]: https://tools.ietf.org/html/rfc6066#section-3 [RFC 8247 Section 2.4]: https://www.rfc-editor.org/rfc/rfc8247#section-2.4 diff --git a/doc/api/errors.md b/doc/api/errors.md index 7d076312a726d9..f66cebd088ae4c 100644 --- a/doc/api/errors.md +++ b/doc/api/errors.md @@ -61,7 +61,7 @@ Errors that occur within _Asynchronous APIs_ may be reported in multiple ways: ```js - const fs = require('fs/promises'); + const fs = require('node:fs/promises'); (async () => { let data; @@ -2153,6 +2153,12 @@ An attempt was made to open an IPC communication channel with a synchronously forked Node.js process. See the documentation for the [`child_process`][] module for more information. + + +### `ERR_IP_BLOCKED` + +IP is blocked by `net.BlockList`. + ### `ERR_LOADER_CHAIN_INCOMPLETE` @@ -2434,6 +2440,18 @@ Accessing `Object.prototype.__proto__` has been forbidden using [`Object.setPrototypeOf`][] should be used to get and set the prototype of an object. + + +### `ERR_QUIC_APPLICATION_ERROR` + + + +> Stability: 1 - Experimental + +A QUIC application error occurred. + ### `ERR_QUIC_CONNECTION_FAILED` @@ -2470,6 +2488,30 @@ added: v23.0.0 Opening a QUIC stream failed. + + +### `ERR_QUIC_TRANSPORT_ERROR` + + + +> Stability: 1 - Experimental + +A QUIC transport error occurred. + + + +### `ERR_QUIC_VERSION_NEGOTIATION_ERROR` + + + +> Stability: 1 - Experimental + +A QUIC session failed because version negotiation is required. 
+ ### `ERR_REQUIRE_ASYNC_MODULE` diff --git a/doc/api/esm.md b/doc/api/esm.md index 797e551d130452..ecb27680129737 100644 --- a/doc/api/esm.md +++ b/doc/api/esm.md @@ -10,6 +10,9 @@ changes: - version: v23.1.0 pr-url: https://github.com/nodejs/node/pull/55333 description: Import attributes are no longer experimental. + - version: v22.0.0 + pr-url: https://github.com/nodejs/node/pull/52104 + description: Drop support for import assertions. - version: - v21.0.0 - v20.10.0 @@ -119,14 +122,13 @@ Node.js has two module systems: [CommonJS][] modules and ECMAScript modules. Authors can tell Node.js to interpret JavaScript as an ES module via the `.mjs` file extension, the `package.json` [`"type"`][] field with a value `"module"`, -the [`--input-type`][] flag with a value of `"module"`, or the -[`--experimental-default-type`][] flag with a value of `"module"`. These are -explicit markers of code being intended to run as an ES module. +or the [`--input-type`][] flag with a value of `"module"`. These are explicit +markers of code being intended to run as an ES module. -Inversely, authors can tell Node.js to interpret JavaScript as CommonJS via the -`.cjs` file extension, the `package.json` [`"type"`][] field with a value -`"commonjs"`, the [`--input-type`][] flag with a value of `"commonjs"`, or the -[`--experimental-default-type`][] flag with a value of `"commonjs"`. +Inversely, authors can explicitly tell Node.js to interpret JavaScript as +CommonJS via the `.cjs` file extension, the `package.json` [`"type"`][] field +with a value `"commonjs"`, or the [`--input-type`][] flag with a value of +`"commonjs"`. When code lacks explicit markers for either module system, Node.js will inspect the source code of a module to look for ES module syntax. If such syntax is @@ -506,7 +508,7 @@ module default import or its corresponding sugar syntax: ```js import { default as cjs } from 'cjs'; -// identical to the above +// Identical to the above import cjsSugar from 'cjs'; console.log(cjs); @@ -1117,7 +1119,6 @@ resolution for ESM specifiers is [commonjs-extension-resolution-loader][]. [URL]: https://url.spec.whatwg.org/ [`"exports"`]: packages.md#exports [`"type"`]: packages.md#type -[`--experimental-default-type`]: cli.md#--experimental-default-typetype [`--input-type`]: cli.md#--input-typetype [`data:` URLs]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/Data_URIs [`export`]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Statements/export diff --git a/doc/api/inspector.md b/doc/api/inspector.md index 64db50b8fc1da4..2892779403d256 100644 --- a/doc/api/inspector.md +++ b/doc/api/inspector.md @@ -505,7 +505,7 @@ inspector.Network.requestWillBeSent({ request: { url: 'https://nodejs.org/en', method: 'GET', - } + }, }); ``` diff --git a/doc/api/module.md b/doc/api/module.md index d5d3187439c0c0..19346182bf42d7 100644 --- a/doc/api/module.md +++ b/doc/api/module.md @@ -980,8 +980,7 @@ Omitting vs providing a `source` for `'commonjs'` has very different effects: registered hooks. This behavior for nullish `source` is temporary — in the future, nullish `source` will not be supported. -When `node` is run with `--experimental-default-type=commonjs`, the Node.js -internal `load` implementation, which is the value of `next` for the +The Node.js internal `load` implementation, which is the value of `next` for the last hook in the `load` chain, returns `null` for `source` when `format` is `'commonjs'` for backward compatibility. 
Here is an example hook that would opt-in to using the non-default behavior: diff --git a/doc/api/modules.md b/doc/api/modules.md index 6d62b85bd2f12f..720a47205edb2a 100644 --- a/doc/api/modules.md +++ b/doc/api/modules.md @@ -890,7 +890,7 @@ built-in modules and if a name matching a built-in module is added to the cache, only `node:`-prefixed require calls are going to receive the built-in module. Use with care! - + ```js const assert = require('node:assert'); diff --git a/doc/api/net.md b/doc/api/net.md index 7d8a27bf0b1769..26b7e4c1985d80 100644 --- a/doc/api/net.md +++ b/doc/api/net.md @@ -170,6 +170,15 @@ added: The list of rules added to the blocklist. +### `BlockList.isBlockList(value)` + + + +* `value` {any} Any JS value +* Returns `true` if the `value` is a `net.BlockList`. + ## Class: `net.SocketAddress` + +* `input` {string} An input string containing an IP address and optional port, + e.g. `123.1.2.3:1234` or `[1::1]:1234`. +* Returns: {net.SocketAddress} Returns a `SocketAddress` if parsing was successful. + Otherwise returns `undefined`. + ## Class: `net.Server` + +* Returns: {PerformanceEntry\[]} Current list of entries stored in the performance observer, emptying it out. + ## Class: `PerformanceObserverEntryList` +> Stability: 0 - Deprecated. This property is always true, and any checks based on it are +> redundant. + * {boolean} A boolean value that is `true` if the current Node.js build includes support for IPv6. +Since all Node.js builds have IPv6 support, this value is always `true`. + ## `process.features.require_module` +> Stability: 0 - Deprecated. Use `process.features.tls` instead. + * {boolean} A boolean value that is `true` if the current Node.js build includes support for ALPN in TLS. +In Node.js 11.0.0 and later versions, the OpenSSL dependencies feature unconditional ALPN support. +This value is therefore identical to that of `process.features.tls`. + ## `process.features.tls_ocsp` +> Stability: 0 - Deprecated. Use `process.features.tls` instead. + * {boolean} A boolean value that is `true` if the current Node.js build includes support for OCSP in TLS. +In Node.js 11.0.0 and later versions, the OpenSSL dependencies feature unconditional OCSP support. +This value is therefore identical to that of `process.features.tls`. + ## `process.features.tls_sni` +> Stability: 0 - Deprecated. Use `process.features.tls` instead. + * {boolean} A boolean value that is `true` if the current Node.js build includes support for SNI in TLS. +In Node.js 11.0.0 and later versions, the OpenSSL dependencies feature unconditional SNI support. +This value is therefore identical to that of `process.features.tls`. + ## `process.features.typescript` +> Stability: 0 - Deprecated. This property is always true, and any checks based on it are +> redundant. + * {boolean} A boolean value that is `true` if the current Node.js build includes support for libuv. -Since it's currently not possible to build Node.js without libuv, this value is always `true`. + +Since it's not possible to build Node.js without libuv, this value is always `true`. 
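Taken together, the deprecations above mean that a single `process.features.tls` check can stand in for all of the deprecated `tls_*` flags (a minimal sketch):

```js
// On current Node.js versions these expressions are guaranteed to agree,
// so deprecated tls_* checks can be replaced by process.features.tls alone.
console.log(process.features.tls);
console.log(process.features.tls_alpn); // Deprecated: same value as process.features.tls
console.log(process.features.tls_ocsp); // Deprecated: same value as process.features.tls
console.log(process.features.tls_sni); // Deprecated: same value as process.features.tls
```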
## `process.finalization.register(ref, callback)` @@ -2107,10 +2136,10 @@ class Test { constructor() { finalization.register(this, (ref) => ref.dispose()); - // even something like this is highly discouraged + // Even something like this is highly discouraged // finalization.register(this, () => this.dispose()); - } - dispose() {} + } + dispose() {} } ``` diff --git a/doc/api/punycode.md b/doc/api/punycode.md index 6aa01a8348797b..f2e9bc09fbbb00 100644 --- a/doc/api/punycode.md +++ b/doc/api/punycode.md @@ -21,7 +21,7 @@ The `punycode` module is a bundled version of the [Punycode.js][] module. It can be accessed using: ```js -const punycode = require('punycode'); +const punycode = require('node:punycode'); ``` [Punycode][] is a character encoding scheme defined by RFC 3492 that is diff --git a/doc/api/report.md b/doc/api/report.md index 90434dfa7d3c0c..c74dc7e4c0880b 100644 --- a/doc/api/report.md +++ b/doc/api/report.md @@ -347,28 +347,6 @@ is provided below for reference. "writeQueueSize": 0, "readable": true, "writable": true - }, - { - "type": "tcp", - "is_active": true, - "is_referenced": true, - "address": "0x000055e70fcd68c8", - "localEndpoint": { - "host": "ip6-localhost", - "ip6": "::1", - "port": 52266 - }, - "remoteEndpoint": { - "host": "ip6-localhost", - "ip6": "::1", - "port": 38573 - }, - "sendBufferSize": 2626560, - "recvBufferSize": 131072, - "fd": 25, - "writeQueueSize": 0, - "readable": false, - "writable": false } ], "workers": [], @@ -601,11 +579,119 @@ includes the date, time, PID, and a sequence number. The sequence number helps in associating the report dump with the runtime state if generated multiple times for the same Node.js process. +## Report Version + Diagnostic report has an associated single-digit version number (`report.header.reportVersion`), uniquely representing the report format. The version number is bumped when new key is added or removed, or the data type of a value is changed. Report version definitions are consistent across LTS releases. +### Version history + +#### Version 4 + + + +New fields `ipv4` and `ipv6` are added to `tcp` and `udp` libuv handles endpoints. Examples: + +```json +{ + "libuv": [ + { + "type": "tcp", + "is_active": true, + "is_referenced": true, + "address": "0x000055e70fcb85d8", + "localEndpoint": { + "host": "localhost", + "ip4": "127.0.0.1", // new key + "port": 48986 + }, + "remoteEndpoint": { + "host": "localhost", + "ip4": "127.0.0.1", // new key + "port": 38573 + }, + "sendBufferSize": 2626560, + "recvBufferSize": 131072, + "fd": 24, + "writeQueueSize": 0, + "readable": true, + "writable": true + }, + { + "type": "tcp", + "is_active": true, + "is_referenced": true, + "address": "0x000055e70fcd68c8", + "localEndpoint": { + "host": "ip6-localhost", + "ip6": "::1", // new key + "port": 52266 + }, + "remoteEndpoint": { + "host": "ip6-localhost", + "ip6": "::1", // new key + "port": 38573 + }, + "sendBufferSize": 2626560, + "recvBufferSize": 131072, + "fd": 25, + "writeQueueSize": 0, + "readable": false, + "writable": false + } + ] +} +``` + +#### Version 3 + + + +The following memory usage keys are added to the `resourceUsage` section. + +```json +{ + "resourceUsage": { + "rss": "35766272", + "free_memory": "1598337024", + "total_memory": "17179869184", + "available_memory": "1598337024", + "constrained_memory": "36624662528" + } +} +``` + +#### Version 2 + + + +Added [`Worker`][] support. Refer to [Interaction with workers](#interaction-with-workers) section for more details. 
+ +#### Version 1 + +This is the first version of the diagnostic report. + ## Configuration Additional runtime configuration of report generation is available via diff --git a/doc/api/sqlite.md b/doc/api/sqlite.md index ac12a5b2582ed7..cdf5d690521a06 100644 --- a/doc/api/sqlite.md +++ b/doc/api/sqlite.md @@ -6,8 +6,7 @@ added: v22.5.0 --> -> Stability: 1.1 - Active development. Enable this API with the -> [`--experimental-sqlite`][] CLI flag. +> Stability: 1.1 - Active development. @@ -157,6 +156,10 @@ around [`sqlite3_prepare_v2()`][]. ### `database.createSession([options])` + + * `options` {Object} The configuration options for the session. * `table` {string} A specific table to track changes for. By default, changes to all tables are tracked. * `db` {string} Name of the database to track. This is useful when multiple databases have been added using [`ATTACH DATABASE`][]. **Default**: `'main'`. @@ -166,6 +169,10 @@ Creates and attaches a session to the database. This method is a wrapper around ### `database.applyChangeset(changeset[, options])` + + * `changeset` {Uint8Array} A binary changeset or patchset. * `options` {Object} The configuration options for how the changes will be applied. * `filter` {Function} Skip changes that, when the targeted table name is supplied to this function, return a truthy value. * `onConflict` {number} Determines how conflicts are handled. **Default**: `SQLITE_CHANGESET_ABORT`. * `SQLITE_CHANGESET_OMIT`: conflicting changes are omitted. * `SQLITE_CHANGESET_REPLACE`: conflicting changes replace existing values. - * `SQLITE_CHANGESET_ABORT`: abort on conflict and roll back databsase. + * `SQLITE_CHANGESET_ABORT`: abort on conflict and roll back database. * Returns: {boolean} Whether the changeset was applied successfully without being aborted. An exception is thrown if the database is not @@ -199,8 +206,16 @@ targetDb.applyChangeset(changeset); ## Class: `Session` + + ### `session.changeset()` + + * Returns: {Uint8Array} Binary changeset that can be applied to other databases. Retrieves a changeset containing all changes since the changeset was created. Can be called multiple times. @@ -208,6 +223,10 @@ An exception is thrown if the database or the session is not open. This method i ### `session.patchset()` + + * Returns: {Uint8Array} Binary patchset that can be applied to other databases. Similar to the method above, but generates a more compact patchset. See [Changesets and Patchsets][] @@ -288,6 +307,25 @@ object. If the prepared statement does not return any results, this method returns `undefined`. The prepared statement [parameters are bound][] using the values in `namedParameters` and `anonymousParameters`. +### `statement.iterate([namedParameters][, ...anonymousParameters])` + + + +* `namedParameters` {Object} An optional object used to bind named parameters. + The keys of this object are used to configure the mapping. +* `...anonymousParameters` {null|number|bigint|string|Buffer|Uint8Array} Zero or + more values to bind to anonymous parameters. +* Returns: {Iterator} An iterable iterator of objects. Each object corresponds to a row + returned by executing the prepared statement. The keys and values of each + object correspond to the column names and values of the row. + +This method executes a prepared statement and returns an iterator of +objects. If the prepared statement does not return any results, this method +returns an empty iterator. 
The prepared statement [parameters are bound][] using +the values in `namedParameters` and `anonymousParameters`. + ### `statement.run([namedParameters][, ...anonymousParameters])` -> Stability: 1 - Experimental - * `count` {number} The number of assertions and subtests that are expected to run. This function is used to set the number of assertions and subtests that are expected to run @@ -3589,7 +3590,6 @@ Can be used to abort test subtasks when the test has been aborted. [`--experimental-strip-types`]: cli.md#--experimental-strip-types [`--experimental-test-coverage`]: cli.md#--experimental-test-coverage [`--experimental-test-module-mocks`]: cli.md#--experimental-test-module-mocks -[`--experimental-test-snapshots`]: cli.md#--experimental-test-snapshots [`--import`]: cli.md#--importmodule [`--test-concurrency`]: cli.md#--test-concurrency [`--test-coverage-include`]: cli.md#--test-coverage-include diff --git a/doc/api/timers.md b/doc/api/timers.md index 83edc1d73b0e77..9c4ca2ea17d263 100644 --- a/doc/api/timers.md +++ b/doc/api/timers.md @@ -292,7 +292,24 @@ returned Promises will be rejected with an `'AbortError'`. For `setImmediate()`: -```js +```mjs +import { setImmediate as setImmediatePromise } from 'node:timers/promises'; + +const ac = new AbortController(); +const signal = ac.signal; + +// We do not `await` the promise so `ac.abort()` is called concurrently. +setImmediatePromise('foobar', { signal }) + .then(console.log) + .catch((err) => { + if (err.name === 'AbortError') + console.error('The immediate was aborted'); + }); + +ac.abort(); +``` + +```cjs const { setImmediate: setImmediatePromise } = require('node:timers/promises'); const ac = new AbortController(); @@ -310,7 +327,24 @@ ac.abort(); For `setTimeout()`: -```js +```mjs +import { setTimeout as setTimeoutPromise } from 'node:timers/promises'; + +const ac = new AbortController(); +const signal = ac.signal; + +// We do not `await` the promise so `ac.abort()` is called concurrently. +setTimeoutPromise(1000, 'foobar', { signal }) + .then(console.log) + .catch((err) => { + if (err.name === 'AbortError') + console.error('The timeout was aborted'); + }); + +ac.abort(); +``` + +```cjs const { setTimeout: setTimeoutPromise } = require('node:timers/promises'); const ac = new AbortController(); diff --git a/doc/api/tls.md b/doc/api/tls.md index f19a75370b1cf5..3436ba6697950f 100644 --- a/doc/api/tls.md +++ b/doc/api/tls.md @@ -465,13 +465,13 @@ to set the security level to 0 while using the default OpenSSL cipher list, you const tls = require('node:tls'); const port = 443; -tls.createServer({ciphers: 'DEFAULT@SECLEVEL=0', minVersion: 'TLSv1'}, function (socket) { +tls.createServer({ ciphers: 'DEFAULT@SECLEVEL=0', minVersion: 'TLSv1' }, function(socket) { console.log('Client connected with protocol:', socket.getProtocol()); socket.end(); this.close(); -}). 
-listen(port, () => { - tls.connect(port, {ciphers: 'DEFAULT@SECLEVEL=0', maxVersion: 'TLSv1'}); +}) +.listen(port, () => { + tls.connect(port, { ciphers: 'DEFAULT@SECLEVEL=0', maxVersion: 'TLSv1' }); }); ``` diff --git a/doc/api/tracing.md b/doc/api/tracing.md index e1a72a708863a5..ddd1bae52a62c4 100644 --- a/doc/api/tracing.md +++ b/doc/api/tracing.md @@ -245,7 +245,7 @@ console.log(trace_events.getEnabledCategories()); ```js 'use strict'; -const { Session } = require('inspector'); +const { Session } = require('node:inspector'); const session = new Session(); session.connect(); diff --git a/doc/api/util.md b/doc/api/util.md index dc8e3c38ddfca6..eb06e93c503811 100644 --- a/doc/api/util.md +++ b/doc/api/util.md @@ -513,7 +513,7 @@ The mapping between error codes and string messages is platform-dependent. ```js fs.access('file/that/does/not/exist', (err) => { const name = util.getSystemErrorMessage(err.errno); - console.error(name); // no such file or directory + console.error(name); // No such file or directory }); ``` @@ -1998,6 +1998,10 @@ The full list of formats can be found in [modifiers][]. An implementation of the [WHATWG Encoding Standard][] `TextDecoder` API. @@ -2076,14 +2080,6 @@ is not supported. ### `new TextDecoder([encoding[, options]])` - - * `encoding` {string} Identifies the `encoding` that this `TextDecoder` instance supports. **Default:** `'utf-8'`. * `options` {Object} @@ -2166,6 +2162,10 @@ encoded bytes. ### `textEncoder.encodeInto(src, dest)` + + * `src` {string} The text to encode. * `dest` {Uint8Array} The array to hold the encode result. * Returns: {Object} diff --git a/doc/api/v8.md b/doc/api/v8.md index 4df6e98a48fe78..670283e17f5d80 100644 --- a/doc/api/v8.md +++ b/doc/api/v8.md @@ -1296,7 +1296,7 @@ is as follows. Here's an example. ```js -const { GCProfiler } = require('v8'); +const { GCProfiler } = require('node:v8'); const profiler = new GCProfiler(); profiler.start(); setTimeout(() => { diff --git a/doc/api/vm.md b/doc/api/vm.md index 01317b5843ad34..a5dd038070a498 100644 --- a/doc/api/vm.md +++ b/doc/api/vm.md @@ -1622,12 +1622,12 @@ in the outer context. const vm = require('node:vm'); // An undefined `contextObject` option makes the global object contextified. -let context = vm.createContext(); +const context = vm.createContext(); console.log(vm.runInContext('globalThis', context) === context); // false // A contextified global object cannot be frozen. try { vm.runInContext('Object.freeze(globalThis);', context); -} catch(e) { +} catch (e) { console.log(e); // TypeError: Cannot freeze } console.log(vm.runInContext('globalThis.foo = 1; foo;', context)); // 1 @@ -1652,7 +1652,7 @@ const context = vm.createContext(vm.constants.DONT_CONTEXTIFY); vm.runInContext('Object.freeze(globalThis);', context); try { vm.runInContext('bar = 1; bar;', context); -} catch(e) { +} catch (e) { console.log(e); // Uncaught ReferenceError: bar is not defined } ``` @@ -1681,7 +1681,7 @@ console.log(vm.runInContext('bar;', context)); // 1 Object.freeze(context); try { vm.runInContext('baz = 1; baz;', context); -} catch(e) { +} catch (e) { console.log(e); // Uncaught ReferenceError: baz is not defined } ``` diff --git a/doc/api/webcrypto.md b/doc/api/webcrypto.md index 27c37bf4829f3e..349fb116b1c0d6 100644 --- a/doc/api/webcrypto.md +++ b/doc/api/webcrypto.md @@ -600,9 +600,6 @@ Using the method and parameters specified in `algorithm` and the keying material provided by `baseKey`, `subtle.deriveBits()` attempts to generate `length` bits. 
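To illustrate the call shape, a sketch using ECDH (the curve and bit length are arbitrary choices):

```mjs
const { subtle } = globalThis.crypto;

// Two ephemeral ECDH key pairs standing in for two peers.
const alice = await subtle.generateKey(
  { name: 'ECDH', namedCurve: 'P-256' }, false, ['deriveBits']);
const bob = await subtle.generateKey(
  { name: 'ECDH', namedCurve: 'P-256' }, false, ['deriveBits']);

// Derive 256 bits of shared secret from one party's private key and the
// other party's public key.
const bits = await subtle.deriveBits(
  { name: 'ECDH', public: bob.publicKey }, alice.privateKey, 256);

console.log(new Uint8Array(bits).byteLength); // 32
```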
-The Node.js implementation requires that `length`, when a number, is a multiple
-of `8`.
-
 When `length` is not provided or `null` the maximum number of bits for a given
 algorithm is generated. This is allowed for the `'ECDH'`, `'X25519'`, and `'X448'`
 algorithms, for other algorithms `length` is required to be a number.
diff --git a/doc/api/worker_threads.md b/doc/api/worker_threads.md
index f1b5c5b652f4f1..afecd991da9dd3 100644
--- a/doc/api/worker_threads.md
+++ b/doc/api/worker_threads.md
@@ -218,7 +218,7 @@ markAsUncloneable(anyObject);
 const { port1 } = new MessageChannel();
 try {
   // This will throw an error, because anyObject is not cloneable.
-  port1.postMessage(anyObject)
+  port1.postMessage(anyObject);
 } catch (error) {
   // error.name === 'DataCloneError'
 }
@@ -908,6 +908,8 @@ not preserved. In particular, [`Buffer`][] objects will be read as plain
 [`Uint8Array`][]s on the receiving side, and instances of JavaScript
 classes will be cloned as plain JavaScript objects.
 
+
+
 ```js
 const b = Symbol('b');
diff --git a/doc/api/zlib.md b/doc/api/zlib.md
index 36977eae86af6f..a175ded844d5c0 100644
--- a/doc/api/zlib.md
+++ b/doc/api/zlib.md
@@ -11,7 +11,11 @@ Gzip, Deflate/Inflate, and Brotli.
 
 To access it:
 
-```js
+```mjs
+import zlib from 'node:zlib';
+```
+
+```cjs
 const zlib = require('node:zlib');
 ```
 
@@ -21,13 +25,35 @@ Compressing or decompressing a stream (such as a file) can be accomplished by
 piping the source stream through a `zlib` `Transform` stream into a destination
 stream:
 
-```js
-const { createGzip } = require('node:zlib');
-const { pipeline } = require('node:stream');
+```mjs
+import {
+  createReadStream,
+  createWriteStream,
+} from 'node:fs';
+import process from 'node:process';
+import { createGzip } from 'node:zlib';
+import { pipeline } from 'node:stream';
+
+const gzip = createGzip();
+const source = createReadStream('input.txt');
+const destination = createWriteStream('input.txt.gz');
+
+pipeline(source, gzip, destination, (err) => {
+  if (err) {
+    console.error('An error occurred:', err);
+    process.exitCode = 1;
+  }
+});
+```
+
+```cjs
 const {
   createReadStream,
   createWriteStream,
 } = require('node:fs');
+const process = require('node:process');
+const { createGzip } = require('node:zlib');
+const { pipeline } = require('node:stream');
 
 const gzip = createGzip();
 const source = createReadStream('input.txt');
@@ -39,17 +65,43 @@ pipeline(source, gzip, destination, (err) => {
     process.exitCode = 1;
   }
 });
+```
 
-// Or, Promisified
+Or, using the promise `pipeline` API:
 
-const { promisify } = require('node:util');
-const pipe = promisify(pipeline);
+```mjs
+import {
+  createReadStream,
+  createWriteStream,
+} from 'node:fs';
+import process from 'node:process';
+import { createGzip } from 'node:zlib';
+import { pipeline } from 'node:stream/promises';
+
+async function do_gzip(input, output) {
+  const gzip = createGzip();
+  const source = createReadStream(input);
+  const destination = createWriteStream(output);
+  await pipeline(source, gzip, destination);
+}
+
+await do_gzip('input.txt', 'input.txt.gz');
+```
+
+```cjs
+const {
+  createReadStream,
+  createWriteStream,
+} = require('node:fs');
+const process = require('node:process');
+const { createGzip } = require('node:zlib');
+const { pipeline } = require('node:stream/promises');
 
 async function do_gzip(input, output) {
   const gzip = createGzip();
   const source = createReadStream(input);
   const destination = createWriteStream(output);
-  await pipe(source, gzip, destination);
+  await pipeline(source, gzip, destination);
 }
do_gzip('input.txt', 'input.txt.gz') @@ -61,7 +113,39 @@ do_gzip('input.txt', 'input.txt.gz') It is also possible to compress or decompress data in a single step: -```js +```mjs +import process from 'node:process'; +import { Buffer } from 'node:buffer'; +import { deflate, unzip } from 'node:zlib'; + +const input = '.................................'; +deflate(input, (err, buffer) => { + if (err) { + console.error('An error occurred:', err); + process.exitCode = 1; + } + console.log(buffer.toString('base64')); +}); + +const buffer = Buffer.from('eJzT0yMAAGTvBe8=', 'base64'); +unzip(buffer, (err, buffer) => { + if (err) { + console.error('An error occurred:', err); + process.exitCode = 1; + } + console.log(buffer.toString()); +}); + +// Or, Promisified + +import { promisify } from 'node:util'; +const do_unzip = promisify(unzip); + +const unzippedBuffer = await do_unzip(buffer); +console.log(unzippedBuffer.toString()); +``` + +```cjs const { deflate, unzip } = require('node:zlib'); const input = '.................................'; @@ -104,7 +188,19 @@ limitations in some applications. Creating and using a large number of zlib objects simultaneously can cause significant memory fragmentation. -```js +```mjs +import zlib from 'node:zlib'; +import { Buffer } from 'node:buffer'; + +const payload = Buffer.from('This is some data'); + +// WARNING: DO NOT DO THIS! +for (let i = 0; i < 30000; ++i) { + zlib.deflate(payload, (err, buffer) => {}); +} +``` + +```cjs const zlib = require('node:zlib'); const payload = Buffer.from('This is some data'); @@ -138,7 +234,47 @@ Using `zlib` encoding can be expensive, and the results ought to be cached. See [Memory usage tuning][] for more information on the speed/memory/compression tradeoffs involved in `zlib` usage. -```js +```mjs +// Client request example +import fs from 'node:fs'; +import zlib from 'node:zlib'; +import http from 'node:http'; +import process from 'node:process'; +import { pipeline } from 'node:stream'; + +const request = http.get({ host: 'example.com', + path: '/', + port: 80, + headers: { 'Accept-Encoding': 'br,gzip,deflate' } }); +request.on('response', (response) => { + const output = fs.createWriteStream('example.com_index.html'); + + const onError = (err) => { + if (err) { + console.error('An error occurred:', err); + process.exitCode = 1; + } + }; + + switch (response.headers['content-encoding']) { + case 'br': + pipeline(response, zlib.createBrotliDecompress(), output, onError); + break; + // Or, just use zlib.createUnzip() to handle both of the following cases: + case 'gzip': + pipeline(response, zlib.createGunzip(), output, onError); + break; + case 'deflate': + pipeline(response, zlib.createInflate(), output, onError); + break; + default: + pipeline(response, output, onError); + break; + } +}); +``` + +```cjs // Client request example const zlib = require('node:zlib'); const http = require('node:http'); @@ -177,7 +313,52 @@ request.on('response', (response) => { }); ``` -```js +```mjs +// server example +// Running a gzip operation on every request is quite expensive. +// It would be much more efficient to cache the compressed buffer. +import zlib from 'node:zlib'; +import http from 'node:http'; +import fs from 'node:fs'; +import { pipeline } from 'node:stream'; + +http.createServer((request, response) => { + const raw = fs.createReadStream('index.html'); + // Store both a compressed and an uncompressed version of the resource. 
+ response.setHeader('Vary', 'Accept-Encoding'); + const acceptEncoding = request.headers['accept-encoding'] || ''; + + const onError = (err) => { + if (err) { + // If an error occurs, there's not much we can do because + // the server has already sent the 200 response code and + // some amount of data has already been sent to the client. + // The best we can do is terminate the response immediately + // and log the error. + response.end(); + console.error('An error occurred:', err); + } + }; + + // Note: This is not a conformant accept-encoding parser. + // See https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.3 + if (/\bdeflate\b/.test(acceptEncoding)) { + response.writeHead(200, { 'Content-Encoding': 'deflate' }); + pipeline(raw, zlib.createDeflate(), response, onError); + } else if (/\bgzip\b/.test(acceptEncoding)) { + response.writeHead(200, { 'Content-Encoding': 'gzip' }); + pipeline(raw, zlib.createGzip(), response, onError); + } else if (/\bbr\b/.test(acceptEncoding)) { + response.writeHead(200, { 'Content-Encoding': 'br' }); + pipeline(raw, zlib.createBrotliCompress(), response, onError); + } else { + response.writeHead(200, {}); + pipeline(raw, response, onError); + } +}).listen(1337); +``` + +```cjs // server example // Running a gzip operation on every request is quite expensive. // It would be much more efficient to cache the compressed buffer. @@ -190,10 +371,7 @@ http.createServer((request, response) => { const raw = fs.createReadStream('index.html'); // Store both a compressed and an uncompressed version of the resource. response.setHeader('Vary', 'Accept-Encoding'); - let acceptEncoding = request.headers['accept-encoding']; - if (!acceptEncoding) { - acceptEncoding = ''; - } + const acceptEncoding = request.headers['accept-encoding'] || ''; const onError = (err) => { if (err) { @@ -318,7 +496,43 @@ quality, but can be useful when data needs to be available as soon as possible. In the following example, `flush()` is used to write a compressed partial HTTP response to the client: -```js +```mjs +import zlib from 'node:zlib'; +import http from 'node:http'; +import { pipeline } from 'node:stream'; + +http.createServer((request, response) => { + // For the sake of simplicity, the Accept-Encoding checks are omitted. + response.writeHead(200, { 'content-encoding': 'gzip' }); + const output = zlib.createGzip(); + let i; + + pipeline(output, response, (err) => { + if (err) { + // If an error occurs, there's not much we can do because + // the server has already sent the 200 response code and + // some amount of data has already been sent to the client. + // The best we can do is terminate the response immediately + // and log the error. + clearInterval(i); + response.end(); + console.error('An error occurred:', err); + } + }); + + i = setInterval(() => { + output.write(`The current time is ${Date()}\n`, () => { + // The data has been passed to zlib, but the compression algorithm may + // have decided to buffer the data for more efficient compression. + // Calling .flush() will make the data available as soon as the client + // is ready to receive it. + output.flush(); + }); + }, 1000); +}).listen(1337); +``` + +```cjs const zlib = require('node:zlib'); const http = require('node:http'); const { pipeline } = require('node:stream'); diff --git a/doc/changelogs/CHANGELOG_V23.md b/doc/changelogs/CHANGELOG_V23.md index 47b30d19b91bae..9e723609b72bca 100644 --- a/doc/changelogs/CHANGELOG_V23.md +++ b/doc/changelogs/CHANGELOG_V23.md @@ -8,6 +8,7 @@ +23.4.0
    23.3.0
    23.2.0
    23.1.0
@@ -41,6 +42,187 @@
 * [io.js](CHANGELOG_IOJS.md)
 * [Archive](CHANGELOG_ARCHIVE.md)
 
+
+
+## 2024-12-10, Version 23.4.0 (Current), @aduh95 prepared by @targos
+
+### Notable Changes
+
+#### Introducing experimental `assert.partialDeepStrictEqual`
+
+Sometimes, when writing tests, we want to validate that some specific properties
+are present, and the mere presence of additional keys is not relevant
+for that specific test. For this use case, we can now use
+`assert.partialDeepStrictEqual`, which should be familiar to those already using
+`assert.deepStrictEqual`, with the main difference that it does not require all
+properties in the `actual` parameter to be present in the `expected` parameter.
+
+Here are a few examples of usage:
+
+```js
+assert.partialDeepStrictEqual(
+  { a: 1, b: 2, c: 3 },
+  { a: 1, b: 2 },
+);
+
+assert.partialDeepStrictEqual(
+  [1, 2, 3, 4],
+  [2, 3],
+);
+
+assert.partialDeepStrictEqual(
+  { a: { b: { c: 1, d: 2 } }, e: 3 },
+  { a: { b: { c: 1 } } },
+);
+
+assert.partialDeepStrictEqual(
+  new Set([{ a: 1 }, { b: 1 }]),
+  new Set([{ a: 1 }]),
+);
+
+assert.partialDeepStrictEqual(
+  { a: new Set([{ a: 1 }, { b: 1 }]), b: new Map(), c: [1, 2, 3] },
+  { a: new Set([{ a: 1 }]), c: [2] },
+);
+```
+
+Contributed by Giovanni Bucci in [#54630](https://github.com/nodejs/node/pull/54630).
+
+#### Other notable changes
+
+* \[[`816d37a187`](https://github.com/nodejs/node/commit/816d37a187)] - **(SEMVER-MINOR)** **cli**: implement `--trace-env` and `--trace-env-[js|native]-stack` (Joyee Cheung) [#55604](https://github.com/nodejs/node/pull/55604)
+* \[[`59d6891872`](https://github.com/nodejs/node/commit/59d6891872)] - **doc**: add LJHarb to collaborators (Jordan Harband) [#56132](https://github.com/nodejs/node/pull/56132)
+* \[[`565b04a7be`](https://github.com/nodejs/node/commit/565b04a7be)] - **(SEMVER-MINOR)** **net**: add `BlockList.isBlockList(value)` (James M Snell) [#56078](https://github.com/nodejs/node/pull/56078)
+* \[[`c9698ed6a4`](https://github.com/nodejs/node/commit/c9698ed6a4)] - **(SEMVER-MINOR)** **net**: support `blockList` in `net.connect` (theanarkh) [#56075](https://github.com/nodejs/node/pull/56075)
+* \[[`30d604180d`](https://github.com/nodejs/node/commit/30d604180d)] - **(SEMVER-MINOR)** **net**: support `blockList` in `net.Server` (theanarkh) [#56079](https://github.com/nodejs/node/pull/56079)
+* \[[`9fba5e1df1`](https://github.com/nodejs/node/commit/9fba5e1df1)] - **(SEMVER-MINOR)** **net**: add `SocketAddress.parse` (James M Snell) [#56076](https://github.com/nodejs/node/pull/56076)
+* \[[`4cdb03201e`](https://github.com/nodejs/node/commit/4cdb03201e)] - **(SEMVER-MINOR)** **process**: deprecate `features.{ipv6,uv}` and `features.tls_*` (René) [#55545](https://github.com/nodejs/node/pull/55545)
+* \[[`efb9f05f59`](https://github.com/nodejs/node/commit/efb9f05f59)] - **(SEMVER-MINOR)** **sqlite**: unflag `node:sqlite` module (Colin Ihrig) [#55890](https://github.com/nodejs/node/pull/55890)
+* \[[`d777d4a52d`](https://github.com/nodejs/node/commit/d777d4a52d)] - **(SEMVER-MINOR)** **sqlite**: add `StatementSync.prototype.iterate` method (tpoisseau) [#54213](https://github.com/nodejs/node/pull/54213)
+
+### Commits
+
+* \[[`5b0ce376a2`](https://github.com/nodejs/node/commit/5b0ce376a2)] - **assert**: optimize partial comparison of two `Set`s (Antoine du Hamel) [#55970](https://github.com/nodejs/node/pull/55970)
+* 
\[[`a4f57f0293`](https://github.com/nodejs/node/commit/a4f57f0293)] - **(SEMVER-MINOR)** **assert**: add partialDeepStrictEqual (Giovanni Bucci) [#54630](https://github.com/nodejs/node/pull/54630) +* \[[`1b81a7d003`](https://github.com/nodejs/node/commit/1b81a7d003)] - **build**: allow overriding clang usage (Shelley Vohr) [#56016](https://github.com/nodejs/node/pull/56016) +* \[[`39c901307f`](https://github.com/nodejs/node/commit/39c901307f)] - **build**: remove defaults for create-release-proposal (Rafael Gonzaga) [#56042](https://github.com/nodejs/node/pull/56042) +* \[[`7133c0459f`](https://github.com/nodejs/node/commit/7133c0459f)] - **build**: avoid compiling with VS v17.12 (Stefan Stojanovic) [#55930](https://github.com/nodejs/node/pull/55930) +* \[[`ce53f1689f`](https://github.com/nodejs/node/commit/ce53f1689f)] - **build**: set node\_arch to target\_cpu in GN (Shelley Vohr) [#55967](https://github.com/nodejs/node/pull/55967) +* \[[`2023b09d27`](https://github.com/nodejs/node/commit/2023b09d27)] - **build**: add create release proposal action (Rafael Gonzaga) [#55690](https://github.com/nodejs/node/pull/55690) +* \[[`26ec99634c`](https://github.com/nodejs/node/commit/26ec99634c)] - **build**: use variable for crypto dep path (Shelley Vohr) [#55928](https://github.com/nodejs/node/pull/55928) +* \[[`f48e289580`](https://github.com/nodejs/node/commit/f48e289580)] - **build**: fix GN build for sqlite (Cheng) [#55912](https://github.com/nodejs/node/pull/55912) +* \[[`fffabca6b8`](https://github.com/nodejs/node/commit/fffabca6b8)] - **build**: compile bundled simdutf conditionally (Jakub Jirutka) [#55886](https://github.com/nodejs/node/pull/55886) +* \[[`d8eb83c5c5`](https://github.com/nodejs/node/commit/d8eb83c5c5)] - **build**: compile bundled simdjson conditionally (Jakub Jirutka) [#55886](https://github.com/nodejs/node/pull/55886) +* \[[`83e02dc482`](https://github.com/nodejs/node/commit/83e02dc482)] - **build**: compile bundled ada conditionally (Jakub Jirutka) [#55886](https://github.com/nodejs/node/pull/55886) +* \[[`816d37a187`](https://github.com/nodejs/node/commit/816d37a187)] - **(SEMVER-MINOR)** **cli**: implement --trace-env and --trace-env-\[js|native]-stack (Joyee Cheung) [#55604](https://github.com/nodejs/node/pull/55604) +* \[[`53c0f2f186`](https://github.com/nodejs/node/commit/53c0f2f186)] - **crypto**: ensure CryptoKey usages and algorithm are cached objects (Filip Skokan) [#56108](https://github.com/nodejs/node/pull/56108) +* \[[`93d36bf1c8`](https://github.com/nodejs/node/commit/93d36bf1c8)] - **crypto**: allow non-multiple of 8 in SubtleCrypto.deriveBits (Filip Skokan) [#55296](https://github.com/nodejs/node/pull/55296) +* \[[`8680b8030c`](https://github.com/nodejs/node/commit/8680b8030c)] - **deps**: update ngtcp2 to 1.9.1 (Node.js GitHub Bot) [#56095](https://github.com/nodejs/node/pull/56095) +* \[[`78a2a6ca1e`](https://github.com/nodejs/node/commit/78a2a6ca1e)] - **deps**: upgrade npm to 10.9.2 (npm team) [#56135](https://github.com/nodejs/node/pull/56135) +* \[[`52dfe5af4b`](https://github.com/nodejs/node/commit/52dfe5af4b)] - **deps**: update sqlite to 3.47.1 (Node.js GitHub Bot) [#56094](https://github.com/nodejs/node/pull/56094) +* \[[`3852b5c8d1`](https://github.com/nodejs/node/commit/3852b5c8d1)] - **deps**: update zlib to 1.3.0.1-motley-82a5fec (Node.js GitHub Bot) [#55980](https://github.com/nodejs/node/pull/55980) +* \[[`f99f95f62f`](https://github.com/nodejs/node/commit/f99f95f62f)] - **deps**: update corepack to 0.30.0 (Node.js GitHub Bot) 
[#55977](https://github.com/nodejs/node/pull/55977) +* \[[`96e846de89`](https://github.com/nodejs/node/commit/96e846de89)] - **deps**: update ngtcp2 to 1.9.0 (Node.js GitHub Bot) [#55975](https://github.com/nodejs/node/pull/55975) +* \[[`d180a8aedb`](https://github.com/nodejs/node/commit/d180a8aedb)] - **deps**: update simdutf to 5.6.3 (Node.js GitHub Bot) [#55973](https://github.com/nodejs/node/pull/55973) +* \[[`288416a764`](https://github.com/nodejs/node/commit/288416a764)] - **deps**: upgrade npm to 10.9.1 (npm team) [#55951](https://github.com/nodejs/node/pull/55951) +* \[[`cf3f7ac512`](https://github.com/nodejs/node/commit/cf3f7ac512)] - **deps**: update zlib to 1.3.0.1-motley-7e2e4d7 (Node.js GitHub Bot) [#54432](https://github.com/nodejs/node/pull/54432) +* \[[`7768b3d054`](https://github.com/nodejs/node/commit/7768b3d054)] - **deps**: update simdjson to 3.10.1 (Node.js GitHub Bot) [#54678](https://github.com/nodejs/node/pull/54678) +* \[[`9c6103833b`](https://github.com/nodejs/node/commit/9c6103833b)] - **deps**: update simdutf to 5.6.2 (Node.js GitHub Bot) [#55889](https://github.com/nodejs/node/pull/55889) +* \[[`7b133d6220`](https://github.com/nodejs/node/commit/7b133d6220)] - **dgram**: check udp buffer size to avoid fd leak (theanarkh) [#56084](https://github.com/nodejs/node/pull/56084) +* \[[`e4529b8179`](https://github.com/nodejs/node/commit/e4529b8179)] - **doc**: add report version and history section (Chengzhong Wu) [#56130](https://github.com/nodejs/node/pull/56130) +* \[[`718625a03a`](https://github.com/nodejs/node/commit/718625a03a)] - **doc**: mention `-a` flag for the release script (Ruy Adorno) [#56124](https://github.com/nodejs/node/pull/56124) +* \[[`59d6891872`](https://github.com/nodejs/node/commit/59d6891872)] - **doc**: add LJHarb to collaborators (Jordan Harband) [#56132](https://github.com/nodejs/node/pull/56132) +* \[[`d7ed32404a`](https://github.com/nodejs/node/commit/d7ed32404a)] - **doc**: add create-release-action to process (Rafael Gonzaga) [#55993](https://github.com/nodejs/node/pull/55993) +* \[[`3b4ef93371`](https://github.com/nodejs/node/commit/3b4ef93371)] - **doc**: rename file to advocacy-ambassador-program.md (Tobias Nießen) [#56046](https://github.com/nodejs/node/pull/56046) +* \[[`59e4087d5e`](https://github.com/nodejs/node/commit/59e4087d5e)] - **doc**: add added tag and fix typo sqlite.md (Bart Louwers) [#56012](https://github.com/nodejs/node/pull/56012) +* \[[`a1b26608ae`](https://github.com/nodejs/node/commit/a1b26608ae)] - **doc**: remove unused import from sample code (Blended Bram) [#55570](https://github.com/nodejs/node/pull/55570) +* \[[`498f44ad73`](https://github.com/nodejs/node/commit/498f44ad73)] - **doc**: add FAQ to releases section (Rafael Gonzaga) [#55992](https://github.com/nodejs/node/pull/55992) +* \[[`d48348afaa`](https://github.com/nodejs/node/commit/d48348afaa)] - **doc**: move history entry to class description (Luigi Pinca) [#55991](https://github.com/nodejs/node/pull/55991) +* \[[`96926ce13c`](https://github.com/nodejs/node/commit/96926ce13c)] - **doc**: add history entry for textEncoder.encodeInto() (Luigi Pinca) [#55990](https://github.com/nodejs/node/pull/55990) +* \[[`e92d51d511`](https://github.com/nodejs/node/commit/e92d51d511)] - **doc**: improve GN build documentation a bit (Shelley Vohr) [#55968](https://github.com/nodejs/node/pull/55968) +* \[[`6be3824d6f`](https://github.com/nodejs/node/commit/6be3824d6f)] - **doc**: fix deprecation codes (Filip Skokan) [#56018](https://github.com/nodejs/node/pull/56018) 
+* \[[`fa2b35d28d`](https://github.com/nodejs/node/commit/fa2b35d28d)] - **doc**: remove confusing and outdated sentence (Luigi Pinca) [#55988](https://github.com/nodejs/node/pull/55988) +* \[[`baed2763df`](https://github.com/nodejs/node/commit/baed2763df)] - **doc**: deprecate passing invalid types in `fs.existsSync` (Carlos Espa) [#55892](https://github.com/nodejs/node/pull/55892) +* \[[`a3f7db6b6d`](https://github.com/nodejs/node/commit/a3f7db6b6d)] - **doc**: add doc for PerformanceObserver.takeRecords() (skyclouds2001) [#55786](https://github.com/nodejs/node/pull/55786) +* \[[`770572423b`](https://github.com/nodejs/node/commit/770572423b)] - **doc**: add vetted courses to the ambassador benefits (Matteo Collina) [#55934](https://github.com/nodejs/node/pull/55934) +* \[[`98f8f4a8a9`](https://github.com/nodejs/node/commit/98f8f4a8a9)] - **doc**: order `node:crypto` APIs alphabetically (Julian Gassner) [#55831](https://github.com/nodejs/node/pull/55831) +* \[[`1e0decb44c`](https://github.com/nodejs/node/commit/1e0decb44c)] - **doc**: doc how to add message for promotion (Michael Dawson) [#55843](https://github.com/nodejs/node/pull/55843) +* \[[`ff48c29724`](https://github.com/nodejs/node/commit/ff48c29724)] - **doc**: add esm example for zlib (Leonardo Peixoto) [#55946](https://github.com/nodejs/node/pull/55946) +* \[[`ccc5a6d552`](https://github.com/nodejs/node/commit/ccc5a6d552)] - **doc**: document approach for building wasm in deps (Michael Dawson) [#55940](https://github.com/nodejs/node/pull/55940) +* \[[`c8bb8a6ac5`](https://github.com/nodejs/node/commit/c8bb8a6ac5)] - **doc**: fix Node.js 23 column in CHANGELOG.md (Richard Lau) [#55935](https://github.com/nodejs/node/pull/55935) +* \[[`9d078802ad`](https://github.com/nodejs/node/commit/9d078802ad)] - **doc**: remove RedYetiDev from triagers team (Aviv Keller) [#55947](https://github.com/nodejs/node/pull/55947) +* \[[`5a2a757119`](https://github.com/nodejs/node/commit/5a2a757119)] - **doc**: add esm examples to node:timers (Alfredo González) [#55857](https://github.com/nodejs/node/pull/55857) +* \[[`f711a48e15`](https://github.com/nodejs/node/commit/f711a48e15)] - **doc**: fix relative path mention in --allow-fs (Rafael Gonzaga) [#55791](https://github.com/nodejs/node/pull/55791) +* \[[`219f5f2627`](https://github.com/nodejs/node/commit/219f5f2627)] - **doc**: include git node release --promote to steps (Rafael Gonzaga) [#55835](https://github.com/nodejs/node/pull/55835) +* \[[`f9d25ed3e4`](https://github.com/nodejs/node/commit/f9d25ed3e4)] - **doc**: add history entry for import assertion removal (Antoine du Hamel) [#55883](https://github.com/nodejs/node/pull/55883) +* \[[`efb9f05f59`](https://github.com/nodejs/node/commit/efb9f05f59)] - **(SEMVER-MINOR)** **doc,lib,src,test**: unflag sqlite module (Colin Ihrig) [#55890](https://github.com/nodejs/node/pull/55890) +* \[[`a37e5fe5f8`](https://github.com/nodejs/node/commit/a37e5fe5f8)] - **fs**: lazily load ReadFileContext (Gürgün Dayıoğlu) [#55998](https://github.com/nodejs/node/pull/55998) +* \[[`9289374248`](https://github.com/nodejs/node/commit/9289374248)] - **http2**: fix memory leak caused by premature listener removing (ywave620) [#55966](https://github.com/nodejs/node/pull/55966) +* \[[`49af1c33ac`](https://github.com/nodejs/node/commit/49af1c33ac)] - **lib**: add validation for options in compileFunction (Taejin Kim) [#56023](https://github.com/nodejs/node/pull/56023) +* \[[`8faf91846b`](https://github.com/nodejs/node/commit/8faf91846b)] - **lib**: fix `fs.readdir` 
recursive async (Rafael Gonzaga) [#56041](https://github.com/nodejs/node/pull/56041) +* \[[`a2382303d7`](https://github.com/nodejs/node/commit/a2382303d7)] - **lib**: refactor code to improve readability (Pietro Marchini) [#55995](https://github.com/nodejs/node/pull/55995) +* \[[`30f26ba254`](https://github.com/nodejs/node/commit/30f26ba254)] - **lib**: avoid excluding symlinks in recursive fs.readdir with filetypes (Juan José) [#55714](https://github.com/nodejs/node/pull/55714) +* \[[`9b272ae339`](https://github.com/nodejs/node/commit/9b272ae339)] - **meta**: bump github/codeql-action from 3.27.0 to 3.27.5 (dependabot\[bot]) [#56103](https://github.com/nodejs/node/pull/56103) +* \[[`fb0e6ca68b`](https://github.com/nodejs/node/commit/fb0e6ca68b)] - **meta**: bump actions/checkout from 4.1.7 to 4.2.2 (dependabot\[bot]) [#56102](https://github.com/nodejs/node/pull/56102) +* \[[`0ab611513c`](https://github.com/nodejs/node/commit/0ab611513c)] - **meta**: bump step-security/harden-runner from 2.10.1 to 2.10.2 (dependabot\[bot]) [#56101](https://github.com/nodejs/node/pull/56101) +* \[[`ff4839b8ab`](https://github.com/nodejs/node/commit/ff4839b8ab)] - **meta**: bump actions/setup-node from 4.0.3 to 4.1.0 (dependabot\[bot]) [#56100](https://github.com/nodejs/node/pull/56100) +* \[[`f262207356`](https://github.com/nodejs/node/commit/f262207356)] - **meta**: add releasers as CODEOWNERS to proposal action (Rafael Gonzaga) [#56043](https://github.com/nodejs/node/pull/56043) +* \[[`b6005b3fac`](https://github.com/nodejs/node/commit/b6005b3fac)] - **module**: mark evaluation rejection in require(esm) as handled (Joyee Cheung) [#56122](https://github.com/nodejs/node/pull/56122) +* \[[`b8ab5332a9`](https://github.com/nodejs/node/commit/b8ab5332a9)] - **module**: remove --experimental-default-type (Geoffrey Booth) [#56092](https://github.com/nodejs/node/pull/56092) +* \[[`4be5047030`](https://github.com/nodejs/node/commit/4be5047030)] - **module**: do not warn when require(esm) comes from node\_modules (Joyee Cheung) [#55960](https://github.com/nodejs/node/pull/55960) +* \[[`c9698ed6a4`](https://github.com/nodejs/node/commit/c9698ed6a4)] - **(SEMVER-MINOR)** **net**: support blocklist in net.connect (theanarkh) [#56075](https://github.com/nodejs/node/pull/56075) +* \[[`9fba5e1df1`](https://github.com/nodejs/node/commit/9fba5e1df1)] - **(SEMVER-MINOR)** **net**: add SocketAddress.parse (James M Snell) [#56076](https://github.com/nodejs/node/pull/56076) +* \[[`565b04a7be`](https://github.com/nodejs/node/commit/565b04a7be)] - **(SEMVER-MINOR)** **net**: add net.BlockList.isBlockList(value) (James M Snell) [#56078](https://github.com/nodejs/node/pull/56078) +* \[[`30d604180d`](https://github.com/nodejs/node/commit/30d604180d)] - **(SEMVER-MINOR)** **net**: support blocklist for net.Server (theanarkh) [#56079](https://github.com/nodejs/node/pull/56079) +* \[[`4cdb03201e`](https://github.com/nodejs/node/commit/4cdb03201e)] - **(SEMVER-MINOR)** **process**: deprecate `features.{ipv6,uv}` and `features.tls_*` (René) [#55545](https://github.com/nodejs/node/pull/55545) +* \[[`d09e57b26d`](https://github.com/nodejs/node/commit/d09e57b26d)] - **quic**: update more QUIC implementation (James M Snell) [#55986](https://github.com/nodejs/node/pull/55986) +* \[[`1fb30d6e86`](https://github.com/nodejs/node/commit/1fb30d6e86)] - **quic**: multiple updates to quic impl (James M Snell) [#55971](https://github.com/nodejs/node/pull/55971) +* \[[`9e4f7aa808`](https://github.com/nodejs/node/commit/9e4f7aa808)] - **sqlite**: deps 
include `sqlite3ext.h` (Alex Yang) [#56010](https://github.com/nodejs/node/pull/56010) +* \[[`d777d4a52d`](https://github.com/nodejs/node/commit/d777d4a52d)] - **(SEMVER-MINOR)** **sqlite**: add `StatementSync.prototype.iterate` method (tpoisseau) [#54213](https://github.com/nodejs/node/pull/54213) +* \[[`66451bb9ba`](https://github.com/nodejs/node/commit/66451bb9ba)] - **src**: use spaceship operator in SocketAddress (James M Snell) [#56059](https://github.com/nodejs/node/pull/56059) +* \[[`ad9ebe417a`](https://github.com/nodejs/node/commit/ad9ebe417a)] - **src**: add missing qualifiers to env.cc (Yagiz Nizipli) [#56062](https://github.com/nodejs/node/pull/56062) +* \[[`56c4da240d`](https://github.com/nodejs/node/commit/56c4da240d)] - **src**: use std::string\_view for process emit fns (Yagiz Nizipli) [#56086](https://github.com/nodejs/node/pull/56086) +* \[[`26ab8e9823`](https://github.com/nodejs/node/commit/26ab8e9823)] - **src**: remove dead code in async\_wrap (Gerhard Stöbich) [#56065](https://github.com/nodejs/node/pull/56065) +* \[[`4dea44e468`](https://github.com/nodejs/node/commit/4dea44e468)] - **src**: avoid copy on getV8FastApiCallCount (Yagiz Nizipli) [#56081](https://github.com/nodejs/node/pull/56081) +* \[[`b778a4fe46`](https://github.com/nodejs/node/commit/b778a4fe46)] - **src**: fix check fd (theanarkh) [#56000](https://github.com/nodejs/node/pull/56000) +* \[[`971f5f54df`](https://github.com/nodejs/node/commit/971f5f54df)] - **src**: safely remove the last line from dotenv (Shima Ryuhei) [#55982](https://github.com/nodejs/node/pull/55982) +* \[[`497a9aea1c`](https://github.com/nodejs/node/commit/497a9aea1c)] - **src**: fix kill signal on Windows (Hüseyin Açacak) [#55514](https://github.com/nodejs/node/pull/55514) +* \[[`8a935489f9`](https://github.com/nodejs/node/commit/8a935489f9)] - **src,build**: add no user defined deduction guides of CTAD check (Chengzhong Wu) [#56071](https://github.com/nodejs/node/pull/56071) +* \[[`5edb8d5919`](https://github.com/nodejs/node/commit/5edb8d5919)] - **test**: remove test-fs-utimes flaky designation (Luigi Pinca) [#56052](https://github.com/nodejs/node/pull/56052) +* \[[`046e642a80`](https://github.com/nodejs/node/commit/046e642a80)] - **test**: ensure `cli.md` is in alphabetical order (Antoine du Hamel) [#56025](https://github.com/nodejs/node/pull/56025) +* \[[`da354f46cd`](https://github.com/nodejs/node/commit/da354f46cd)] - **test**: update WPT for WebCryptoAPI to 3e3374efde (Node.js GitHub Bot) [#56093](https://github.com/nodejs/node/pull/56093) +* \[[`9486c7ce4c`](https://github.com/nodejs/node/commit/9486c7ce4c)] - **test**: update WPT for WebCryptoAPI to 76dfa54e5d (Node.js GitHub Bot) [#56093](https://github.com/nodejs/node/pull/56093) +* \[[`a8809fc0f5`](https://github.com/nodejs/node/commit/a8809fc0f5)] - **test**: move test-worker-arraybuffer-zerofill to parallel (Luigi Pinca) [#56053](https://github.com/nodejs/node/pull/56053) +* \[[`6194435b9e`](https://github.com/nodejs/node/commit/6194435b9e)] - **test**: update WPT for url to 67880a4eb83ca9aa732eec4b35a1971ff5bf37ff (Node.js GitHub Bot) [#55999](https://github.com/nodejs/node/pull/55999) +* \[[`f7567d46d8`](https://github.com/nodejs/node/commit/f7567d46d8)] - **test**: make HTTP/1.0 connection test more robust (Arne Keller) [#55959](https://github.com/nodejs/node/pull/55959) +* \[[`c157e026fc`](https://github.com/nodejs/node/commit/c157e026fc)] - **test**: convert readdir test to use test runner (Thomas Chetwin) [#55750](https://github.com/nodejs/node/pull/55750) +* 
\[[`29362ce673`](https://github.com/nodejs/node/commit/29362ce673)] - **test**: make x509 crypto tests work with BoringSSL (Shelley Vohr) [#55927](https://github.com/nodejs/node/pull/55927) +* \[[`493e16c852`](https://github.com/nodejs/node/commit/493e16c852)] - **test**: fix determining lower priority (Livia Medeiros) [#55908](https://github.com/nodejs/node/pull/55908) +* \[[`99858ceb9f`](https://github.com/nodejs/node/commit/99858ceb9f)] - **test,crypto**: update WebCryptoAPI WPT (Filip Skokan) [#55997](https://github.com/nodejs/node/pull/55997) +* \[[`7c3a4d4bcd`](https://github.com/nodejs/node/commit/7c3a4d4bcd)] - **test\_runner**: refactor Promise chain in run() (Colin Ihrig) [#55958](https://github.com/nodejs/node/pull/55958) +* \[[`95e8c4ef6c`](https://github.com/nodejs/node/commit/95e8c4ef6c)] - **test\_runner**: refactor build Promise in Suite() (Colin Ihrig) [#55958](https://github.com/nodejs/node/pull/55958) +* \[[`c048865199`](https://github.com/nodejs/node/commit/c048865199)] - **test\_runner**: simplify hook running logic (Colin Ihrig) [#55963](https://github.com/nodejs/node/pull/55963) +* \[[`8197815fe8`](https://github.com/nodejs/node/commit/8197815fe8)] - **test\_runner**: mark snapshot testing as stable (Colin Ihrig) [#55897](https://github.com/nodejs/node/pull/55897) +* \[[`8a5d8c7669`](https://github.com/nodejs/node/commit/8a5d8c7669)] - **test\_runner**: mark context.plan() as stable (Colin Ihrig) [#55895](https://github.com/nodejs/node/pull/55895) +* \[[`790a2ca3b7`](https://github.com/nodejs/node/commit/790a2ca3b7)] - **tools**: update `create-release-proposal` workflow (Antoine du Hamel) [#56054](https://github.com/nodejs/node/pull/56054) +* \[[`98ce4652e2`](https://github.com/nodejs/node/commit/98ce4652e2)] - **tools**: fix update-undici script (Michaël Zasso) [#56069](https://github.com/nodejs/node/pull/56069) +* \[[`d6a6c8ace1`](https://github.com/nodejs/node/commit/d6a6c8ace1)] - **tools**: allow dispatch of `tools.yml` from forks (Antoine du Hamel) [#56008](https://github.com/nodejs/node/pull/56008) +* \[[`cc96fce5eb`](https://github.com/nodejs/node/commit/cc96fce5eb)] - **tools**: fix nghttp3 updater script (Antoine du Hamel) [#56007](https://github.com/nodejs/node/pull/56007) +* \[[`2cd939cb95`](https://github.com/nodejs/node/commit/2cd939cb95)] - **tools**: filter release keys to reduce interactivity (Antoine du Hamel) [#55950](https://github.com/nodejs/node/pull/55950) +* \[[`4b3919f1be`](https://github.com/nodejs/node/commit/4b3919f1be)] - **tools**: update WPT updater (Antoine du Hamel) [#56003](https://github.com/nodejs/node/pull/56003) +* \[[`54c46b8464`](https://github.com/nodejs/node/commit/54c46b8464)] - **tools**: add WPT updater for specific subsystems (Mert Can Altin) [#54460](https://github.com/nodejs/node/pull/54460) +* \[[`32b1681b7f`](https://github.com/nodejs/node/commit/32b1681b7f)] - **tools**: use tokenless Codecov uploads (Michaël Zasso) [#55943](https://github.com/nodejs/node/pull/55943) +* \[[`475141e370`](https://github.com/nodejs/node/commit/475141e370)] - **tools**: add linter for release commit proposals (Antoine du Hamel) [#55923](https://github.com/nodejs/node/pull/55923) +* \[[`d093820f64`](https://github.com/nodejs/node/commit/d093820f64)] - **tools**: lint js in `doc/**/*.md` (Livia Medeiros) [#55904](https://github.com/nodejs/node/pull/55904) +* \[[`72eb710f0f`](https://github.com/nodejs/node/commit/72eb710f0f)] - **tools**: fix riscv64 build failed (Lu Yahan) [#52888](https://github.com/nodejs/node/pull/52888) +* 
\[[`882b70c83f`](https://github.com/nodejs/node/commit/882b70c83f)] - **tools**: bump cross-spawn from 7.0.3 to 7.0.5 in /tools/eslint (dependabot\[bot]) [#55894](https://github.com/nodejs/node/pull/55894)
+* \[[`9eccd7dba9`](https://github.com/nodejs/node/commit/9eccd7dba9)] - **util**: add fast path for Latin1 decoding (Mert Can Altin) [#55275](https://github.com/nodejs/node/pull/55275)
+
 ## 2024-11-20, Version 23.3.0 (Current), @RafaelGSS
 
@@ -246,11 +428,11 @@ will now correctly change as the underlying `ArrayBuffer` size is changed.
 
 ```js
 const ab = new ArrayBuffer(10, { maxByteLength: 20 });
 const buffer = Buffer.from(ab);
-console.log(buffer.byteLength); 10
+console.log(buffer.byteLength); // 10
 ab.resize(15);
-console.log(buffer.byteLength); 15
+console.log(buffer.byteLength); // 15
 ab.resize(5);
-console.log(buffer.byteLength); 5
+console.log(buffer.byteLength); // 5
 ```
 
 Contributed by James M Snell in [#55377](https://github.com/nodejs/node/pull/55377).
diff --git a/doc/contributing/advocacy-ambasador-program.md b/doc/contributing/advocacy-ambassador-program.md
similarity index 77%
rename from doc/contributing/advocacy-ambasador-program.md
rename to doc/contributing/advocacy-ambassador-program.md
index 3b518d092e1feb..cfb8c5cb1cd484 100644
--- a/doc/contributing/advocacy-ambasador-program.md
+++ b/doc/contributing/advocacy-ambassador-program.md
@@ -18,6 +18,10 @@ The ambassador program does that by:
   messages and topics defined.
 * Advocating for ambassadors to be part of the OpenJS speakers bureau, even if
   the ambassador is not otherwise an active member of the project itself.
+* Each ambassador could add a maximum of three links to resources to learn Node.js
+  on a dedicated page on the main Node.js website. At least one of those must be a
+  free resource. The Node.js TSC members could ask for coupon codes to verify the
+  material if they so decide.
 
 ## Ambassadors nominations
 
@@ -96,3 +100,33 @@ an ambassador. These requests can be made through the existing social channel
 in the OpenJS Slack. For that reason and for communication purposes and
 collaboration opportunities, ambassadors should be members of the
 [OpenJS Slack](https://slack-invite.openjsf.org/).
+
+## Messages and topics to promote
+
+### How to add messages or topics to promote
+
+Messages or topics that ambassadors are asked to promote are added to this
+document in the [Current messages for promotion](#current-messages-for-promotion)
+section through the standard PR process, except that they should be open
+for 7 days before landing and should include an at-mention of the
+nodejs/TSC for awareness. They should be removed through the same process
+when no longer relevant.
+
+### Current messages for promotion
+
+#### Sample message (Leave this one at the top)
+
+##### Goal
+
+The goal is to raise awareness of XYZ in the JavaScript ecosystem.
+
+##### Related Links
+
+List of links with more information about the topic, to provide background
+or the information to be shared.
+
+##### Project contacts
+
+Add a list of GitHub handles for those within the project that
+have volunteered to be contacted when necessary by ambassadors
+to get more info about the message to be promoted.
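As an aside on the `--trace-env` flag listed in the 23.4.0 notable changes above, here is a minimal sketch of how it can be exercised. It assumes only what the commit title states (environment-variable accesses are traced) plus the assumption that the traces go to stderr; the exact log format is not specified here.

```cjs
// Spawn a child Node.js process with --trace-env and capture its stderr,
// where the environment-access traces are assumed to be written.
const { spawnSync } = require('node:child_process');

const { stderr } = spawnSync(
  process.execPath,
  ['--trace-env', '-e', 'process.env.HOME'],
  { encoding: 'utf8' },
);
console.log(stderr);
```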
diff --git a/doc/contributing/gn-build.md b/doc/contributing/gn-build.md
index 0981472805a4f8..9054ed5e7d41c4 100644
--- a/doc/contributing/gn-build.md
+++ b/doc/contributing/gn-build.md
@@ -28,27 +28,33 @@ Node.js contains following GN build files:
 
 Unlike GYP, the GN tool does not include any built-in rules for compiling a
 project, which means projects building with GN must provide their own build
-configurations for things like how to invoke a C++ compiler. Chromium related
-projects like V8 and skia choose to reuse Chromium's build configurations, and
-V8's Node.js integration testing repository
-([node-ci](https://chromium.googlesource.com/v8/node-ci/)) can be reused for
-building Node.js.
+configurations for things like how to invoke a C++ compiler.
+
+Chromium-related projects like V8 and skia choose to reuse Chromium's build
+configurations, and V8's Node.js integration testing repository
+[`node-ci`][node-ci] can be reused for building Node.js.
 
 ### 1. Install `depot_tools`
 
-The `depot_tools` is a set of tools used by Chromium related projects for
-checking out code and managing dependencies, and since this guide is reusing the
-infra of V8, it needs to be installed and added to `PATH`:
+You'll need to install [`depot_tools`][depot-tools], the toolset
+used for fetching Chromium and its dependencies.
 
 ```bash
 git clone https://chromium.googlesource.com/chromium/tools/depot_tools.git
 export PATH=/path/to/depot_tools:$PATH
 ```
 
-You can also follow the [official tutorial of
-`depot_tools`](https://commondatastorage.googleapis.com/chrome-infra-docs/flat/depot_tools/docs/html/depot_tools_tutorial.html).
+You can ensure `depot_tools` is correctly added to your PATH by running
+`which gn` and confirming that it returns `/path/to/depot_tools/gn`.
+
+**NOTE:** On Windows you'll also need to set the environment variable
+`DEPOT_TOOLS_WIN_TOOLCHAIN=0`. To do so, open `Control Panel` → `System and
+Security` → `System` → `Advanced system settings` and add a system variable
+`DEPOT_TOOLS_WIN_TOOLCHAIN` with value `0`. This tells `depot_tools` to use
+your locally installed version of Visual Studio (by default, `depot_tools` will
+try to download a Google-internal version that only Googlers have access to).
 
-### 2. Check out code of Node.js
+### 2. Check out the Node.js source code
 
 To check out the latest main branch of Node.js for building, use the `fetch`
 tool from `depot_tools`:
@@ -91,9 +97,9 @@ out at `node_gn/node/node`.
 
 ### 3. Build
 
-GN only supports [`ninja`](https://ninja-build.org) for building, so to build
-Node.js with GN, `ninja` build files should be generated first, and then
-`ninja` can be invoked to do the building.
+GN only supports [`ninja`](https://ninja-build.org) for building. To build
+Node.js with GN you'll first need to generate `ninja` build files and then invoke
+`ninja` to perform the build.
 
 The `node-ci` repository provides a script for calling GN:
 
@@ -103,9 +109,10 @@ cd node  # Enter `node_gn/node` which contains a node-ci checkout
 ```
 
 which writes `ninja` build files into the `out/Release` directory under
-`node_gn/node`.
+`node_gn/node`. To see all configurable options, run
+`tools/gn-gen.py --help`.
 
-And then you can execute `ninja`:
+When `gn-gen.py` has executed successfully, you can then execute `ninja`:
 
 ```bash
 ninja -C out/Release node
@@ -116,10 +123,12 @@ After the build is completed, the compiled Node.js executable can be found in
 
 ## Status of the GN build
 
-Currently the GN build of Node.js is not fully functioning.
It builds for macOS
-and Linux, while the Windows build is still a work in progress. And some tests
-are still failing with the GN build.
+Currently the GN build of Node.js is not fully functioning. Some tests
+are still failing with the GN build, and there may be other small pitfalls
+for certain configuration options.
+
+An effort is currently underway to make the GN build work without using `depot_tools`,
+which is tracked in [#51689](https://github.com/nodejs/node/issues/51689).
 
-There are also efforts on making GN build work without using `depot_tools`,
-which is tracked in the issue
-[#51689](https://github.com/nodejs/node/issues/51689).
+[depot-tools]: https://commondatastorage.googleapis.com/chrome-infra-docs/flat/depot_tools/docs/html/depot_tools_tutorial.html#_setting_up
+[node-ci]: https://chromium.googlesource.com/v8/node-ci
diff --git a/doc/contributing/maintaining/maintaining-dependencies.md b/doc/contributing/maintaining/maintaining-dependencies.md
index 8ebbd3f7bb8924..f5d9c69c41a664 100644
--- a/doc/contributing/maintaining/maintaining-dependencies.md
+++ b/doc/contributing/maintaining/maintaining-dependencies.md
@@ -145,6 +145,48 @@ can be added as a non-externalizable dependency. In this case
 simply add the path to the JavaScript file in the `deps_files`
 list in the `node.gyp` file.
 
+## Common approach for dependencies with WASM components
+
+WASM components within dependencies are most often built
+outside of the regular Node.js `make build` step. They also
+require different tools.
+
+It is important that the tools and their versions used to build
+WASM components shipped within Node.js are well documented and
+remain available if needed to rebuild/update older Node.js versions.
+
+In order to minimize the number of different tools and versions
+used to build WASM components, and to document and ensure future
+availability, the project builds and maintains a common
+[wasm-builder](https://github.com/nodejs/wasm-builder) container
+that should be used to build WASM components in Node.js
+dependencies.
+
+The container provides a durable copy of the versions of the tools
+used for a specific build, which are under the control of the Node.js
+project. In addition, the tools and versions are documented through metadata
+within the container in the `/home/node/metadata` directory.
+
+The available tools can be found by looking at the current version of the
+[Dockerfile](https://github.com/nodejs/wasm-builder/blob/main/container-build-info/Dockerfile)
+used to create the container.
+
+If additional WASM tools are needed beyond those available in the
+container, additions should be PR'd into the wasm-builder container.
+
+Examples of using the container include:
+
+* [build/wasm.js](https://github.com/nodejs/undici/blob/main/build/wasm.js) from undici
+* [tools/build-wasm.js](https://github.com/nodejs/amaro/blob/main/tools/build-wasm.js) from amaro
+
+In addition to using the container to build WASM components, the goal is also
+for the WASM components and final files that are shipped with Node.js to be
+built by the [dep-updaters](https://github.com/nodejs/node/tree/main/tools/dep_updaters)
+that run on a regular basis, using only the files available in the Node.js
+repo for the dependency. For example, being able to rebuild the WASM and files that
+we ship in Node.js using only the files in
+[../deps/undici](https://github.com/nodejs/node/tree/main/deps/undici).
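To make the container workflow above concrete, here is a hypothetical build script in the spirit of the undici and amaro examples linked above. The image digest placeholder, the mount point, and the `make wasm` target are all illustrative assumptions, not taken from this document:

```cjs
'use strict';
// Hypothetical dependency build script: run the wasm-builder container so the
// WASM toolchain and its versions come from a pinned, documented image.
const { execSync } = require('node:child_process');
const { resolve } = require('node:path');

const ROOT = resolve(__dirname, '..');
// Pinning the container by digest keeps older Node.js releases rebuildable
// with the exact same toolchain. The digest here is a placeholder.
const IMAGE = 'ghcr.io/nodejs/wasm-builder@sha256:<pinned-digest>';

execSync(
  `docker run --rm -v ${ROOT}:/home/node/build -w /home/node/build ${IMAGE} make wasm`,
  { stdio: 'inherit' },
);
```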
+
 ## Updating dependencies
 
 Most dependencies are automatically updated by
diff --git a/doc/contributing/releases.md b/doc/contributing/releases.md
index 82eec2833998ad..055290263388b5 100644
--- a/doc/contributing/releases.md
+++ b/doc/contributing/releases.md
@@ -282,7 +282,15 @@ You can integrate the PRs into the proposal without running full CI.
 
 ### 2. Create a new branch for the release
 
-⚠️ At this point, you can either run `git node release --prepare`:
+> \[!TIP] Once the staging branch is up to date, you can use the
+> [`create-release-proposal`][] action to generate the proposal.
+
+```bash
+gh workflow run "Create Release Proposal" -f release-line=N -f release-date=YYYY-MM-DD
+```
+
+If you prefer to run it locally, you can either run
+`git node release --prepare`:
 
 ```bash
 git node release -S --prepare x.y.z
 ```
 
@@ -707,12 +715,23 @@ the build before moving forward. Use the following list as a baseline:
 
 ### 11. Tag and sign the release commit
 
-Once you have produced builds that you're happy with, create a new tag. By
-waiting until this stage to create tags, you can discard a proposed release if
-something goes wrong or additional commits are required. Once you have created a
-tag and pushed it to GitHub, you _**must not**_ delete and re-tag. If you make
-a mistake after tagging then you'll have to version-bump and start again and
-count that tag/version as lost.
+Once you have produced builds that you're happy with, you can either run
+`git node release --promote`
+
+```bash
+git node release -S --promote https://github.com/nodejs/node/pull/XXXX
+```
+
+to automate the remaining steps until step 16, or you can perform them manually
+by following the steps below.
+
+***
+
+Create a new tag: By waiting until this stage to create tags, you can discard
+a proposed release if something goes wrong or additional commits are required.
+Once you have created a tag and pushed it to GitHub, you _**must not**_ delete
+and re-tag. If you make a mistake after tagging then you'll have to version-bump
+and start again and count that tag/version as lost.
 
 Tag summaries have a predictable format. Look at a recent tag to see:
 
@@ -947,6 +966,13 @@ a `NODEJS_RELEASE_HOST` environment variable:
 NODEJS_RELEASE_HOST=proxy.xyz ./tools/release.sh
 ```
 
+In case `gpg` is unable to autoselect a key, you can retry using the
+`-a` option to enable an interactive interface:
+
+```bash
+./tools/release.sh -a
+```
+
 > \[!TIP]
 > Sometimes, due to machines being overloaded or other external factors,
 > the files at ,
@@ -1372,11 +1398,48 @@ Infrastructure team is able to perform the switch of the default.
 
 An issue should be opened on the [Node.js Snap management repository][]
 requesting this take place once a new LTS line has been released.
+
+## FAQ
+
+Due to how `tools/release.sh` works, it isn't uncommon to face some errors
+during the promotion process, as it depends on network communication and machine
+availability. This section aims to guide the releaser through potential
+failures.
+
+### Error on dist-indexer while promoting
+
+```console
+node:events:491
+      throw er; // Unhandled 'error' event
+      ^
+
+Error: read ECONNRESET
+    at TLSWrap.onStreamRead (node:internal/stream_base_commons:217:20)
+Emitted 'error' event on DestroyableTransform instance at:
+    at ClientRequest.<anonymous>
(/usr/lib/node_modules/nodejs-dist-indexer/node_modules/hyperquest/index.js:14:19) + at ClientRequest.emit (node:events:513:28) + at TLSSocket.socketErrorListener (node:_http_client:494:9) + at TLSSocket.emit (node:events:513:28) + at emitErrorNT (node:internal/streams/destroy:157:8) + at emitErrorCloseNT (node:internal/streams/destroy:122:3) + at processTicksAndRejections (node:internal/process/task_queues:83:21) { + errno: -104, + code: 'ECONNRESET', + syscall: 'read' +} +``` + +Typical resolution: sign the release again. + +```bash +./tools/release.sh -s vX.Y.Z +``` + [Build issue tracker]: https://github.com/nodejs/build/issues/new [CI lockdown procedure]: https://github.com/nodejs/build/blob/HEAD/doc/jenkins-guide.md#restricting-access-for-security-releases [Node.js Snap management repository]: https://github.com/nodejs/snap [Partner Communities]: https://github.com/nodejs/community-committee/blob/HEAD/governance/PARTNER_COMMUNITIES.md [Snap]: https://snapcraft.io/node +[`create-release-proposal`]: https://github.com/nodejs/node/actions/workflows/create-release-proposal.yml [build-infra team]: https://github.com/orgs/nodejs/teams/build-infra [expected assets]: https://github.com/nodejs/build/tree/HEAD/ansible/www-standalone/tools/promote/expected_assets [nodejs.org release-post.js script]: https://github.com/nodejs/nodejs.org/blob/HEAD/scripts/release-post/index.mjs diff --git a/doc/contributing/writing-and-running-benchmarks.md b/doc/contributing/writing-and-running-benchmarks.md index 3d16c7d14fc033..63fbf75c798833 100644 --- a/doc/contributing/writing-and-running-benchmarks.md +++ b/doc/contributing/writing-and-running-benchmarks.md @@ -538,7 +538,7 @@ The arguments of `createBenchmark` are: source: ['buffer', 'string'], len: [2048], n: [50, 2048], - } + }, }, { byGroups: true }); ``` diff --git a/doc/node.1 b/doc/node.1 index 3d92fcd7858b98..e38ce7f0431e62 100644 --- a/doc/node.1 +++ b/doc/node.1 @@ -163,11 +163,6 @@ Enable Source Map V3 support for stack traces. .It Fl -entry-url Interpret the entry point as a URL. . -.It Fl -experimental-default-type Ns = Ns Ar type -Interpret as either ES modules or CommonJS modules input via --eval or STDIN, when --input-type is unspecified; -.js or extensionless files with no sibling or parent package.json; -.js or extensionless files whose nearest parent package.json lacks a "type" field, unless under node_modules. -. .It Fl -experimental-import-meta-resolve Enable experimental ES modules support for import.meta.resolve(). . @@ -182,9 +177,6 @@ Enable the experimental permission model. .It Fl -experimental-shadow-realm Use this flag to enable ShadowRealm support. . -.It Fl -experimental-sqlite -Enable the experimental node:sqlite module. -. .It Fl -experimental-test-coverage Enable code coverage in the test runner. . @@ -194,9 +186,6 @@ Configures the type of test isolation used in the test runner. .It Fl -experimental-test-module-mocks Enable module mocking in the test runner. . -.It Fl -experimental-test-snapshots -Enable snapshot testing in the test runner. -. .It Fl -experimental-strip-types Enable experimental type-stripping for TypeScript files. . @@ -215,6 +204,9 @@ Enable experimental support for the Web Storage API. .It Fl -no-experimental-repl-await Disable top-level await keyword support in REPL. . +.It Fl -no-experimental-sqlite +Disable the experimental node:sqlite module. +. .It Fl -experimental-vm-modules Enable experimental ES module support in VM module. . 
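Before the `lib/assert.js` changes below that implement it, here is a short behavior sketch of the new experimental `assert.partialDeepStrictEqual` API. It assumes only what the 23.4.0 changelog above and the implementation below state: subset matching of `expected` against `actual`, and a regular `AssertionError` on mismatch.

```cjs
const assert = require('node:assert');

// Extra keys in `actual` are ignored; every key in `expected` must match.
assert.partialDeepStrictEqual({ a: 1, b: 2, c: 3 }, { b: 2 });

// A mismatched value fails with an AssertionError, so existing
// error-handling in test code keeps working.
try {
  assert.partialDeepStrictEqual({ a: 1 }, { a: 2 });
} catch (err) {
  console.log(err.code); // 'ERR_ASSERTION'
  console.log(err.operator); // 'partialDeepStrictEqual'
}
```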
diff --git a/eslint.config.mjs b/eslint.config.mjs index f37774bace871d..8e574a312d4a3e 100644 --- a/eslint.config.mjs +++ b/eslint.config.mjs @@ -43,9 +43,7 @@ export default [ '**/node_modules/**', 'benchmark/fixtures/**', 'benchmark/tmp/**', - 'doc/**/*.js', 'doc/changelogs/CHANGELOG_V1*.md', - '!doc/api_assets/*.js', '!doc/changelogs/CHANGELOG_V18.md', 'lib/punycode.js', 'test/.tmp.*/**', diff --git a/lib/assert.js b/lib/assert.js index 2a65457b39387e..3e212a1c3aebbe 100644 --- a/lib/assert.js +++ b/lib/assert.js @@ -21,22 +21,35 @@ 'use strict'; const { + ArrayFrom, + ArrayIsArray, ArrayPrototypeIndexOf, ArrayPrototypeJoin, ArrayPrototypePush, ArrayPrototypeSlice, Error, + FunctionPrototypeCall, + MapPrototypeDelete, + MapPrototypeGet, + MapPrototypeHas, + MapPrototypeSet, NumberIsNaN, ObjectAssign, ObjectIs, ObjectKeys, ObjectPrototypeIsPrototypeOf, ReflectApply, + ReflectHas, + ReflectOwnKeys, RegExpPrototypeExec, + SafeMap, + SafeSet, + SafeWeakSet, String, StringPrototypeIndexOf, StringPrototypeSlice, StringPrototypeSplit, + SymbolIterator, } = primordials; const { @@ -50,8 +63,18 @@ const { } = require('internal/errors'); const AssertionError = require('internal/assert/assertion_error'); const { inspect } = require('internal/util/inspect'); -const { isPromise, isRegExp } = require('internal/util/types'); -const { isError, deprecate } = require('internal/util'); +const { Buffer } = require('buffer'); +const { + isKeyObject, + isPromise, + isRegExp, + isMap, + isSet, + isDate, + isWeakSet, + isWeakMap, +} = require('internal/util/types'); +const { isError, deprecate, emitExperimentalWarning } = require('internal/util'); const { innerOk } = require('internal/assert/utils'); const CallTracker = require('internal/assert/calltracker'); @@ -341,6 +364,183 @@ assert.notStrictEqual = function notStrictEqual(actual, expected, message) { } }; +function isSpecial(obj) { + return obj == null || typeof obj !== 'object' || isError(obj) || isRegExp(obj) || isDate(obj); +} + +const typesToCallDeepStrictEqualWith = [ + isKeyObject, isWeakSet, isWeakMap, Buffer.isBuffer, +]; + +/** + * Compares two objects or values recursively to check if they are equal. + * @param {any} actual - The actual value to compare. + * @param {any} expected - The expected value to compare. + * @param {Set} [comparedObjects=new Set()] - Set to track compared objects for handling circular references. + * @returns {boolean} - Returns `true` if the actual value matches the expected value, otherwise `false`. 
+ * @example + * compareBranch({a: 1, b: 2, c: 3}, {a: 1, b: 2}); // true + */ +function compareBranch( + actual, + expected, + comparedObjects, +) { + // Check for Map object equality + if (isMap(actual) && isMap(expected)) { + if (actual.size !== expected.size) { + return false; + } + const safeIterator = FunctionPrototypeCall(SafeMap.prototype[SymbolIterator], actual); + + comparedObjects ??= new SafeWeakSet(); + + for (const { 0: key, 1: val } of safeIterator) { + if (!MapPrototypeHas(expected, key)) { + return false; + } + if (!compareBranch(val, MapPrototypeGet(expected, key), comparedObjects)) { + return false; + } + } + return true; + } + + for (const type of typesToCallDeepStrictEqualWith) { + if (type(actual) || type(expected)) { + if (isDeepStrictEqual === undefined) lazyLoadComparison(); + return isDeepStrictEqual(actual, expected); + } + } + + // Check for Set object equality + if (isSet(actual) && isSet(expected)) { + if (expected.size > actual.size) { + return false; // `expected` can't be a subset if it has more elements + } + + if (isDeepEqual === undefined) lazyLoadComparison(); + + const actualArray = ArrayFrom(FunctionPrototypeCall(SafeSet.prototype[SymbolIterator], actual)); + const expectedIterator = FunctionPrototypeCall(SafeSet.prototype[SymbolIterator], expected); + const usedIndices = new SafeSet(); + + expectedIteration: for (const expectedItem of expectedIterator) { + for (let actualIdx = 0; actualIdx < actualArray.length; actualIdx++) { + if (!usedIndices.has(actualIdx) && isDeepStrictEqual(actualArray[actualIdx], expectedItem)) { + usedIndices.add(actualIdx); + continue expectedIteration; + } + } + return false; + } + + return true; + } + + // Check if expected array is a subset of actual array + if (ArrayIsArray(actual) && ArrayIsArray(expected)) { + if (expected.length > actual.length) { + return false; + } + + if (isDeepEqual === undefined) lazyLoadComparison(); + + // Create a map to count occurrences of each element in the expected array + const expectedCounts = new SafeMap(); + for (const expectedItem of expected) { + let found = false; + for (const { 0: key, 1: count } of expectedCounts) { + if (isDeepStrictEqual(key, expectedItem)) { + MapPrototypeSet(expectedCounts, key, count + 1); + found = true; + break; + } + } + if (!found) { + MapPrototypeSet(expectedCounts, expectedItem, 1); + } + } + + // Create a map to count occurrences of relevant elements in the actual array + for (const actualItem of actual) { + for (const { 0: key, 1: count } of expectedCounts) { + if (isDeepStrictEqual(key, actualItem)) { + if (count === 1) { + MapPrototypeDelete(expectedCounts, key); + } else { + MapPrototypeSet(expectedCounts, key, count - 1); + } + break; + } + } + } + + return !expectedCounts.size; + } + + // Comparison done when at least one of the values is not an object + if (isSpecial(actual) || isSpecial(expected)) { + if (isDeepEqual === undefined) { + lazyLoadComparison(); + } + return isDeepStrictEqual(actual, expected); + } + + // Use Reflect.ownKeys() instead of Object.keys() to include symbol properties + const keysExpected = ReflectOwnKeys(expected); + + comparedObjects ??= new SafeWeakSet(); + + // Handle circular references + if (comparedObjects.has(actual)) { + return true; + } + comparedObjects.add(actual); + + // Check if all expected keys and values match + for (let i = 0; i < keysExpected.length; i++) { + const key = keysExpected[i]; + assert( + ReflectHas(actual, key), + new AssertionError({ message: `Expected key ${String(key)} not found in 
actual object` }), + ); + if (!compareBranch(actual[key], expected[key], comparedObjects)) { + return false; + } + } + + return true; +} + +/** + * The partial deep strict equivalence assertion test between two objects + * @param {any} actual + * @param {any} expected + * @param {string | Error} [message] + * @returns {void} + */ +assert.partialDeepStrictEqual = function partialDeepStrictEqual( + actual, + expected, + message, +) { + emitExperimentalWarning('assert.partialDeepStrictEqual'); + if (arguments.length < 2) { + throw new ERR_MISSING_ARGS('actual', 'expected'); + } + + if (!compareBranch(actual, expected)) { + innerFail({ + actual, + expected, + message, + operator: 'partialDeepStrictEqual', + stackStartFn: partialDeepStrictEqual, + }); + } +}; + class Comparison { constructor(obj, keys, actual) { for (const key of keys) { diff --git a/lib/dgram.js b/lib/dgram.js index 74dd83889990ca..09630b6c901181 100644 --- a/lib/dgram.js +++ b/lib/dgram.js @@ -61,6 +61,7 @@ const { validateString, validateNumber, validatePort, + validateUint32, } = require('internal/validators'); const { Buffer } = require('buffer'); const { deprecate, guessHandleType, promisify } = require('internal/util'); @@ -110,6 +111,12 @@ function Socket(type, listener) { options = type; type = options.type; lookup = options.lookup; + if (options.recvBufferSize) { + validateUint32(options.recvBufferSize, 'options.recvBufferSize'); + } + if (options.sendBufferSize) { + validateUint32(options.sendBufferSize, 'options.sendBufferSize'); + } recvBufferSize = options.recvBufferSize; sendBufferSize = options.sendBufferSize; } diff --git a/lib/fs.js b/lib/fs.js index 6a7d2cde463597..dd256566d268f3 100644 --- a/lib/fs.js +++ b/lib/fs.js @@ -163,6 +163,7 @@ let ReadStream; let WriteStream; let rimraf; let kResistStopPropagation; +let ReadFileContext; // These have to be separate because of how graceful-fs happens to do it's // monkeypatching. @@ -364,7 +365,7 @@ function readFile(path, options, callback) { callback ||= options; validateFunction(callback, 'cb'); options = getOptions(options, { flag: 'r' }); - const ReadFileContext = require('internal/fs/read/context'); + ReadFileContext ??= require('internal/fs/read/context'); const context = new ReadFileContext(callback, options.encoding); context.isUserFd = isFd(path); // File descriptor ownership @@ -1368,6 +1369,102 @@ function mkdirSync(path, options) { } } +/* + * A recursive algorithm for reading the entire contents of the `basePath` directory. + * This function does not validate `basePath` as a directory. It is passed directly to + * `binding.readdir`.
+ * @param {string} basePath + * @param {{ encoding: string, withFileTypes: boolean }} options + * @param {( + * err?: Error, + * files?: string[] | Buffer[] | Dirent[] + * ) => any} callback + * @returns {void} +*/ +function readdirRecursive(basePath, options, callback) { + const context = { + withFileTypes: Boolean(options.withFileTypes), + encoding: options.encoding, + basePath, + readdirResults: [], + pathsQueue: [basePath], + }; + + let i = 0; + + function read(path) { + const req = new FSReqCallback(); + req.oncomplete = (err, result) => { + if (err) { + callback(err); + return; + } + + if (result === undefined) { + callback(null, context.readdirResults); + return; + } + + processReaddirResult({ + result, + currentPath: path, + context, + }); + + if (i < context.pathsQueue.length) { + read(context.pathsQueue[i++]); + } else { + callback(null, context.readdirResults); + } + }; + + binding.readdir( + path, + context.encoding, + context.withFileTypes, + req, + ); + } + + read(context.pathsQueue[i++]); +} + +// Calling `readdir` with `withFileTypes=true`, the result is an array of arrays. +// The first array is the names, and the second array is the types. +// They are guaranteed to be the same length; hence, setting `length` to the length +// of the first array within the result. +const processReaddirResult = (args) => (args.context.withFileTypes ? handleDirents(args) : handleFilePaths(args)); + +function handleDirents({ result, currentPath, context }) { + const { 0: names, 1: types } = result; + const { length } = names; + + for (let i = 0; i < length; i++) { + // Avoid excluding symlinks, as they are not directories. + // Refs: https://github.com/nodejs/node/issues/52663 + const fullPath = pathModule.join(currentPath, names[i]); + const dirent = getDirent(currentPath, names[i], types[i]); + ArrayPrototypePush(context.readdirResults, dirent); + + if (dirent.isDirectory() || binding.internalModuleStat(binding, fullPath) === 1) { + ArrayPrototypePush(context.pathsQueue, fullPath); + } + } +} + +function handleFilePaths({ result, currentPath, context }) { + for (let i = 0; i < result.length; i++) { + const resultPath = pathModule.join(currentPath, result[i]); + const relativeResultPath = pathModule.relative(context.basePath, resultPath); + const stat = binding.internalModuleStat(binding, resultPath); + ArrayPrototypePush(context.readdirResults, relativeResultPath); + + if (stat === 1) { + ArrayPrototypePush(context.pathsQueue, resultPath); + } + } +} + /** * An iterative algorithm for reading the entire contents of the `basePath` directory. * This function does not validate `basePath` as a directory. It is passed directly to @@ -1377,55 +1474,37 @@ function mkdirSync(path, options) { * @returns {string[] | Dirent[]} */ function readdirSyncRecursive(basePath, options) { - const withFileTypes = Boolean(options.withFileTypes); - const encoding = options.encoding; - - const readdirResults = []; - const pathsQueue = [basePath]; + const context = { + withFileTypes: Boolean(options.withFileTypes), + encoding: options.encoding, + basePath, + readdirResults: [], + pathsQueue: [basePath], + }; function read(path) { const readdirResult = binding.readdir( path, - encoding, - withFileTypes, + context.encoding, + context.withFileTypes, ); if (readdirResult === undefined) { return; } - if (withFileTypes) { - // Calling `readdir` with `withFileTypes=true`, the result is an array of arrays. - // The first array is the names, and the second array is the types. 
- // They are guaranteed to be the same length; hence, setting `length` to the length - // of the first array within the result. - const length = readdirResult[0].length; - for (let i = 0; i < length; i++) { - const dirent = getDirent(path, readdirResult[0][i], readdirResult[1][i]); - ArrayPrototypePush(readdirResults, dirent); - if (dirent.isDirectory()) { - ArrayPrototypePush(pathsQueue, pathModule.join(dirent.parentPath, dirent.name)); - } - } - } else { - for (let i = 0; i < readdirResult.length; i++) { - const resultPath = pathModule.join(path, readdirResult[i]); - const relativeResultPath = pathModule.relative(basePath, resultPath); - const stat = binding.internalModuleStat(binding, resultPath); - ArrayPrototypePush(readdirResults, relativeResultPath); - // 1 indicates directory - if (stat === 1) { - ArrayPrototypePush(pathsQueue, resultPath); - } - } - } + processReaddirResult({ + result: readdirResult, + currentPath: path, + context, + }); } - for (let i = 0; i < pathsQueue.length; i++) { - read(pathsQueue[i]); + for (let i = 0; i < context.pathsQueue.length; i++) { + read(context.pathsQueue[i]); } - return readdirResults; + return context.readdirResults; } /** @@ -1451,7 +1530,7 @@ function readdir(path, options, callback) { } if (options.recursive) { - callback(null, readdirSyncRecursive(path, options)); + readdirRecursive(path, options, callback); return; } diff --git a/lib/internal/blocklist.js b/lib/internal/blocklist.js index d4eb35c9a70cb8..a992a6b8f0703c 100644 --- a/lib/internal/blocklist.js +++ b/lib/internal/blocklist.js @@ -43,6 +43,15 @@ class BlockList { this[kHandle][owner_symbol] = this; } + /** + * Returns true if the value is a BlockList + * @param {any} value + * @returns {boolean} + */ + static isBlockList(value) { + return value?.[kHandle] !== undefined; + } + [kInspect](depth, options) { if (depth < 0) return this; diff --git a/lib/internal/crypto/diffiehellman.js b/lib/internal/crypto/diffiehellman.js index 410759414f635d..9ebd9766f6bc8c 100644 --- a/lib/internal/crypto/diffiehellman.js +++ b/lib/internal/crypto/diffiehellman.js @@ -5,6 +5,7 @@ const { MathCeil, ObjectDefineProperty, SafeSet, + Uint8Array, } = primordials; const { Buffer } = require('buffer'); @@ -295,6 +296,8 @@ function diffieHellman(options) { return statelessDH(privateKey[kHandle], publicKey[kHandle]); } +let masks; + // The ecdhDeriveBits function is part of the Web Crypto API and serves both // deriveKeys and deriveBits functions. async function ecdhDeriveBits(algorithm, baseKey, length) { @@ -341,18 +344,25 @@ async function ecdhDeriveBits(algorithm, baseKey, length) { // If the length is not a multiple of 8 the nearest ceiled // multiple of 8 is sliced. - length = MathCeil(length / 8); - const { byteLength } = bits; + const sliceLength = MathCeil(length / 8); + const { byteLength } = bits; // If the length is larger than the derived secret, throw. - // Otherwise, we either return the secret or a truncated - // slice. - if (byteLength < length) + if (byteLength < sliceLength) throw lazyDOMException('derived bit length is too small', 'OperationError'); - return length === byteLength ? 
- bits : - ArrayBufferPrototypeSlice(bits, 0, length); + const slice = ArrayBufferPrototypeSlice(bits, 0, sliceLength); + + const mod = length % 8; + if (mod === 0) + return slice; + + // eslint-disable-next-line no-sparse-arrays + masks ||= [, 0b10000000, 0b11000000, 0b11100000, 0b11110000, 0b11111000, 0b11111100, 0b11111110]; + + const masked = new Uint8Array(slice); + masked[sliceLength - 1] = masked[sliceLength - 1] & masks[mod]; + return masked.buffer; } module.exports = { diff --git a/lib/internal/crypto/keys.js b/lib/internal/crypto/keys.js index 64f8bedc48281b..96771f09c8c941 100644 --- a/lib/internal/crypto/keys.js +++ b/lib/internal/crypto/keys.js @@ -1,7 +1,6 @@ 'use strict'; const { - ArrayFrom, ArrayPrototypeSlice, ObjectDefineProperties, ObjectDefineProperty, @@ -781,7 +780,7 @@ class CryptoKey { get usages() { if (!(this instanceof CryptoKey)) throw new ERR_INVALID_THIS('CryptoKey'); - return ArrayFrom(this[kKeyUsages]); + return this[kKeyUsages]; } } diff --git a/lib/internal/encoding.js b/lib/internal/encoding.js index 252eaa75fac22b..b2ca3c612bf6ef 100644 --- a/lib/internal/encoding.js +++ b/lib/internal/encoding.js @@ -29,6 +29,7 @@ const kDecoder = Symbol('decoder'); const kEncoder = Symbol('encoder'); const kFatal = Symbol('kFatal'); const kUTF8FastPath = Symbol('kUTF8FastPath'); +const kLatin1FastPath = Symbol('kLatin1FastPath'); const kIgnoreBOM = Symbol('kIgnoreBOM'); const { @@ -55,6 +56,7 @@ const { encodeIntoResults, encodeUtf8String, decodeUTF8, + decodeLatin1, } = binding; const { Buffer } = require('buffer'); @@ -419,9 +421,10 @@ function makeTextDecoderICU() { this[kFatal] = Boolean(options?.fatal); // Only support fast path for UTF-8. this[kUTF8FastPath] = enc === 'utf-8'; + this[kLatin1FastPath] = enc === 'windows-1252'; this[kHandle] = undefined; - if (!this[kUTF8FastPath]) { + if (!this[kUTF8FastPath] && !this[kLatin1FastPath]) { this.#prepareConverter(); } } @@ -438,11 +441,16 @@ function makeTextDecoderICU() { validateDecoder(this); this[kUTF8FastPath] &&= !(options?.stream); + this[kLatin1FastPath] &&= !(options?.stream); if (this[kUTF8FastPath]) { return decodeUTF8(input, this[kIgnoreBOM], this[kFatal]); } + if (this[kLatin1FastPath]) { + return decodeLatin1(input, this[kIgnoreBOM], this[kFatal]); + } + this.#prepareConverter(); validateObject(options, 'options', kValidateObjectAllowObjectsAndNull); diff --git a/lib/internal/errors.js b/lib/internal/errors.js index 9045046c100308..d990f8d5a106aa 100644 --- a/lib/internal/errors.js +++ b/lib/internal/errors.js @@ -1551,6 +1551,9 @@ E('ERR_IPC_CHANNEL_CLOSED', 'Channel closed', Error); E('ERR_IPC_DISCONNECTED', 'IPC channel is already disconnected', Error); E('ERR_IPC_ONE_PIPE', 'Child process can have only one IPC pipe', Error); E('ERR_IPC_SYNC_FORK', 'IPC cannot be used with synchronous forks', Error); +E('ERR_IP_BLOCKED', function(ip) { + return `IP(${ip}) is blocked by net.BlockList`; +}, Error); E( 'ERR_LOADER_CHAIN_INCOMPLETE', '"%s" did not call the next hook in its chain and did not' + @@ -1647,9 +1650,12 @@ E('ERR_PARSE_ARGS_UNKNOWN_OPTION', (option, allowPositionals) => { E('ERR_PERFORMANCE_INVALID_TIMESTAMP', '%d is not a valid timestamp', TypeError); E('ERR_PERFORMANCE_MEASURE_INVALID_OPTIONS', '%s', TypeError); +E('ERR_QUIC_APPLICATION_ERROR', 'A QUIC application error occurred. 
%d [%s]', Error); E('ERR_QUIC_CONNECTION_FAILED', 'QUIC connection failed', Error); E('ERR_QUIC_ENDPOINT_CLOSED', 'QUIC endpoint closed: %s (%d)', Error); E('ERR_QUIC_OPEN_STREAM_FAILED', 'Failed to open QUIC stream', Error); +E('ERR_QUIC_TRANSPORT_ERROR', 'A QUIC transport error occurred. %d [%s]', Error); +E('ERR_QUIC_VERSION_NEGOTIATION_ERROR', 'The QUIC session requires version negotiation', Error); E('ERR_REQUIRE_ASYNC_MODULE', 'require() cannot be used on an ESM ' + 'graph with top-level await. Use import() instead. To see where the' + ' top-level await comes from, use --experimental-print-required-tla.', Error); diff --git a/lib/internal/main/check_syntax.js b/lib/internal/main/check_syntax.js index aa521dea92e314..16e367c4e089f6 100644 --- a/lib/internal/main/check_syntax.js +++ b/lib/internal/main/check_syntax.js @@ -59,9 +59,7 @@ function loadESMIfNeeded(cb) { async function checkSyntax(source, filename) { let format; if (filename === '[stdin]' || filename === '[eval]') { - format = (getOptionValue('--input-type') === 'module' || - (getOptionValue('--experimental-default-type') === 'module' && getOptionValue('--input-type') !== 'commonjs')) ? - 'module' : 'commonjs'; + format = (getOptionValue('--input-type') === 'module') ? 'module' : 'commonjs'; } else { const { defaultResolve } = require('internal/modules/esm/resolve'); const { defaultGetFormat } = require('internal/modules/esm/get_format'); diff --git a/lib/internal/main/eval_stdin.js b/lib/internal/main/eval_stdin.js index 3ee4bcdb1d853b..2fd685dd6afcfa 100644 --- a/lib/internal/main/eval_stdin.js +++ b/lib/internal/main/eval_stdin.js @@ -25,8 +25,7 @@ readStdin((code) => { const print = getOptionValue('--print'); const shouldLoadESM = getOptionValue('--import').length > 0; - if (getOptionValue('--input-type') === 'module' || - (getOptionValue('--experimental-default-type') === 'module' && getOptionValue('--input-type') !== 'commonjs')) { + if (getOptionValue('--input-type') === 'module') { evalModuleEntryPoint(code, print); } else { evalScript('[stdin]', diff --git a/lib/internal/main/eval_string.js b/lib/internal/main/eval_string.js index 8027bc7e09b269..6518c93430a28e 100644 --- a/lib/internal/main/eval_string.js +++ b/lib/internal/main/eval_string.js @@ -29,8 +29,7 @@ const source = getOptionValue('--experimental-strip-types') ? 
const print = getOptionValue('--print'); const shouldLoadESM = getOptionValue('--import').length > 0 || getOptionValue('--experimental-loader').length > 0; -if (getOptionValue('--input-type') === 'module' || - (getOptionValue('--experimental-default-type') === 'module' && getOptionValue('--input-type') !== 'commonjs')) { +if (getOptionValue('--input-type') === 'module') { evalModuleEntryPoint(source, print); } else { // For backward compatibility, we want the identifier crypto to be the diff --git a/lib/internal/main/run_main_module.js b/lib/internal/main/run_main_module.js index 9b79410bdad88d..0089a62c61592d 100644 --- a/lib/internal/main/run_main_module.js +++ b/lib/internal/main/run_main_module.js @@ -23,15 +23,11 @@ if (isEntryURL) { emitExperimentalWarning('--entry-url'); } -if (getOptionValue('--experimental-default-type') === 'module') { - require('internal/modules/run_main').executeUserEntryPoint(mainEntry); -} else { - /** - * To support legacy monkey-patching of `Module.runMain`, we call `runMain` here to have the CommonJS loader begin - * the execution of the main entry point, even if the ESM loader immediately takes over because the main entry is an - * ES module or one of the other opt-in conditions (such as the use of `--import`) are met. Users can monkey-patch - * before the main entry point is loaded by doing so via scripts loaded through `--require`. This monkey-patchability - * is undesirable and is removed in `--experimental-default-type=module` mode. - */ - require('internal/modules/cjs/loader').Module.runMain(mainEntry); -} +/** + * To support legacy monkey-patching of `Module.runMain`, we call `runMain` here to have the CommonJS loader begin the + * execution of the main entry point, even if the ESM loader immediately takes over because the main entry is an ES + * module or one of the other opt-in conditions (such as the use of `--import`) are met. Users can monkey-patch before + * the main entry point is loaded by doing so via scripts loaded through `--require`. This monkey-patchability is + * undesirable and should be removed once the module customization hooks provide equivalent functionality. + */ +require('internal/modules/cjs/loader').Module.runMain(mainEntry); diff --git a/lib/internal/modules/cjs/loader.js b/lib/internal/modules/cjs/loader.js index 95763de0701581..f99d0fc2a7a0eb 100644 --- a/lib/internal/modules/cjs/loader.js +++ b/lib/internal/modules/cjs/loader.js @@ -70,6 +70,7 @@ const { module_export_private_symbol, module_parent_private_symbol, }, + isInsideNodeModules, } = internalBinding('util'); const { kEvaluated, createRequiredModuleFacade } = internalBinding('module_wrap'); @@ -131,6 +132,7 @@ const { setOwnProperty, getLazy, isWindows, + isUnderNodeModules, } = require('internal/util'); const { makeContextifyScript, @@ -1328,6 +1330,7 @@ Module.prototype.require = function(id) { } }; +let emittedRequireModuleWarning = false; /** * Resolve and evaluate it synchronously as ESM if it's ESM. * @param {Module} mod CJS module instance @@ -1347,29 +1350,46 @@ function loadESMFromCJS(mod, filename, format, source) { setOwnProperty(process, 'mainModule', undefined); } else { const parent = mod[kModuleParent]; - let messagePrefix; - if (parent) { - // In the case of the module calling `require()`, it's more useful to know its absolute path. - let from = parent.filename || parent.id; - // In the case of the module being require()d, it's more useful to know the id passed into require(). 
- const to = mod.id || mod.filename; - if (from === 'internal/preload') { - from = '--require'; - } else if (from === '') { - from = 'The REPL'; - } else if (from === '.') { - from = 'The entry point'; - } else { - from &&= `CommonJS module ${from}`; + + if (!emittedRequireModuleWarning) { + let shouldEmitWarning = false; + // Check if the require() comes from node_modules. + if (parent) { + shouldEmitWarning = !isUnderNodeModules(parent.filename); + } else if (mod[kIsCachedByESMLoader]) { + // It comes from the require() built for `import cjs` and doesn't have a parent recorded + // in the CJS module instance. Inspect the stack trace to see if the require() + // comes from node_modules and reduce the noise. If there are more than 100 frames, + // just give up and assume it is under node_modules. + shouldEmitWarning = !isInsideNodeModules(100, true); } - if (from && to) { - messagePrefix = `${from} is loading ES Module ${to} using require().\n`; + if (shouldEmitWarning) { + let messagePrefix; + if (parent) { + // In the case of the module calling `require()`, it's more useful to know its absolute path. + let from = parent.filename || parent.id; + // In the case of the module being require()d, it's more useful to know the id passed into require(). + const to = mod.id || mod.filename; + if (from === 'internal/preload') { + from = '--require'; + } else if (from === '') { + from = 'The REPL'; + } else if (from === '.') { + from = 'The entry point'; + } else { + from &&= `CommonJS module ${from}`; + } + if (from && to) { + messagePrefix = `${from} is loading ES Module ${to} using require().\n`; + } + } + emitExperimentalWarning('Support for loading ES Module in require()', + messagePrefix, + undefined, + parent?.require); + emittedRequireModuleWarning = true; } } - emitExperimentalWarning('Support for loading ES Module in require()', - messagePrefix, - undefined, - parent?.require); const { wrap, namespace, diff --git a/lib/internal/modules/esm/formats.js b/lib/internal/modules/esm/formats.js index 608b69caf3fe49..1ae4491b6fd2f6 100644 --- a/lib/internal/modules/esm/formats.js +++ b/lib/internal/modules/esm/formats.js @@ -46,8 +46,8 @@ function mimeToFormat(mime) { } /** - * For extensionless files in a `module` package scope, or a default `module` scope enabled by the - * `--experimental-default-type` flag, we check the file contents to disambiguate between ES module JavaScript and Wasm. + * For extensionless files in a `module` package scope, we check the file contents to disambiguate between ES module + * JavaScript and Wasm. * We do this by taking advantage of the fact that all Wasm files start with the header `0x00 0x61 0x73 0x6d` (`_asm`). * @param {URL} url */ diff --git a/lib/internal/modules/esm/get_format.js b/lib/internal/modules/esm/get_format.js index 9519f947b8dfdc..9fc5e67969abcf 100644 --- a/lib/internal/modules/esm/get_format.js +++ b/lib/internal/modules/esm/get_format.js @@ -119,29 +119,16 @@ function getFileProtocolModuleFormat(url, context = { __proto__: null }, ignoreE } // The controlling `package.json` file has no `type` field. - switch (getOptionValue('--experimental-default-type')) { - case 'module': { // The user explicitly passed `--experimental-default-type=module`. - // An exception to the type flag making ESM the default everywhere is that package scopes under `node_modules` - // should retain the assumption that a lack of a `type` field means CommonJS. - return underNodeModules(url) ? 
'commonjs' : 'module'; - } - case 'commonjs': { // The user explicitly passed `--experimental-default-type=commonjs`. - return 'commonjs'; - } - default: { // The user did not pass `--experimental-default-type`. - // `source` is undefined when this is called from `defaultResolve`; - // but this gets called again from `defaultLoad`/`defaultLoadSync`. - // For ambiguous files (no type field, .js extension) we return - // undefined from `resolve` and re-run the check in `load`. - const format = detectModuleFormat(source, url); - if (format === 'module' && foundPackageJson) { - // This module has a .js extension, a package.json with no `type` field, and ESM syntax. - // Warn about the missing `type` field so that the user can avoid the performance penalty of detection. - warnTypelessPackageJsonFile(pjsonPath, url); - } - return format; - } + // `source` is undefined when this is called from `defaultResolve`; + // but this gets called again from `defaultLoad`/`defaultLoadSync`. + // For ambiguous files (.js, no type field) we return undefined from `resolve` and re-run the check in `load`. + const format = detectModuleFormat(source, url); + if (format === 'module' && foundPackageJson) { + // This module has a .js extension, a package.json with no `type` field, and ESM syntax. + // Warn about the missing `type` field so that the user can avoid the performance penalty of detection. + warnTypelessPackageJsonFile(pjsonPath, url); } + return format; } if (ext === '.ts' && getOptionValue('--experimental-strip-types')) { const { type: packageType, pjsonPath, exists: foundPackageJson } = getPackageScopeConfig(url); @@ -149,35 +136,22 @@ function getFileProtocolModuleFormat(url, context = { __proto__: null }, ignoreE return `${packageType}-typescript`; } // The controlling `package.json` file has no `type` field. - switch (getOptionValue('--experimental-default-type')) { - case 'module': { // The user explicitly passed `--experimental-default-type=module`. - // An exception to the type flag making ESM the default everywhere is that package scopes under `node_modules` - // should retain the assumption that a lack of a `type` field means CommonJS. - return underNodeModules(url) ? 'commonjs-typescript' : 'module-typescript'; - } - case 'commonjs': { // The user explicitly passed `--experimental-default-type=commonjs`. - return 'commonjs-typescript'; - } - default: { // The user did not pass `--experimental-default-type`. - // `source` is undefined when this is called from `defaultResolve`; - // but this gets called again from `defaultLoad`/`defaultLoadSync`. - // Since experimental-strip-types depends on detect-module, we always return null - // if source is undefined. - if (!source) { return null; } - const { stringify } = require('internal/modules/helpers'); - const { stripTypeScriptModuleTypes } = require('internal/modules/typescript'); - const stringifiedSource = stringify(source); - const parsedSource = stripTypeScriptModuleTypes(stringifiedSource, fileURLToPath(url)); - const detectedFormat = detectModuleFormat(parsedSource, url); - const format = `${detectedFormat}-typescript`; - if (format === 'module-typescript' && foundPackageJson) { - // This module has a .js extension, a package.json with no `type` field, and ESM syntax. - // Warn about the missing `type` field so that the user can avoid the performance penalty of detection. 
- warnTypelessPackageJsonFile(pjsonPath, url); - } - return format; - } + // `source` is undefined when this is called from `defaultResolve`; + // but this gets called again from `defaultLoad`/`defaultLoadSync`. + // Since experimental-strip-types depends on detect-module, we always return null if source is undefined. + if (!source) { return null; } + const { stringify } = require('internal/modules/helpers'); + const { stripTypeScriptModuleTypes } = require('internal/modules/typescript'); + const stringifiedSource = stringify(source); + const parsedSource = stripTypeScriptModuleTypes(stringifiedSource, fileURLToPath(url)); + const detectedFormat = detectModuleFormat(parsedSource, url); + const format = `${detectedFormat}-typescript`; + if (format === 'module-typescript' && foundPackageJson) { + // This module has a .js extension, a package.json with no `type` field, and ESM syntax. + // Warn about the missing `type` field so that the user can avoid the performance penalty of detection. + warnTypelessPackageJsonFile(pjsonPath, url); } + return format; } if (ext === '') { @@ -190,24 +164,14 @@ function getFileProtocolModuleFormat(url, context = { __proto__: null }, ignoreE } // The controlling `package.json` file has no `type` field. - switch (getOptionValue('--experimental-default-type')) { - case 'module': { // The user explicitly passed `--experimental-default-type=module`. - return underNodeModules(url) ? 'commonjs' : getFormatOfExtensionlessFile(url); - } - case 'commonjs': { // The user explicitly passed `--experimental-default-type=commonjs`. - return 'commonjs'; - } - default: { // The user did not pass `--experimental-default-type`. - if (!source) { - return null; - } - const format = getFormatOfExtensionlessFile(url); - if (format === 'wasm') { - return format; - } - return detectModuleFormat(source, url); - } + if (!source) { + return null; } + const format = getFormatOfExtensionlessFile(url); + if (format === 'wasm') { + return format; + } + return detectModuleFormat(source, url); } const format = extensionFormatMap[ext]; diff --git a/lib/internal/modules/esm/load.js b/lib/internal/modules/esm/load.js index 5ba13096b98047..9e0a2c6ab6ee13 100644 --- a/lib/internal/modules/esm/load.js +++ b/lib/internal/modules/esm/load.js @@ -9,12 +9,8 @@ const { const { defaultGetFormat } = require('internal/modules/esm/get_format'); const { validateAttributes, emitImportAssertionWarning } = require('internal/modules/esm/assert'); -const { getOptionValue } = require('internal/options'); const { readFileSync } = require('fs'); -const defaultType = - getOptionValue('--experimental-default-type'); - const { Buffer: { from: BufferFrom } } = require('buffer'); const { URL } = require('internal/url'); @@ -109,7 +105,7 @@ async function defaultLoad(url, context = kEmptyObject) { if (urlInstance.protocol === 'node:') { source = null; format ??= 'builtin'; - } else if (format !== 'commonjs' || defaultType === 'module') { + } else if (format !== 'commonjs') { if (source == null) { ({ responseURL, source } = await getSource(urlInstance, context)); context = { __proto__: context, source }; diff --git a/lib/internal/modules/run_main.js b/lib/internal/modules/run_main.js index ab4783a7982b9f..9c90f0f6d3e33b 100644 --- a/lib/internal/modules/run_main.js +++ b/lib/internal/modules/run_main.js @@ -26,32 +26,18 @@ const { * @param {string} main - Entry point path */ function resolveMainPath(main) { - const defaultType = getOptionValue('--experimental-default-type'); /** @type {string} */ let mainPath; - if 
(defaultType === 'module') { - if (getOptionValue('--preserve-symlinks-main')) { return; } - mainPath = path.resolve(main); - } else { - // Extension searching for the main entry point is supported only in legacy mode. - // Module._findPath is monkey-patchable here. - const { Module } = require('internal/modules/cjs/loader'); - mainPath = Module._findPath(path.resolve(main), null, true); - } + // Extension searching for the main entry point is supported for backward compatibility. + // Module._findPath is monkey-patchable here. + const { Module } = require('internal/modules/cjs/loader'); + mainPath = Module._findPath(path.resolve(main), null, true); if (!mainPath) { return; } const preserveSymlinksMain = getOptionValue('--preserve-symlinks-main'); if (!preserveSymlinksMain) { const { toRealPath } = require('internal/modules/helpers'); - try { - mainPath = toRealPath(mainPath); - } catch (err) { - if (defaultType === 'module' && err?.code === 'ENOENT') { - const { decorateErrorWithCommonJSHints } = require('internal/modules/esm/resolve'); - decorateErrorWithCommonJSHints(err, mainPath, getCWDURL()); - } - throw err; - } + mainPath = toRealPath(mainPath); } return mainPath; @@ -62,8 +48,6 @@ function resolveMainPath(main) { * @param {string} mainPath - Absolute path to the main entry point */ function shouldUseESMLoader(mainPath) { - if (getOptionValue('--experimental-default-type') === 'module') { return true; } - /** * @type {string[]} userLoaders A list of custom loaders registered by the user * (or an empty list when none have been registered). @@ -81,8 +65,6 @@ function shouldUseESMLoader(mainPath) { if (!mainPath || StringPrototypeEndsWith(mainPath, '.cjs')) { return false; } if (getOptionValue('--experimental-strip-types')) { - // This ensures that --experimental-default-type=commonjs and .mts files are treated as commonjs - if (getOptionValue('--experimental-default-type') === 'commonjs') { return false; } if (!mainPath || StringPrototypeEndsWith(mainPath, '.cts')) { return false; } // This will likely change in the future to start with commonjs loader by default if (mainPath && StringPrototypeEndsWith(mainPath, '.mts')) { return true; } @@ -147,7 +129,6 @@ function runEntryPointWithESMLoader(callback) { * Parse the CLI main entry point string and run it. * For backwards compatibility, we have to run a bunch of monkey-patchable code that belongs to the CJS loader (exposed * by `require('module')`) even when the entry point is ESM. - * This monkey-patchable code is bypassed under `--experimental-default-type=module`. * Because of backwards compatibility, this function is exposed publicly via `import { runMain } from 'node:module'`. * Because of module detection, this function will attempt to run ambiguous (no explicit extension, no * `package.json` type field) entry points as CommonJS first; under certain conditions, it will retry running as ESM. 
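Taken together, the get_format.js and run_main.js hunks above leave a single selection path for ambiguous entry points: with the `--experimental-default-type` branches removed, a `.js` file in a package scope without a `type` field is classified by inspecting its source, first from `defaultResolve` (where `source` is still undefined) and again from `defaultLoad`. A minimal sketch of that remaining decision follows; `looksLikeESM` and `detectFormatForAmbiguousFile` are illustrative assumptions, not Node.js internals, and the regex stands in for the native `containsModuleSyntax` check, which actually parses the source.

'use strict';

// Illustrative stand-in for the native syntax detection; the real check
// parses the source rather than pattern-matching it.
function looksLikeESM(source) {
  return /^\s*(import|export)\s/m.test(source);
}

// Simplified shape of the format decision for a .js file whose package.json
// has no `type` field, mirroring the defaultResolve/defaultLoad flow above.
function detectFormatForAmbiguousFile(source) {
  if (source === undefined) {
    // First pass, called from `resolve` before the file is read: defer the
    // decision to `load`, which re-runs the check once contents are available.
    return undefined;
  }
  return looksLikeESM(source) ? 'module' : 'commonjs';
}

console.log(detectFormatForAmbiguousFile('export default 1')); // 'module'
console.log(detectFormatForAmbiguousFile('module.exports = 1;')); // 'commonjs'

The real implementation additionally handles Wasm headers for extensionless files and TypeScript sources after type-stripping, as the hunks above show.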
diff --git a/lib/internal/process/execution.js b/lib/internal/process/execution.js index 68b267b61c39b1..a5afd44ca9ca06 100644 --- a/lib/internal/process/execution.js +++ b/lib/internal/process/execution.js @@ -82,7 +82,7 @@ function evalScript(name, body, breakFirstLine, print, shouldLoadESM = false) { const baseUrl = pathToFileURL(module.filename).href; if (getOptionValue('--experimental-detect-module') && - getOptionValue('--input-type') === '' && getOptionValue('--experimental-default-type') === '' && + getOptionValue('--input-type') === '' && containsModuleSyntax(body, name, null, 'no CJS variables')) { return evalModuleEntryPoint(body, print); } diff --git a/lib/internal/process/pre_execution.js b/lib/internal/process/pre_execution.js index 08945a62d4277b..41ebf85900b100 100644 --- a/lib/internal/process/pre_execution.js +++ b/lib/internal/process/pre_execution.js @@ -303,7 +303,7 @@ function setupNavigator() { } function setupSQLite() { - if (!getOptionValue('--experimental-sqlite')) { + if (getOptionValue('--no-experimental-sqlite')) { return; } diff --git a/lib/internal/quic/quic.js b/lib/internal/quic/quic.js index 4d0077195d0629..a76708a37ec1d2 100644 --- a/lib/internal/quic/quic.js +++ b/lib/internal/quic/quic.js @@ -5,28 +5,24 @@ /* c8 ignore start */ const { - ArrayBuffer, + ArrayBufferPrototypeTransfer, ArrayIsArray, ArrayPrototypePush, - BigUint64Array, - DataView, - DataViewPrototypeGetBigInt64, - DataViewPrototypeGetBigUint64, - DataViewPrototypeGetUint8, - DataViewPrototypeSetUint8, ObjectDefineProperties, - Symbol, + SafeSet, SymbolAsyncDispose, + SymbolIterator, Uint8Array, } = primordials; +// QUIC requires that Node.js be compiled with crypto support. const { assertCrypto, - customInspectSymbol: kInspect, } = require('internal/util'); -const { inspect } = require('internal/util/inspect'); assertCrypto(); +const { inspect } = require('internal/util/inspect'); + const { Endpoint: Endpoint_, setCallbacks, @@ -54,99 +50,6 @@ const { CLOSECONTEXT_RECEIVE_FAILURE: kCloseContextReceiveFailure, CLOSECONTEXT_SEND_FAILURE: kCloseContextSendFailure, CLOSECONTEXT_START_FAILURE: kCloseContextStartFailure, - - // All of the IDX_STATS_* constants are the index positions of the stats - // fields in the relevant BigUint64Array's that underlie the *Stats objects. - // These are not exposed to end users. 
- IDX_STATS_ENDPOINT_CREATED_AT, - IDX_STATS_ENDPOINT_DESTROYED_AT, - IDX_STATS_ENDPOINT_BYTES_RECEIVED, - IDX_STATS_ENDPOINT_BYTES_SENT, - IDX_STATS_ENDPOINT_PACKETS_RECEIVED, - IDX_STATS_ENDPOINT_PACKETS_SENT, - IDX_STATS_ENDPOINT_SERVER_SESSIONS, - IDX_STATS_ENDPOINT_CLIENT_SESSIONS, - IDX_STATS_ENDPOINT_SERVER_BUSY_COUNT, - IDX_STATS_ENDPOINT_RETRY_COUNT, - IDX_STATS_ENDPOINT_VERSION_NEGOTIATION_COUNT, - IDX_STATS_ENDPOINT_STATELESS_RESET_COUNT, - IDX_STATS_ENDPOINT_IMMEDIATE_CLOSE_COUNT, - - IDX_STATS_SESSION_CREATED_AT, - IDX_STATS_SESSION_CLOSING_AT, - IDX_STATS_SESSION_DESTROYED_AT, - IDX_STATS_SESSION_HANDSHAKE_COMPLETED_AT, - IDX_STATS_SESSION_HANDSHAKE_CONFIRMED_AT, - IDX_STATS_SESSION_GRACEFUL_CLOSING_AT, - IDX_STATS_SESSION_BYTES_RECEIVED, - IDX_STATS_SESSION_BYTES_SENT, - IDX_STATS_SESSION_BIDI_IN_STREAM_COUNT, - IDX_STATS_SESSION_BIDI_OUT_STREAM_COUNT, - IDX_STATS_SESSION_UNI_IN_STREAM_COUNT, - IDX_STATS_SESSION_UNI_OUT_STREAM_COUNT, - IDX_STATS_SESSION_LOSS_RETRANSMIT_COUNT, - IDX_STATS_SESSION_MAX_BYTES_IN_FLIGHT, - IDX_STATS_SESSION_BYTES_IN_FLIGHT, - IDX_STATS_SESSION_BLOCK_COUNT, - IDX_STATS_SESSION_CWND, - IDX_STATS_SESSION_LATEST_RTT, - IDX_STATS_SESSION_MIN_RTT, - IDX_STATS_SESSION_RTTVAR, - IDX_STATS_SESSION_SMOOTHED_RTT, - IDX_STATS_SESSION_SSTHRESH, - IDX_STATS_SESSION_DATAGRAMS_RECEIVED, - IDX_STATS_SESSION_DATAGRAMS_SENT, - IDX_STATS_SESSION_DATAGRAMS_ACKNOWLEDGED, - IDX_STATS_SESSION_DATAGRAMS_LOST, - - IDX_STATS_STREAM_CREATED_AT, - IDX_STATS_STREAM_RECEIVED_AT, - IDX_STATS_STREAM_ACKED_AT, - IDX_STATS_STREAM_CLOSING_AT, - IDX_STATS_STREAM_DESTROYED_AT, - IDX_STATS_STREAM_BYTES_RECEIVED, - IDX_STATS_STREAM_BYTES_SENT, - IDX_STATS_STREAM_MAX_OFFSET, - IDX_STATS_STREAM_MAX_OFFSET_ACK, - IDX_STATS_STREAM_MAX_OFFSET_RECV, - IDX_STATS_STREAM_FINAL_SIZE, - - IDX_STATE_SESSION_PATH_VALIDATION, - IDX_STATE_SESSION_VERSION_NEGOTIATION, - IDX_STATE_SESSION_DATAGRAM, - IDX_STATE_SESSION_SESSION_TICKET, - IDX_STATE_SESSION_CLOSING, - IDX_STATE_SESSION_GRACEFUL_CLOSE, - IDX_STATE_SESSION_SILENT_CLOSE, - IDX_STATE_SESSION_STATELESS_RESET, - IDX_STATE_SESSION_DESTROYED, - IDX_STATE_SESSION_HANDSHAKE_COMPLETED, - IDX_STATE_SESSION_HANDSHAKE_CONFIRMED, - IDX_STATE_SESSION_STREAM_OPEN_ALLOWED, - IDX_STATE_SESSION_PRIORITY_SUPPORTED, - IDX_STATE_SESSION_WRAPPED, - IDX_STATE_SESSION_LAST_DATAGRAM_ID, - - IDX_STATE_ENDPOINT_BOUND, - IDX_STATE_ENDPOINT_RECEIVING, - IDX_STATE_ENDPOINT_LISTENING, - IDX_STATE_ENDPOINT_CLOSING, - IDX_STATE_ENDPOINT_BUSY, - IDX_STATE_ENDPOINT_PENDING_CALLBACKS, - - IDX_STATE_STREAM_ID, - IDX_STATE_STREAM_FIN_SENT, - IDX_STATE_STREAM_FIN_RECEIVED, - IDX_STATE_STREAM_READ_ENDED, - IDX_STATE_STREAM_WRITE_ENDED, - IDX_STATE_STREAM_DESTROYED, - IDX_STATE_STREAM_PAUSED, - IDX_STATE_STREAM_RESET, - IDX_STATE_STREAM_HAS_READER, - IDX_STATE_STREAM_WANTS_BLOCK, - IDX_STATE_STREAM_WANTS_HEADERS, - IDX_STATE_STREAM_WANTS_RESET, - IDX_STATE_STREAM_WANTS_TRAILERS, } = internalBinding('quic'); const { @@ -160,12 +63,16 @@ const { const { codes: { + ERR_ILLEGAL_CONSTRUCTOR, ERR_INVALID_ARG_TYPE, ERR_INVALID_ARG_VALUE, ERR_INVALID_STATE, + ERR_QUIC_APPLICATION_ERROR, ERR_QUIC_CONNECTION_FAILED, ERR_QUIC_ENDPOINT_CLOSED, ERR_QUIC_OPEN_STREAM_FAILED, + ERR_QUIC_TRANSPORT_ERROR, + ERR_QUIC_VERSION_NEGOTIATION_ERROR, }, } = require('internal/errors'); @@ -180,34 +87,74 @@ const { isCryptoKey, } = require('internal/crypto/keys'); -const { - kHandle: kKeyObjectHandle, - kKeyObject: kKeyObjectInner, -} = require('internal/crypto/util'); - const { validateFunction, 
validateObject, + validateString, + validateBoolean, } = require('internal/validators'); const kEmptyObject = { __proto__: null }; -const kEnumerable = { __proto__: null, enumerable: true }; - -const kBlocked = Symbol('kBlocked'); -const kDatagram = Symbol('kDatagram'); -const kDatagramStatus = Symbol('kDatagramStatus'); -const kError = Symbol('kError'); -const kFinishClose = Symbol('kFinishClose'); -const kHandshake = Symbol('kHandshake'); -const kHeaders = Symbol('kHeaders'); -const kOwner = Symbol('kOwner'); -const kNewSession = Symbol('kNewSession'); -const kNewStream = Symbol('kNewStream'); -const kPathValidation = Symbol('kPathValidation'); -const kReset = Symbol('kReset'); -const kSessionTicket = Symbol('kSessionTicket'); -const kTrailers = Symbol('kTrailers'); -const kVersionNegotiation = Symbol('kVersionNegotiation'); + +const { + kBlocked, + kDatagram, + kDatagramStatus, + kError, + kFinishClose, + kHandshake, + kHeaders, + kOwner, + kRemoveSession, + kNewSession, + kRemoveStream, + kNewStream, + kPathValidation, + kReset, + kSessionTicket, + kTrailers, + kVersionNegotiation, + kInspect, + kKeyObjectHandle, + kKeyObjectInner, + kPrivateConstructor, +} = require('internal/quic/symbols'); + +const { + QuicEndpointStats, + QuicStreamStats, + QuicSessionStats, +} = require('internal/quic/stats'); + +const { + QuicEndpointState, + QuicSessionState, + QuicStreamState, +} = require('internal/quic/state'); + +const { assert } = require('internal/assert'); + +const dc = require('diagnostics_channel'); +const onEndpointCreatedChannel = dc.channel('quic.endpoint.created'); +const onEndpointListeningChannel = dc.channel('quic.endpoint.listen'); +const onEndpointClosingChannel = dc.channel('quic.endpoint.closing'); +const onEndpointClosedChannel = dc.channel('quic.endpoint.closed'); +const onEndpointErrorChannel = dc.channel('quic.endpoint.error'); +const onEndpointBusyChangeChannel = dc.channel('quic.endpoint.busy.change'); +const onEndpointClientSessionChannel = dc.channel('quic.session.created.client'); +const onEndpointServerSessionChannel = dc.channel('quic.session.created.server'); +const onSessionOpenStreamChannel = dc.channel('quic.session.open.stream'); +const onSessionReceivedStreamChannel = dc.channel('quic.session.received.stream'); +const onSessionSendDatagramChannel = dc.channel('quic.session.send.datagram'); +const onSessionUpdateKeyChannel = dc.channel('quic.session.update.key'); +const onSessionClosingChannel = dc.channel('quic.session.closing'); +const onSessionClosedChannel = dc.channel('quic.session.closed'); +const onSessionReceiveDatagramChannel = dc.channel('quic.session.receive.datagram'); +const onSessionReceiveDatagramStatusChannel = dc.channel('quic.session.receive.datagram.status'); +const onSessionPathValidationChannel = dc.channel('quic.session.path.validation'); +const onSessionTicketChannel = dc.channel('quic.session.ticket'); +const onSessionVersionNegotiationChannel = dc.channel('quic.session.version.negotiation'); +const onSessionHandshakeChannel = dc.channel('quic.session.handshake'); /** * @typedef {import('../socketaddress.js').SocketAddress} SocketAddress @@ -230,8 +177,8 @@ const kVersionNegotiation = Symbol('kVersionNegotiation'); * @property {bigint|number} [handshakeTimeout] The handshake timeout * @property {bigint|number} [maxStreamWindow] The maximum stream window * @property {bigint|number} [maxWindow] The maximum window - * @property {number} [rxDiagnosticLoss] The receive diagnostic loss (range 0.0-1.0) - * @property {number} [txDiagnosticLoss] 
The transmit diagnostic loss (range 0.0-1.0) + * @property {number} [rxDiagnosticLoss] The receive diagnostic loss probability (range 0.0-1.0) + * @property {number} [txDiagnosticLoss] The transmit diagnostic loss probability (range 0.0-1.0) * @property {number} [udpReceiveBufferSize] The UDP receive buffer size * @property {number} [udpSendBufferSize] The UDP send buffer size * @property {number} [udpTTL] The UDP TTL @@ -317,64 +264,65 @@ const kVersionNegotiation = Symbol('kVersionNegotiation'); /** * Called when the Endpoint receives a new server-side Session. * @callback OnSessionCallback - * @param {Session} session - * @param {Endpoint} endpoint + * @this {QuicEndpoint} + * @param {QuicSession} session * @returns {void} */ /** * @callback OnStreamCallback + * @this {QuicSession} * @param {QuicStream} stream - * @param {Session} session * @returns {void} */ /** * @callback OnDatagramCallback + * @this {QuicSession} * @param {Uint8Array} datagram - * @param {Session} session - * @param {boolean} early + * @param {boolean} early A datagram is early if it was received before the TLS handshake completed * @returns {void} */ /** * @callback OnDatagramStatusCallback + * @this {QuicSession} * @param {bigint} id * @param {'lost'|'acknowledged'} status - * @param {Session} session * @returns {void} */ /** * @callback OnPathValidationCallback + * @this {QuicSession} * @param {'aborted'|'failure'|'success'} result * @param {SocketAddress} newLocalAddress * @param {SocketAddress} newRemoteAddress * @param {SocketAddress} oldLocalAddress * @param {SocketAddress} oldRemoteAddress * @param {boolean} preferredAddress - * @param {Session} session * @returns {void} */ /** * @callback OnSessionTicketCallback + * @this {QuicSession} * @param {object} ticket - * @param {Session} session * @returns {void} */ /** * @callback OnVersionNegotiationCallback + * @this {QuicSession} * @param {number} version * @param {number[]} requestedVersions * @param {number[]} supportedVersions - * @param {Session} session * @returns {void} */ /** * @callback OnHandshakeCallback + * @this {QuicSession} * @param {string} sni * @param {string} alpn * @param {string} cipher @@ -382,7 +330,6 @@ const kVersionNegotiation = Symbol('kVersionNegotiation'); * @param {string} validationErrorReason * @param {number} validationErrorCode * @param {boolean} earlyDataAccepted - * @param {Session} session * @returns {void} */ @@ -413,6 +360,14 @@ const kVersionNegotiation = Symbol('kVersionNegotiation'); * @returns {void} */ +/** + * @typedef {object} StreamCallbackConfiguration + * @property {OnBlockedCallback} [onblocked] The blocked callback + * @property {OnStreamErrorCallback} [onreset] The reset callback + * @property {OnHeadersCallback} [onheaders] The headers callback + * @property {OnTrailersCallback} [ontrailers] The trailers callback + */ + /** * Provdes the callback configuration for Sessions. 
* @typedef {object} SessionCallbackConfiguration */ /** - * @typedef {object} StreamCallbackConfiguration - * @property {OnStreamErrorCallback} onerror The error callback + * @typedef {object} ProcessedSessionCallbackConfiguration + * @property {OnStreamCallback} onstream The stream callback + * @property {OnDatagramCallback} [ondatagram] The datagram callback + * @property {OnDatagramStatusCallback} [ondatagramstatus] The datagram status callback + * @property {OnPathValidationCallback} [onpathvalidation] The path validation callback + * @property {OnSessionTicketCallback} [onsessionticket] The session ticket callback + * @property {OnVersionNegotiationCallback} [onversionnegotiation] The version negotiation callback + * @property {OnHandshakeCallback} [onhandshake] The handshake callback + * @property {StreamCallbackConfiguration} stream The processed stream callbacks + */ + +/** + * Provides the callback configuration for the Endpoint. + * @typedef {object} EndpointCallbackConfiguration + * @property {OnSessionCallback} onsession The session callback + * @property {OnStreamCallback} onstream The stream callback + * @property {OnDatagramCallback} [ondatagram] The datagram callback + * @property {OnDatagramStatusCallback} [ondatagramstatus] The datagram status callback + * @property {OnPathValidationCallback} [onpathvalidation] The path validation callback + * @property {OnSessionTicketCallback} [onsessionticket] The session ticket callback + * @property {OnVersionNegotiationCallback} [onversionnegotiation] The version negotiation callback + * @property {OnHandshakeCallback} [onhandshake] The handshake callback * @property {OnBlockedCallback} [onblocked] The blocked callback * @property {OnStreamErrorCallback} [onreset] The reset callback * @property {OnHeadersCallback} [onheaders] The headers callback * @property {OnTrailersCallback} [ontrailers] The trailers callback + * @property {SocketAddress} [address] The local address to bind to + * @property {bigint|number} [retryTokenExpiration] The retry token expiration + * @property {bigint|number} [tokenExpiration] The token expiration + * @property {bigint|number} [maxConnectionsPerHost] The maximum number of connections per host + * @property {bigint|number} [maxConnectionsTotal] The maximum number of total connections + * @property {bigint|number} [maxStatelessResetsPerHost] The maximum number of stateless resets per host + * @property {bigint|number} [addressLRUSize] The size of the address LRU cache + * @property {bigint|number} [maxRetries] The maximum number of retries + * @property {bigint|number} [maxPayloadSize] The maximum payload size + * @property {bigint|number} [unacknowledgedPacketThreshold] The unacknowledged packet threshold + * @property {bigint|number} [handshakeTimeout] The handshake timeout + * @property {bigint|number} [maxStreamWindow] The maximum stream window + * @property {bigint|number} [maxWindow] The maximum window + * @property {number} [rxDiagnosticLoss] The receive diagnostic loss probability (range 0.0-1.0) + * @property {number} [txDiagnosticLoss] The transmit diagnostic loss probability (range 0.0-1.0) + * @property {number} [udpReceiveBufferSize] The UDP receive buffer size + * @property {number} [udpSendBufferSize] The UDP send buffer size + * @property {number} [udpTTL] The UDP TTL + * @property {boolean} [noUdpPayloadSizeShaping] Disable UDP payload size shaping + * @property {boolean} [validateAddress] Validate the
address + * @property {boolean} [disableActiveMigration] Disable active migration + * @property {boolean} [ipv6Only] Use IPv6 only + * @property {'reno'|'cubic'|'bbr'|number} [cc] The congestion control algorithm + * @property {ArrayBufferView} [resetTokenSecret] The reset token secret + * @property {ArrayBufferView} [tokenSecret] The token secret */ /** - * Provides the callback configuration for the Endpoint. - * @typedef {object} EndpointCallbackConfiguration + * @typedef {object} ProcessedEndpointCallbackConfiguration * @property {OnSessionCallback} onsession The session callback - * @property {SessionCallbackConfiguration} session The session callback configuration - * @property {StreamCallbackConfiguration} stream The stream callback configuration + * @property {SessionCallbackConfiguration} session The processed session callbacks */ setCallbacks({ + // QuicEndpoint callbacks + + /** + * Called when the QuicEndpoint C++ handle has closed and we need to finish + * cleaning up the JS side. + * @param {number} context Identifies the reason the endpoint was closed. + * @param {number} status If context indicates an error, provides the error code. + */ onEndpointClose(context, status) { this[kOwner][kFinishClose](context, status); }, + /** + * Called when the QuicEndpoint C++ handle receives a new server-side session + * @param {*} session The QuicSession C++ handle + */ onSessionNew(session) { this[kOwner][kNewSession](session); }, + + // QuicSession callbacks + + /** + * Called when the underlying session C++ handle is closed either normally + * or with an error. + * @param {number} errorType + * @param {number} code + * @param {string} [reason] + */ onSessionClose(errorType, code, reason) { this[kOwner][kFinishClose](errorType, code, reason); }, + + /** + * Called when a datagram is received on this session. + * @param {Uint8Array} uint8Array + * @param {boolean} early + */ onSessionDatagram(uint8Array, early) { this[kOwner][kDatagram](uint8Array, early); }, + + /** + * Called when the status of a datagram is received. + * @param {bigint} id + * @param {'lost' | 'acknowledged'} status + */ onSessionDatagramStatus(id, status) { this[kOwner][kDatagramStatus](id, status); }, + + /** + * Called when the session handshake completes. + * @param {string} sni + * @param {string} alpn + * @param {string} cipher + * @param {string} cipherVersion + * @param {string} validationErrorReason + * @param {number} validationErrorCode + * @param {boolean} earlyDataAccepted + */ onSessionHandshake(sni, alpn, cipher, cipherVersion, validationErrorReason, validationErrorCode, @@ -466,12 +509,37 @@ setCallbacks({ validationErrorReason, validationErrorCode, earlyDataAccepted); }, - onSessionPathValidation(...args) { - this[kOwner][kPathValidation](...args); + + /** + * Called when the session path validation completes.
+ * @param {'aborted'|'failure'|'success'} result + * @param {SocketAddress} newLocalAddress + * @param {SocketAddress} newRemoteAddress + * @param {SocketAddress} oldLocalAddress + * @param {SocketAddress} oldRemoteAddress + * @param {boolean} preferredAddress + */ + onSessionPathValidation(result, newLocalAddress, newRemoteAddress, + oldLocalAddress, oldRemoteAddress, preferredAddress) { + this[kOwner][kPathValidation](result, newLocalAddress, newRemoteAddress, + oldLocalAddress, oldRemoteAddress, + preferredAddress); }, + + /** + * Called when the session generates a new TLS session ticket + * @param {object} ticket An opaque session ticket + */ onSessionTicket(ticket) { this[kOwner][kSessionTicket](ticket); }, + + /** + * Called when the session receives a session version negotiation request + * @param {*} version + * @param {*} requestedVersions + * @param {*} supportedVersions + */ onSessionVersionNegotiation(version, requestedVersions, supportedVersions) { @@ -479,185 +547,177 @@ setCallbacks({ // Note that immediately following a version negotiation event, the // session will be destroyed. }, + + /** + * Called when a new stream has been received for the session + * @param {object} stream The QuicStream C++ handle + * @param {number} direction The stream direction (0 == bidi, 1 == uni) + */ onStreamCreated(stream, direction) { const session = this[kOwner]; - // The event is ignored if the session has been destroyed. + // The event is ignored and the stream destroyed if the session has been destroyed. if (session.destroyed) { stream.destroy(); return; }; session[kNewStream](stream, direction); }, + + // QuicStream callbacks onStreamBlocked() { + // Called when the stream C++ handle has been blocked by flow control. this[kOwner][kBlocked](); }, onStreamClose(error) { + // Called when the stream C++ handle has been closed. this[kOwner][kError](error); }, onStreamReset(error) { + // Called when the stream C++ handle has received a stream reset. this[kOwner][kReset](error); }, onStreamHeaders(headers, kind) { + // Called when the stream C++ handle has received a full block of headers. this[kOwner][kHeaders](headers, kind); }, onStreamTrailers() { + // Called when the stream C++ handle is ready to receive trailing headers. this[kOwner][kTrailers](); }, }); -/** - * @param {'use'|'ignore'|'default'} policy - * @returns {number} - */ -function getPreferredAddressPolicy(policy = 'default') { - switch (policy) { - case 'use': return PREFERRED_ADDRESS_USE; - case 'ignore': return PREFERRED_ADDRESS_IGNORE; - case 'default': return DEFAULT_PREFERRED_ADDRESS_POLICY; - } - throw new ERR_INVALID_ARG_VALUE('options.preferredAddressPolicy', policy); -} - -/** - * @param {TlsOptions} tls - */ -function processTlsOptions(tls) { - validateObject(tls, 'options.tls'); - const { - sni, - alpn, - ciphers, - groups, - keylog, - verifyClient, - tlsTrace, - verifyPrivateKey, - keys, - certs, - ca, - crl, - } = tls; - - const keyHandles = []; - if (keys !== undefined) { - const keyInputs = ArrayIsArray(keys) ? 
keys : [keys]; - for (const key of keyInputs) { - if (isKeyObject(key)) { - if (key.type !== 'private') { - throw new ERR_INVALID_ARG_VALUE('options.tls.keys', key, 'must be a private key'); - } - ArrayPrototypePush(keyHandles, key[kKeyObjectHandle]); - } else if (isCryptoKey(key)) { - if (key.type !== 'private') { - throw new ERR_INVALID_ARG_VALUE('options.tls.keys', key, 'must be a private key'); - } - ArrayPrototypePush(keyHandles, key[kKeyObjectInner][kKeyObjectHandle]); - } else { - throw new ERR_INVALID_ARG_TYPE('options.tls.keys', ['KeyObject', 'CryptoKey'], key); - } - } - } - - return { - sni, - alpn, - ciphers, - groups, - keylog, - verifyClient, - tlsTrace, - verifyPrivateKey, - keys: keyHandles, - certs, - ca, - crl, - }; -} - -class QuicStreamStats { - /** @type {BigUint64Array} */ +class QuicStream { + /** @type {object} */ #handle; + /** @type {QuicSession} */ + #session; + /** @type {QuicStreamStats} */ + #stats; + /** @type {QuicStreamState} */ + #state; + /** @type {number} */ + #direction; + /** @type {OnBlockedCallback|undefined} */ + #onblocked; + /** @type {OnStreamErrorCallback|undefined} */ + #onreset; + /** @type {OnHeadersCallback|undefined} */ + #onheaders; + /** @type {OnTrailersCallback|undefined} */ + #ontrailers; /** - * @param {ArrayBuffer} buffer + * @param {symbol} privateSymbol + * @param {StreamCallbackConfiguration} config + * @param {object} handle + * @param {QuicSession} session */ - constructor(buffer) { - if (!isArrayBuffer(buffer)) { - throw new ERR_INVALID_ARG_TYPE('buffer', ['ArrayBuffer'], buffer); + constructor(privateSymbol, config, handle, session, direction) { + if (privateSymbol !== kPrivateConstructor) { + throw new ERR_ILLEGAL_CONSTRUCTOR(); } - this.#handle = new BigUint64Array(buffer); - } - /** @type {bigint} */ - get createdAt() { - return this.#handle[IDX_STATS_STREAM_CREATED_AT]; - } + const { + onblocked, + onreset, + onheaders, + ontrailers, + } = config; - /** @type {bigint} */ - get receivedAt() { - return this.#handle[IDX_STATS_STREAM_RECEIVED_AT]; - } + if (onblocked !== undefined) { + this.#onblocked = onblocked.bind(this); + } + if (onreset !== undefined) { + this.#onreset = onreset.bind(this); + } + if (onheaders !== undefined) { + this.#onheaders = onheaders.bind(this); + } + if (ontrailers !== undefined) { + this.#ontrailers = ontrailers.bind(this); + } + this.#handle = handle; + this.#handle[kOwner] = this; - /** @type {bigint} */ - get ackedAt() { - return this.#handle[IDX_STATS_STREAM_ACKED_AT]; - } + this.#session = session; + this.#direction = direction; - /** @type {bigint} */ - get closingAt() { - return this.#handle[IDX_STATS_STREAM_CLOSING_AT]; + this.#stats = new QuicStreamStats(kPrivateConstructor, this.#handle.stats); + + this.#state = new QuicStreamState(kPrivateConstructor, this.#handle.state); + this.#state.wantsBlock = !!this.#onblocked; + this.#state.wantsReset = !!this.#onreset; + this.#state.wantsHeaders = !!this.#onheaders; + this.#state.wantsTrailers = !!this.#ontrailers; } + /** @type {QuicStreamStats} */ + get stats() { return this.#stats; } + + /** @type {QuicStreamState} */ + get state() { return this.#state; } + + /** @type {QuicSession} */ + get session() { return this.#session; } + /** @type {bigint} */ - get destroyedAt() { - return this.#handle[IDX_STATS_STREAM_DESTROYED_AT]; + get id() { return this.#state.id; } + + /** @type {'bidi'|'uni'} */ + get direction() { + return this.#direction === 0 ?
'bidi' : 'uni'; } - /** @type {bigint} */ - get bytesReceived() { - return this.#handle[IDX_STATS_STREAM_BYTES_RECEIVED]; + /** @returns {boolean} */ + get destroyed() { + return this.#handle === undefined; } - /** @type {bigint} */ - get bytesSent() { - return this.#handle[IDX_STATS_STREAM_BYTES_SENT]; + destroy(error) { + if (this.destroyed) return; + // TODO(@jasnell): pass an error code + this.#stats[kFinishClose](); + this.#state[kFinishClose](); + this.#onblocked = undefined; + this.#onreset = undefined; + this.#onheaders = undefined; + this.#ontrailers = undefined; + this.#session[kRemoveStream](this); + this.#session = undefined; + this.#handle.destroy(); + this.#handle = undefined; } - /** @type {bigint} */ - get maxOffset() { - return this.#handle[IDX_STATS_STREAM_MAX_OFFSET]; + [kBlocked]() { + // The blocked event should only be called if the stream was created with + // an onblocked callback. The callback should always exist here. + assert(this.#onblocked, 'Unexpected stream blocked event'); + this.#onblocked(this); } - /** @type {bigint} */ - get maxOffsetAcknowledged() { - return this.#handle[IDX_STATS_STREAM_MAX_OFFSET_ACK]; + [kError](error) { + this.destroy(error); } - /** @type {bigint} */ - get maxOffsetReceived() { - return this.#handle[IDX_STATS_STREAM_MAX_OFFSET_RECV]; + [kReset](error) { + // The reset event should only be called if the stream was created with + // an onreset callback. The callback should always exist here. + assert(this.#onreset, 'Unexpected stream reset event'); + this.#onreset(error, this); } - /** @type {bigint} */ - get finalSize() { - return this.#handle[IDX_STATS_STREAM_FINAL_SIZE]; + [kHeaders](headers, kind) { + // The headers event should only be called if the stream was created with + // an onheaders callback. The callback should always exist here. + assert(this.#onheaders, 'Unexpected stream headers event'); + this.#onheaders(headers, kind, this); } - toJSON() { - return { - __proto__: null, - createdAt: `${this.createdAt}`, - receivedAt: `${this.receivedAt}`, - ackedAt: `${this.ackedAt}`, - closingAt: `${this.closingAt}`, - destroyedAt: `${this.destroyedAt}`, - bytesReceived: `${this.bytesReceived}`, - bytesSent: `${this.bytesSent}`, - maxOffset: `${this.maxOffset}`, - maxOffsetAcknowledged: `${this.maxOffsetAcknowledged}`, - maxOffsetReceived: `${this.maxOffsetReceived}`, - finalSize: `${this.finalSize}`, - }; + [kTrailers]() { + // The trailers event should only be called if the stream was created with + // an ontrailers callback. The callback should always exist here. + assert(this.#ontrailers, 'Unexpected stream trailers event'); + this.#ontrailers(this); } [kInspect](depth, options) { @@ -669,731 +729,62 @@ class QuicStreamStats { depth: options.depth == null ? null : options.depth - 1, }; - return `StreamStats ${inspect({ - createdAt: this.createdAt, - receivedAt: this.receivedAt, - ackedAt: this.ackedAt, - closingAt: this.closingAt, - destroyedAt: this.destroyedAt, - bytesReceived: this.bytesReceived, - bytesSent: this.bytesSent, - maxOffset: this.maxOffset, - maxOffsetAcknowledged: this.maxOffsetAcknowledged, - maxOffsetReceived: this.maxOffsetReceived, - finalSize: this.finalSize, + return `Stream ${inspect({ + id: this.id, + direction: this.direction, + stats: this.stats, + state: this.state, + session: this.session, }, opts)}`; } - - [kFinishClose]() { - // Snapshot the stats into a new BigUint64Array since the underlying - // buffer will be destroyed. 
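The new QuicStream wrapper above can only be constructed through the session (the kPrivateConstructor check) and exposes stats, state, session, id, and direction as read-only views over the C++ handle. A hedged usage sketch; the API is internal and experimental, `session` is assumed to be an established QuicSession, and the bidirectional opener's name is assumed from its unidirectional counterpart shown later in this patch:

    // Streams are never constructed directly; the session hands them out.
    const stream = session.openBidirectionalStream();
    console.log(stream.direction);           // 'bidi' (0 == bidi, 1 == uni internally)
    console.log(stream.id);                  // bigint id read from the shared state buffer
    console.log(stream.session === session); // true
    stream.destroy();                        // releases the C++ handle
    console.log(stream.destroyed);           // true: the handle is now undefined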
- this.#handle = new BigUint64Array(this.#handle); - } } -class QuicStreamState { - /** @type {DataView} */ +class QuicSession { + /** @type {QuicEndpoint} */ + #endpoint = undefined; + /** @type {boolean} */ + #isPendingClose = false; + /** @type {object|undefined} */ #handle; + /** @type {PromiseWithResolvers} */ + #pendingClose = Promise.withResolvers(); // eslint-disable-line node-core/prefer-primordials + /** @type {SocketAddress|undefined} */ + #remoteAddress = undefined; + /** @type {QuicSessionState} */ + #state; + /** @type {QuicSessionStats} */ + #stats; + /** @type {Set} */ + #streams = new SafeSet(); + /** @type {OnStreamCallback} */ + #onstream; + /** @type {OnDatagramCallback|undefined} */ + #ondatagram; + /** @type {OnDatagramStatusCallback|undefined} */ + #ondatagramstatus; + /** @type {OnPathValidationCallback|undefined} */ + #onpathvalidation; + /** @type {OnSessionTicketCallback|undefined} */ + #onsessionticket; + /** @type {OnVersionNegotiationCallback|undefined} */ + #onversionnegotiation; + /** @type {OnHandshakeCallback} */ + #onhandshake; + /** @type {StreamCallbackConfiguration} */ + #streamConfig; /** - * @param {ArrayBuffer} buffer + * @param {symbol} privateSymbol + * @param {ProcessedSessionCallbackConfiguration} config + * @param {object} handle + * @param {QuicEndpoint} endpoint */ - constructor(buffer) { - if (!isArrayBuffer(buffer)) { - throw new ERR_INVALID_ARG_TYPE('buffer', ['ArrayBuffer'], buffer); + constructor(privateSymbol, config, handle, endpoint) { + // Instances of QuicSession can only be created internally. + if (privateSymbol !== kPrivateConstructor) { + throw new ERR_ILLEGAL_CONSTRUCTOR(); } - this.#handle = new DataView(buffer); - } - - /** @type {bigint} */ - get id() { - return DataViewPrototypeGetBigInt64(this.#handle, IDX_STATE_STREAM_ID); - } - - /** @type {boolean} */ - get finSent() { - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_STREAM_FIN_SENT); - } - - /** @type {boolean} */ - get finReceived() { - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_STREAM_FIN_RECEIVED); - } - - /** @type {boolean} */ - get readEnded() { - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_STREAM_READ_ENDED); - } - - /** @type {boolean} */ - get writeEnded() { - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_STREAM_WRITE_ENDED); - } - - /** @type {boolean} */ - get destroyed() { - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_STREAM_DESTROYED); - } - - /** @type {boolean} */ - get paused() { - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_STREAM_PAUSED); - } - - /** @type {boolean} */ - get reset() { - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_STREAM_RESET); - } - - /** @type {boolean} */ - get hasReader() { - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_STREAM_HAS_READER); - } - - /** @type {boolean} */ - get wantsBlock() { - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_STREAM_WANTS_BLOCK); - } - - /** @type {boolean} */ - set wantsBlock(val) { - DataViewPrototypeSetUint8(this.#handle, IDX_STATE_STREAM_WANTS_BLOCK, val ? 1 : 0); - } - - /** @type {boolean} */ - get wantsHeaders() { - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_STREAM_WANTS_HEADERS); - } - - /** @type {boolean} */ - set wantsHeaders(val) { - DataViewPrototypeSetUint8(this.#handle, IDX_STATE_STREAM_WANTS_HEADERS, val ? 
1 : 0); - } - - /** @type {boolean} */ - get wantsReset() { - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_STREAM_WANTS_RESET); - } - - /** @type {boolean} */ - set wantsReset(val) { - DataViewPrototypeSetUint8(this.#handle, IDX_STATE_STREAM_WANTS_RESET, val ? 1 : 0); - } - - /** @type {boolean} */ - get wantsTrailers() { - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_STREAM_WANTS_TRAILERS); - } - - /** @type {boolean} */ - set wantsTrailers(val) { - DataViewPrototypeSetUint8(this.#handle, IDX_STATE_STREAM_WANTS_TRAILERS, val ? 1 : 0); - } - - toJSON() { - return { - __proto__: null, - id: `${this.id}`, - finSent: this.finSent, - finReceived: this.finReceived, - readEnded: this.readEnded, - writeEnded: this.writeEnded, - destroyed: this.destroyed, - paused: this.paused, - reset: this.reset, - hasReader: this.hasReader, - wantsBlock: this.wantsBlock, - wantsHeaders: this.wantsHeaders, - wantsReset: this.wantsReset, - wantsTrailers: this.wantsTrailers, - }; - } - - [kInspect](depth, options) { - if (depth < 0) - return this; - - const opts = { - ...options, - depth: options.depth == null ? null : options.depth - 1, - }; - - return `StreamState ${inspect({ - id: this.id, - finSent: this.finSent, - finReceived: this.finReceived, - readEnded: this.readEnded, - writeEnded: this.writeEnded, - destroyed: this.destroyed, - paused: this.paused, - reset: this.reset, - hasReader: this.hasReader, - wantsBlock: this.wantsBlock, - wantsHeaders: this.wantsHeaders, - wantsReset: this.wantsReset, - wantsTrailers: this.wantsTrailers, - }, opts)}`; - } - - [kFinishClose]() { - // When the stream is destroyed, the state is no longer available. - this.#handle = new DataView(new ArrayBuffer(0)); - } -} - -class QuicStream { - /** @type {object} */ - #handle; - /** @type {Session} */ - #session; - /** @type {QuicStreamStats} */ - #stats; - /** @type {QuicStreamState} */ - #state; - /** @type {number} */ - #direction; - /** @type {OnStreamErrorCallback} */ - #onerror; - /** @type {OnBlockedCallback|undefined} */ - #onblocked; - /** @type {OnStreamErrorCallback|undefined} */ - #onreset; - /** @type {OnHeadersCallback|undefined} */ - #onheaders; - /** @type {OnTrailersCallback|undefined} */ - #ontrailers; - - /** - * @param {StreamCallbackConfiguration} config - * @param {object} handle - * @param {Session} session - */ - constructor(config, handle, session, direction) { - validateObject(config, 'config'); - this.#stats = new QuicStreamStats(handle.stats); - this.#state = new QuicStreamState(handle.stats); - const { - onblocked, - onerror, - onreset, - onheaders, - ontrailers, - } = config; - if (onblocked !== undefined) { - validateFunction(onblocked, 'config.onblocked'); - this.#state.wantsBlock = true; - this.#onblocked = onblocked; - } - if (onreset !== undefined) { - validateFunction(onreset, 'config.onreset'); - this.#state.wantsReset = true; - this.#onreset = onreset; - } - if (onheaders !== undefined) { - validateFunction(onheaders, 'config.onheaders'); - this.#state.wantsHeaders = true; - this.#onheaders = onheaders; - } - if (ontrailers !== undefined) { - validateFunction(ontrailers, 'config.ontrailers'); - this.#state.wantsTrailers = true; - this.#ontrailers = ontrailers; - } - validateFunction(onerror, 'config.onerror'); - this.#onerror = onerror; - this.#handle = handle; - this.#session = session; - this.#direction = direction; - this.#handle[kOwner] = true; - } - - /** @type {QuicStreamStats} */ - get stats() { return this.#stats; } - - /** @type {QuicStreamState} */ - get 
state() { return this.#state; } - - /** @type {Session} */ - get session() { return this.#session; } - - /** @type {bigint} */ - get id() { return this.#state.id; } - - /** @type {'bidi'|'uni'} */ - get direction() { - return this.#direction === 0 ? 'bidi' : 'uni'; - } - - [kBlocked]() { - this.#onblocked(this); - } - - [kError](error) { - this.#onerror(error, this); - } - - [kReset](error) { - this.#onreset(error, this); - } - - [kHeaders](headers, kind) { - this.#onheaders(headers, kind, this); - } - - [kTrailers]() { - this.#ontrailers(this); - } - - [kInspect](depth, options) { - if (depth < 0) - return this; - - const opts = { - ...options, - depth: options.depth == null ? null : options.depth - 1, - }; - - return `Stream ${inspect({ - id: this.id, - direction: this.direction, - stats: this.stats, - state: this.state, - session: this.session, - }, opts)}`; - } -} - -class SessionStats { - /** @type {BigUint64Array} */ - #handle; - - /** - * @param {BigUint64Array} buffer - */ - constructor(buffer) { - if (!isArrayBuffer(buffer)) { - throw new ERR_INVALID_ARG_TYPE('buffer', ['ArrayBuffer'], buffer); - } - this.#handle = new BigUint64Array(buffer); - } - - /** @type {bigint} */ - get createdAt() { - return this.#handle[IDX_STATS_SESSION_CREATED_AT]; - } - - /** @type {bigint} */ - get closingAt() { - return this.#handle[IDX_STATS_SESSION_CLOSING_AT]; - } - - /** @type {bigint} */ - get destroyedAt() { - return this.#handle[IDX_STATS_SESSION_DESTROYED_AT]; - } - - /** @type {bigint} */ - get handshakeCompletedAt() { - return this.#handle[IDX_STATS_SESSION_HANDSHAKE_COMPLETED_AT]; - } - - /** @type {bigint} */ - get handshakeConfirmedAt() { - return this.#handle[IDX_STATS_SESSION_HANDSHAKE_CONFIRMED_AT]; - } - - /** @type {bigint} */ - get gracefulClosingAt() { - return this.#handle[IDX_STATS_SESSION_GRACEFUL_CLOSING_AT]; - } - - /** @type {bigint} */ - get bytesReceived() { - return this.#handle[IDX_STATS_SESSION_BYTES_RECEIVED]; - } - - /** @type {bigint} */ - get bytesSent() { - return this.#handle[IDX_STATS_SESSION_BYTES_SENT]; - } - - /** @type {bigint} */ - get bidiInStreamCount() { - return this.#handle[IDX_STATS_SESSION_BIDI_IN_STREAM_COUNT]; - } - - /** @type {bigint} */ - get bidiOutStreamCount() { - return this.#handle[IDX_STATS_SESSION_BIDI_OUT_STREAM_COUNT]; - } - - /** @type {bigint} */ - get uniInStreamCount() { - return this.#handle[IDX_STATS_SESSION_UNI_IN_STREAM_COUNT]; - } - - /** @type {bigint} */ - get uniOutStreamCount() { - return this.#handle[IDX_STATS_SESSION_UNI_OUT_STREAM_COUNT]; - } - - /** @type {bigint} */ - get lossRetransmitCount() { - return this.#handle[IDX_STATS_SESSION_LOSS_RETRANSMIT_COUNT]; - } - - /** @type {bigint} */ - get maxBytesInFlights() { - return this.#handle[IDX_STATS_SESSION_MAX_BYTES_IN_FLIGHT]; - } - - /** @type {bigint} */ - get bytesInFlight() { - return this.#handle[IDX_STATS_SESSION_BYTES_IN_FLIGHT]; - } - - /** @type {bigint} */ - get blockCount() { - return this.#handle[IDX_STATS_SESSION_BLOCK_COUNT]; - } - - /** @type {bigint} */ - get cwnd() { - return this.#handle[IDX_STATS_SESSION_CWND]; - } - - /** @type {bigint} */ - get latestRtt() { - return this.#handle[IDX_STATS_SESSION_LATEST_RTT]; - } - - /** @type {bigint} */ - get minRtt() { - return this.#handle[IDX_STATS_SESSION_MIN_RTT]; - } - - /** @type {bigint} */ - get rttVar() { - return this.#handle[IDX_STATS_SESSION_RTTVAR]; - } - - /** @type {bigint} */ - get smoothedRtt() { - return this.#handle[IDX_STATS_SESSION_SMOOTHED_RTT]; - } - - /** @type {bigint} */ - get 
ssthresh() { - return this.#handle[IDX_STATS_SESSION_SSTHRESH]; - } - - /** @type {bigint} */ - get datagramsReceived() { - return this.#handle[IDX_STATS_SESSION_DATAGRAMS_RECEIVED]; - } - - /** @type {bigint} */ - get datagramsSent() { - return this.#handle[IDX_STATS_SESSION_DATAGRAMS_SENT]; - } - - /** @type {bigint} */ - get datagramsAcknowledged() { - return this.#handle[IDX_STATS_SESSION_DATAGRAMS_ACKNOWLEDGED]; - } - - /** @type {bigint} */ - get datagramsLost() { - return this.#handle[IDX_STATS_SESSION_DATAGRAMS_LOST]; - } - - toJSON() { - return { - __proto__: null, - createdAt: `${this.createdAt}`, - closingAt: `${this.closingAt}`, - destroyedAt: `${this.destroyedAt}`, - handshakeCompletedAt: `${this.handshakeCompletedAt}`, - handshakeConfirmedAt: `${this.handshakeConfirmedAt}`, - gracefulClosingAt: `${this.gracefulClosingAt}`, - bytesReceived: `${this.bytesReceived}`, - bytesSent: `${this.bytesSent}`, - bidiInStreamCount: `${this.bidiInStreamCount}`, - bidiOutStreamCount: `${this.bidiOutStreamCount}`, - uniInStreamCount: `${this.uniInStreamCount}`, - uniOutStreamCount: `${this.uniOutStreamCount}`, - lossRetransmitCount: `${this.lossRetransmitCount}`, - maxBytesInFlights: `${this.maxBytesInFlights}`, - bytesInFlight: `${this.bytesInFlight}`, - blockCount: `${this.blockCount}`, - cwnd: `${this.cwnd}`, - latestRtt: `${this.latestRtt}`, - minRtt: `${this.minRtt}`, - rttVar: `${this.rttVar}`, - smoothedRtt: `${this.smoothedRtt}`, - ssthresh: `${this.ssthresh}`, - datagramsReceived: `${this.datagramsReceived}`, - datagramsSent: `${this.datagramsSent}`, - datagramsAcknowledged: `${this.datagramsAcknowledged}`, - datagramsLost: `${this.datagramsLost}`, - }; - } - - [kInspect](depth, options) { - if (depth < 0) - return this; - - const opts = { - ...options, - depth: options.depth == null ? null : options.depth - 1, - }; - - return `SessionStats ${inspect({ - createdAt: this.createdAt, - closingAt: this.closingAt, - destroyedAt: this.destroyedAt, - handshakeCompletedAt: this.handshakeCompletedAt, - handshakeConfirmedAt: this.handshakeConfirmedAt, - gracefulClosingAt: this.gracefulClosingAt, - bytesReceived: this.bytesReceived, - bytesSent: this.bytesSent, - bidiInStreamCount: this.bidiInStreamCount, - bidiOutStreamCount: this.bidiOutStreamCount, - uniInStreamCount: this.uniInStreamCount, - uniOutStreamCount: this.uniOutStreamCount, - lossRetransmitCount: this.lossRetransmitCount, - maxBytesInFlights: this.maxBytesInFlights, - bytesInFlight: this.bytesInFlight, - blockCount: this.blockCount, - cwnd: this.cwnd, - latestRtt: this.latestRtt, - minRtt: this.minRtt, - rttVar: this.rttVar, - smoothedRtt: this.smoothedRtt, - ssthresh: this.ssthresh, - datagramsReceived: this.datagramsReceived, - datagramsSent: this.datagramsSent, - datagramsAcknowledged: this.datagramsAcknowledged, - datagramsLost: this.datagramsLost, - }, opts)}`; - } - - [kFinishClose]() { - // Snapshot the stats into a new BigUint64Array since the underlying - // buffer will be destroyed. 
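Both the removed SessionStats above and its QuicSessionStats replacement expose every counter as a bigint read from a BigUint64Array, which is why the toJSON() methods throughout this file coerce each field to a string first. A minimal, plain-JavaScript illustration of the constraint:

    // JSON.stringify cannot serialize BigInt values directly:
    const stats = { bytesSent: 1024n };
    // JSON.stringify(stats);  // TypeError: Do not know how to serialize a BigInt
    JSON.stringify({ bytesSent: `${stats.bytesSent}` }); // '{"bytesSent":"1024"}'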
- this.#handle = new BigUint64Array(this.#handle); - } -} - -class SessionState { - /** @type {DataView} */ - #handle; - - /** - * @param {ArrayBuffer} buffer - */ - constructor(buffer) { - if (!isArrayBuffer(buffer)) { - throw new ERR_INVALID_ARG_TYPE('buffer', ['ArrayBuffer'], buffer); - } - this.#handle = new DataView(buffer); - } - - /** @type {boolean} */ - get hasPathValidationListener() { - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_SESSION_PATH_VALIDATION); - } - - /** @type {boolean} */ - set hasPathValidationListener(val) { - DataViewPrototypeSetUint8(this.#handle, IDX_STATE_SESSION_PATH_VALIDATION, val ? 1 : 0); - } - - /** @type {boolean} */ - get hasVersionNegotiationListener() { - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_SESSION_VERSION_NEGOTIATION); - } - - /** @type {boolean} */ - set hasVersionNegotiationListener(val) { - DataViewPrototypeSetUint8(this.#handle, IDX_STATE_SESSION_VERSION_NEGOTIATION, val ? 1 : 0); - } - - /** @type {boolean} */ - get hasDatagramListener() { - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_SESSION_DATAGRAM); - } - - /** @type {boolean} */ - set hasDatagramListener(val) { - DataViewPrototypeSetUint8(this.#handle, IDX_STATE_SESSION_DATAGRAM, val ? 1 : 0); - } - - /** @type {boolean} */ - get hasSessionTicketListener() { - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_SESSION_SESSION_TICKET); - } - - /** @type {boolean} */ - set hasSessionTicketListener(val) { - DataViewPrototypeSetUint8(this.#handle, IDX_STATE_SESSION_SESSION_TICKET, val ? 1 : 0); - } - - /** @type {boolean} */ - get isClosing() { - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_SESSION_CLOSING); - } - - /** @type {boolean} */ - get isGracefulClose() { - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_SESSION_GRACEFUL_CLOSE); - } - - /** @type {boolean} */ - get isSilentClose() { - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_SESSION_SILENT_CLOSE); - } - - /** @type {boolean} */ - get isStatelessReset() { - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_SESSION_STATELESS_RESET); - } - - /** @type {boolean} */ - get isDestroyed() { - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_SESSION_DESTROYED); - } - - /** @type {boolean} */ - get isHandshakeCompleted() { - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_SESSION_HANDSHAKE_COMPLETED); - } - - /** @type {boolean} */ - get isHandshakeConfirmed() { - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_SESSION_HANDSHAKE_CONFIRMED); - } - - /** @type {boolean} */ - get isStreamOpenAllowed() { - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_SESSION_STREAM_OPEN_ALLOWED); - } - - /** @type {boolean} */ - get isPrioritySupported() { - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_SESSION_PRIORITY_SUPPORTED); - } - - /** @type {boolean} */ - get isWrapped() { - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_SESSION_WRAPPED); - } - - /** @type {bigint} */ - get lastDatagramId() { - return DataViewPrototypeGetBigUint64(this.#handle, IDX_STATE_SESSION_LAST_DATAGRAM_ID); - } - - toJSON() { - return { - __proto__: null, - hasPathValidationListener: this.hasPathValidationListener, - hasVersionNegotiationListener: this.hasVersionNegotiationListener, - hasDatagramListener: this.hasDatagramListener, - hasSessionTicketListener: this.hasSessionTicketListener, - isClosing: this.isClosing, - isGracefulClose: this.isGracefulClose, - isSilentClose: this.isSilentClose, - 
isStatelessReset: this.isStatelessReset, - isDestroyed: this.isDestroyed, - isHandshakeCompleted: this.isHandshakeCompleted, - isHandshakeConfirmed: this.isHandshakeConfirmed, - isStreamOpenAllowed: this.isStreamOpenAllowed, - isPrioritySupported: this.isPrioritySupported, - isWrapped: this.isWrapped, - lastDatagramId: `${this.lastDatagramId}`, - }; - } - - [kInspect](depth, options) { - if (depth < 0) - return this; - - const opts = { - ...options, - depth: options.depth == null ? null : options.depth - 1, - }; - - return `SessionState ${inspect({ - hasPathValidationListener: this.hasPathValidationListener, - hasVersionNegotiationListener: this.hasVersionNegotiationListener, - hasDatagramListener: this.hasDatagramListener, - hasSessionTicketListener: this.hasSessionTicketListener, - isClosing: this.isClosing, - isGracefulClose: this.isGracefulClose, - isSilentClose: this.isSilentClose, - isStatelessReset: this.isStatelessReset, - isDestroyed: this.isDestroyed, - isHandshakeCompleted: this.isHandshakeCompleted, - isHandshakeConfirmed: this.isHandshakeConfirmed, - isStreamOpenAllowed: this.isStreamOpenAllowed, - isPrioritySupported: this.isPrioritySupported, - isWrapped: this.isWrapped, - lastDatagramId: this.lastDatagramId, - }, opts)}`; - } - - [kFinishClose]() { - // Snapshot the state into a new DataView since the underlying - // buffer will be destroyed. - this.#handle = new DataView(new ArrayBuffer(0)); - } -} - -class Session { - /** @type {Endpoint} */ - #endpoint = undefined; - /** @type {boolean} */ - #isPendingClose = false; - /** @type {object|undefined} */ - #handle; - /** @type {PromiseWithResolvers} */ - #pendingClose; - /** @type {SocketAddress|undefined} */ - #remoteAddress = undefined; - /** @type {SessionState} */ - #state; - /** @type {SessionStats} */ - #stats; - /** @type {QuicStream[]} */ - #streams = []; - /** @type {OnStreamCallback} */ - #onstream; - /** @type {OnDatagramCallback|undefined} */ - #ondatagram; - /** @type {OnDatagramStatusCallback|undefined} */ - #ondatagramstatus; - /** @type {OnPathValidationCallback|undefined} */ - #onpathvalidation; - /** @type {OnSessionTicketCallback|undefined} */ - #onsessionticket; - /** @type {OnVersionNegotiationCallback|undefined} */ - #onversionnegotiation; - /** @type {OnHandshakeCallback} */ - #onhandshake; - /** @type {StreamCallbackConfiguration} */ - #streamConfig; - - /** - * @param {SessionCallbackConfiguration} config - * @param {StreamCallbackConfiguration} streamConfig - * @param {object} [handle] - * @param {Endpoint} [endpoint] - */ - constructor(config, streamConfig, handle, endpoint) { - validateObject(config, 'config'); - this.#stats = new SessionStats(handle.stats); - this.#state = new SessionState(handle.state); + // The config should have already been validated by the QuicEndpoing const { ondatagram, ondatagramstatus, @@ -1402,37 +793,47 @@ class Session { onsessionticket, onstream, onversionnegotiation, + stream, } = config; + if (ondatagram !== undefined) { - validateFunction(ondatagram, 'config.ondatagram'); - validateFunction(ondatagramstatus, 'config.ondatagramstatus'); - this.#state.hasDatagramListener = true; - this.#ondatagram = ondatagram; - this.#ondatagramstatus = ondatagramstatus; + this.#ondatagram = ondatagram.bind(this); + } + if (ondatagramstatus !== undefined) { + this.#ondatagramstatus = ondatagramstatus.bind(this); } - validateFunction(onhandshake, 'config.onhandshake'); if (onpathvalidation !== undefined) { - validateFunction(onpathvalidation, 'config.onpathvalidation'); - 
this.#state.hasPathValidationListener = true; - this.#onpathvalidation = onpathvalidation; + this.#onpathvalidation = onpathvalidation.bind(this); } if (onsessionticket !== undefined) { - validateFunction(onsessionticket, 'config.onsessionticket'); - this.#state.hasSessionTicketListener = true; - this.#onsessionticket = onsessionticket; + this.#onsessionticket = onsessionticket.bind(this); } - validateFunction(onstream, 'config.onstream'); if (onversionnegotiation !== undefined) { - validateFunction(onversionnegotiation, 'config.onversionnegotiation'); - this.#state.hasVersionNegotiationListener = true; - this.#onversionnegotiation = onversionnegotiation; + this.#onversionnegotiation = onversionnegotiation.bind(this); + } + if (onhandshake !== undefined) { + this.#onhandshake = onhandshake.bind(this); + } + + // It is ok for onstream to be undefined if the session is not expecting + // or wanting to receive incoming streams. If a stream is received and + // no onstream callback is specified, a warning will be emitted and the + // stream will just be immediately destroyed. + if (onstream !== undefined) { + this.#onstream = onstream.bind(this); } - this.#onhandshake = onhandshake; - this.#onstream = onstream; - this.#handle = handle; this.#endpoint = endpoint; - this.#pendingClose = Promise.withResolvers(); // eslint-disable-line node-core/prefer-primordials + this.#streamConfig = stream; + + this.#handle = handle; this.#handle[kOwner] = this; + this.#stats = new QuicSessionStats(kPrivateConstructor, handle.stats); + + this.#state = new QuicSessionState(kPrivateConstructor, handle.state); + this.#state.hasDatagramListener = !!ondatagram; + this.#state.hasPathValidationListener = !!onpathvalidation; + this.#state.hasSessionTicketListener = !!onsessionticket; + this.#state.hasVersionNegotiationListener = !!onversionnegotiation; } /** @type {boolean} */ @@ -1440,18 +841,21 @@ class Session { return this.#handle === undefined || this.#isPendingClose; } - /** @type {SessionStats} */ + /** @type {QuicSessionStats} */ get stats() { return this.#stats; } - /** @type {SessionState} */ + /** @type {QuicSessionState} */ get state() { return this.#state; } - /** @type {Endpoint} */ + /** @type {QuicEndpoint} */ get endpoint() { return this.#endpoint; } - /** @type {Path} */ + /** + * The path is the local and remote addresses of the session. 
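The path accessor introduced here lazily fetches the remote address from the C++ handle on first read and caches it. A hedged sketch of consuming it; the exact shape of the Path object is not visible in this hunk, so { local, remote } SocketAddress properties are assumed from the JSDoc description:

    // Assuming an established internal QuicSession:
    const path = session.path;       // undefined once the session is destroyed
    if (path !== undefined) {
      console.log(path.remote.address, path.remote.port); // net.SocketAddress fields
    }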
+ * @type {Path} + */ get path() { - if (this.#isClosedOrClosing) return undefined; + if (this.destroyed) return undefined; if (this.#remoteAddress === undefined) { const addr = this.#handle.getRemoteAddress(); if (addr !== undefined) { @@ -1478,28 +882,85 @@ class Session { if (handle === undefined) { throw new ERR_QUIC_OPEN_STREAM_FAILED(); } - const stream = new QuicStream(this.#streamConfig, handle, this, 0); - ArrayPrototypePush(this.#streams, stream); + const stream = new QuicStream(kPrivateConstructor, this.#streamConfig, handle, + this, 0 /* Bidirectional */); + this.#streams.add(stream); + + if (onSessionOpenStreamChannel.hasSubscribers) { + onSessionOpenStreamChannel.publish({ + stream, + session: this, + }); + } + return stream; + } + + /** + * @returns {QuicStream} + */ + openUnidirectionalStream() { + if (this.#isClosedOrClosing) { + throw new ERR_INVALID_STATE('Session is closed'); + } + if (!this.state.isStreamOpenAllowed) { + throw new ERR_QUIC_OPEN_STREAM_FAILED(); + } + const handle = this.#handle.openStream(STREAM_DIRECTION_UNIDIRECTIONAL); + if (handle === undefined) { + throw new ERR_QUIC_OPEN_STREAM_FAILED(); + } + const stream = new QuicStream(kPrivateConstructor, this.#streamConfig, handle, + this, 1 /* Unidirectional */); + this.#streams.add(stream); + + if (onSessionOpenStreamChannel.hasSubscribers) { + onSessionOpenStreamChannel.publish({ + stream, + session: this, + }); + } + return stream; } /** - * @returns {QuicStream} + * Send a datagram. The id of the sent datagram will be returned. The status + * of the sent datagram will be reported via the datagram-status event if + * possible. + * + * If a string is given it will be encoded as UTF-8. + * + * If an ArrayBufferView is given, the view will be copied. + * @param {ArrayBufferView|string} datagram The datagram payload + * @returns {bigint} The datagram ID */ - openUnidirectionalStream() { + sendDatagram(datagram) { if (this.#isClosedOrClosing) { throw new ERR_INVALID_STATE('Session is closed'); } - if (!this.state.isStreamOpenAllowed) { - throw new ERR_QUIC_OPEN_STREAM_FAILED(); + if (typeof datagram === 'string') { + datagram = Buffer.from(datagram, 'utf8'); + } else { + if (!isArrayBufferView(datagram)) { + throw new ERR_INVALID_ARG_TYPE('datagram', + ['ArrayBufferView', 'string'], + datagram); + } + datagram = new Uint8Array(ArrayBufferPrototypeTransfer(datagram.buffer), + datagram.byteOffset, + datagram.byteLength); } - const handle = this.#handle.openStream(STREAM_DIRECTION_UNIDIRECTIONAL); - if (handle === undefined) { - throw new ERR_QUIC_OPEN_STREAM_FAILED(); + const id = this.#handle.sendDatagram(datagram); + + if (onSessionSendDatagramChannel.hasSubscribers) { + onSessionSendDatagramChannel.publish({ + id, + length: datagram.byteLength, + session: this, + }); } - const stream = new QuicStream(this.#streamConfig, handle, this, 1); - ArrayPrototypePush(this.#streams, stream); - return stream; + + return id; } /** @@ -1510,6 +971,11 @@ class Session { throw new ERR_INVALID_STATE('Session is closed'); } this.#handle.updateKey(); + if (onSessionUpdateKeyChannel.hasSubscribers) { + onSessionUpdateKeyChannel.publish({ + session: this, + }); + } } /** @@ -1526,6 +992,11 @@ class Session { if (!this.#isClosedOrClosing) { this.#isPendingClose = true; this.#handle?.gracefulClose(); + if (onSessionClosingChannel.hasSubscribers) { + onSessionClosingChannel.publish({ + session: this, + }); + } } return this.closed; } @@ -1547,39 +1018,98 @@ class Session { * the closed promise will be rejected with that error. 
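Note the behavior of sendDatagram() above: string payloads are UTF-8 encoded, while for an ArrayBufferView the underlying ArrayBuffer is transferred (ArrayBufferPrototypeTransfer), detaching it from the caller, despite the older JSDoc wording about copying. A hedged sketch, with `session` assumed to be an established internal QuicSession:

    const id = session.sendDatagram('hello');  // returns the bigint datagram id
    const payload = new Uint8Array([1, 2, 3]);
    session.sendDatagram(payload);
    console.log(payload.buffer.byteLength);    // 0: the buffer was detached, so the
                                               // view must not be reused afterwards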
If no error is given, * the closed promise will be resolved. * @param {any} error + * @return {Promise} Returns this.closed */ destroy(error) { - // TODO(@jasnell): Implement. + if (this.destroyed) return; + // First, forcefully and immediately destroy all open streams, if any. + for (const stream of this.#streams) { + stream.destroy(error); + } + // The streams should remove themselves when they are destroyed but let's + // be doubly sure. + if (this.#streams.size) { + process.emitWarning( + `The session is destroyed with ${this.#streams.size} active streams. ` + + 'This should not happen and indicates a bug in Node.js. Please open an ' + + 'issue in the Node.js GitHub repository at https://github.com/nodejs/node ' + + 'to report the problem.', + ); + } + this.#streams.clear(); + + // Remove this session immediately from the endpoint + this.#endpoint[kRemoveSession](this); + this.#endpoint = undefined; + this.#isPendingClose = false; + + if (error) { + // If the session is still waiting to be closed, and error + // is specified, reject the closed promise. + this.#pendingClose.reject?.(error); + } else { + this.#pendingClose.resolve?.(); + } + this.#pendingClose.reject = undefined; + this.#pendingClose.resolve = undefined; + + this.#remoteAddress = undefined; + this.#state[kFinishClose](); + this.#stats[kFinishClose](); + + this.#onstream = undefined; + this.#ondatagram = undefined; + this.#ondatagramstatus = undefined; + this.#onpathvalidation = undefined; + this.#onsessionticket = undefined; + this.#onversionnegotiation = undefined; + this.#onhandshake = undefined; + this.#streamConfig = undefined; + + // Destroy the underlying C++ handle + this.#handle.destroy(); + this.#handle = undefined; + + if (onSessionClosedChannel.hasSubscribers) { + onSessionClosedChannel.publish({ + session: this, + }); + } + + return this.closed; } /** - * Send a datagram. The id of the sent datagram will be returned. The status - * of the sent datagram will be reported via the datagram-status event if - * possible. - * - * If a string is given it will be encoded as UTF-8. - * - * If an ArrayBufferView is given, the view will be copied. - * @param {ArrayBufferView|string} datagram The datagram payload - * @returns {bigint} The datagram ID + * @param {number} errorType + * @param {number} code + * @param {string} [reason] */ - sendDatagram(datagram) { - if (this.#isClosedOrClosing) { - throw new ERR_INVALID_STATE('Session is closed'); + [kFinishClose](errorType, code, reason) { + // If code is zero, then we closed without an error. Yay! We can destroy + // safely without specifying an error. + if (code === 0) { + this.destroy(); + return; } - if (typeof datagram === 'string') { - datagram = Buffer.from(datagram, 'utf8'); - } else { - if (!isArrayBufferView(datagram)) { - throw new ERR_INVALID_ARG_TYPE('datagram', - ['ArrayBufferView', 'string'], - datagram); + + // Otherwise, errorType indicates the type of error that occurred, code indicates + // the specific error, and reason is an optional string describing the error. + switch (errorType) { + case 0: /* Transport Error */ + this.destroy(new ERR_QUIC_TRANSPORT_ERROR(code, reason)); + break; + case 1: /* Application Error */ + this.destroy(new ERR_QUIC_APPLICATION_ERROR(code, reason)); + break; + case 2: /* Version Negotiation Error */ + this.destroy(new ERR_QUIC_VERSION_NEGOTIATION_ERROR()); + break; + case 3: /* Idle close */ { + // An idle close is not really an error. We can just destroy. 
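destroy() above settles the #pendingClose promise created with Promise.withResolvers(), so every caller observes the same outcome through the closed accessor: resolution on a clean or idle close, rejection when an error is supplied or mapped from the close context. A hedged sketch of the resulting contract (internal API, illustrative only):

    try {
      await session.close();   // graceful: resolves once the session winds down
    } catch (err) {
      // session.destroy(err), or a transport/application error from the peer,
      // rejects the same `closed` promise with that error.
    }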
+ this.destroy(); + break; } - datagram = new Uint8Array(datagram.buffer.transfer(), - datagram.byteOffset, - datagram.byteLength); } - return this.#handle.sendDatagram(datagram); } /** @@ -1587,8 +1117,19 @@ class Session { * @param {boolean} early */ [kDatagram](u8, early) { + // The datagram event should only be called if the session was created with + // an ondatagram callback. The callback should always exist here. + assert(this.#ondatagram, 'Unexpected datagram event'); if (this.destroyed) return; - this.#ondatagram(u8, this, early); + this.#ondatagram(u8, early); + + if (onSessionReceiveDatagramChannel.hasSubscribers) { + onSessionReceiveDatagramChannel.publish({ + length: u8.byteLength, + early, + session: this, + }); + } } /** @@ -1597,7 +1138,17 @@ class Session { */ [kDatagramStatus](id, status) { if (this.destroyed) return; - this.#ondatagramstatus(id, status, this); + // The ondatagramstatus callback may not have been specified. That's ok. + // We'll just ignore the event in that case. + this.#ondatagramstatus?.(id, status); + + if (onSessionReceiveDatagramStatusChannel.hasSubscribers) { + onSessionReceiveDatagramStatusChannel.publish({ + id, + status, + session: this, + }); + } } /** @@ -1610,18 +1161,41 @@ class Session { */ [kPathValidation](result, newLocalAddress, newRemoteAddress, oldLocalAddress, oldRemoteAddress, preferredAddress) { + // The path validation event should only be called if the session was created + // with an onpathvalidation callback. The callback should always exist here. + assert(this.#onpathvalidation, 'Unexpected path validation event'); if (this.destroyed) return; this.#onpathvalidation(result, newLocalAddress, newRemoteAddress, - oldLocalAddress, oldRemoteAddress, preferredAddress, - this); + oldLocalAddress, oldRemoteAddress, preferredAddress); + + if (onSessionPathValidationChannel.hasSubscribers) { + onSessionPathValidationChannel.publish({ + result, + newLocalAddress, + newRemoteAddress, + oldLocalAddress, + oldRemoteAddress, + preferredAddress, + session: this, + }); + } } /** * @param {object} ticket */ [kSessionTicket](ticket) { + // The session ticket event should only be called if the session was created + // with an onsessionticket callback. The callback should always exist here. + assert(this.#onsessionticket, 'Unexpected session ticket event'); if (this.destroyed) return; - this.#onsessionticket(ticket, this); + this.#onsessionticket(ticket); + if (onSessionTicketChannel.hasSubscribers) { + onSessionTicketChannel.publish({ + ticket, + session: this, + }); + } } /** @@ -1630,8 +1204,21 @@ class Session { * @param {number[]} supportedVersions */ [kVersionNegotiation](version, requestedVersions, supportedVersions) { + // The version negotiation event should only be called if the session was + // created with an onversionnegotiation callback. The callback should always + // exist here. 
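As the comments note, a version negotiation event reaches JavaScript only when the session was created with an onversionnegotiation callback, and the session is destroyed immediately after the callback returns (see the destroy() call just below). The callback is therefore purely informational; a hedged sketch of one such hypothetical callback:

    function onversionnegotiation(version, requestedVersions, supportedVersions) {
      console.log(`peer negotiated away from version ${version}`,
                  { requestedVersions, supportedVersions });
      // Do not schedule further work on the session here; it is about to be
      // destroyed with ERR_QUIC_VERSION_NEGOTIATION_ERROR.
    }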
if (this.destroyed) return; - this.#onversionnegotiation(version, requestedVersions, supportedVersions, this); + this.#onversionnegotiation(version, requestedVersions, supportedVersions); + this.destroy(new ERR_QUIC_VERSION_NEGOTIATION_ERROR()); + + if (onSessionVersionNegotiationChannel.hasSubscribers) { + onSessionVersionNegotiationChannel.publish({ + version, + requestedVersions, + supportedVersions, + session: this, + }); + } } /** @@ -1646,8 +1233,23 @@ class Session { [kHandshake](sni, alpn, cipher, cipherVersion, validationErrorReason, validationErrorCode, earlyDataAccepted) { if (this.destroyed) return; - this.#onhandshake(sni, alpn, cipher, cipherVersion, validationErrorReason, - validationErrorCode, earlyDataAccepted, this); + // The onhandshake callback may not have been specified. That's ok. + // We'll just ignore the event in that case. + this.#onhandshake?.(sni, alpn, cipher, cipherVersion, validationErrorReason, + validationErrorCode, earlyDataAccepted); + + if (onSessionHandshakeChannel.hasSubscribers) { + onSessionHandshakeChannel.publish({ + sni, + alpn, + cipher, + cipherVersion, + validationErrorReason, + validationErrorCode, + earlyDataAccepted, + session: this, + }); + } } /** @@ -1655,9 +1257,30 @@ class Session { * @param {number} direction */ [kNewStream](handle, direction) { - const stream = new QuicStream(this.#streamConfig, handle, this, direction); - ArrayPrototypePush(this.#streams, stream); - this.#onstream(stream, this); + const stream = new QuicStream(kPrivateConstructor, this.#streamConfig, handle, + this, direction); + + // A new stream was received. If we don't have an onstream callback, then + // there's nothing we can do about it. Destroy the stream in this case. + if (this.#onstream === undefined) { + process.emitWarning('A new stream was received but no onstream callback was provided'); + stream.destroy(); + return; + } + this.#streams.add(stream); + + this.#onstream(stream); + + if (onSessionReceivedStreamChannel.hasSubscribers) { + onSessionReceivedStreamChannel.publish({ + stream, + session: this, + }); + } + } + + [kRemoveStream](stream) { + this.#streams.delete(stream); } [kInspect](depth, options) { @@ -1669,7 +1292,7 @@ class Session { depth: options.depth == null ? null : options.depth - 1, }; - return `Session ${inspect({ + return `QuicSession ${inspect({ closed: this.closed, closing: this.#isPendingClose, destroyed: this.destroyed, @@ -1681,331 +1304,189 @@ class Session { }, opts)}`; } - [kFinishClose](errorType, code, reason) { - // TODO(@jasnell): Finish the implementation - } - async [SymbolAsyncDispose]() { await this.close(); } } -class EndpointStats { - /** @type {BigUint64Array} */ +class QuicEndpoint { + /** + * The local socket address on which the endpoint is listening (lazily created) + * @type {SocketAddress|undefined} + */ + #address = undefined; + /** + * When true, the endpoint has been marked busy and is temporarily not accepting + * new sessions (only used when the Endpoint is acting as a server) + * @type {boolean} + */ + #busy = false; + /** + * The underlying C++ handle for the endpoint. When undefined the endpoint is + * considered to be closed. + * @type {object} + */ #handle; - /** - * @param {ArrayBuffer} buffer + * True if endpoint.close() has been called and the [kFinishClose] method has + * not yet been called. 
+ * @type {boolean} */ - constructor(buffer) { - if (!isArrayBuffer(buffer)) { - throw new ERR_INVALID_ARG_TYPE('buffer', ['ArrayBuffer'], buffer); - } - this.#handle = new BigUint64Array(buffer); - } - - /** @type {bigint} */ - get createdAt() { - return this.#handle[IDX_STATS_ENDPOINT_CREATED_AT]; - } - - /** @type {bigint} */ - get destroyedAt() { - return this.#handle[IDX_STATS_ENDPOINT_DESTROYED_AT]; - } - - /** @type {bigint} */ - get bytesReceived() { - return this.#handle[IDX_STATS_ENDPOINT_BYTES_RECEIVED]; - } - - /** @type {bigint} */ - get bytesSent() { - return this.#handle[IDX_STATS_ENDPOINT_BYTES_SENT]; - } - - /** @type {bigint} */ - get packetsReceived() { - return this.#handle[IDX_STATS_ENDPOINT_PACKETS_RECEIVED]; - } - - /** @type {bigint} */ - get packetsSent() { - return this.#handle[IDX_STATS_ENDPOINT_PACKETS_SENT]; - } - - /** @type {bigint} */ - get serverSessions() { - return this.#handle[IDX_STATS_ENDPOINT_SERVER_SESSIONS]; - } - - /** @type {bigint} */ - get clientSessions() { - return this.#handle[IDX_STATS_ENDPOINT_CLIENT_SESSIONS]; - } - - /** @type {bigint} */ - get serverBusyCount() { - return this.#handle[IDX_STATS_ENDPOINT_SERVER_BUSY_COUNT]; - } - - /** @type {bigint} */ - get retryCount() { - return this.#handle[IDX_STATS_ENDPOINT_RETRY_COUNT]; - } - - /** @type {bigint} */ - get versionNegotiationCount() { - return this.#handle[IDX_STATS_ENDPOINT_VERSION_NEGOTIATION_COUNT]; - } + #isPendingClose = false; + /** + * True if the endpoint is acting as a server and actively listening for connections. + * @type {boolean} + */ + #listening = false; + /** + * A promise that is resolved when the endpoint has been closed (or rejected if + * the endpoint closes abruptly due to an error). + * @type {PromiseWithResolvers} + */ + #pendingClose = Promise.withResolvers(); // eslint-disable-line node-core/prefer-primordials + /** + * If destroy() is called with an error, the error is stored here and used to reject + * the pendingClose promise when [kFinishClose] is called. + * @type {any} + */ + #pendingError = undefined; + /** + * The collection of active sessions. + * @type {Set} + */ + #sessions = new SafeSet(); + /** + * The internal state of the endpoint. Used to efficiently track and update the + * state of the underlying c++ endpoint handle. + * @type {QuicEndpointState} + */ + #state; + /** + * The collected statistics for the endpoint. + * @type {QuicEndpointStats} + */ + #stats; + /** + * The user provided callback that is invoked when a new session is received. 
+ * (used only when the endpoint is acting as a server) + * @type {OnSessionCallback} + */ + #onsession; + /** + * The callback configuration used for new sessions (client or server) + * @type {ProcessedSessionCallbackConfiguration} + */ + #sessionConfig; - /** @type {bigint} */ - get statelessResetCount() { - return this.#handle[IDX_STATS_ENDPOINT_STATELESS_RESET_COUNT]; - } + /** + * @param {EndpointCallbackConfiguration} config + * @returns {StreamCallbackConfiguration} + */ + #processStreamConfig(config) { + validateObject(config, 'config'); + const { + onblocked, + onreset, + onheaders, + ontrailers, + } = config; - /** @type {bigint} */ - get immediateCloseCount() { - return this.#handle[IDX_STATS_ENDPOINT_IMMEDIATE_CLOSE_COUNT]; - } + if (onblocked !== undefined) { + validateFunction(onblocked, 'config.onblocked'); + } + if (onreset !== undefined) { + validateFunction(onreset, 'config.onreset'); + } + if (onheaders !== undefined) { + validateFunction(onheaders, 'config.onheaders'); + } + if (ontrailers !== undefined) { + validateFunction(ontrailers, 'ontrailers'); + } - toJSON() { return { __proto__: null, - createdAt: `${this.createdAt}`, - destroyedAt: `${this.destroyedAt}`, - bytesReceived: `${this.bytesReceived}`, - bytesSent: `${this.bytesSent}`, - packetsReceived: `${this.packetsReceived}`, - packetsSent: `${this.packetsSent}`, - serverSessions: `${this.serverSessions}`, - clientSessions: `${this.clientSessions}`, - serverBusyCount: `${this.serverBusyCount}`, - retryCount: `${this.retryCount}`, - versionNegotiationCount: `${this.versionNegotiationCount}`, - statelessResetCount: `${this.statelessResetCount}`, - immediateCloseCount: `${this.immediateCloseCount}`, - }; - } - - [kInspect](depth, options) { - if (depth < 0) - return this; - - const opts = { - ...options, - depth: options.depth == null ? null : options.depth - 1, + onblocked, + onreset, + onheaders, + ontrailers, }; - - return `EndpointStats ${inspect({ - createdAt: this.createdAt, - destroyedAt: this.destroyedAt, - bytesReceived: this.bytesReceived, - bytesSent: this.bytesSent, - packetsReceived: this.packetsReceived, - packetsSent: this.packetsSent, - serverSessions: this.serverSessions, - clientSessions: this.clientSessions, - serverBusyCount: this.serverBusyCount, - retryCount: this.retryCount, - versionNegotiationCount: this.versionNegotiationCount, - statelessResetCount: this.statelessResetCount, - immediateCloseCount: this.immediateCloseCount, - }, opts)}`; - } - - [kFinishClose]() { - // Snapshot the stats into a new BigUint64Array since the underlying - // buffer will be destroyed. 
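#processStreamConfig() above, and the #processSessionConfig()/#processEndpointConfig() helpers that follow, only validate that each supplied callback is a function; everything is optional at construction time (listen() later insists on onsession). A hedged sketch of the combined callback configuration they accept, with signatures taken from the call sites in this file (onsession's signature is assumed, as its call site is not in this hunk):

    const config = {
      onsession(session) {},           // endpoint level: inbound server sessions
      onstream(stream) {},             // session level: incoming streams
      ondatagram(u8, early) {},
      ondatagramstatus(id, status) {},
      onblocked(stream) {},            // stream level
      onreset(error, stream) {},
      onheaders(headers, kind, stream) {},
      ontrailers(stream) {},
    };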
- this.#handle = new BigUint64Array(this.#handle); } -} - -class EndpointState { - /** @type {DataView} */ - #handle; /** - * @param {ArrayBuffer} buffer + * + * @param {EndpointCallbackConfiguration} config + * @returns {ProcessedSessionCallbackConfiguration} */ - constructor(buffer) { - if (!isArrayBuffer(buffer)) { - throw new ERR_INVALID_ARG_TYPE('buffer', ['ArrayBuffer'], buffer); + #processSessionConfig(config) { + validateObject(config, 'config'); + const { + onstream, + ondatagram, + ondatagramstatus, + onpathvalidation, + onsessionticket, + onversionnegotiation, + onhandshake, + } = config; + if (onstream !== undefined) { + validateFunction(onstream, 'config.onstream'); } - this.#handle = new DataView(buffer); - } - - #assertNotClosed() { - if (this.#handle.byteLength === 0) { - throw new ERR_INVALID_STATE('Endpoint is closed'); + if (ondatagram !== undefined) { + validateFunction(ondatagram, 'config.ondatagram'); + } + if (ondatagramstatus !== undefined) { + validateFunction(ondatagramstatus, 'config.ondatagramstatus'); + } + if (onpathvalidation !== undefined) { + validateFunction(onpathvalidation, 'config.onpathvalidation'); + } + if (onsessionticket !== undefined) { + validateFunction(onsessionticket, 'config.onsessionticket'); + } + if (onversionnegotiation !== undefined) { + validateFunction(onversionnegotiation, 'config.onversionnegotiation'); + } + if (onhandshake !== undefined) { + validateFunction(onhandshake, 'config.onhandshake'); } - } - - /** @type {boolean} */ - get isBound() { - this.#assertNotClosed(); - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_ENDPOINT_BOUND); - } - - /** @type {boolean} */ - get isReceiving() { - this.#assertNotClosed(); - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_ENDPOINT_RECEIVING); - } - - /** @type {boolean} */ - get isListening() { - this.#assertNotClosed(); - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_ENDPOINT_LISTENING); - } - - /** @type {boolean} */ - get isClosing() { - this.#assertNotClosed(); - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_ENDPOINT_CLOSING); - } - - /** @type {boolean} */ - get isBusy() { - this.#assertNotClosed(); - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_ENDPOINT_BUSY); - } - - /** - * The number of underlying callbacks that are pending. If the session - * is closing, these are the number of callbacks that the session is - * waiting on before it can be closed. 
- * @type {bigint} - */ - get pendingCallbacks() { - this.#assertNotClosed(); - return DataViewPrototypeGetBigUint64(this.#handle, IDX_STATE_ENDPOINT_PENDING_CALLBACKS); - } - - toJSON() { - if (this.#handle.byteLength === 0) return {}; return { __proto__: null, - isBound: this.isBound, - isReceiving: this.isReceiving, - isListening: this.isListening, - isClosing: this.isClosing, - isBusy: this.isBusy, - pendingCallbacks: `${this.pendingCallbacks}`, + onstream, + ondatagram, + ondatagramstatus, + onpathvalidation, + onsessionticket, + onversionnegotiation, + onhandshake, + stream: this.#processStreamConfig(config), }; } - [kInspect](depth, options) { - if (depth < 0) - return this; + /** + * @param {EndpointCallbackConfiguration} config + * @returns {ProcessedEndpointCallbackConfiguration} + */ + #processEndpointConfig(config) { + validateObject(config, 'config'); + const { + onsession, + } = config; - if (this.#handle.byteLength === 0) { - return 'EndpointState { }'; + if (onsession !== undefined) { + validateFunction(config.onsession, 'config.onsession'); } - const opts = { - ...options, - depth: options.depth == null ? null : options.depth - 1, + return { + __proto__: null, + onsession, + session: this.#processSessionConfig(config), }; - - return `EndpointState ${inspect({ - isBound: this.isBound, - isReceiving: this.isReceiving, - isListening: this.isListening, - isClosing: this.isClosing, - isBusy: this.isBusy, - pendingCallbacks: this.pendingCallbacks, - }, opts)}`; - } - - [kFinishClose]() { - // Snapshot the state into a new DataView since the underlying - // buffer will be destroyed. - if (this.#handle.byteLength === 0) return; - this.#handle = new DataView(new ArrayBuffer(0)); } -} - -function validateStreamConfig(config, name = 'config') { - validateObject(config, name); - if (config.onerror !== undefined) - validateFunction(config.onerror, `${name}.onerror`); - if (config.onblocked !== undefined) - validateFunction(config.onblocked, `${name}.onblocked`); - if (config.onreset !== undefined) - validateFunction(config.onreset, `${name}.onreset`); - if (config.onheaders !== undefined) - validateFunction(config.onheaders, `${name}.onheaders`); - if (config.ontrailers !== undefined) - validateFunction(config.ontrailers, `${name}.ontrailers`); - return config; -} - -function validateSessionConfig(config, name = 'config') { - validateObject(config, name); - if (config.onstream !== undefined) - validateFunction(config.onstream, `${name}.onstream`); - if (config.ondatagram !== undefined) - validateFunction(config.ondatagram, `${name}.ondatagram`); - if (config.ondatagramstatus !== undefined) - validateFunction(config.ondatagramstatus, `${name}.ondatagramstatus`); - if (config.onpathvalidation !== undefined) - validateFunction(config.onpathvalidation, `${name}.onpathvalidation`); - if (config.onsessionticket !== undefined) - validateFunction(config.onsessionticket, `${name}.onsessionticket`); - if (config.onversionnegotiation !== undefined) - validateFunction(config.onversionnegotiation, `${name}.onversionnegotiation`); - if (config.onhandshake !== undefined) - validateFunction(config.onhandshake, `${name}.onhandshake`); - return config; -} - -function validateEndpointConfig(config) { - validateObject(config, 'config'); - validateFunction(config.onsession, 'config.onsession'); - if (config.session !== undefined) - validateSessionConfig(config.session, 'config.session'); - if (config.stream !== undefined) - validateStreamConfig(config.stream, 'config.stream'); - return config; -} - -class 
Endpoint { - /** @type {SocketAddress|undefined} */ - #address = undefined; - /** @type {boolean} */ - #busy = false; - /** @type {object} */ - #handle; - /** @type {boolean} */ - #isPendingClose = false; - /** @type {boolean} */ - #listening = false; - /** @type {PromiseWithResolvers} */ - #pendingClose; - /** @type {any} */ - #pendingError = undefined; - /** @type {Session[]} */ - #sessions = []; - /** @type {EndpointState} */ - #state; - /** @type {EndpointStats} */ - #stats; - /** @type {OnSessionCallback} */ - #onsession; - /** @type {SessionCallbackConfiguration} */ - #sessionConfig; - /** @type {StreamCallbackConfiguration */ - #streamConfig; /** - * @param {EndpointCallbackConfiguration} config - * @param {EndpointOptions} [options] + * @param {EndpointCallbackConfiguration} options + * @returns {EndpointOptions} */ - constructor(config, options = kEmptyObject) { - validateEndpointConfig(config); - this.#onsession = config.onsession; - this.#sessionConfig = config.session; - this.#streamConfig = config.stream; - + #processEndpointOptions(options) { validateObject(options, 'options'); let { address } = options; const { @@ -2044,8 +1525,7 @@ class Endpoint { } } - this.#pendingClose = Promise.withResolvers(); // eslint-disable-line node-core/prefer-primordials - this.#handle = new Endpoint_({ + return { __proto__: null, address: address?.[kSocketAddressHandle], retryTokenExpiration, @@ -2072,16 +1552,49 @@ class Endpoint { cc, resetTokenSecret, tokenSecret, - }); + }; + } + + #newSession(handle) { + const session = new QuicSession(kPrivateConstructor, this.#sessionConfig, handle, this); + this.#sessions.add(session); + return session; + } + + /** + * @param {EndpointCallbackConfiguration} config + */ + constructor(config = kEmptyObject) { + const { + onsession, + session, + } = this.#processEndpointConfig(config); + + // Note that the onsession callback is only used for server sessions. + // If the callback is not specified, calling listen() will fail but + // connect() can still be called. + if (onsession !== undefined) { + this.#onsession = onsession.bind(this); + } + this.#sessionConfig = session; + + this.#handle = new Endpoint_(this.#processEndpointOptions(config)); this.#handle[kOwner] = this; - this.#stats = new EndpointStats(this.#handle.stats); - this.#state = new EndpointState(this.#handle.state); + this.#stats = new QuicEndpointStats(kPrivateConstructor, this.#handle.stats); + this.#state = new QuicEndpointState(kPrivateConstructor, this.#handle.state); + + if (onEndpointCreatedChannel.hasSubscribers) { + onEndpointCreatedChannel.publish({ + endpoint: this, + config, + }); + } } - /** @type {EndpointStats} */ + /** @type {QuicEndpointStats} */ get stats() { return this.#stats; } - /** @type {EndpointState} */ + /** @type {QuicEndpointState} */ get state() { return this.#state; } get #isClosedOrClosing() { @@ -2103,11 +1616,16 @@ class Endpoint { throw new ERR_INVALID_STATE('Endpoint is closed'); } // The val is allowed to be any truthy value - val = !!val; // Non-op if there is no change - if (val !== this.#busy) { - this.#busy = val; + if (!!val !== this.#busy) { + this.#busy = !this.#busy; this.#handle.markBusy(this.#busy); + if (onEndpointBusyChangeChannel.hasSubscribers) { + onEndpointBusyChangeChannel.publish({ + endpoint: this, + busy: this.#busy, + }); + } } } @@ -2125,19 +1643,127 @@ class Endpoint { } /** - * Configures the endpoint to listen for incoming connections. 
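The #processTlsOptions() hunk that follows tightens the old free function: ciphers and groups gain defaults, the boolean flags are validated, certs/ca/crl entries must be ArrayBuffers or views, and every entry in keys must be a private KeyObject or CryptoKey. A hedged sketch of options that would pass that validation; the file name and ALPN value are illustrative only:

    const { generateKeyPairSync } = require('node:crypto');
    const { readFileSync } = require('node:fs');

    const { privateKey } = generateKeyPairSync('ec', { namedCurve: 'prime256v1' });
    const tls = {
      alpn: 'h3',                      // validated as a string
      keys: privateKey,                // single key or array; must be type 'private'
      certs: readFileSync('cert.pem'), // hypothetical path; a Buffer is an ArrayBufferView
    };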
- * @param {SessionOptions} [options] + * @param {TlsOptions} tls */ - listen(options = kEmptyObject) { - if (this.#isClosedOrClosing) { - throw new ERR_INVALID_STATE('Endpoint is closed'); + #processTlsOptions(tls) { + validateObject(tls, 'options.tls'); + const { + sni, + alpn, + ciphers = DEFAULT_CIPHERS, + groups = DEFAULT_GROUPS, + keylog = false, + verifyClient = false, + tlsTrace = false, + verifyPrivateKey = false, + keys, + certs, + ca, + crl, + } = tls; + + if (sni !== undefined) { + validateString(sni, 'options.tls.sni'); } - if (this.#listening) { - throw new ERR_INVALID_STATE('Endpoint is already listening'); + if (alpn !== undefined) { + validateString(alpn, 'options.tls.alpn'); + } + if (ciphers !== undefined) { + validateString(ciphers, 'options.tls.ciphers'); + } + if (groups !== undefined) { + validateString(groups, 'options.tls.groups'); + } + validateBoolean(keylog, 'options.tls.keylog'); + validateBoolean(verifyClient, 'options.tls.verifyClient'); + validateBoolean(tlsTrace, 'options.tls.tlsTrace'); + validateBoolean(verifyPrivateKey, 'options.tls.verifyPrivateKey'); + + if (certs !== undefined) { + const certInputs = ArrayIsArray(certs) ? certs : [certs]; + for (const cert of certInputs) { + if (!isArrayBufferView(cert) && !isArrayBuffer(cert)) { + throw new ERR_INVALID_ARG_TYPE('options.tls.certs', + ['ArrayBufferView', 'ArrayBuffer'], cert); + } + } } - validateObject(options, 'options'); + if (ca !== undefined) { + const caInputs = ArrayIsArray(ca) ? ca : [ca]; + for (const caCert of caInputs) { + if (!isArrayBufferView(caCert) && !isArrayBuffer(caCert)) { + throw new ERR_INVALID_ARG_TYPE('options.tls.ca', + ['ArrayBufferView', 'ArrayBuffer'], caCert); + } + } + } + + if (crl !== undefined) { + const crlInputs = ArrayIsArray(crl) ? crl : [crl]; + for (const crlCert of crlInputs) { + if (!isArrayBufferView(crlCert) && !isArrayBuffer(crlCert)) { + throw new ERR_INVALID_ARG_TYPE('options.tls.crl', + ['ArrayBufferView', 'ArrayBuffer'], crlCert); + } + } + } + const keyHandles = []; + if (keys !== undefined) { + const keyInputs = ArrayIsArray(keys) ? 
keys : [keys]; + for (const key of keyInputs) { + if (isKeyObject(key)) { + if (key.type !== 'private') { + throw new ERR_INVALID_ARG_VALUE('options.tls.keys', key, 'must be a private key'); + } + ArrayPrototypePush(keyHandles, key[kKeyObjectHandle]); + } else if (isCryptoKey(key)) { + if (key.type !== 'private') { + throw new ERR_INVALID_ARG_VALUE('options.tls.keys', key, 'must be a private key'); + } + ArrayPrototypePush(keyHandles, key[kKeyObjectInner][kKeyObjectHandle]); + } else { + throw new ERR_INVALID_ARG_TYPE('options.tls.keys', ['KeyObject', 'CryptoKey'], key); + } + } + } + + return { + __proto__: null, + sni, + alpn, + ciphers, + groups, + keylog, + verifyClient, + tlsTrace, + verifyPrivateKey, + keys: keyHandles, + certs, + ca, + crl, + }; + } + + /** + * @param {'use'|'ignore'|'default'} policy + * @returns {number} + */ + #getPreferredAddressPolicy(policy = 'default') { + switch (policy) { + case 'use': return PREFERRED_ADDRESS_USE; + case 'ignore': return PREFERRED_ADDRESS_IGNORE; + case 'default': return DEFAULT_PREFERRED_ADDRESS_POLICY; + } + throw new ERR_INVALID_ARG_VALUE('options.preferredAddressPolicy', policy); + } + + /** + * @param {SessionOptions} options + */ + #processSessionOptions(options) { + validateObject(options, 'options'); const { version, minVersion, @@ -2146,69 +1772,88 @@ class Endpoint { transportParams = kEmptyObject, tls = kEmptyObject, qlog = false, + sessionTicket, } = options; - this.#handle.listen({ + return { + __proto__: null, version, minVersion, - preferredAddressPolicy: getPreferredAddressPolicy(preferredAddressPolicy), + preferredAddressPolicy: this.#getPreferredAddressPolicy(preferredAddressPolicy), application, transportParams, - tls: processTlsOptions(tls), + tls: this.#processTlsOptions(tls), qlog, - }); + sessionTicket, + }; + } + + /** + * Configures the endpoint to listen for incoming connections. + * @param {SessionOptions} [options] + */ + listen(options = kEmptyObject) { + if (this.#isClosedOrClosing) { + throw new ERR_INVALID_STATE('Endpoint is closed'); + } + if (this.#onsession === undefined) { + throw new ERR_INVALID_STATE( + 'Endpoint is not configured to accept sessions. Specify an onsession ' + + 'callback when creating the endpoint', + ); + } + if (this.#listening) { + throw new ERR_INVALID_STATE('Endpoint is already listening'); + } + this.#handle.listen(this.#processSessionOptions(options)); this.#listening = true; + + if (onEndpointListeningChannel.hasSubscribers) { + onEndpointListeningChannel.publish({ + endpoint: this, + options, + }); + } } /** * Initiates a session with a remote endpoint. 
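listen() above now refuses to start unless an onsession callback was supplied when the endpoint was constructed, and connect() below coerces plain address objects through new SocketAddress(). A hedged end-to-end sketch; QuicEndpoint is internal at this point, so the require path is hypothetical and assumes something like --expose-internals:

    const { QuicEndpoint } = require('internal/quic/quic'); // hypothetical, internal-only
    const endpoint = new QuicEndpoint({
      onsession(session) { /* inbound server sessions arrive here */ },
    });
    endpoint.listen({ tls: { /* keys, certs, alpn, ... */ } });

    // connect() accepts a SocketAddress or a plain object it can coerce:
    const session = endpoint.connect({ address: '127.0.0.1', port: 4433 });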
* @param {SocketAddress} address * @param {SessionOptions} [options] - * @returns {Session} + * @returns {QuicSession} */ connect(address, options = kEmptyObject) { if (this.#isClosedOrClosing) { throw new ERR_INVALID_STATE('Endpoint is closed'); } - if (this.#busy) { - throw new ERR_INVALID_STATE('Endpoint is busy'); - } - if (address === undefined || !SocketAddress.isSocketAddress(address)) { - if (typeof address === 'object' && address !== null) { - address = new SocketAddress(address); - } else { + if (!SocketAddress.isSocketAddress(address)) { + if (address == null || typeof address !== 'object') { throw new ERR_INVALID_ARG_TYPE('address', 'SocketAddress', address); } + address = new SocketAddress(address); } - validateObject(options, 'options'); - const { - version, - minVersion, - preferredAddressPolicy = 'default', - application = kEmptyObject, - transportParams = kEmptyObject, - tls = kEmptyObject, - qlog = false, - sessionTicket, - } = options; + const processedOptions = this.#processSessionOptions(options); + const { sessionTicket } = processedOptions; - const handle = this.#handle.connect(address[kSocketAddressHandle], { - version, - minVersion, - preferredAddressPolicy: getPreferredAddressPolicy(preferredAddressPolicy), - application, - transportParams, - tls: processTlsOptions(tls), - qlog, - }, sessionTicket); + const handle = this.#handle.connect(address[kSocketAddressHandle], + processedOptions, sessionTicket); if (handle === undefined) { throw new ERR_QUIC_CONNECTION_FAILED(); } - const session = new Session(this.#sessionConfig, this.#streamConfig, handle, this); - ArrayPrototypePush(this.#sessions, session); + const session = this.#newSession(handle); + + if (onEndpointClientSessionChannel.hasSubscribers) { + onEndpointClientSessionChannel.publish({ + endpoint: this, + session, + address, + options, + }); + } + return session; } @@ -2218,10 +1863,16 @@ class Endpoint { * not be accepted or created. The returned promise will be resolved when * closing is complete, or will be rejected if the endpoint is closed abruptly * due to an error. - * @returns {Promise} + * @returns {Promise} Returns this.closed */ close() { if (!this.#isClosedOrClosing) { + if (onEndpointClosingChannel.hasSubscribers) { + onEndpointClosingChannel.publish({ + endpoint: this, + hasPendingError: this.#pendingError !== undefined, + }); + } this.#isPendingClose = true; this.#handle?.closeGracefully(); } @@ -2239,12 +1890,22 @@ class Endpoint { /** @type {boolean} */ get destroyed() { return this.#handle === undefined; } + /** + * Return an iterator over all currently active sessions associated + * with this endpoint. + * @type {SetIterator} + */ + get sessions() { + return this.#sessions[SymbolIterator](); + } + /** * Forcefully terminates the endpoint by immediately destroying all sessions * after calling close. If an error is given, the closed promise will be * rejected with that error. If no error is given, the closed promise will * be resolved. 
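Both listen() and connect() above publish to diagnostics channels, and only when channel.hasSubscribers is true, so no message object is allocated for an unobserved channel. A sketch of the subscriber side; the channel name below is a placeholder, since the real names are bound earlier in this file and do not appear in this hunk:

    'use strict';
    const dc = require('node:diagnostics_channel');

    // 'quic.endpoint.connect' is a hypothetical channel name for illustration.
    dc.subscribe('quic.endpoint.connect', (message) => {
      // Shaped like the publish above: { endpoint, session, address, options }.
      console.log('outbound QUIC session to', message.address);
    });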
- * @param {any} error + * @param {any} [error] + * @returns {Promise} Returns this.closed */ destroy(error) { if (!this.#isClosedOrClosing) { @@ -2258,6 +1919,7 @@ class Endpoint { for (const session of this.#sessions) { session.destroy(error); } + return this.closed; } ref() { @@ -2268,63 +1930,100 @@ class Endpoint { if (this.#handle !== undefined) this.#handle.ref(false); } - [kFinishClose](context, status) { - if (this.#handle === undefined) return; - this.#isPendingClose = false; - this.#address = undefined; - this.#busy = false; - this.#listening = false; - this.#isPendingClose = false; - this.#stats[kFinishClose](); - this.#state[kFinishClose](); - this.#sessions = []; - + #maybeGetCloseError(context, status) { switch (context) { case kCloseContextClose: { - if (this.#pendingError !== undefined) { - this.#pendingClose.reject(this.#pendingError); - } else { - this.#pendingClose.resolve(); - } - break; + return this.#pendingError; } case kCloseContextBindFailure: { - this.#pendingClose.reject( - new ERR_QUIC_ENDPOINT_CLOSED('Bind failure', status)); - break; + return new ERR_QUIC_ENDPOINT_CLOSED('Bind failure', status); } case kCloseContextListenFailure: { - this.#pendingClose.reject( - new ERR_QUIC_ENDPOINT_CLOSED('Listen failure', status)); - break; + return new ERR_QUIC_ENDPOINT_CLOSED('Listen failure', status); } case kCloseContextReceiveFailure: { - this.#pendingClose.reject( - new ERR_QUIC_ENDPOINT_CLOSED('Receive failure', status)); - break; + return new ERR_QUIC_ENDPOINT_CLOSED('Receive failure', status); } case kCloseContextSendFailure: { - this.#pendingClose.reject( - new ERR_QUIC_ENDPOINT_CLOSED('Send failure', status)); - break; + return new ERR_QUIC_ENDPOINT_CLOSED('Send failure', status); } case kCloseContextStartFailure: { - this.#pendingClose.reject( - new ERR_QUIC_ENDPOINT_CLOSED('Start failure', status)); - break; + return new ERR_QUIC_ENDPOINT_CLOSED('Start failure', status); } } + // Otherwise return undefined. + } - this.#pendingError = undefined; + [kFinishClose](context, status) { + if (this.#handle === undefined) return; + this.#handle = undefined; + this.#stats[kFinishClose](); + this.#state[kFinishClose](); + this.#address = undefined; + this.#busy = false; + this.#listening = false; + this.#isPendingClose = false; + + // As QuicSessions are closed they are expected to remove themselves + // from the sessions collection. Just in case they don't, let's force + // it by resetting the set so we don't leak memory. Let's emit a warning, + // tho, if the set is not empty at this point as that would indicate a + // bug in Node.js that should be fixed. + if (this.#sessions.size > 0) { + process.emitWarning( + `The endpoint is closed with ${this.#sessions.size} active sessions. ` + + 'This should not happen and indicates a bug in Node.js. Please open an ' + + 'issue in the Node.js GitHub repository at https://github.com/nodejs/node ' + + 'to report the problem.', + ); + } + this.#sessions.clear(); + + // If destroy was called with an error, then the this.#pendingError will be + // set. Or, if context indicates an error condition that caused the endpoint + // to be closed, the status will indicate the error code. In either case, + // we will reject the pending close promise at this point. 
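The close/destroy machinery above hinges on a deferred promise: #pendingClose is created up front, close() and destroy() both return the same closed promise, and [kFinishClose] settles it exactly once. A reduced sketch of that shape, assuming a runtime with Promise.withResolvers (Node.js 22+); the class and method names are illustrative:

    'use strict';

    class Closable {
      #pendingClose = Promise.withResolvers();

      get closed() { return this.#pendingClose.promise; }

      // Settle once, then drop the settle functions so they cannot fire
      // twice -- the same hygiene [kFinishClose] applies above.
      finishClose(error) {
        if (error !== undefined) this.#pendingClose.reject(error);
        else this.#pendingClose.resolve();
        this.#pendingClose.resolve = undefined;
        this.#pendingClose.reject = undefined;
      }
    }

    const c = new Closable();
    c.closed.then(() => console.log('closed cleanly'));
    c.finishClose();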
+ const maybeCloseError = this.#maybeGetCloseError(context, status); + if (maybeCloseError !== undefined) { + if (onEndpointErrorChannel.hasSubscribers) { + onEndpointErrorChannel.publish({ + endpoint: this, + error: maybeCloseError, + }); + } + this.#pendingClose.reject(maybeCloseError); + } else { + // Otherwise we are good to resolve the pending close promise! + this.#pendingClose.resolve(); + } + if (onEndpointClosedChannel.hasSubscribers) { + onEndpointClosedChannel.publish({ + endpoint: this, + }); + } + + // Note that we are intentionally not clearing the + // this.#pendingClose.promise here. this.#pendingClose.resolve = undefined; this.#pendingClose.reject = undefined; - this.#handle = undefined; + this.#pendingError = undefined; } [kNewSession](handle) { - const session = new Session(this.#sessionConfig, this.#streamConfig, handle, this); - ArrayPrototypePush(this.#sessions, session); - this.#onsession(session, this); + const session = this.#newSession(handle); + if (onEndpointServerSessionChannel.hasSubscribers) { + onEndpointServerSessionChannel.publish({ + endpoint: this, + session, + }); + } + this.#onsession(session); + } + + // Called by the QuicSession when it closes to remove itself from + // the active sessions tracked by the QuicEndpoint. + [kRemoveSession](session) { + this.#sessions.delete(session); } async [SymbolAsyncDispose]() { await this.close(); } @@ -2338,7 +2037,7 @@ class Endpoint { depth: options.depth == null ? null : options.depth - 1, }; - return `Endpoint ${inspect({ + return `QuicEndpoint ${inspect({ address: this.address, busy: this.busy, closed: this.closed, @@ -2352,197 +2051,39 @@ class Endpoint { } }; -ObjectDefineProperties(QuicStreamStats.prototype, { - createdAt: kEnumerable, - receivedAt: kEnumerable, - ackedAt: kEnumerable, - closingAt: kEnumerable, - destroyedAt: kEnumerable, - bytesReceived: kEnumerable, - bytesSent: kEnumerable, - maxOffset: kEnumerable, - maxOffsetAcknowledged: kEnumerable, - maxOffsetReceived: kEnumerable, - finalSize: kEnumerable, -}); -ObjectDefineProperties(QuicStreamState.prototype, { - id: kEnumerable, - finSent: kEnumerable, - finReceived: kEnumerable, - readEnded: kEnumerable, - writeEnded: kEnumerable, - destroyed: kEnumerable, - paused: kEnumerable, - reset: kEnumerable, - hasReader: kEnumerable, - wantsBlock: kEnumerable, - wantsHeaders: kEnumerable, - wantsReset: kEnumerable, - wantsTrailers: kEnumerable, -}); -ObjectDefineProperties(QuicStream.prototype, { - stats: kEnumerable, - state: kEnumerable, - session: kEnumerable, - id: kEnumerable, -}); -ObjectDefineProperties(SessionStats.prototype, { - createdAt: kEnumerable, - closingAt: kEnumerable, - destroyedAt: kEnumerable, - handshakeCompletedAt: kEnumerable, - handshakeConfirmedAt: kEnumerable, - gracefulClosingAt: kEnumerable, - bytesReceived: kEnumerable, - bytesSent: kEnumerable, - bidiInStreamCount: kEnumerable, - bidiOutStreamCount: kEnumerable, - uniInStreamCount: kEnumerable, - uniOutStreamCount: kEnumerable, - lossRetransmitCount: kEnumerable, - maxBytesInFlights: kEnumerable, - bytesInFlight: kEnumerable, - blockCount: kEnumerable, - cwnd: kEnumerable, - latestRtt: kEnumerable, - minRtt: kEnumerable, - rttVar: kEnumerable, - smoothedRtt: kEnumerable, - ssthresh: kEnumerable, - datagramsReceived: kEnumerable, - datagramsSent: kEnumerable, - datagramsAcknowledged: kEnumerable, - datagramsLost: kEnumerable, -}); -ObjectDefineProperties(SessionState.prototype, { - hasPathValidationListener: kEnumerable, - hasVersionNegotiationListener: kEnumerable, - 
hasDatagramListener: kEnumerable,
-  hasSessionTicketListener: kEnumerable,
-  isClosing: kEnumerable,
-  isGracefulClose: kEnumerable,
-  isSilentClose: kEnumerable,
-  isStatelessReset: kEnumerable,
-  isDestroyed: kEnumerable,
-  isHandshakeCompleted: kEnumerable,
-  isHandshakeConfirmed: kEnumerable,
-  isStreamOpenAllowed: kEnumerable,
-  isPrioritySupported: kEnumerable,
-  isWrapped: kEnumerable,
-  lastDatagramId: kEnumerable,
-});
-ObjectDefineProperties(Session.prototype, {
-  closed: kEnumerable,
-  destroyed: kEnumerable,
-  endpoint: kEnumerable,
-  path: kEnumerable,
-  state: kEnumerable,
-  stats: kEnumerable,
-});
-ObjectDefineProperties(EndpointStats.prototype, {
-  createdAt: kEnumerable,
-  destroyedAt: kEnumerable,
-  bytesReceived: kEnumerable,
-  bytesSent: kEnumerable,
-  packetsReceived: kEnumerable,
-  packetsSent: kEnumerable,
-  serverSessions: kEnumerable,
-  clientSessions: kEnumerable,
-  serverBusyCount: kEnumerable,
-  retryCount: kEnumerable,
-  versionNegotiationCount: kEnumerable,
-  statelessResetCount: kEnumerable,
-  immediateCloseCount: kEnumerable,
-});
-ObjectDefineProperties(EndpointState.prototype, {
-  isBound: kEnumerable,
-  isReceiving: kEnumerable,
-  isListening: kEnumerable,
-  isClosing: kEnumerable,
-  isBusy: kEnumerable,
-  pendingCallbacks: kEnumerable,
-});
-ObjectDefineProperties(Endpoint.prototype, {
-  address: kEnumerable,
-  busy: kEnumerable,
-  closed: kEnumerable,
-  destroyed: kEnumerable,
-  state: kEnumerable,
-  stats: kEnumerable,
-});
-ObjectDefineProperties(Endpoint, {
-  CC_ALGO_RENO: {
-    __proto__: null,
-    value: CC_ALGO_RENO,
-    writable: false,
-    configurable: false,
-    enumerable: true,
-  },
-  CC_ALGO_CUBIC: {
-    __proto__: null,
-    value: CC_ALGO_CUBIC,
-    writable: false,
-    configurable: false,
-    enumerable: true,
-  },
-  CC_ALGO_BBR: {
-    __proto__: null,
-    value: CC_ALGO_BBR,
-    writable: false,
-    configurable: false,
-    enumerable: true,
-  },
-  CC_ALGO_RENO_STR: {
-    __proto__: null,
-    value: CC_ALGO_RENO_STR,
-    writable: false,
-    configurable: false,
-    enumerable: true,
-  },
-  CC_ALGO_CUBIC_STR: {
-    __proto__: null,
-    value: CC_ALGO_CUBIC_STR,
-    writable: false,
-    configurable: false,
-    enumerable: true,
-  },
-  CC_ALGO_BBR_STR: {
+function readOnlyConstant(value) {
+  return {
     __proto__: null,
-    value: CC_ALGO_BBR_STR,
+    value,
     writable: false,
     configurable: false,
     enumerable: true,
-  },
+  };
+}
+
+ObjectDefineProperties(QuicEndpoint, {
+  CC_ALGO_RENO: readOnlyConstant(CC_ALGO_RENO),
+  CC_ALGO_CUBIC: readOnlyConstant(CC_ALGO_CUBIC),
+  CC_ALGO_BBR: readOnlyConstant(CC_ALGO_BBR),
+  CC_ALGO_RENO_STR: readOnlyConstant(CC_ALGO_RENO_STR),
+  CC_ALGO_CUBIC_STR: readOnlyConstant(CC_ALGO_CUBIC_STR),
+  CC_ALGO_BBR_STR: readOnlyConstant(CC_ALGO_BBR_STR),
 });
-ObjectDefineProperties(Session, {
-  DEFAULT_CIPHERS: {
-    __proto__: null,
-    value: DEFAULT_CIPHERS,
-    writable: false,
-    configurable: false,
-    enumerable: true,
-  },
-  DEFAULT_GROUPS: {
-    __proto__: null,
-    value: DEFAULT_GROUPS,
-    writable: false,
-    configurable: false,
-    enumerable: true,
-  },
+ObjectDefineProperties(QuicSession, {
+  DEFAULT_CIPHERS: readOnlyConstant(DEFAULT_CIPHERS),
+  DEFAULT_GROUPS: readOnlyConstant(DEFAULT_GROUPS),
 });
 
 module.exports = {
-  Endpoint,
-  Session,
+  QuicEndpoint,
+  QuicSession,
   QuicStream,
-  // Exported for testing
-  kFinishClose,
-  SessionState,
-  SessionStats,
+  QuicSessionState,
+  QuicSessionStats,
   QuicStreamState,
   QuicStreamStats,
-  EndpointState,
-  EndpointStats,
+  QuicEndpointState,
+  QuicEndpointStats,
 };
 
 /* c8 ignore stop */
diff --git a/lib/internal/quic/state.js
b/lib/internal/quic/state.js new file mode 100644 index 00000000000000..8bfb2ac83302fb --- /dev/null +++ b/lib/internal/quic/state.js @@ -0,0 +1,566 @@ +'use strict'; + +const { + ArrayBuffer, + DataView, + DataViewPrototypeGetBigInt64, + DataViewPrototypeGetBigUint64, + DataViewPrototypeGetUint8, + DataViewPrototypeSetUint8, + JSONStringify, +} = primordials; + +const { + codes: { + ERR_ILLEGAL_CONSTRUCTOR, + ERR_INVALID_ARG_TYPE, + ERR_INVALID_STATE, + }, +} = require('internal/errors'); + +const { + isArrayBuffer, +} = require('util/types'); + +const { inspect } = require('internal/util/inspect'); + +const { + kFinishClose, + kInspect, + kPrivateConstructor, +} = require('internal/quic/symbols'); + +// This file defines the helper objects for accessing state for +// various QUIC objects. Each of these wraps a DataView. +// Some of the state properties are read only, others are mutable. +// An ArrayBuffer is shared with the C++ level to allow for more +// efficient communication of state across the C++/JS boundary. +// When the state object is no longer needed, it is closed to +// prevent further updates to the buffer. + +const { + IDX_STATE_SESSION_PATH_VALIDATION, + IDX_STATE_SESSION_VERSION_NEGOTIATION, + IDX_STATE_SESSION_DATAGRAM, + IDX_STATE_SESSION_SESSION_TICKET, + IDX_STATE_SESSION_CLOSING, + IDX_STATE_SESSION_GRACEFUL_CLOSE, + IDX_STATE_SESSION_SILENT_CLOSE, + IDX_STATE_SESSION_STATELESS_RESET, + IDX_STATE_SESSION_DESTROYED, + IDX_STATE_SESSION_HANDSHAKE_COMPLETED, + IDX_STATE_SESSION_HANDSHAKE_CONFIRMED, + IDX_STATE_SESSION_STREAM_OPEN_ALLOWED, + IDX_STATE_SESSION_PRIORITY_SUPPORTED, + IDX_STATE_SESSION_WRAPPED, + IDX_STATE_SESSION_LAST_DATAGRAM_ID, + + IDX_STATE_ENDPOINT_BOUND, + IDX_STATE_ENDPOINT_RECEIVING, + IDX_STATE_ENDPOINT_LISTENING, + IDX_STATE_ENDPOINT_CLOSING, + IDX_STATE_ENDPOINT_BUSY, + IDX_STATE_ENDPOINT_PENDING_CALLBACKS, + + IDX_STATE_STREAM_ID, + IDX_STATE_STREAM_FIN_SENT, + IDX_STATE_STREAM_FIN_RECEIVED, + IDX_STATE_STREAM_READ_ENDED, + IDX_STATE_STREAM_WRITE_ENDED, + IDX_STATE_STREAM_DESTROYED, + IDX_STATE_STREAM_PAUSED, + IDX_STATE_STREAM_RESET, + IDX_STATE_STREAM_HAS_READER, + IDX_STATE_STREAM_WANTS_BLOCK, + IDX_STATE_STREAM_WANTS_HEADERS, + IDX_STATE_STREAM_WANTS_RESET, + IDX_STATE_STREAM_WANTS_TRAILERS, +} = internalBinding('quic'); + +class QuicEndpointState { + /** @type {DataView} */ + #handle; + + /** + * @param {symbol} privateSymbol + * @param {ArrayBuffer} buffer + */ + constructor(privateSymbol, buffer) { + if (privateSymbol !== kPrivateConstructor) { + throw new ERR_ILLEGAL_CONSTRUCTOR(); + } + if (!isArrayBuffer(buffer)) { + throw new ERR_INVALID_ARG_TYPE('buffer', ['ArrayBuffer'], buffer); + } + this.#handle = new DataView(buffer); + } + + #assertNotClosed() { + if (this.#handle.byteLength === 0) { + throw new ERR_INVALID_STATE('Endpoint is closed'); + } + } + + /** @type {boolean} */ + get isBound() { + this.#assertNotClosed(); + return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_ENDPOINT_BOUND); + } + + /** @type {boolean} */ + get isReceiving() { + this.#assertNotClosed(); + return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_ENDPOINT_RECEIVING); + } + + /** @type {boolean} */ + get isListening() { + this.#assertNotClosed(); + return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_ENDPOINT_LISTENING); + } + + /** @type {boolean} */ + get isClosing() { + this.#assertNotClosed(); + return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_ENDPOINT_CLOSING); + } + + /** @type {boolean} */ + get isBusy() { + 
this.#assertNotClosed(); + return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_ENDPOINT_BUSY); + } + + /** + * The number of underlying callbacks that are pending. If the session + * is closing, these are the number of callbacks that the session is + * waiting on before it can be closed. + * @type {bigint} + */ + get pendingCallbacks() { + this.#assertNotClosed(); + return DataViewPrototypeGetBigUint64(this.#handle, IDX_STATE_ENDPOINT_PENDING_CALLBACKS); + } + + toString() { + return JSONStringify(this.toJSON()); + } + + toJSON() { + if (this.#handle.byteLength === 0) return {}; + return { + __proto__: null, + isBound: this.isBound, + isReceiving: this.isReceiving, + isListening: this.isListening, + isClosing: this.isClosing, + isBusy: this.isBusy, + pendingCallbacks: `${this.pendingCallbacks}`, + }; + } + + [kInspect](depth, options) { + if (depth < 0) + return this; + + if (this.#handle.byteLength === 0) { + return 'QuicEndpointState { }'; + } + + const opts = { + ...options, + depth: options.depth == null ? null : options.depth - 1, + }; + + return `QuicEndpointState ${inspect({ + isBound: this.isBound, + isReceiving: this.isReceiving, + isListening: this.isListening, + isClosing: this.isClosing, + isBusy: this.isBusy, + pendingCallbacks: this.pendingCallbacks, + }, opts)}`; + } + + [kFinishClose]() { + // Snapshot the state into a new DataView since the underlying + // buffer will be destroyed. + if (this.#handle.byteLength === 0) return; + this.#handle = new DataView(new ArrayBuffer(0)); + } +} + +class QuicSessionState { + /** @type {DataView} */ + #handle; + + /** + * @param {symbol} privateSymbol + * @param {ArrayBuffer} buffer + */ + constructor(privateSymbol, buffer) { + if (privateSymbol !== kPrivateConstructor) { + throw new ERR_ILLEGAL_CONSTRUCTOR(); + } + if (!isArrayBuffer(buffer)) { + throw new ERR_INVALID_ARG_TYPE('buffer', ['ArrayBuffer'], buffer); + } + this.#handle = new DataView(buffer); + } + + #assertNotClosed() { + if (this.#handle.byteLength === 0) { + throw new ERR_INVALID_STATE('Session is closed'); + } + } + + /** @type {boolean} */ + get hasPathValidationListener() { + this.#assertNotClosed(); + return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_SESSION_PATH_VALIDATION); + } + + /** @type {boolean} */ + set hasPathValidationListener(val) { + this.#assertNotClosed(); + DataViewPrototypeSetUint8(this.#handle, IDX_STATE_SESSION_PATH_VALIDATION, val ? 1 : 0); + } + + /** @type {boolean} */ + get hasVersionNegotiationListener() { + this.#assertNotClosed(); + return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_SESSION_VERSION_NEGOTIATION); + } + + /** @type {boolean} */ + set hasVersionNegotiationListener(val) { + this.#assertNotClosed(); + DataViewPrototypeSetUint8(this.#handle, IDX_STATE_SESSION_VERSION_NEGOTIATION, val ? 1 : 0); + } + + /** @type {boolean} */ + get hasDatagramListener() { + this.#assertNotClosed(); + return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_SESSION_DATAGRAM); + } + + /** @type {boolean} */ + set hasDatagramListener(val) { + this.#assertNotClosed(); + DataViewPrototypeSetUint8(this.#handle, IDX_STATE_SESSION_DATAGRAM, val ? 1 : 0); + } + + /** @type {boolean} */ + get hasSessionTicketListener() { + this.#assertNotClosed(); + return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_SESSION_SESSION_TICKET); + } + + /** @type {boolean} */ + set hasSessionTicketListener(val) { + this.#assertNotClosed(); + DataViewPrototypeSetUint8(this.#handle, IDX_STATE_SESSION_SESSION_TICKET, val ? 
1 : 0); + } + + /** @type {boolean} */ + get isClosing() { + this.#assertNotClosed(); + return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_SESSION_CLOSING); + } + + /** @type {boolean} */ + get isGracefulClose() { + this.#assertNotClosed(); + return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_SESSION_GRACEFUL_CLOSE); + } + + /** @type {boolean} */ + get isSilentClose() { + this.#assertNotClosed(); + return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_SESSION_SILENT_CLOSE); + } + + /** @type {boolean} */ + get isStatelessReset() { + this.#assertNotClosed(); + return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_SESSION_STATELESS_RESET); + } + + /** @type {boolean} */ + get isDestroyed() { + this.#assertNotClosed(); + return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_SESSION_DESTROYED); + } + + /** @type {boolean} */ + get isHandshakeCompleted() { + this.#assertNotClosed(); + return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_SESSION_HANDSHAKE_COMPLETED); + } + + /** @type {boolean} */ + get isHandshakeConfirmed() { + this.#assertNotClosed(); + return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_SESSION_HANDSHAKE_CONFIRMED); + } + + /** @type {boolean} */ + get isStreamOpenAllowed() { + this.#assertNotClosed(); + return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_SESSION_STREAM_OPEN_ALLOWED); + } + + /** @type {boolean} */ + get isPrioritySupported() { + this.#assertNotClosed(); + return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_SESSION_PRIORITY_SUPPORTED); + } + + /** @type {boolean} */ + get isWrapped() { + this.#assertNotClosed(); + return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_SESSION_WRAPPED); + } + + /** @type {bigint} */ + get lastDatagramId() { + this.#assertNotClosed(); + return DataViewPrototypeGetBigUint64(this.#handle, IDX_STATE_SESSION_LAST_DATAGRAM_ID); + } + + toString() { + return JSONStringify(this.toJSON()); + } + + toJSON() { + if (this.#handle.byteLength === 0) return {}; + return { + __proto__: null, + hasPathValidationListener: this.hasPathValidationListener, + hasVersionNegotiationListener: this.hasVersionNegotiationListener, + hasDatagramListener: this.hasDatagramListener, + hasSessionTicketListener: this.hasSessionTicketListener, + isClosing: this.isClosing, + isGracefulClose: this.isGracefulClose, + isSilentClose: this.isSilentClose, + isStatelessReset: this.isStatelessReset, + isDestroyed: this.isDestroyed, + isHandshakeCompleted: this.isHandshakeCompleted, + isHandshakeConfirmed: this.isHandshakeConfirmed, + isStreamOpenAllowed: this.isStreamOpenAllowed, + isPrioritySupported: this.isPrioritySupported, + isWrapped: this.isWrapped, + lastDatagramId: `${this.lastDatagramId}`, + }; + } + + [kInspect](depth, options) { + if (depth < 0) + return this; + + if (this.#handle.byteLength === 0) { + return 'QuicSessionState { }'; + } + + const opts = { + ...options, + depth: options.depth == null ? 
null : options.depth - 1, + }; + + return `QuicSessionState ${inspect({ + hasPathValidationListener: this.hasPathValidationListener, + hasVersionNegotiationListener: this.hasVersionNegotiationListener, + hasDatagramListener: this.hasDatagramListener, + hasSessionTicketListener: this.hasSessionTicketListener, + isClosing: this.isClosing, + isGracefulClose: this.isGracefulClose, + isSilentClose: this.isSilentClose, + isStatelessReset: this.isStatelessReset, + isDestroyed: this.isDestroyed, + isHandshakeCompleted: this.isHandshakeCompleted, + isHandshakeConfirmed: this.isHandshakeConfirmed, + isStreamOpenAllowed: this.isStreamOpenAllowed, + isPrioritySupported: this.isPrioritySupported, + isWrapped: this.isWrapped, + lastDatagramId: this.lastDatagramId, + }, opts)}`; + } + + [kFinishClose]() { + // Snapshot the state into a new DataView since the underlying + // buffer will be destroyed. + if (this.#handle.byteLength === 0) return; + this.#handle = new DataView(new ArrayBuffer(0)); + } +} + +class QuicStreamState { + /** @type {DataView} */ + #handle; + + /** + * @param {symbol} privateSymbol + * @param {ArrayBuffer} buffer + */ + constructor(privateSymbol, buffer) { + if (privateSymbol !== kPrivateConstructor) { + throw new ERR_ILLEGAL_CONSTRUCTOR(); + } + if (!isArrayBuffer(buffer)) { + throw new ERR_INVALID_ARG_TYPE('buffer', ['ArrayBuffer'], buffer); + } + this.#handle = new DataView(buffer); + } + + /** @type {bigint} */ + get id() { + return DataViewPrototypeGetBigInt64(this.#handle, IDX_STATE_STREAM_ID); + } + + /** @type {boolean} */ + get finSent() { + return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_STREAM_FIN_SENT); + } + + /** @type {boolean} */ + get finReceived() { + return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_STREAM_FIN_RECEIVED); + } + + /** @type {boolean} */ + get readEnded() { + return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_STREAM_READ_ENDED); + } + + /** @type {boolean} */ + get writeEnded() { + return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_STREAM_WRITE_ENDED); + } + + /** @type {boolean} */ + get destroyed() { + return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_STREAM_DESTROYED); + } + + /** @type {boolean} */ + get paused() { + return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_STREAM_PAUSED); + } + + /** @type {boolean} */ + get reset() { + return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_STREAM_RESET); + } + + /** @type {boolean} */ + get hasReader() { + return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_STREAM_HAS_READER); + } + + /** @type {boolean} */ + get wantsBlock() { + return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_STREAM_WANTS_BLOCK); + } + + /** @type {boolean} */ + set wantsBlock(val) { + DataViewPrototypeSetUint8(this.#handle, IDX_STATE_STREAM_WANTS_BLOCK, val ? 1 : 0); + } + + /** @type {boolean} */ + get wantsHeaders() { + return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_STREAM_WANTS_HEADERS); + } + + /** @type {boolean} */ + set wantsHeaders(val) { + DataViewPrototypeSetUint8(this.#handle, IDX_STATE_STREAM_WANTS_HEADERS, val ? 1 : 0); + } + + /** @type {boolean} */ + get wantsReset() { + return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_STREAM_WANTS_RESET); + } + + /** @type {boolean} */ + set wantsReset(val) { + DataViewPrototypeSetUint8(this.#handle, IDX_STATE_STREAM_WANTS_RESET, val ? 
1 : 0); + } + + /** @type {boolean} */ + get wantsTrailers() { + return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_STREAM_WANTS_TRAILERS); + } + + /** @type {boolean} */ + set wantsTrailers(val) { + DataViewPrototypeSetUint8(this.#handle, IDX_STATE_STREAM_WANTS_TRAILERS, val ? 1 : 0); + } + + toString() { + return JSONStringify(this.toJSON()); + } + + toJSON() { + if (this.#handle.byteLength === 0) return {}; + return { + __proto__: null, + id: `${this.id}`, + finSent: this.finSent, + finReceived: this.finReceived, + readEnded: this.readEnded, + writeEnded: this.writeEnded, + destroyed: this.destroyed, + paused: this.paused, + reset: this.reset, + hasReader: this.hasReader, + wantsBlock: this.wantsBlock, + wantsHeaders: this.wantsHeaders, + wantsReset: this.wantsReset, + wantsTrailers: this.wantsTrailers, + }; + } + + [kInspect](depth, options) { + if (depth < 0) + return this; + + if (this.#handle.byteLength === 0) { + return 'QuicStreamState { }'; + } + + const opts = { + ...options, + depth: options.depth == null ? null : options.depth - 1, + }; + + return `QuicStreamState ${inspect({ + id: this.id, + finSent: this.finSent, + finReceived: this.finReceived, + readEnded: this.readEnded, + writeEnded: this.writeEnded, + destroyed: this.destroyed, + paused: this.paused, + reset: this.reset, + hasReader: this.hasReader, + wantsBlock: this.wantsBlock, + wantsHeaders: this.wantsHeaders, + wantsReset: this.wantsReset, + wantsTrailers: this.wantsTrailers, + }, opts)}`; + } + + [kFinishClose]() { + // Snapshot the state into a new DataView since the underlying + // buffer will be destroyed. + if (this.#handle.byteLength === 0) return; + this.#handle = new DataView(new ArrayBuffer(0)); + } +} + +module.exports = { + QuicEndpointState, + QuicSessionState, + QuicStreamState, +}; diff --git a/lib/internal/quic/stats.js b/lib/internal/quic/stats.js new file mode 100644 index 00000000000000..3ac9523d9aeca4 --- /dev/null +++ b/lib/internal/quic/stats.js @@ -0,0 +1,646 @@ +'use strict'; + +const { + BigUint64Array, + JSONStringify, +} = primordials; + +const { + isArrayBuffer, +} = require('util/types'); + +const { + codes: { + ERR_ILLEGAL_CONSTRUCTOR, + ERR_INVALID_ARG_TYPE, + }, +} = require('internal/errors'); + +const { inspect } = require('internal/util/inspect'); + +const { + kFinishClose, + kInspect, + kPrivateConstructor, +} = require('internal/quic/symbols'); + +// This file defines the helper objects for accessing statistics collected +// by various QUIC objects. Each of these wraps a BigUint64Array. Every +// stats object is read-only via the API, and the underlying buffer is +// only updated by the QUIC internals. When the stats object is no longer +// needed, it is closed to prevent further updates to the buffer. + +const { + // All of the IDX_STATS_* constants are the index positions of the stats + // fields in the relevant BigUint64Array's that underlie the *Stats objects. + // These are not exposed to end users. 
+ IDX_STATS_ENDPOINT_CREATED_AT, + IDX_STATS_ENDPOINT_DESTROYED_AT, + IDX_STATS_ENDPOINT_BYTES_RECEIVED, + IDX_STATS_ENDPOINT_BYTES_SENT, + IDX_STATS_ENDPOINT_PACKETS_RECEIVED, + IDX_STATS_ENDPOINT_PACKETS_SENT, + IDX_STATS_ENDPOINT_SERVER_SESSIONS, + IDX_STATS_ENDPOINT_CLIENT_SESSIONS, + IDX_STATS_ENDPOINT_SERVER_BUSY_COUNT, + IDX_STATS_ENDPOINT_RETRY_COUNT, + IDX_STATS_ENDPOINT_VERSION_NEGOTIATION_COUNT, + IDX_STATS_ENDPOINT_STATELESS_RESET_COUNT, + IDX_STATS_ENDPOINT_IMMEDIATE_CLOSE_COUNT, + + IDX_STATS_SESSION_CREATED_AT, + IDX_STATS_SESSION_CLOSING_AT, + IDX_STATS_SESSION_DESTROYED_AT, + IDX_STATS_SESSION_HANDSHAKE_COMPLETED_AT, + IDX_STATS_SESSION_HANDSHAKE_CONFIRMED_AT, + IDX_STATS_SESSION_GRACEFUL_CLOSING_AT, + IDX_STATS_SESSION_BYTES_RECEIVED, + IDX_STATS_SESSION_BYTES_SENT, + IDX_STATS_SESSION_BIDI_IN_STREAM_COUNT, + IDX_STATS_SESSION_BIDI_OUT_STREAM_COUNT, + IDX_STATS_SESSION_UNI_IN_STREAM_COUNT, + IDX_STATS_SESSION_UNI_OUT_STREAM_COUNT, + IDX_STATS_SESSION_LOSS_RETRANSMIT_COUNT, + IDX_STATS_SESSION_MAX_BYTES_IN_FLIGHT, + IDX_STATS_SESSION_BYTES_IN_FLIGHT, + IDX_STATS_SESSION_BLOCK_COUNT, + IDX_STATS_SESSION_CWND, + IDX_STATS_SESSION_LATEST_RTT, + IDX_STATS_SESSION_MIN_RTT, + IDX_STATS_SESSION_RTTVAR, + IDX_STATS_SESSION_SMOOTHED_RTT, + IDX_STATS_SESSION_SSTHRESH, + IDX_STATS_SESSION_DATAGRAMS_RECEIVED, + IDX_STATS_SESSION_DATAGRAMS_SENT, + IDX_STATS_SESSION_DATAGRAMS_ACKNOWLEDGED, + IDX_STATS_SESSION_DATAGRAMS_LOST, + + IDX_STATS_STREAM_CREATED_AT, + IDX_STATS_STREAM_RECEIVED_AT, + IDX_STATS_STREAM_ACKED_AT, + IDX_STATS_STREAM_CLOSING_AT, + IDX_STATS_STREAM_DESTROYED_AT, + IDX_STATS_STREAM_BYTES_RECEIVED, + IDX_STATS_STREAM_BYTES_SENT, + IDX_STATS_STREAM_MAX_OFFSET, + IDX_STATS_STREAM_MAX_OFFSET_ACK, + IDX_STATS_STREAM_MAX_OFFSET_RECV, + IDX_STATS_STREAM_FINAL_SIZE, +} = internalBinding('quic'); + +class QuicEndpointStats { + /** @type {BigUint64Array} */ + #handle; + /** @type {boolean} */ + #disconnected = false; + + /** + * @param {symbol} privateSymbol + * @param {ArrayBuffer} buffer + */ + constructor(privateSymbol, buffer) { + // We use the kPrivateConstructor symbol to restrict the ability to + // create new instances of QuicEndpointStats to internal code. 
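The kPrivateConstructor check above is a common way to make a class constructible only from inside its own module: the symbol is never exported, so outside code can never present it. A self-contained sketch of the pattern (a plain TypeError stands in for ERR_ILLEGAL_CONSTRUCTOR):

    'use strict';

    // Module-private token; because it is not exported, user code cannot
    // produce it and therefore cannot construct the class directly.
    const kPrivateConstructor = Symbol('kPrivateConstructor');

    class Stats {
      #handle;
      constructor(privateSymbol, buffer) {
        if (privateSymbol !== kPrivateConstructor) {
          throw new TypeError('Illegal constructor');
        }
        this.#handle = new BigUint64Array(buffer);
      }
      get createdAt() { return this.#handle[0]; }
    }

    // Internal code mints instances over a buffer shared with the native side.
    const stats = new Stats(kPrivateConstructor, new ArrayBuffer(8));
    console.log(stats.createdAt); // 0n
    // `new Stats()` from anywhere else throws.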
+ if (privateSymbol !== kPrivateConstructor) { + throw new ERR_ILLEGAL_CONSTRUCTOR(); + } + if (!isArrayBuffer(buffer)) { + throw new ERR_INVALID_ARG_TYPE('buffer', ['ArrayBuffer'], buffer); + } + this.#handle = new BigUint64Array(buffer); + } + + /** @type {bigint} */ + get createdAt() { + return this.#handle[IDX_STATS_ENDPOINT_CREATED_AT]; + } + + /** @type {bigint} */ + get destroyedAt() { + return this.#handle[IDX_STATS_ENDPOINT_DESTROYED_AT]; + } + + /** @type {bigint} */ + get bytesReceived() { + return this.#handle[IDX_STATS_ENDPOINT_BYTES_RECEIVED]; + } + + /** @type {bigint} */ + get bytesSent() { + return this.#handle[IDX_STATS_ENDPOINT_BYTES_SENT]; + } + + /** @type {bigint} */ + get packetsReceived() { + return this.#handle[IDX_STATS_ENDPOINT_PACKETS_RECEIVED]; + } + + /** @type {bigint} */ + get packetsSent() { + return this.#handle[IDX_STATS_ENDPOINT_PACKETS_SENT]; + } + + /** @type {bigint} */ + get serverSessions() { + return this.#handle[IDX_STATS_ENDPOINT_SERVER_SESSIONS]; + } + + /** @type {bigint} */ + get clientSessions() { + return this.#handle[IDX_STATS_ENDPOINT_CLIENT_SESSIONS]; + } + + /** @type {bigint} */ + get serverBusyCount() { + return this.#handle[IDX_STATS_ENDPOINT_SERVER_BUSY_COUNT]; + } + + /** @type {bigint} */ + get retryCount() { + return this.#handle[IDX_STATS_ENDPOINT_RETRY_COUNT]; + } + + /** @type {bigint} */ + get versionNegotiationCount() { + return this.#handle[IDX_STATS_ENDPOINT_VERSION_NEGOTIATION_COUNT]; + } + + /** @type {bigint} */ + get statelessResetCount() { + return this.#handle[IDX_STATS_ENDPOINT_STATELESS_RESET_COUNT]; + } + + /** @type {bigint} */ + get immediateCloseCount() { + return this.#handle[IDX_STATS_ENDPOINT_IMMEDIATE_CLOSE_COUNT]; + } + + toString() { + return JSONStringify(this.toJSON()); + } + + toJSON() { + return { + __proto__: null, + connected: this.isConnected, + // We need to convert the values to strings because JSON does not + // support BigInts. + createdAt: `${this.createdAt}`, + destroyedAt: `${this.destroyedAt}`, + bytesReceived: `${this.bytesReceived}`, + bytesSent: `${this.bytesSent}`, + packetsReceived: `${this.packetsReceived}`, + packetsSent: `${this.packetsSent}`, + serverSessions: `${this.serverSessions}`, + clientSessions: `${this.clientSessions}`, + serverBusyCount: `${this.serverBusyCount}`, + retryCount: `${this.retryCount}`, + versionNegotiationCount: `${this.versionNegotiationCount}`, + statelessResetCount: `${this.statelessResetCount}`, + immediateCloseCount: `${this.immediateCloseCount}`, + }; + } + + [kInspect](depth, options) { + if (depth < 0) + return this; + + const opts = { + ...options, + depth: options.depth == null ? null : options.depth - 1, + }; + + return `QuicEndpointStats ${inspect({ + connected: this.isConnected, + createdAt: this.createdAt, + destroyedAt: this.destroyedAt, + bytesReceived: this.bytesReceived, + bytesSent: this.bytesSent, + packetsReceived: this.packetsReceived, + packetsSent: this.packetsSent, + serverSessions: this.serverSessions, + clientSessions: this.clientSessions, + serverBusyCount: this.serverBusyCount, + retryCount: this.retryCount, + versionNegotiationCount: this.versionNegotiationCount, + statelessResetCount: this.statelessResetCount, + immediateCloseCount: this.immediateCloseCount, + }, opts)}`; + } + + /** + * True if this QuicEndpointStats object is still connected to the underlying + * Endpoint stats source. If this returns false, then the stats object is + * no longer being updated and should be considered stale. 
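The template-literal conversions in toJSON() above exist because every counter here is a bigint read out of the shared BigUint64Array, and JSON.stringify has no default serialization for BigInt. A small demonstration:

    'use strict';

    const stats = { bytesSent: 1024n };

    // JSON.stringify throws a TypeError when it meets a bigint...
    try {
      JSON.stringify(stats);
    } catch (err) {
      console.log(err.message); // 'Do not know how to serialize a BigInt'
    }

    // ...so each value is stringified first, as the toJSON() above does.
    console.log(JSON.stringify({ bytesSent: `${stats.bytesSent}` }));
    // '{"bytesSent":"1024"}'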
+   * @returns {boolean}
+   */
+  get isConnected() {
+    return !this.#disconnected;
+  }
+
+  [kFinishClose]() {
+    // Snapshot the stats into a new BigUint64Array since the underlying
+    // buffer will be destroyed.
+    this.#handle = new BigUint64Array(this.#handle);
+    this.#disconnected = true;
+  }
+}
+
+class QuicSessionStats {
+  /** @type {BigUint64Array} */
+  #handle;
+  /** @type {boolean} */
+  #disconnected = false;
+
+  /**
+   * @param {symbol} privateSymbol
+   * @param {ArrayBuffer} buffer
+   */
+  constructor(privateSymbol, buffer) {
+    // We use the kPrivateConstructor symbol to restrict the ability to
+    // create new instances of QuicSessionStats to internal code.
+    if (privateSymbol !== kPrivateConstructor) {
+      throw new ERR_ILLEGAL_CONSTRUCTOR();
+    }
+    if (!isArrayBuffer(buffer)) {
+      throw new ERR_INVALID_ARG_TYPE('buffer', ['ArrayBuffer'], buffer);
+    }
+    this.#handle = new BigUint64Array(buffer);
+  }
+
+  /** @type {bigint} */
+  get createdAt() {
+    return this.#handle[IDX_STATS_SESSION_CREATED_AT];
+  }
+
+  /** @type {bigint} */
+  get closingAt() {
+    return this.#handle[IDX_STATS_SESSION_CLOSING_AT];
+  }
+
+  /** @type {bigint} */
+  get destroyedAt() {
+    return this.#handle[IDX_STATS_SESSION_DESTROYED_AT];
+  }
+
+  /** @type {bigint} */
+  get handshakeCompletedAt() {
+    return this.#handle[IDX_STATS_SESSION_HANDSHAKE_COMPLETED_AT];
+  }
+
+  /** @type {bigint} */
+  get handshakeConfirmedAt() {
+    return this.#handle[IDX_STATS_SESSION_HANDSHAKE_CONFIRMED_AT];
+  }
+
+  /** @type {bigint} */
+  get gracefulClosingAt() {
+    return this.#handle[IDX_STATS_SESSION_GRACEFUL_CLOSING_AT];
+  }
+
+  /** @type {bigint} */
+  get bytesReceived() {
+    return this.#handle[IDX_STATS_SESSION_BYTES_RECEIVED];
+  }
+
+  /** @type {bigint} */
+  get bytesSent() {
+    return this.#handle[IDX_STATS_SESSION_BYTES_SENT];
+  }
+
+  /** @type {bigint} */
+  get bidiInStreamCount() {
+    return this.#handle[IDX_STATS_SESSION_BIDI_IN_STREAM_COUNT];
+  }
+
+  /** @type {bigint} */
+  get bidiOutStreamCount() {
+    return this.#handle[IDX_STATS_SESSION_BIDI_OUT_STREAM_COUNT];
+  }
+
+  /** @type {bigint} */
+  get uniInStreamCount() {
+    return this.#handle[IDX_STATS_SESSION_UNI_IN_STREAM_COUNT];
+  }
+
+  /** @type {bigint} */
+  get uniOutStreamCount() {
+    return this.#handle[IDX_STATS_SESSION_UNI_OUT_STREAM_COUNT];
+  }
+
+  /** @type {bigint} */
+  get lossRetransmitCount() {
+    return this.#handle[IDX_STATS_SESSION_LOSS_RETRANSMIT_COUNT];
+  }
+
+  /** @type {bigint} */
+  get maxBytesInFlights() {
+    return this.#handle[IDX_STATS_SESSION_MAX_BYTES_IN_FLIGHT];
+  }
+
+  /** @type {bigint} */
+  get bytesInFlight() {
+    return this.#handle[IDX_STATS_SESSION_BYTES_IN_FLIGHT];
+  }
+
+  /** @type {bigint} */
+  get blockCount() {
+    return this.#handle[IDX_STATS_SESSION_BLOCK_COUNT];
+  }
+
+  /** @type {bigint} */
+  get cwnd() {
+    return this.#handle[IDX_STATS_SESSION_CWND];
+  }
+
+  /** @type {bigint} */
+  get latestRtt() {
+    return this.#handle[IDX_STATS_SESSION_LATEST_RTT];
+  }
+
+  /** @type {bigint} */
+  get minRtt() {
+    return this.#handle[IDX_STATS_SESSION_MIN_RTT];
+  }
+
+  /** @type {bigint} */
+  get rttVar() {
+    return this.#handle[IDX_STATS_SESSION_RTTVAR];
+  }
+
+  /** @type {bigint} */
+  get smoothedRtt() {
+    return this.#handle[IDX_STATS_SESSION_SMOOTHED_RTT];
+  }
+
+  /** @type {bigint} */
+  get ssthresh() {
+    return this.#handle[IDX_STATS_SESSION_SSTHRESH];
+  }
+
+  /** @type {bigint} */
+  get datagramsReceived() {
+    return this.#handle[IDX_STATS_SESSION_DATAGRAMS_RECEIVED];
+  }
+
+  /** @type {bigint} */
+  get datagramsSent() {
+    return
this.#handle[IDX_STATS_SESSION_DATAGRAMS_SENT]; + } + + /** @type {bigint} */ + get datagramsAcknowledged() { + return this.#handle[IDX_STATS_SESSION_DATAGRAMS_ACKNOWLEDGED]; + } + + /** @type {bigint} */ + get datagramsLost() { + return this.#handle[IDX_STATS_SESSION_DATAGRAMS_LOST]; + } + + toString() { + return JSONStringify(this.toJSON()); + } + + toJSON() { + return { + __proto__: null, + connected: this.isConnected, + // We need to convert the values to strings because JSON does not + // support BigInts. + createdAt: `${this.createdAt}`, + closingAt: `${this.closingAt}`, + destroyedAt: `${this.destroyedAt}`, + handshakeCompletedAt: `${this.handshakeCompletedAt}`, + handshakeConfirmedAt: `${this.handshakeConfirmedAt}`, + gracefulClosingAt: `${this.gracefulClosingAt}`, + bytesReceived: `${this.bytesReceived}`, + bytesSent: `${this.bytesSent}`, + bidiInStreamCount: `${this.bidiInStreamCount}`, + bidiOutStreamCount: `${this.bidiOutStreamCount}`, + uniInStreamCount: `${this.uniInStreamCount}`, + uniOutStreamCount: `${this.uniOutStreamCount}`, + lossRetransmitCount: `${this.lossRetransmitCount}`, + maxBytesInFlights: `${this.maxBytesInFlights}`, + bytesInFlight: `${this.bytesInFlight}`, + blockCount: `${this.blockCount}`, + cwnd: `${this.cwnd}`, + latestRtt: `${this.latestRtt}`, + minRtt: `${this.minRtt}`, + rttVar: `${this.rttVar}`, + smoothedRtt: `${this.smoothedRtt}`, + ssthresh: `${this.ssthresh}`, + datagramsReceived: `${this.datagramsReceived}`, + datagramsSent: `${this.datagramsSent}`, + datagramsAcknowledged: `${this.datagramsAcknowledged}`, + datagramsLost: `${this.datagramsLost}`, + }; + } + + [kInspect](depth, options) { + if (depth < 0) + return this; + + const opts = { + ...options, + depth: options.depth == null ? null : options.depth - 1, + }; + + return `QuicSessionStats ${inspect({ + connected: this.isConnected, + createdAt: this.createdAt, + closingAt: this.closingAt, + destroyedAt: this.destroyedAt, + handshakeCompletedAt: this.handshakeCompletedAt, + handshakeConfirmedAt: this.handshakeConfirmedAt, + gracefulClosingAt: this.gracefulClosingAt, + bytesReceived: this.bytesReceived, + bytesSent: this.bytesSent, + bidiInStreamCount: this.bidiInStreamCount, + bidiOutStreamCount: this.bidiOutStreamCount, + uniInStreamCount: this.uniInStreamCount, + uniOutStreamCount: this.uniOutStreamCount, + lossRetransmitCount: this.lossRetransmitCount, + maxBytesInFlights: this.maxBytesInFlights, + bytesInFlight: this.bytesInFlight, + blockCount: this.blockCount, + cwnd: this.cwnd, + latestRtt: this.latestRtt, + minRtt: this.minRtt, + rttVar: this.rttVar, + smoothedRtt: this.smoothedRtt, + ssthresh: this.ssthresh, + datagramsReceived: this.datagramsReceived, + datagramsSent: this.datagramsSent, + datagramsAcknowledged: this.datagramsAcknowledged, + datagramsLost: this.datagramsLost, + }, opts)}`; + } + + /** + * True if this QuicSessionStats object is still connected to the underlying + * Session stats source. If this returns false, then the stats object is + * no longer being updated and should be considered stale. + * @returns {boolean} + */ + get isConnected() { + return !this.#disconnected; + } + + [kFinishClose]() { + // Snapshot the stats into a new BigUint64Array since the underlying + // buffer will be destroyed. 
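The snapshot in [kFinishClose] relies on typed-array copy semantics: constructing a BigUint64Array from another typed array copies the elements into a fresh buffer rather than aliasing the old one, so the getters keep serving the last observed values after the shared buffer goes away. A quick check of that behavior:

    'use strict';

    const shared = new ArrayBuffer(16);
    const live = new BigUint64Array(shared);
    live[0] = 42n;

    // new BigUint64Array(typedArray) copies; it does not alias `shared`.
    const snapshot = new BigUint64Array(live);
    live[0] = 0n; // later updates to the shared buffer...
    console.log(snapshot[0]); // 42n -- ...no longer affect the snapshot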
+    this.#handle = new BigUint64Array(this.#handle);
+    this.#disconnected = true;
+  }
+}
+
+class QuicStreamStats {
+  /** @type {BigUint64Array} */
+  #handle;
+  /** @type {boolean} */
+  #disconnected = false;
+
+  /**
+   * @param {symbol} privateSymbol
+   * @param {ArrayBuffer} buffer
+   */
+  constructor(privateSymbol, buffer) {
+    // We use the kPrivateConstructor symbol to restrict the ability to
+    // create new instances of QuicStreamStats to internal code.
+    if (privateSymbol !== kPrivateConstructor) {
+      throw new ERR_ILLEGAL_CONSTRUCTOR();
+    }
+    if (!isArrayBuffer(buffer)) {
+      throw new ERR_INVALID_ARG_TYPE('buffer', ['ArrayBuffer'], buffer);
+    }
+    this.#handle = new BigUint64Array(buffer);
+  }
+
+  /** @type {bigint} */
+  get createdAt() {
+    return this.#handle[IDX_STATS_STREAM_CREATED_AT];
+  }
+
+  /** @type {bigint} */
+  get receivedAt() {
+    return this.#handle[IDX_STATS_STREAM_RECEIVED_AT];
+  }
+
+  /** @type {bigint} */
+  get ackedAt() {
+    return this.#handle[IDX_STATS_STREAM_ACKED_AT];
+  }
+
+  /** @type {bigint} */
+  get closingAt() {
+    return this.#handle[IDX_STATS_STREAM_CLOSING_AT];
+  }
+
+  /** @type {bigint} */
+  get destroyedAt() {
+    return this.#handle[IDX_STATS_STREAM_DESTROYED_AT];
+  }
+
+  /** @type {bigint} */
+  get bytesReceived() {
+    return this.#handle[IDX_STATS_STREAM_BYTES_RECEIVED];
+  }
+
+  /** @type {bigint} */
+  get bytesSent() {
+    return this.#handle[IDX_STATS_STREAM_BYTES_SENT];
+  }
+
+  /** @type {bigint} */
+  get maxOffset() {
+    return this.#handle[IDX_STATS_STREAM_MAX_OFFSET];
+  }
+
+  /** @type {bigint} */
+  get maxOffsetAcknowledged() {
+    return this.#handle[IDX_STATS_STREAM_MAX_OFFSET_ACK];
+  }
+
+  /** @type {bigint} */
+  get maxOffsetReceived() {
+    return this.#handle[IDX_STATS_STREAM_MAX_OFFSET_RECV];
+  }
+
+  /** @type {bigint} */
+  get finalSize() {
+    return this.#handle[IDX_STATS_STREAM_FINAL_SIZE];
+  }
+
+  toString() {
+    return JSONStringify(this.toJSON());
+  }
+
+  toJSON() {
+    return {
+      __proto__: null,
+      connected: this.isConnected,
+      // We need to convert the values to strings because JSON does not
+      // support BigInts.
+      createdAt: `${this.createdAt}`,
+      receivedAt: `${this.receivedAt}`,
+      ackedAt: `${this.ackedAt}`,
+      closingAt: `${this.closingAt}`,
+      destroyedAt: `${this.destroyedAt}`,
+      bytesReceived: `${this.bytesReceived}`,
+      bytesSent: `${this.bytesSent}`,
+      maxOffset: `${this.maxOffset}`,
+      maxOffsetAcknowledged: `${this.maxOffsetAcknowledged}`,
+      maxOffsetReceived: `${this.maxOffsetReceived}`,
+      finalSize: `${this.finalSize}`,
+    };
+  }
+
+  [kInspect](depth, options) {
+    if (depth < 0)
+      return this;
+
+    const opts = {
+      ...options,
+      depth: options.depth == null ? null : options.depth - 1,
+    };
+
+    return `QuicStreamStats ${inspect({
+      connected: this.isConnected,
+      createdAt: this.createdAt,
+      receivedAt: this.receivedAt,
+      ackedAt: this.ackedAt,
+      closingAt: this.closingAt,
+      destroyedAt: this.destroyedAt,
+      bytesReceived: this.bytesReceived,
+      bytesSent: this.bytesSent,
+      maxOffset: this.maxOffset,
+      maxOffsetAcknowledged: this.maxOffsetAcknowledged,
+      maxOffsetReceived: this.maxOffsetReceived,
+      finalSize: this.finalSize,
+    }, opts)}`;
+  }
+
+  /**
+   * True if this QuicStreamStats object is still connected to the underlying
+   * Stream stats source. If this returns false, then the stats object is
+   * no longer being updated and should be considered stale.
+ * @returns {boolean} + */ + get isConnected() { + return !this.#disconnected; + } + + [kFinishClose]() { + // Snapshot the stats into a new BigUint64Array since the underlying + // buffer will be destroyed. + this.#handle = new BigUint64Array(this.#handle); + this.#disconnected = true; + } +} + +module.exports = { + QuicEndpointStats, + QuicSessionStats, + QuicStreamStats, +}; diff --git a/lib/internal/quic/symbols.js b/lib/internal/quic/symbols.js new file mode 100644 index 00000000000000..c436b5c4b787ff --- /dev/null +++ b/lib/internal/quic/symbols.js @@ -0,0 +1,60 @@ +'use strict'; + +const { + Symbol, +} = primordials; + +const { + customInspectSymbol: kInspect, +} = require('internal/util'); + +const { + kHandle: kKeyObjectHandle, + kKeyObject: kKeyObjectInner, +} = require('internal/crypto/util'); + +// Symbols used to hide various private properties and methods from the +// public API. + +const kBlocked = Symbol('kBlocked'); +const kDatagram = Symbol('kDatagram'); +const kDatagramStatus = Symbol('kDatagramStatus'); +const kError = Symbol('kError'); +const kFinishClose = Symbol('kFinishClose'); +const kHandshake = Symbol('kHandshake'); +const kHeaders = Symbol('kHeaders'); +const kOwner = Symbol('kOwner'); +const kRemoveSession = Symbol('kRemoveSession'); +const kNewSession = Symbol('kNewSession'); +const kRemoveStream = Symbol('kRemoveStream'); +const kNewStream = Symbol('kNewStream'); +const kPathValidation = Symbol('kPathValidation'); +const kReset = Symbol('kReset'); +const kSessionTicket = Symbol('kSessionTicket'); +const kTrailers = Symbol('kTrailers'); +const kVersionNegotiation = Symbol('kVersionNegotiation'); +const kPrivateConstructor = Symbol('kPrivateConstructor'); + +module.exports = { + kBlocked, + kDatagram, + kDatagramStatus, + kError, + kFinishClose, + kHandshake, + kHeaders, + kOwner, + kRemoveSession, + kNewSession, + kRemoveStream, + kNewStream, + kPathValidation, + kReset, + kSessionTicket, + kTrailers, + kVersionNegotiation, + kInspect, + kKeyObjectHandle, + kKeyObjectInner, + kPrivateConstructor, +}; diff --git a/lib/internal/socketaddress.js b/lib/internal/socketaddress.js index e432c8a7d7593a..7fbe63980a0226 100644 --- a/lib/internal/socketaddress.js +++ b/lib/internal/socketaddress.js @@ -37,6 +37,8 @@ const { kDeserialize, } = require('internal/worker/js_transferable'); +const { URL } = require('internal/url'); + const kHandle = Symbol('kHandle'); const kDetail = Symbol('kDetail'); @@ -74,7 +76,7 @@ class SocketAddress { validatePort(port, 'options.port'); validateUint32(flowlabel, 'options.flowlabel', false); - this[kHandle] = new _SocketAddress(address, port, type, flowlabel); + this[kHandle] = new _SocketAddress(address, port | 0, type, flowlabel | 0); this[kDetail] = this[kHandle].detail({ address: undefined, port: undefined, @@ -138,6 +140,36 @@ class SocketAddress { flowlabel: this.flowlabel, }; } + + /** + * Parse an "${ip}:${port}" formatted string into a SocketAddress. + * Returns undefined if the input cannot be successfully parsed. + * @param {string} input + * @returns {SocketAddress|undefined} + */ + static parse(input) { + validateString(input, 'input'); + // While URL.parse is not expected to throw, there are several + // other pieces here that do... the destucturing, the SocketAddress + // constructor, etc. So we wrap this in a try/catch to be safe. 
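Assuming SocketAddress stays exposed through node:net (as it is today), the static parse() added here can be exercised as below; note that it returns undefined for unparseable input instead of throwing:

    'use strict';
    const { SocketAddress } = require('node:net');

    const addr = SocketAddress.parse('192.168.1.10:8080');
    console.log(addr.address, addr.port, addr.family); // 192.168.1.10 8080 ipv4

    console.log(SocketAddress.parse('[::1]:443')?.family); // ipv6
    console.log(SocketAddress.parse('not a socket address')); // undefined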
+ try { + const { + hostname: address, + port, + } = URL.parse(`http://${input}`); + if (address.startsWith('[') && address.endsWith(']')) { + return new SocketAddress({ + address: address.slice(1, -1), + port: port | 0, + family: 'ipv6', + }); + } + return new SocketAddress({ address, port: port | 0 }); + } catch { + // Ignore errors here. Return undefined if the input cannot + // be successfully parsed or is not a proper socket address. + } + } } class InternalSocketAddress { diff --git a/lib/internal/test_runner/runner.js b/lib/internal/test_runner/runner.js index 0ba40d7ba7f71f..43a62b5b4307e4 100644 --- a/lib/internal/test_runner/runner.js +++ b/lib/internal/test_runner/runner.js @@ -17,7 +17,6 @@ const { ArrayPrototypeSort, ObjectAssign, PromisePrototypeThen, - PromiseResolve, PromiseWithResolvers, SafeMap, SafePromiseAll, @@ -801,9 +800,17 @@ function run(options = kEmptyObject) { } } - const setupPromise = PromiseResolve(setup?.(root.reporter)); - PromisePrototypeThen(PromisePrototypeThen(PromisePrototypeThen(setupPromise, runFiles), postRun), teardown); + const runChain = async () => { + if (typeof setup === 'function') { + await setup(root.reporter); + } + + await runFiles(); + postRun?.(); + teardown?.(); + }; + runChain(); return root.reporter; } diff --git a/lib/internal/test_runner/test.js b/lib/internal/test_runner/test.js index b679410c5561e5..6a92ec335ddf34 100644 --- a/lib/internal/test_runner/test.js +++ b/lib/internal/test_runner/test.js @@ -2,7 +2,6 @@ const { ArrayPrototypePush, ArrayPrototypePushApply, - ArrayPrototypeReduce, ArrayPrototypeShift, ArrayPrototypeSlice, ArrayPrototypeSome, @@ -24,7 +23,6 @@ const { SafeMap, SafePromiseAll, SafePromiseAllReturnVoid, - SafePromisePrototypeFinally, SafePromiseRace, SafeSet, StringPrototypeStartsWith, @@ -104,6 +102,7 @@ function lazyAssertObject(harness) { if (assertObj === undefined) { assertObj = new SafeMap(); const assert = require('assert'); + const { SnapshotManager } = require('internal/test_runner/snapshot'); const methodsToCopy = [ 'deepEqual', 'deepStrictEqual', @@ -118,6 +117,7 @@ function lazyAssertObject(harness) { 'notDeepStrictEqual', 'notEqual', 'notStrictEqual', + 'partialDeepStrictEqual', 'rejects', 'strictEqual', 'throws', @@ -126,12 +126,8 @@ function lazyAssertObject(harness) { assertObj.set(methodsToCopy[i], assert[methodsToCopy[i]]); } - const { getOptionValue } = require('internal/options'); - if (getOptionValue('--experimental-test-snapshots')) { - const { SnapshotManager } = require('internal/test_runner/snapshot'); - harness.snapshotManager = new SnapshotManager(harness.config.updateSnapshots); - assertObj.set('snapshot', harness.snapshotManager.createAssert()); - } + harness.snapshotManager = new SnapshotManager(harness.config.updateSnapshots); + assertObj.set('snapshot', harness.snapshotManager.createAssert()); } return assertObj; } @@ -848,13 +844,14 @@ class Test extends AsyncResource { async runHook(hook, args) { validateOneOf(hook, 'hook name', kHookNames); try { - await ArrayPrototypeReduce(this.hooks[hook], async (prev, hook) => { - await prev; + const hooks = this.hooks[hook]; + for (let i = 0; i < hooks.length; ++i) { + const hook = hooks[i]; await hook.run(args); if (hook.error) { throw hook.error; } - }, PromiseResolve()); + } } catch (err) { const error = new ERR_TEST_FAILURE(`failed running ${hook} hook`, kHookFailure); error.cause = isTestFailureError(err) ? 
err.cause : err; @@ -1261,27 +1258,20 @@ class Suite extends Test { this.skipped = false; } + this.buildSuite = this.createBuild(); + this.fn = noop; + } + + async createBuild() { try { const { ctx, args } = this.getRunArgs(); const runArgs = [this.fn, ctx]; ArrayPrototypePushApply(runArgs, args); - this.buildSuite = SafePromisePrototypeFinally( - PromisePrototypeThen( - PromiseResolve(ReflectApply(this.runInAsyncScope, this, runArgs)), - undefined, - (err) => { - this.fail(new ERR_TEST_FAILURE(err, kTestCodeFailure)); - }), - () => this.postBuild(), - ); + await ReflectApply(this.runInAsyncScope, this, runArgs); } catch (err) { this.fail(new ERR_TEST_FAILURE(err, kTestCodeFailure)); - this.postBuild(); } - this.fn = noop; - } - postBuild() { this.buildPhaseFinished = true; } diff --git a/lib/internal/util/colors.js b/lib/internal/util/colors.js index 812048c923bee7..0b37694b513fb0 100644 --- a/lib/internal/util/colors.js +++ b/lib/internal/util/colors.js @@ -24,16 +24,27 @@ module.exports = { stream.getColorDepth() > 2 : true); }, refresh() { - const hasColors = module.exports.shouldColorize(process.stderr); - module.exports.blue = hasColors ? '\u001b[34m' : ''; - module.exports.green = hasColors ? '\u001b[32m' : ''; - module.exports.white = hasColors ? '\u001b[39m' : ''; - module.exports.yellow = hasColors ? '\u001b[33m' : ''; - module.exports.red = hasColors ? '\u001b[31m' : ''; - module.exports.gray = hasColors ? '\u001b[90m' : ''; - module.exports.clear = hasColors ? '\u001bc' : ''; - module.exports.reset = hasColors ? '\u001b[0m' : ''; - module.exports.hasColors = hasColors; + if (module.exports.shouldColorize(process.stderr)) { + module.exports.blue = '\u001b[34m'; + module.exports.green = '\u001b[32m'; + module.exports.white = '\u001b[39m'; + module.exports.yellow = '\u001b[33m'; + module.exports.red = '\u001b[31m'; + module.exports.gray = '\u001b[90m'; + module.exports.clear = '\u001bc'; + module.exports.reset = '\u001b[0m'; + module.exports.hasColors = true; + } else { + module.exports.blue = ''; + module.exports.green = ''; + module.exports.white = ''; + module.exports.yellow = ''; + module.exports.red = ''; + module.exports.gray = ''; + module.exports.clear = ''; + module.exports.reset = ''; + module.exports.hasColors = false; + } }, }; diff --git a/lib/net.js b/lib/net.js index 52085df6ba0574..b8c0057bfd1a6c 100644 --- a/lib/net.js +++ b/lib/net.js @@ -105,6 +105,7 @@ const { ERR_INVALID_FD_TYPE, ERR_INVALID_HANDLE_TYPE, ERR_INVALID_IP_ADDRESS, + ERR_IP_BLOCKED, ERR_MISSING_ARGS, ERR_SERVER_ALREADY_LISTEN, ERR_SERVER_NOT_RUNNING, @@ -510,6 +511,12 @@ function Socket(options) { // Used after `.destroy()` this[kBytesRead] = 0; this[kBytesWritten] = 0; + if (options.blockList) { + if (!module.exports.BlockList.isBlockList(options.blockList)) { + throw new ERR_INVALID_ARG_TYPE('options.blockList', 'net.BlockList', options.blockList); + } + this.blockList = options.blockList; + } } ObjectSetPrototypeOf(Socket.prototype, stream.Duplex.prototype); ObjectSetPrototypeOf(Socket, stream.Duplex); @@ -1073,6 +1080,10 @@ function internalConnect( self.emit('connectionAttempt', address, port, addressType); if (addressType === 6 || addressType === 4) { + if (self.blockList?.check(address, `ipv${addressType}`)) { + self.destroy(new ERR_IP_BLOCKED(address)); + return; + } const req = new TCPConnectWrap(); req.oncomplete = afterConnect; req.address = address; @@ -1162,6 +1173,14 @@ function internalConnectMultiple(context, canceled) { } } + if (self.blockList?.check(address, 
`ipv${addressType}`)) { + const ex = new ERR_IP_BLOCKED(address); + ArrayPrototypePush(context.errors, ex); + self.emit('connectionAttemptFailed', address, port, addressType, ex); + internalConnectMultiple(context); + return; + } + debug('connect/multiple: attempting to connect to %s:%d (addressType: %d)', address, port, addressType); self.emit('connectionAttempt', address, port, addressType); @@ -1791,6 +1810,12 @@ function Server(options, connectionListener) { this.keepAlive = Boolean(options.keepAlive); this.keepAliveInitialDelay = ~~(options.keepAliveInitialDelay / 1000); this.highWaterMark = options.highWaterMark ?? getDefaultHighWaterMark(); + if (options.blockList) { + if (!module.exports.BlockList.isBlockList(options.blockList)) { + throw new ERR_INVALID_ARG_TYPE('options.blockList', 'net.BlockList', options.blockList); + } + this.blockList = options.blockList; + } } ObjectSetPrototypeOf(Server.prototype, EventEmitter.prototype); ObjectSetPrototypeOf(Server, EventEmitter); @@ -2239,7 +2264,15 @@ function onconnection(err, clientHandle) { clientHandle.close(); return; } - + if (self.blockList && typeof clientHandle.getpeername === 'function') { + const remoteInfo = { __proto__: null }; + clientHandle.getpeername(remoteInfo); + const addressType = isIP(remoteInfo.address); + if (addressType && self.blockList.check(remoteInfo.address, `ipv${addressType}`)) { + clientHandle.close(); + return; + } + } const socket = new Socket({ handle: clientHandle, allowHalfOpen: self.allowHalfOpen, diff --git a/lib/test.js b/lib/test.js index ba72d6e4ecbdbf..8e7c6295a37225 100644 --- a/lib/test.js +++ b/lib/test.js @@ -7,7 +7,6 @@ const { const { test, suite, before, after, beforeEach, afterEach } = require('internal/test_runner/harness'); const { run } = require('internal/test_runner/runner'); -const { getOptionValue } = require('internal/options'); module.exports = test; ObjectAssign(module.exports, { @@ -39,28 +38,26 @@ ObjectDefineProperty(module.exports, 'mock', { }, }); -if (getOptionValue('--experimental-test-snapshots')) { - let lazySnapshot; +let lazySnapshot; - ObjectDefineProperty(module.exports, 'snapshot', { - __proto__: null, - configurable: true, - enumerable: true, - get() { - if (lazySnapshot === undefined) { - const { - setDefaultSnapshotSerializers, - setResolveSnapshotPath, - } = require('internal/test_runner/snapshot'); - - lazySnapshot = { - __proto__: null, - setDefaultSnapshotSerializers, - setResolveSnapshotPath, - }; - } +ObjectDefineProperty(module.exports, 'snapshot', { + __proto__: null, + configurable: true, + enumerable: true, + get() { + if (lazySnapshot === undefined) { + const { + setDefaultSnapshotSerializers, + setResolveSnapshotPath, + } = require('internal/test_runner/snapshot'); + + lazySnapshot = { + __proto__: null, + setDefaultSnapshotSerializers, + setResolveSnapshotPath, + }; + } - return lazySnapshot; - }, - }); -} + return lazySnapshot; + }, +}); diff --git a/lib/vm.js b/lib/vm.js index 3eea66b3f07437..ae710806201893 100644 --- a/lib/vm.js +++ b/lib/vm.js @@ -319,6 +319,7 @@ function runInThisContext(code, options) { function compileFunction(code, params, options = kEmptyObject) { validateString(code, 'code'); + validateObject(options, 'options'); if (params !== undefined) { validateStringArray(params, 'params'); } diff --git a/node.gni b/node.gni index 057686f0046e5e..e199bba2d38641 100644 --- a/node.gni +++ b/node.gni @@ -10,6 +10,8 @@ declare_args() { # The location of V8, use the one from node's deps by default. 
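Returning to the net changes above: with this patch a BlockList can be handed to both servers and outgoing sockets. A sketch using documentation addresses (TEST-NET ranges); a server drops blocked peers before 'connection' ever fires, and a blocked client destination fails fast with ERR_IP_BLOCKED:

    'use strict';
    const net = require('node:net');

    const blockList = new net.BlockList();
    blockList.addAddress('203.0.113.7');
    blockList.addRange('198.51.100.1', '198.51.100.254');

    // Server side: the onconnection hunk above closes blocked peers before
    // a 'connection' event is emitted.
    const server = net.createServer({ blockList }, (socket) => {
      socket.end('hello\n');
    });
    server.listen(0);

    // Client side: an IP literal matched by the list is rejected up front,
    // before any TCP connect is attempted.
    const socket = net.connect({ host: '203.0.113.7', port: 80, blockList });
    socket.on('error', (err) => {
      console.log(err.code); // 'ERR_IP_BLOCKED'
      server.close();
    });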
node_v8_path = "$node_path/deps/v8" + node_openssl_path = "$node_path/deps/openssl" + # The NODE_MODULE_VERSION defined in node_version.h. node_module_version = exec_script("$node_path/tools/getmoduleversion.py", [], "value") diff --git a/node.gyp b/node.gyp index f018e0a34d5782..1633ed2d832fc5 100644 --- a/node.gyp +++ b/node.gyp @@ -482,6 +482,7 @@ '-Wno-unused-parameter', '-Werror=undefined-inline', '-Werror=extra-semi', + '-Werror=ctad-maybe-unsupported', ], }, @@ -849,9 +850,6 @@ 'dependencies': [ 'deps/googletest/googletest.gyp:gtest_prod', 'deps/histogram/histogram.gyp:histogram', - 'deps/simdjson/simdjson.gyp:simdjson', - 'deps/simdutf/simdutf.gyp:simdutf', - 'deps/ada/ada.gyp:ada', 'deps/nbytes/nbytes.gyp:nbytes', 'node_js2c#host', ], @@ -1126,7 +1124,6 @@ 'deps/googletest/googletest.gyp:gtest_prod', 'deps/histogram/histogram.gyp:histogram', 'deps/uvwasi/uvwasi.gyp:uvwasi', - 'deps/ada/ada.gyp:ada', 'deps/nbytes/nbytes.gyp:nbytes', ], 'includes': [ @@ -1173,9 +1170,6 @@ 'deps/googletest/googletest.gyp:gtest', 'deps/googletest/googletest.gyp:gtest_main', 'deps/histogram/histogram.gyp:histogram', - 'deps/simdjson/simdjson.gyp:simdjson', - 'deps/simdutf/simdutf.gyp:simdutf', - 'deps/ada/ada.gyp:ada', 'deps/nbytes/nbytes.gyp:nbytes', ], @@ -1253,7 +1247,6 @@ 'dependencies': [ '<(node_lib_target_name)', 'deps/histogram/histogram.gyp:histogram', - 'deps/ada/ada.gyp:ada', 'deps/nbytes/nbytes.gyp:nbytes', ], @@ -1328,9 +1321,6 @@ 'target_name': 'node_js2c', 'type': 'executable', 'toolsets': ['host'], - 'dependencies': [ - 'deps/simdutf/simdutf.gyp:simdutf#host', - ], 'include_dirs': [ 'tools', 'src', @@ -1342,6 +1332,9 @@ 'src/embedded_data.cc', ], 'conditions': [ + [ 'node_shared_simdutf=="false"', { + 'dependencies': [ 'deps/simdutf/simdutf.gyp:simdutf#host' ], + }], [ 'node_shared_libuv=="false"', { 'dependencies': [ 'deps/uv/uv.gyp:libuv#host' ], }], @@ -1367,10 +1360,7 @@ 'dependencies': [ '<(node_lib_target_name)', 'deps/histogram/histogram.gyp:histogram', - 'deps/ada/ada.gyp:ada', 'deps/nbytes/nbytes.gyp:nbytes', - 'deps/simdjson/simdjson.gyp:simdjson', - 'deps/simdutf/simdutf.gyp:simdutf', ], 'includes': [ diff --git a/node.gypi b/node.gypi index 9c989022a9ad36..7ae62bd7e7134a 100644 --- a/node.gypi +++ b/node.gypi @@ -27,7 +27,11 @@ 'conditions': [ [ 'clang==1', { - 'cflags': [ '-Werror=undefined-inline', '-Werror=extra-semi'] + 'cflags': [ + '-Werror=undefined-inline', + '-Werror=extra-semi', + '-Werror=ctad-maybe-unsupported', + ], }], [ '"<(_type)"=="executable"', { 'msvs_settings': { @@ -212,6 +216,18 @@ 'dependencies': [ 'deps/nghttp2/nghttp2.gyp:nghttp2' ], }], + [ 'node_shared_ada=="false"', { + 'dependencies': [ 'deps/ada/ada.gyp:ada' ], + }], + + [ 'node_shared_simdjson=="false"', { + 'dependencies': [ 'deps/simdjson/simdjson.gyp:simdjson' ], + }], + + [ 'node_shared_simdutf=="false"', { + 'dependencies': [ 'deps/simdutf/simdutf.gyp:simdutf' ], + }], + [ 'node_shared_brotli=="false"', { 'dependencies': [ 'deps/brotli/brotli.gyp:brotli' ], }], diff --git a/src/async_wrap.cc b/src/async_wrap.cc index 8868245403d658..30490b90e77b64 100644 --- a/src/async_wrap.cc +++ b/src/async_wrap.cc @@ -488,31 +488,15 @@ AsyncWrap::AsyncWrap(Environment* env, Local object, ProviderType provider, double execution_async_id) - : AsyncWrap(env, object, provider, execution_async_id, false) {} - -AsyncWrap::AsyncWrap(Environment* env, - Local object, - ProviderType provider, - double execution_async_id, - bool silent) : AsyncWrap(env, object) { CHECK_NE(provider, PROVIDER_NONE); provider_type_ 
                                 = provider;
   // Use AsyncReset() call to execute the init() callbacks.
-  AsyncReset(object, execution_async_id, silent);
+  AsyncReset(object, execution_async_id);
   init_hook_ran_ = true;
 }

-AsyncWrap::AsyncWrap(Environment* env,
-                     Local<Object> object,
-                     ProviderType provider,
-                     double execution_async_id,
-                     double trigger_async_id)
-    : AsyncWrap(env, object, provider, execution_async_id, true) {
-  trigger_async_id_ = trigger_async_id;
-}
-
 AsyncWrap::AsyncWrap(Environment* env, Local<Object> object)
     : BaseObject(env, object),
       context_frame_(env->isolate(),
@@ -592,8 +576,7 @@ void AsyncWrap::EmitDestroy(Environment* env, double async_id) {
 // Generalized call for both the constructor and for handles that are pooled
 // and reused over their lifetime. This way a new uid can be assigned when
 // the resource is pulled out of the pool and put back into use.
-void AsyncWrap::AsyncReset(Local<Object> resource, double execution_async_id,
-                           bool silent) {
+void AsyncWrap::AsyncReset(Local<Object> resource, double execution_async_id) {
   CHECK_NE(provider_type(), PROVIDER_NONE);

   if (async_id_ != kInvalidAsyncId) {
@@ -642,8 +625,6 @@ void AsyncWrap::AsyncReset(Local<Object> resource, double execution_async_id,
   context_frame_.Reset(isolate, async_context_frame::current(isolate));

-  if (silent) return;
-
   EmitAsyncInit(env(), resource,
                 env()->async_hooks()->provider_string(provider_type()),
                 async_id_, trigger_async_id_);
diff --git a/src/async_wrap.h b/src/async_wrap.h
index d656f72fdabd62..5b33290b4bb2d0 100644
--- a/src/async_wrap.h
+++ b/src/async_wrap.h
@@ -190,8 +190,7 @@ class AsyncWrap : public BaseObject {
   inline v8::Local<v8::Value> context_frame() const;

   void AsyncReset(v8::Local<v8::Object> resource,
-                  double execution_async_id = kInvalidAsyncId,
-                  bool silent = false);
+                  double execution_async_id = kInvalidAsyncId);

   // Only call these within a valid HandleScope.
   v8::MaybeLocal<v8::Value> MakeCallback(const v8::Local<v8::Function> cb,
@@ -224,18 +223,6 @@ class AsyncWrap : public BaseObject {
   bool IsDoneInitializing() const override;

 private:
-  friend class PromiseWrap;
-
-  AsyncWrap(Environment* env,
-            v8::Local<v8::Object> promise,
-            ProviderType provider,
-            double execution_async_id,
-            bool silent);
-  AsyncWrap(Environment* env,
-            v8::Local<v8::Object> promise,
-            ProviderType provider,
-            double execution_async_id,
-            double trigger_async_id);
   ProviderType provider_type_ = PROVIDER_NONE;
   bool init_hook_ran_ = false;
   // Because the values may be Reset(), cannot be made const.
diff --git a/src/debug_utils.cc b/src/debug_utils.cc
index 9d36082db672ce..c8b3b11ee34d7a 100644
--- a/src/debug_utils.cc
+++ b/src/debug_utils.cc
@@ -62,9 +62,9 @@ EnabledDebugList enabled_debug_list;
 using v8::Local;
 using v8::StackTrace;

-void EnabledDebugList::Parse(std::shared_ptr<KVStore> env_vars) {
+void EnabledDebugList::Parse(Environment* env) {
   std::string cats;
-  credentials::SafeGetenv("NODE_DEBUG_NATIVE", &cats, env_vars);
+  credentials::SafeGetenv("NODE_DEBUG_NATIVE", &cats, env);
   Parse(cats);
 }

diff --git a/src/debug_utils.h b/src/debug_utils.h
index 359b8d6b421020..d4391ac987ba5b 100644
--- a/src/debug_utils.h
+++ b/src/debug_utils.h
@@ -74,10 +74,10 @@ class NODE_EXTERN_PRIVATE EnabledDebugList {
     return enabled_[static_cast<int>(category)];
   }

-  // Uses NODE_DEBUG_NATIVE to initialize the categories. The env_vars variable
-  // is parsed if it is not a nullptr, otherwise the system environment
-  // variables are parsed.
-  void Parse(std::shared_ptr<KVStore> env_vars);
+  // Uses NODE_DEBUG_NATIVE to initialize the categories. env->env_vars() is
+  // parsed if env is not a nullptr; otherwise the system environment
+  // variables are parsed.
+  void Parse(Environment* env);

 private:
   // Enable all categories matching cats.
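
A minimal usage sketch for the `blockList` option that the lib/net.js hunks above add to `net.Socket` and `net.Server`: blocked peers are rejected with `ERR_IP_BLOCKED` on the connecting side and closed before a `'connection'` event on the serving side. The address and port literals below are illustrative only:

    'use strict';
    const net = require('node:net');

    const blockList = new net.BlockList();
    blockList.addAddress('203.0.113.7'); // illustrative address

    // Server: onconnection() closes handles from blocked peers before a
    // 'connection' event is ever emitted.
    const server = net.createServer({ blockList }, (socket) => socket.end());
    server.listen(0);

    // Client: connecting to a blocked address destroys the socket with an
    // ERR_IP_BLOCKED error before any TCP connection attempt is made.
    const socket = net.connect({ host: '203.0.113.7', port: 80, blockList });
    socket.on('error', (err) => console.error(err.code)); // 'ERR_IP_BLOCKED'

Passing anything other than a `net.BlockList` throws `ERR_INVALID_ARG_TYPE`, per the `isBlockList()` checks in both constructors.
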
diff --git a/src/encoding_binding.cc b/src/encoding_binding.cc
index 97ddd59fb661c8..a132eeb62306c6 100644
--- a/src/encoding_binding.cc
+++ b/src/encoding_binding.cc
@@ -1,6 +1,7 @@
 #include "encoding_binding.h"
 #include "ada.h"
 #include "env-inl.h"
+#include "node_buffer.h"
 #include "node_errors.h"
 #include "node_external_reference.h"
 #include "simdutf.h"
@@ -226,6 +227,7 @@ void BindingData::CreatePerIsolateProperties(IsolateData* isolate_data,
   SetMethodNoSideEffect(isolate, target, "decodeUTF8", DecodeUTF8);
   SetMethodNoSideEffect(isolate, target, "toASCII", ToASCII);
   SetMethodNoSideEffect(isolate, target, "toUnicode", ToUnicode);
+  SetMethodNoSideEffect(isolate, target, "decodeLatin1", DecodeLatin1);
 }

 void BindingData::CreatePerContextProperties(Local<Object> target,
@@ -243,6 +245,50 @@ void BindingData::RegisterTimerExternalReferences(
   registry->Register(DecodeUTF8);
   registry->Register(ToASCII);
   registry->Register(ToUnicode);
+  registry->Register(DecodeLatin1);
+}
+
+void BindingData::DecodeLatin1(const FunctionCallbackInfo<Value>& args) {
+  Environment* env = Environment::GetCurrent(args);
+
+  CHECK_GE(args.Length(), 1);
+  if (!(args[0]->IsArrayBuffer() || args[0]->IsSharedArrayBuffer() ||
+        args[0]->IsArrayBufferView())) {
+    return node::THROW_ERR_INVALID_ARG_TYPE(
+        env->isolate(),
+        "The \"input\" argument must be an instance of ArrayBuffer, "
+        "SharedArrayBuffer, or ArrayBufferView.");
+  }
+
+  bool ignore_bom = args[1]->IsTrue();
+  bool has_fatal = args[2]->IsTrue();
+
+  ArrayBufferViewContents<uint8_t> buffer(args[0]);
+  const uint8_t* data = buffer.data();
+  size_t length = buffer.length();
+
+  if (ignore_bom && length > 0 && data[0] == 0xFF) {
+    data++;
+    length--;
+  }
+
+  if (length == 0) {
+    return args.GetReturnValue().SetEmptyString();
+  }
+
+  std::string result(length * 2, '\0');
+
+  size_t written = simdutf::convert_latin1_to_utf8(
+      reinterpret_cast<const char*>(data), length, result.data());
+
+  if (has_fatal && written == 0) {
+    return node::THROW_ERR_ENCODING_INVALID_ENCODED_DATA(
+        env->isolate(), "The encoded data was not valid for encoding latin1");
+  }
+
+  Local<Object> buffer_result =
+      node::Buffer::Copy(env, result.c_str(), written).ToLocalChecked();
+  args.GetReturnValue().Set(buffer_result);
 }

 }  // namespace encoding_binding
diff --git a/src/encoding_binding.h b/src/encoding_binding.h
index 2690cb74f8a05b..97f55394d27641 100644
--- a/src/encoding_binding.h
+++ b/src/encoding_binding.h
@@ -31,6 +31,7 @@ class BindingData : public SnapshotableObject {
   static void EncodeInto(const v8::FunctionCallbackInfo<v8::Value>& args);
   static void EncodeUtf8String(const v8::FunctionCallbackInfo<v8::Value>& args);
   static void DecodeUTF8(const v8::FunctionCallbackInfo<v8::Value>& args);
+  static void DecodeLatin1(const v8::FunctionCallbackInfo<v8::Value>& args);
   static void ToASCII(const v8::FunctionCallbackInfo<v8::Value>& args);
   static void ToUnicode(const v8::FunctionCallbackInfo<v8::Value>& args);

diff --git a/src/env.cc b/src/env.cc
index 458fd3b5611c47..8842f69e9bf58f 100644
--- a/src/env.cc
+++ b/src/env.cc
@@ -87,13 +87,13 @@ void AsyncHooks::ResetPromiseHooks(Local<Function> init,
   js_promise_hooks_[3].Reset(env()->isolate(), resolve);
 }

-Local<Array> AsyncHooks::GetPromiseHooks(Isolate* isolate) {
-  std::vector<Local<Value>> values;
+Local<Array> AsyncHooks::GetPromiseHooks(Isolate* isolate) const {
+  v8::LocalVector<v8::Value> values(isolate, js_promise_hooks_.size());
   for (size_t i = 0; i < js_promise_hooks_.size(); ++i) {
     if (js_promise_hooks_[i].IsEmpty()) {
-      values.push_back(Undefined(isolate));
+      values[i] = Undefined(isolate);
     } else {
-      values.push_back(js_promise_hooks_[i].Get(isolate));
+      values[i] =
js_promise_hooks_[i].Get(isolate); } } return Array::New(isolate, values.data(), values.size()); @@ -236,13 +236,10 @@ void Environment::TrackContext(Local context) { void Environment::UntrackContext(Local context) { HandleScope handle_scope(isolate_); - contexts_.erase(std::remove_if(contexts_.begin(), - contexts_.end(), - [&](auto&& el) { return el.IsEmpty(); }), - contexts_.end()); + std::erase_if(contexts_, [&](auto&& el) { return el.IsEmpty(); }); for (auto it = contexts_.begin(); it != contexts_.end(); it++) { - Local saved_context = PersistentToLocal::Weak(isolate_, *it); - if (saved_context == context) { + if (Local saved_context = PersistentToLocal::Weak(isolate_, *it); + saved_context == context) { it->Reset(); contexts_.erase(it); break; @@ -351,9 +348,11 @@ IsolateDataSerializeInfo IsolateData::Serialize(SnapshotCreator* creator) { #undef VS #undef VP - for (size_t i = 0; i < AsyncWrap::PROVIDERS_LENGTH; i++) + info.primitive_values.reserve(info.primitive_values.size() + + AsyncWrap::PROVIDERS_LENGTH); + for (size_t i = 0; i < AsyncWrap::PROVIDERS_LENGTH; i++) { info.primitive_values.push_back(creator->AddData(async_wrap_provider(i))); - + } uint32_t id = 0; #define VM(PropertyName) V(PropertyName##_binding_template, ObjectTemplate) #define V(PropertyName, TypeName) \ @@ -375,7 +374,7 @@ IsolateDataSerializeInfo IsolateData::Serialize(SnapshotCreator* creator) { void IsolateData::DeserializeProperties(const IsolateDataSerializeInfo* info) { size_t i = 0; - v8::Isolate::Scope isolate_scope(isolate_); + Isolate::Scope isolate_scope(isolate_); HandleScope handle_scope(isolate_); if (per_process::enabled_debug_list.enabled(DebugCategory::MKSNAPSHOT)) { @@ -721,9 +720,8 @@ void Environment::TryLoadAddon( std::string Environment::GetCwd(const std::string& exec_path) { char cwd[PATH_MAX_BYTES]; size_t size = PATH_MAX_BYTES; - const int err = uv_cwd(cwd, &size); - if (err == 0) { + if (uv_cwd(cwd, &size) == 0) { CHECK_GT(size, 0); return cwd; } @@ -769,7 +767,7 @@ std::string Environment::GetExecPath(const std::vector& argv) { std::string exec_path; if (uv_exepath(exec_path_buf, &exec_path_len) == 0) { exec_path = std::string(exec_path_buf, exec_path_len); - } else if (argv.size() > 0) { + } else if (!argv.empty()) { exec_path = argv[0]; } @@ -864,9 +862,6 @@ Environment::Environment(IsolateData* isolate_data, EnvironmentFlags::kOwnsInspector; } - set_env_vars(per_process::system_environment); - enabled_debug_list_.Parse(env_vars()); - // We create new copies of the per-Environment option sets, so that it is // easier to modify them after Environment creation. The defaults are // part of the per-Isolate option set, for which in turn the defaults are @@ -876,6 +871,13 @@ Environment::Environment(IsolateData* isolate_data, inspector_host_port_ = std::make_shared>( options_->debug_options().host_port); + set_env_vars(per_process::system_environment); + // This should be done after options is created, so that --trace-env can be + // checked when parsing NODE_DEBUG_NATIVE. It should also be done after + // env_vars() is set so that the parser uses values from env->env_vars() + // which may or may not be the system environment variable store. 
+ enabled_debug_list_.Parse(this); + heap_snapshot_near_heap_limit_ = static_cast(options_->heap_snapshot_near_heap_limit); @@ -1104,8 +1106,7 @@ void Environment::InitializeLibuv() { void Environment::InitializeCompileCache() { std::string dir_from_env; - if (!credentials::SafeGetenv( - "NODE_COMPILE_CACHE", &dir_from_env, env_vars()) || + if (!credentials::SafeGetenv("NODE_COMPILE_CACHE", &dir_from_env, this) || dir_from_env.empty()) { return; } @@ -1117,7 +1118,7 @@ CompileCacheEnableResult Environment::EnableCompileCache( CompileCacheEnableResult result; std::string disable_env; if (credentials::SafeGetenv( - "NODE_DISABLE_COMPILE_CACHE", &disable_env, env_vars())) { + "NODE_DISABLE_COMPILE_CACHE", &disable_env, this)) { result.status = CompileCacheEnableStatus::DISABLED; result.message = "Disabled by NODE_DISABLE_COMPILE_CACHE"; Debug(this, @@ -1236,7 +1237,8 @@ void Environment::StartProfilerIdleNotifier() { } void Environment::PrintSyncTrace() const { - if (!trace_sync_io_) return; + if (!trace_sync_io_) [[likely]] + return; HandleScope handle_scope(isolate()); @@ -1311,7 +1313,7 @@ void Environment::AtExit(void (*cb)(void* arg), void* arg) { at_exit_functions_.push_front(ExitCallback{cb, arg}); } -Maybe Environment::CheckUnsettledTopLevelAwait() { +Maybe Environment::CheckUnsettledTopLevelAwait() const { HandleScope scope(isolate_); Local ctx = context(); Local value; @@ -1513,7 +1515,7 @@ void Environment::RunTimers(uv_timer_t* handle) { int64_t expiry_ms = ret.ToLocalChecked()->IntegerValue(env->context()).FromJust(); - uv_handle_t* h = reinterpret_cast(handle); + auto* h = reinterpret_cast(handle); if (expiry_ms != 0) { int64_t duration_ms = @@ -1579,8 +1581,7 @@ Local Environment::GetNow() { uint64_t now = GetNowUint64(); if (now <= 0xffffffff) return Integer::NewFromUnsigned(isolate(), static_cast(now)); - else - return Number::New(isolate(), static_cast(now)); + return Number::New(isolate(), static_cast(now)); } void CollectExceptionInfo(Environment* env, @@ -1639,8 +1640,8 @@ void Environment::CollectUVExceptionInfo(Local object, message = uv_strerror(errorno); } - node::CollectExceptionInfo(this, obj, errorno, err_string, - syscall, message, path, dest); + CollectExceptionInfo( + this, obj, errorno, err_string, syscall, message, path, dest); } ImmediateInfo::ImmediateInfo(Isolate* isolate, const SerializeInfo* info) @@ -1970,7 +1971,7 @@ void Environment::BuildEmbedderGraph(Isolate* isolate, EmbedderGraph* graph, void* data) { MemoryTracker tracker(isolate, graph); - Environment* env = static_cast(data); + auto* env = static_cast(data); // Start traversing embedder objects from the root Environment object. tracker.Track(env); } @@ -2032,7 +2033,7 @@ void Environment::TracePromises(PromiseHookType type, size_t Environment::NearHeapLimitCallback(void* data, size_t current_heap_limit, size_t initial_heap_limit) { - Environment* env = static_cast(data); + auto* env = static_cast(data); Debug(env, DebugCategory::DIAGNOSTICS, @@ -2078,8 +2079,8 @@ size_t Environment::NearHeapLimitCallback(void* data, DebugCategory::DIAGNOSTICS, "Estimated available memory=%" PRIu64 ", " "estimated overhead=%" PRIu64 "\n", - static_cast(available), - static_cast(estimated_overhead)); + available, + estimated_overhead); // This might be hit when the snapshot is being taken in another // NearHeapLimitCallback invocation. 
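
The env.cc hunks above thread the Environment itself (rather than a bare env_vars store) through SafeGetenv() for NODE_COMPILE_CACHE and NODE_DISABLE_COMPILE_CACHE, which also makes those reads visible to the --trace-env machinery added later in this diff. A hedged sketch of the JS-facing API that EnableCompileCache() backs; module.enableCompileCache() has existed since Node.js 22.8, and the exact result shape here is an assumption based on the node:module docs:

    'use strict';
    const {
      enableCompileCache,
      getCompileCacheDir,
      constants,
    } = require('node:module');

    // EnableCompileCache() reports a status plus an optional message such as
    // "Disabled by NODE_DISABLE_COMPILE_CACHE" (see the hunk above).
    const result = enableCompileCache(); // falls back to NODE_COMPILE_CACHE
    if (result.status === constants.compileCacheStatus.DISABLED) {
      console.log(result.message);
    } else {
      console.log('compile cache directory:', getCompileCacheDir());
    }
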
diff --git a/src/env.h b/src/env.h
index 11d956710b8969..e67c34d31a8ce4 100644
--- a/src/env.h
+++ b/src/env.h
@@ -329,7 +329,7 @@ class AsyncHooks : public MemoryRetainer {
                          v8::Local<v8::Function> resolve);
   // Used for testing since V8 doesn't provide API for retrieving configured
   // JS promise hooks.
-  v8::Local<v8::Array> GetPromiseHooks(v8::Isolate* isolate);
+  v8::Local<v8::Array> GetPromiseHooks(v8::Isolate* isolate) const;

   inline v8::Local<v8::String> provider_string(int idx);
   inline void no_force_checks();
@@ -848,7 +848,7 @@ class Environment final : public MemoryRetainer {
   void AtExit(void (*cb)(void* arg), void* arg);
   void RunAtExitCallbacks();

-  v8::Maybe<bool> CheckUnsettledTopLevelAwait();
+  v8::Maybe<bool> CheckUnsettledTopLevelAwait() const;
   void RunWeakRefCleanup();

   v8::MaybeLocal<v8::Value> RunSnapshotSerializeCallback() const;
diff --git a/src/env_properties.h b/src/env_properties.h
index 84bb1d492b512b..592e95b0584a87 100644
--- a/src/env_properties.h
+++ b/src/env_properties.h
@@ -194,8 +194,10 @@
   V(ipv4_string, "IPv4")                                                      \
   V(ipv6_string, "IPv6")                                                      \
   V(isclosing_string, "isClosing")                                            \
+  V(isfinished_string, "isFinished")                                          \
   V(issuer_string, "issuer")                                                  \
   V(issuercert_string, "issuerCertificate")                                   \
+  V(iterator_string, "Iterator")                                              \
   V(jwk_crv_string, "crv")                                                    \
   V(jwk_d_string, "d")                                                        \
   V(jwk_dp_string, "dp")                                                      \
@@ -241,6 +243,7 @@
   V(nistcurve_string, "nistCurve")                                            \
   V(node_string, "node")                                                      \
   V(nsname_string, "nsname")                                                  \
+  V(num_cols_string, "num_cols")                                              \
   V(object_string, "Object")                                                  \
   V(ocsp_request_string, "OCSPRequest")                                       \
   V(oncertcb_string, "oncertcb")                                              \
@@ -288,6 +291,7 @@
   V(priority_string, "priority")                                              \
   V(process_string, "process")                                                \
   V(promise_string, "promise")                                                \
+  V(prototype_string, "prototype")                                            \
   V(psk_string, "psk")                                                        \
   V(pubkey_string, "pubkey")                                                  \
   V(public_exponent_string, "publicExponent")                                 \
@@ -309,6 +313,7 @@
   V(require_string, "require")                                                \
   V(resource_string, "resource")                                              \
   V(retry_string, "retry")                                                    \
+  V(return_string, "return")                                                  \
   V(salt_length_string, "saltLength")                                         \
   V(scheme_string, "scheme")                                                  \
   V(scopeid_string, "scopeid")                                                \
@@ -332,6 +337,7 @@
   V(standard_name_string, "standardName")                                     \
   V(start_time_string, "startTime")                                           \
   V(state_string, "state")                                                    \
+  V(statement_string, "statement")                                            \
   V(stats_string, "stats")                                                    \
   V(status_string, "status")                                                  \
   V(stdio_string, "stdio")                                                    \
diff --git a/src/module_wrap.cc b/src/module_wrap.cc
index 6f8c9d10955b88..8be6dbd1d0262c 100644
--- a/src/module_wrap.cc
+++ b/src/module_wrap.cc
@@ -663,6 +663,22 @@ void ModuleWrap::EvaluateSync(const FunctionCallbackInfo<Value>& args) {
   CHECK(result->IsPromise());
   Local<Promise> promise = result.As<Promise>();
   if (promise->State() == Promise::PromiseState::kRejected) {
+    // The rejected promise is created by V8, so we don't get a chance to mark
+    // it as handled before the rejection happens during evaluation. But we can
+    // tell the promise rejection callback to treat it as a promise rejected
+    // before a handler was added, which removes it from the unhandled
+    // rejection handling, since we are converting it into an error and
+    // throwing from here directly.
+ Local type = v8::Integer::New( + isolate, + static_cast( + v8::PromiseRejectEvent::kPromiseHandlerAddedAfterReject)); + Local args[] = {type, promise, Undefined(isolate)}; + if (env->promise_reject_callback() + ->Call(context, Undefined(isolate), arraysize(args), args) + .IsEmpty()) { + return; + } Local exception = promise->Result(); Local message = v8::Exception::CreateMessage(isolate, exception); diff --git a/src/node.cc b/src/node.cc index 26f94be94bae70..dd52fbffac0dee 100644 --- a/src/node.cc +++ b/src/node.cc @@ -993,7 +993,7 @@ InitializeOncePerProcessInternal(const std::vector& args, if (!(flags & ProcessInitializationFlags::kNoParseGlobalDebugVariables)) { // Initialized the enabled list for Debug() calls with system // environment variables. - per_process::enabled_debug_list.Parse(per_process::system_environment); + per_process::enabled_debug_list.Parse(nullptr); } PlatformInit(flags); diff --git a/src/node_builtins.cc b/src/node_builtins.cc index 1bec44f6f29b0b..9aaf5626fcfe4a 100644 --- a/src/node_builtins.cc +++ b/src/node_builtins.cc @@ -133,7 +133,8 @@ BuiltinLoader::BuiltinCategories BuiltinLoader::GetBuiltinCategories() const { "internal/streams/lazy_transform", #endif // !HAVE_OPENSSL #if !NODE_OPENSSL_HAS_QUIC - "internal/quic/quic", + "internal/quic/quic", "internal/quic/symbols", "internal/quic/stats", + "internal/quic/state", #endif // !NODE_OPENSSL_HAS_QUIC "sqlite", // Experimental. "sys", // Deprecated. diff --git a/src/node_credentials.cc b/src/node_credentials.cc index 2a7f2e878bc953..0152f6269c83ef 100644 --- a/src/node_credentials.cc +++ b/src/node_credentials.cc @@ -72,9 +72,7 @@ static bool HasOnly(int capability) { // process only has the capability CAP_NET_BIND_SERVICE set. If the current // process does not have any capabilities set and the process is running as // setuid root then lookup will not be allowed. -bool SafeGetenv(const char* key, - std::string* text, - std::shared_ptr env_vars) { +bool SafeGetenv(const char* key, std::string* text, Environment* env) { #if !defined(__CloudABI__) && !defined(_WIN32) #if defined(__linux__) if ((!HasOnly(CAP_NET_BIND_SERVICE) && linux_at_secure()) || @@ -87,14 +85,31 @@ bool SafeGetenv(const char* key, // Fallback to system environment which reads the real environment variable // through uv_os_getenv. - if (env_vars == nullptr) { + std::shared_ptr env_vars; + if (env == nullptr) { env_vars = per_process::system_environment; + } else { + env_vars = env->env_vars(); } std::optional value = env_vars->Get(key); - if (!value.has_value()) return false; - *text = value.value(); - return true; + + bool has_env = value.has_value(); + if (has_env) { + *text = value.value(); + } + + auto options = + (env != nullptr ? 
env->options() + : per_process::cli_options->per_isolate->per_env); + + if (options->trace_env) { + fprintf(stderr, "[--trace-env] get environment variable \"%s\"\n", key); + + PrintTraceEnvStack(options); + } + + return has_env; } static void SafeGetenv(const FunctionCallbackInfo& args) { @@ -103,7 +118,7 @@ static void SafeGetenv(const FunctionCallbackInfo& args) { Isolate* isolate = env->isolate(); Utf8Value strenvtag(isolate, args[0]); std::string text; - if (!SafeGetenv(*strenvtag, &text, env->env_vars())) return; + if (!SafeGetenv(*strenvtag, &text, env)) return; Local result = ToV8Value(isolate->GetCurrentContext(), text).ToLocalChecked(); args.GetReturnValue().Set(result); @@ -117,7 +132,7 @@ static void GetTempDir(const FunctionCallbackInfo& args) { // Let's wrap SafeGetEnv since it returns true for empty string. auto get_env = [&dir, &env](std::string_view key) { - USE(SafeGetenv(key.data(), &dir, env->env_vars())); + USE(SafeGetenv(key.data(), &dir, env)); return !dir.empty(); }; diff --git a/src/node_debug.cc b/src/node_debug.cc index d87cdf61d2d92d..0f254b1fbfe820 100644 --- a/src/node_debug.cc +++ b/src/node_debug.cc @@ -12,8 +12,7 @@ #include #endif // DEBUG -namespace node { -namespace debug { +namespace node::debug { #ifdef DEBUG using v8::Context; @@ -41,7 +40,7 @@ void GetV8FastApiCallCount(const FunctionCallbackInfo& args) { return; } Utf8Value utf8_key(env->isolate(), args[0]); - args.GetReturnValue().Set(GetV8FastApiCallCount(utf8_key.ToString())); + args.GetReturnValue().Set(GetV8FastApiCallCount(utf8_key.ToStringView())); } void SlowIsEven(const FunctionCallbackInfo& args) { @@ -93,8 +92,7 @@ void Initialize(Local target, } #endif // DEBUG -} // namespace debug -} // namespace node +} // namespace node::debug #ifdef DEBUG NODE_BINDING_CONTEXT_AWARE_INTERNAL(debug, node::debug::Initialize) diff --git a/src/node_dotenv.cc b/src/node_dotenv.cc index f594df875d7a0c..049f5cfcb77b9c 100644 --- a/src/node_dotenv.cc +++ b/src/node_dotenv.cc @@ -182,7 +182,10 @@ void Dotenv::ParseContent(const std::string_view input) { } store_.insert_or_assign(std::string(key), multi_line_value); - content.remove_prefix(content.find('\n', closing_quote + 1)); + auto newline = content.find('\n', closing_quote + 1); + if (newline != std::string_view::npos) { + content.remove_prefix(newline); + } continue; } } @@ -210,7 +213,10 @@ void Dotenv::ParseContent(const std::string_view input) { store_.insert_or_assign(std::string(key), value); // Select the first newline after the closing quotation mark // since there could be newline characters inside the value. - content.remove_prefix(content.find('\n', closing_quote + 1)); + auto newline = content.find('\n', closing_quote + 1); + if (newline != std::string_view::npos) { + content.remove_prefix(newline); + } } } else { // Regular key value pair. 
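
The SafeGetenv() hunk above prints a [--trace-env] line for every environment-variable read when the new --trace-env flag (registered in node_options.cc below) is enabled, and the process.env interceptors in node_env_var.cc, next in this diff, add matching set/query/delete/enumerate traces. A small demo script; the log lines in the comments are copied from the fprintf calls in this diff:

    // trace-env-demo.js, run with: node --trace-env trace-env-demo.js
    process.env.FOO = 'bar';   // [--trace-env] set environment variable "FOO"
    'FOO' in process.env;      // [--trace-env] query environment variable "FOO": is set
    process.env.FOO;           // [--trace-env] get environment variable "FOO"
    delete process.env.FOO;    // [--trace-env] delete environment variable "FOO"
    Object.keys(process.env);  // [--trace-env] enumerate environment variables

--trace-env-js-stack and --trace-env-native-stack imply --trace-env and append a JavaScript or native stack trace to each line via PrintTraceEnvStack().
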
diff --git a/src/node_env_var.cc b/src/node_env_var.cc index d19d11dc714e08..82b8231c435775 100644 --- a/src/node_env_var.cc +++ b/src/node_env_var.cc @@ -337,6 +337,19 @@ Maybe KVStore::AssignToObject(v8::Isolate* isolate, return JustVoid(); } +void PrintTraceEnvStack(Environment* env) { + PrintTraceEnvStack(env->options()); +} + +void PrintTraceEnvStack(std::shared_ptr options) { + if (options->trace_env_native_stack) { + DumpNativeBacktrace(stderr); + } + if (options->trace_env_js_stack) { + DumpJavaScriptBacktrace(stderr); + } +} + static Intercepted EnvGetter(Local property, const PropertyCallbackInfo& info) { Environment* env = Environment::GetCurrent(info); @@ -348,7 +361,18 @@ static Intercepted EnvGetter(Local property, CHECK(property->IsString()); MaybeLocal value_string = env->env_vars()->Get(env->isolate(), property.As()); - if (!value_string.IsEmpty()) { + + bool has_env = !value_string.IsEmpty(); + if (env->options()->trace_env) { + Utf8Value key(env->isolate(), property.As()); + fprintf(stderr, + "[--trace-env] get environment variable \"%.*s\"\n", + static_cast(key.length()), + key.out()); + PrintTraceEnvStack(env); + } + + if (has_env) { info.GetReturnValue().Set(value_string.ToLocalChecked()); return Intercepted::kYes; } @@ -386,6 +410,14 @@ static Intercepted EnvSetter(Local property, } env->env_vars()->Set(env->isolate(), key, value_string); + if (env->options()->trace_env) { + Utf8Value key_utf8(env->isolate(), key); + fprintf(stderr, + "[--trace-env] set environment variable \"%.*s\"\n", + static_cast(key_utf8.length()), + key_utf8.out()); + PrintTraceEnvStack(env); + } return Intercepted::kYes; } @@ -396,7 +428,18 @@ static Intercepted EnvQuery(Local property, CHECK(env->has_run_bootstrapping_code()); if (property->IsString()) { int32_t rc = env->env_vars()->Query(env->isolate(), property.As()); - if (rc != -1) { + bool has_env = (rc != -1); + + if (env->options()->trace_env) { + Utf8Value key_utf8(env->isolate(), property.As()); + fprintf(stderr, + "[--trace-env] query environment variable \"%.*s\": %s\n", + static_cast(key_utf8.length()), + key_utf8.out(), + has_env ? "is set" : "is not set"); + PrintTraceEnvStack(env); + } + if (has_env) { // Return attributes for the property. 
      info.GetReturnValue().Set(v8::None);
      return Intercepted::kYes;
    }
@@ -411,6 +454,15 @@ static Intercepted EnvDeleter(Local<Name> property,
   CHECK(env->has_run_bootstrapping_code());
   if (property->IsString()) {
     env->env_vars()->Delete(env->isolate(), property.As<String>());
+
+    if (env->options()->trace_env) {
+      Utf8Value key_utf8(env->isolate(), property.As<String>());
+      fprintf(stderr,
+              "[--trace-env] delete environment variable \"%.*s\"\n",
+              static_cast<int>(key_utf8.length()),
+              key_utf8.out());
+      PrintTraceEnvStack(env);
+    }
   }

   // process.env never has non-configurable properties, so always
@@ -423,6 +475,12 @@ static void EnvEnumerator(const PropertyCallbackInfo<Array>& info) {
   Environment* env = Environment::GetCurrent(info);
   CHECK(env->has_run_bootstrapping_code());

+  if (env->options()->trace_env) {
+    fprintf(stderr, "[--trace-env] enumerate environment variables\n");
+
+    PrintTraceEnvStack(env);
+  }
+
   info.GetReturnValue().Set(
       env->env_vars()->Enumerate(env->isolate()));
 }
diff --git a/src/node_http2.cc b/src/node_http2.cc
index 593c8b5f07a2a4..888f70bd4df8a3 100644
--- a/src/node_http2.cc
+++ b/src/node_http2.cc
@@ -803,13 +803,15 @@ void Http2Session::Close(uint32_t code, bool socket_closed) {
     CHECK_EQ(nghttp2_session_terminate_session(session_.get(), code), 0);
     SendPendingData();
   } else if (stream_ != nullptr) {
+    // So that the previous listener of the socket (typically the JS code of a
+    // TLS socket) will be notified of any activity later.
     stream_->RemoveStreamListener(this);
   }

   set_destroyed();

   // If we are writing we will get to make the callback in OnStreamAfterWrite.
-  if (!is_write_in_progress()) {
+  if (!is_write_in_progress() || !stream_) {
     Debug(this, "make done session callback");
     HandleScope scope(env()->isolate());
     MakeCallback(env()->ondone_string(), 0, nullptr);
diff --git a/src/node_internals.h b/src/node_internals.h
index 85b666e11f5654..000ba16303740d 100644
--- a/src/node_internals.h
+++ b/src/node_internals.h
@@ -321,11 +321,12 @@ class ThreadPoolWork {
 #endif  // defined(__POSIX__) && !defined(__ANDROID__) && !defined(__CloudABI__)

 namespace credentials {
-bool SafeGetenv(const char* key,
-                std::string* text,
-                std::shared_ptr<KVStore> env_vars = nullptr);
+bool SafeGetenv(const char* key, std::string* text, Environment* env = nullptr);
 }  // namespace credentials

+void PrintTraceEnvStack(Environment* env);
+void PrintTraceEnvStack(std::shared_ptr<EnvironmentOptions> options);
+
 void DefineZlibConstants(v8::Local<v8::Object> target);
 v8::Isolate* NewIsolate(v8::Isolate::CreateParams* params,
                         uv_loop_t* event_loop,
diff --git a/src/node_options.cc b/src/node_options.cc
index dbdec4781a86de..8d39925ff524c8 100644
--- a/src/node_options.cc
+++ b/src/node_options.cc
@@ -113,13 +113,6 @@ void EnvironmentOptions::CheckOptions(std::vector<std::string>* errors,
     }
   }

-  if (!type.empty()) {
-    if (type != "commonjs" && type != "module") {
-      errors->push_back("--experimental-default-type must be "
-                        "\"module\" or \"commonjs\"");
-    }
-  }
-
   if (syntax_check_only && has_eval_string) {
     errors->push_back("either --check or --eval can be used, not both");
   }
@@ -427,7 +420,8 @@ EnvironmentOptionsParser::EnvironmentOptionsParser() {
   AddOption("--experimental-sqlite",
             "experimental node:sqlite module",
             &EnvironmentOptions::experimental_sqlite,
-            kAllowedInEnvvar);
+            kAllowedInEnvvar,
+            true);
   AddOption("--experimental-webstorage",
             "experimental Web Storage API",
             &EnvironmentOptions::experimental_webstorage,
             kAllowedInEnvvar);
@@ -690,9 +684,7 @@ EnvironmentOptionsParser::EnvironmentOptionsParser() {
   AddOption("--experimental-test-module-mocks",
             "enable module mocking in the test runner",
&EnvironmentOptions::test_runner_module_mocks); - AddOption("--experimental-test-snapshots", - "enable snapshot testing in the test runner", - &EnvironmentOptions::test_runner_snapshots); + AddOption("--experimental-test-snapshots", "", NoOp{}); AddOption("--test-name-pattern", "run tests whose name matches this regular expression", &EnvironmentOptions::test_name_pattern, @@ -760,10 +752,24 @@ EnvironmentOptionsParser::EnvironmentOptionsParser() { "show stack traces on promise initialization and resolution", &EnvironmentOptions::trace_promises, kAllowedInEnvvar); - AddOption("--experimental-default-type", - "set module system to use by default", - &EnvironmentOptions::type, + + AddOption("--trace-env", + "Print accesses to the environment variables", + &EnvironmentOptions::trace_env, + kAllowedInEnvvar); + Implies("--trace-env-js-stack", "--trace-env"); + Implies("--trace-env-native-stack", "--trace-env"); + AddOption("--trace-env-js-stack", + "Print accesses to the environment variables and the JavaScript " + "stack trace", + &EnvironmentOptions::trace_env_js_stack, kAllowedInEnvvar); + AddOption( + "--trace-env-native-stack", + "Print accesses to the environment variables and the native stack trace", + &EnvironmentOptions::trace_env_native_stack, + kAllowedInEnvvar); + AddOption("--extra-info-on-fatal-exception", "hide extra information on fatal exception that causes exit", &EnvironmentOptions::extra_info_on_fatal_exception, diff --git a/src/node_options.h b/src/node_options.h index a3521e9aea88b1..1b0adf32595fd3 100644 --- a/src/node_options.h +++ b/src/node_options.h @@ -123,7 +123,7 @@ class EnvironmentOptions : public Options { bool experimental_eventsource = false; bool experimental_fetch = true; bool experimental_websocket = true; - bool experimental_sqlite = false; + bool experimental_sqlite = true; bool experimental_webstorage = false; std::string localstorage_file; bool experimental_global_navigator = true; @@ -131,7 +131,6 @@ class EnvironmentOptions : public Options { bool experimental_wasm_modules = false; bool experimental_import_meta_resolve = false; std::string input_type; // Value of --input-type - std::string type; // Value of --experimental-default-type bool entry_is_url = false; bool experimental_permission = false; std::vector allow_fs_read; @@ -189,7 +188,6 @@ class EnvironmentOptions : public Options { uint64_t test_coverage_functions = 0; uint64_t test_coverage_lines = 0; bool test_runner_module_mocks = false; - bool test_runner_snapshots = false; bool test_runner_update_snapshots = false; std::vector test_name_pattern; std::vector test_reporter; @@ -209,6 +207,9 @@ class EnvironmentOptions : public Options { bool trace_uncaught = false; bool trace_warnings = false; bool trace_promises = false; + bool trace_env = false; + bool trace_env_js_stack = false; + bool trace_env_native_stack = false; bool extra_info_on_fatal_exception = true; std::string unhandled_rejections; std::vector userland_loaders; diff --git a/src/node_process.h b/src/node_process.h index d4f1c0d45dec50..461a92161ab2e5 100644 --- a/src/node_process.h +++ b/src/node_process.h @@ -24,25 +24,27 @@ void CreateEnvProxyTemplate(IsolateData* isolate_data); void RawDebug(const v8::FunctionCallbackInfo& args); v8::MaybeLocal ProcessEmit(Environment* env, - const char* event, + std::string_view event, v8::Local message); v8::Maybe ProcessEmitWarningGeneric(Environment* env, - const char* warning, - const char* type = nullptr, - const char* code = nullptr); + std::string_view warning, + std::string_view type 
= "", + std::string_view code = ""); template inline v8::Maybe ProcessEmitWarning(Environment* env, const char* fmt, Args&&... args); -v8::Maybe ProcessEmitWarningSync(Environment* env, const char* message); +v8::Maybe ProcessEmitWarningSync(Environment* env, + std::string_view message); v8::Maybe ProcessEmitExperimentalWarning(Environment* env, - const char* warning); -v8::Maybe ProcessEmitDeprecationWarning(Environment* env, - const char* warning, - const char* deprecation_code); + const std::string& warning); +v8::Maybe ProcessEmitDeprecationWarning( + Environment* env, + const std::string& warning, + std::string_view deprecation_code); v8::MaybeLocal CreateProcessObject(Realm* env); void PatchProcessObject(const v8::FunctionCallbackInfo& args); diff --git a/src/node_process_events.cc b/src/node_process_events.cc index 19774607830a93..128ad9fbb1f257 100644 --- a/src/node_process_events.cc +++ b/src/node_process_events.cc @@ -18,10 +18,11 @@ using v8::Object; using v8::String; using v8::Value; -Maybe ProcessEmitWarningSync(Environment* env, const char* message) { +Maybe ProcessEmitWarningSync(Environment* env, std::string_view message) { Isolate* isolate = env->isolate(); Local context = env->context(); - Local message_string = OneByteString(isolate, message); + Local message_string = + OneByteString(isolate, message.data(), message.size()); Local argv[] = {message_string}; Local emit_function = env->process_emit_warning_sync(); @@ -37,13 +38,14 @@ Maybe ProcessEmitWarningSync(Environment* env, const char* message) { } MaybeLocal ProcessEmit(Environment* env, - const char* event, + std::string_view event, Local message) { Isolate* isolate = env->isolate(); - Local event_string; - if (!String::NewFromOneByte(isolate, reinterpret_cast(event)) - .ToLocal(&event_string)) return MaybeLocal(); + Local event_string; + if (!ToV8Value(env->context(), event).ToLocal(&event_string)) { + return MaybeLocal(); + } Local process = env->process_object(); Local argv[] = {event_string, message}; @@ -51,10 +53,12 @@ MaybeLocal ProcessEmit(Environment* env, } Maybe ProcessEmitWarningGeneric(Environment* env, - const char* warning, - const char* type, - const char* code) { - if (!env->can_call_into_js()) return Just(false); + std::string_view warning, + std::string_view type, + std::string_view code) { + if (!env->can_call_into_js()) { + return Just(false); + } HandleScope handle_scope(env->isolate()); Context::Scope context_scope(env->context()); @@ -73,19 +77,16 @@ Maybe ProcessEmitWarningGeneric(Environment* env, // The caller has to be able to handle a failure anyway, so we might as well // do proper error checking for string creation. 
- if (!String::NewFromUtf8(env->isolate(), warning).ToLocal(&args[argc++])) + if (!ToV8Value(env->context(), warning).ToLocal(&args[argc++])) { return Nothing(); + } - if (type != nullptr) { - if (!String::NewFromOneByte(env->isolate(), - reinterpret_cast(type)) - .ToLocal(&args[argc++])) { + if (!type.empty()) { + if (!ToV8Value(env->context(), type).ToLocal(&args[argc++])) { return Nothing(); } - if (code != nullptr && - !String::NewFromOneByte(env->isolate(), - reinterpret_cast(code)) - .ToLocal(&args[argc++])) { + if (!code.empty() && + !ToV8Value(env->context(), code).ToLocal(&args[argc++])) { return Nothing(); } } @@ -100,13 +101,11 @@ Maybe ProcessEmitWarningGeneric(Environment* env, return Just(true); } - std::set experimental_warnings; Maybe ProcessEmitExperimentalWarning(Environment* env, - const char* warning) { - if (experimental_warnings.find(warning) != experimental_warnings.end()) - return Nothing(); + const std::string& warning) { + if (experimental_warnings.contains(warning)) return Nothing(); experimental_warnings.insert(warning); std::string message(warning); @@ -115,8 +114,8 @@ Maybe ProcessEmitExperimentalWarning(Environment* env, } Maybe ProcessEmitDeprecationWarning(Environment* env, - const char* warning, - const char* deprecation_code) { + const std::string& warning, + std::string_view deprecation_code) { return ProcessEmitWarningGeneric( env, warning, "DeprecationWarning", deprecation_code); } diff --git a/src/node_sockaddr-inl.h b/src/node_sockaddr-inl.h index e16a09b04c7d6f..87ff9b62268657 100644 --- a/src/node_sockaddr-inl.h +++ b/src/node_sockaddr-inl.h @@ -172,22 +172,9 @@ bool SocketAddress::operator!=(const SocketAddress& other) const { return !(*this == other); } -bool SocketAddress::operator<(const SocketAddress& other) const { - return compare(other) == CompareResult::LESS_THAN; -} - -bool SocketAddress::operator>(const SocketAddress& other) const { - return compare(other) == CompareResult::GREATER_THAN; -} - -bool SocketAddress::operator<=(const SocketAddress& other) const { - CompareResult c = compare(other); - return c == CompareResult::NOT_COMPARABLE ? 
false : - c <= CompareResult::SAME; -} - -bool SocketAddress::operator>=(const SocketAddress& other) const { - return compare(other) >= CompareResult::SAME; +std::partial_ordering SocketAddress::operator<=>( + const SocketAddress& other) const { + return compare(other); } template diff --git a/src/node_sockaddr.cc b/src/node_sockaddr.cc index e1572187437f1b..19fcc6b89ac145 100644 --- a/src/node_sockaddr.cc +++ b/src/node_sockaddr.cc @@ -154,9 +154,8 @@ bool is_match_ipv4_ipv6( sizeof(uint32_t)) == 0; } -SocketAddress::CompareResult compare_ipv4( - const SocketAddress& one, - const SocketAddress& two) { +std::partial_ordering compare_ipv4(const SocketAddress& one, + const SocketAddress& two) { const sockaddr_in* one_in = reinterpret_cast(one.data()); const sockaddr_in* two_in = @@ -165,31 +164,29 @@ SocketAddress::CompareResult compare_ipv4( const uint32_t s_addr_two = ntohl(two_in->sin_addr.s_addr); if (s_addr_one < s_addr_two) - return SocketAddress::CompareResult::LESS_THAN; + return std::partial_ordering::less; else if (s_addr_one == s_addr_two) - return SocketAddress::CompareResult::SAME; + return std::partial_ordering::equivalent; else - return SocketAddress::CompareResult::GREATER_THAN; + return std::partial_ordering::greater; } -SocketAddress::CompareResult compare_ipv6( - const SocketAddress& one, - const SocketAddress& two) { +std::partial_ordering compare_ipv6(const SocketAddress& one, + const SocketAddress& two) { const sockaddr_in6* one_in = reinterpret_cast(one.data()); const sockaddr_in6* two_in = reinterpret_cast(two.data()); int ret = memcmp(&one_in->sin6_addr, &two_in->sin6_addr, 16); if (ret < 0) - return SocketAddress::CompareResult::LESS_THAN; + return std::partial_ordering::less; else if (ret > 0) - return SocketAddress::CompareResult::GREATER_THAN; - return SocketAddress::CompareResult::SAME; + return std::partial_ordering::greater; + return std::partial_ordering::equivalent; } -SocketAddress::CompareResult compare_ipv4_ipv6( - const SocketAddress& ipv4, - const SocketAddress& ipv6) { +std::partial_ordering compare_ipv4_ipv6(const SocketAddress& ipv4, + const SocketAddress& ipv6) { const sockaddr_in* ipv4_in = reinterpret_cast(ipv4.data()); const sockaddr_in6 * ipv6_in = @@ -199,7 +196,7 @@ SocketAddress::CompareResult compare_ipv4_ipv6( reinterpret_cast(&ipv6_in->sin6_addr); if (memcmp(ptr, mask, sizeof(mask)) != 0) - return SocketAddress::CompareResult::NOT_COMPARABLE; + return std::partial_ordering::unordered; int ret = memcmp( &ipv4_in->sin_addr, @@ -207,10 +204,10 @@ SocketAddress::CompareResult compare_ipv4_ipv6( sizeof(uint32_t)); if (ret < 0) - return SocketAddress::CompareResult::LESS_THAN; + return std::partial_ordering::less; else if (ret > 0) - return SocketAddress::CompareResult::GREATER_THAN; - return SocketAddress::CompareResult::SAME; + return std::partial_ordering::greater; + return std::partial_ordering::equivalent; } bool in_network_ipv4( @@ -235,7 +232,7 @@ bool in_network_ipv6( // Special case, if prefix == 128, then just do a // straight comparison. 
if (prefix == 128) - return compare_ipv6(ip, net) == SocketAddress::CompareResult::SAME; + return compare_ipv6(ip, net) == std::partial_ordering::equivalent; uint8_t r = prefix % 8; int len = (prefix - r) / 8; @@ -263,7 +260,7 @@ bool in_network_ipv4_ipv6( int prefix) { if (prefix == 128) - return compare_ipv4_ipv6(ip, net) == SocketAddress::CompareResult::SAME; + return compare_ipv4_ipv6(ip, net) == std::partial_ordering::equivalent; uint8_t r = prefix % 8; int len = (prefix - r) / 8; @@ -293,7 +290,7 @@ bool in_network_ipv6_ipv4( const SocketAddress& net, int prefix) { if (prefix == 32) - return compare_ipv4_ipv6(net, ip) == SocketAddress::CompareResult::SAME; + return compare_ipv4_ipv6(net, ip) == std::partial_ordering::equivalent; uint32_t m = ((1ull << prefix) - 1) << (32 - prefix); @@ -337,8 +334,7 @@ bool SocketAddress::is_match(const SocketAddress& other) const { return false; } -SocketAddress::CompareResult SocketAddress::compare( - const SocketAddress& other) const { +std::partial_ordering SocketAddress::compare(const SocketAddress& other) const { switch (family()) { case AF_INET: switch (other.family()) { @@ -349,16 +345,15 @@ SocketAddress::CompareResult SocketAddress::compare( case AF_INET6: switch (other.family()) { case AF_INET: { - CompareResult c = compare_ipv4_ipv6(other, *this); - switch (c) { - case SocketAddress::CompareResult::NOT_COMPARABLE: - // Fall through - case SocketAddress::CompareResult::SAME: - return c; - case SocketAddress::CompareResult::GREATER_THAN: - return SocketAddress::CompareResult::LESS_THAN; - case SocketAddress::CompareResult::LESS_THAN: - return SocketAddress::CompareResult::GREATER_THAN; + auto c = compare_ipv4_ipv6(other, *this); + if (c == std::partial_ordering::unordered) { + return std::partial_ordering::unordered; + } else if (c == std::partial_ordering::equivalent) { + return std::partial_ordering::equivalent; + } else if (c == std::partial_ordering::less) { + return std::partial_ordering::greater; + } else if (c == std::partial_ordering::greater) { + return std::partial_ordering::less; } break; } @@ -366,7 +361,7 @@ SocketAddress::CompareResult SocketAddress::compare( } break; } - return SocketAddress::CompareResult::NOT_COMPARABLE; + return std::partial_ordering::unordered; } bool SocketAddress::is_in_network( diff --git a/src/node_sockaddr.h b/src/node_sockaddr.h index 84aa3adf5fc72a..b822e186969917 100644 --- a/src/node_sockaddr.h +++ b/src/node_sockaddr.h @@ -11,9 +11,10 @@ #include "uv.h" #include "v8.h" +#include +#include #include #include -#include #include namespace node { @@ -22,13 +23,6 @@ class Environment; class SocketAddress : public MemoryRetainer { public: - enum class CompareResult { - NOT_COMPARABLE = -2, - LESS_THAN, - SAME, - GREATER_THAN - }; - struct Hash { size_t operator()(const SocketAddress& addr) const; }; @@ -36,10 +30,7 @@ class SocketAddress : public MemoryRetainer { inline bool operator==(const SocketAddress& other) const; inline bool operator!=(const SocketAddress& other) const; - inline bool operator<(const SocketAddress& other) const; - inline bool operator>(const SocketAddress& other) const; - inline bool operator<=(const SocketAddress& other) const; - inline bool operator>=(const SocketAddress& other) const; + inline std::partial_ordering operator<=>(const SocketAddress& other) const; inline static bool is_numeric_host(const char* hostname); inline static bool is_numeric_host(const char* hostname, int family); @@ -102,7 +93,7 @@ class SocketAddress : public MemoryRetainer { bool is_match(const 
                SocketAddress& other) const;

  // Compares this SocketAddress to the given other SocketAddress.
-  CompareResult compare(const SocketAddress& other) const;
+  std::partial_ordering compare(const SocketAddress& other) const;

  // Returns true if this SocketAddress is within the subnet
  // identified by the given network address and CIDR prefix.
diff --git a/src/node_sqlite.cc b/src/node_sqlite.cc
index e138d120197e33..dd78f4e6dacf59 100644
--- a/src/node_sqlite.cc
+++ b/src/node_sqlite.cc
@@ -22,6 +22,7 @@ using v8::ConstructorBehavior;
 using v8::Context;
 using v8::DontDelete;
 using v8::Exception;
+using v8::External;
 using v8::Function;
 using v8::FunctionCallback;
 using v8::FunctionCallbackInfo;
@@ -790,6 +791,180 @@ void StatementSync::All(const FunctionCallbackInfo<Value>& args) {
   args.GetReturnValue().Set(Array::New(isolate, rows.data(), rows.size()));
 }

+void StatementSync::IterateReturnCallback(
+    const FunctionCallbackInfo<Value>& args) {
+  Environment* env = Environment::GetCurrent(args);
+  auto isolate = env->isolate();
+  auto context = isolate->GetCurrentContext();
+
+  auto self = args.This();
+  // The iterator has either fetched all results or was exited early; prevent
+  // the next() function from returning any further results.
+  self->Set(context, env->isfinished_string(), Boolean::New(isolate, true))
+      .ToChecked();
+
+  auto external_stmt = Local<External>::Cast(
+      self->Get(context, env->statement_string()).ToLocalChecked());
+  auto stmt = static_cast<StatementSync*>(external_stmt->Value());
+  if (!stmt->IsFinalized()) {
+    sqlite3_reset(stmt->statement_);
+  }
+
+  LocalVector<Name> keys(isolate, {env->done_string(), env->value_string()});
+  LocalVector<Value> values(isolate,
+                            {Boolean::New(isolate, true), Null(isolate)});
+
+  DCHECK_EQ(keys.size(), values.size());
+  Local<Object> result = Object::New(
+      isolate, Null(isolate), keys.data(), values.data(), keys.size());
+  args.GetReturnValue().Set(result);
+}
+
+void StatementSync::IterateNextCallback(
+    const FunctionCallbackInfo<Value>& args) {
+  Environment* env = Environment::GetCurrent(args);
+  auto isolate = env->isolate();
+  auto context = isolate->GetCurrentContext();
+
+  auto self = args.This();
+
+  // Skip the iteration if it has already finished.
+  auto is_finished = Local<Boolean>::Cast(
+      self->Get(context, env->isfinished_string()).ToLocalChecked());
+  if (is_finished->Value()) {
+    LocalVector<Name> keys(isolate, {env->done_string(), env->value_string()});
+    LocalVector<Value> values(isolate,
+                              {Boolean::New(isolate, true), Null(isolate)});
+
+    DCHECK_EQ(keys.size(), values.size());
+    Local<Object> result = Object::New(
+        isolate, Null(isolate), keys.data(), values.data(), keys.size());
+    args.GetReturnValue().Set(result);
+    return;
+  }
+
+  auto external_stmt = Local<External>::Cast(
+      self->Get(context, env->statement_string()).ToLocalChecked());
+  auto stmt = static_cast<StatementSync*>(external_stmt->Value());
+  auto num_cols =
+      Local<Integer>::Cast(
+          self->Get(context, env->num_cols_string()).ToLocalChecked())
+          ->Value();
+
+  THROW_AND_RETURN_ON_BAD_STATE(
+      env, stmt->IsFinalized(), "statement has been finalized");
+
+  int r = sqlite3_step(stmt->statement_);
+  if (r != SQLITE_ROW) {
+    CHECK_ERROR_OR_THROW(
+        env->isolate(), stmt->db_->Connection(), r, SQLITE_DONE, void());
+
+    // Clean up when there are no more rows to fetch.
+    sqlite3_reset(stmt->statement_);
+    self->Set(context, env->isfinished_string(), Boolean::New(isolate, true))
+        .ToChecked();
+
+    LocalVector<Name> keys(isolate, {env->done_string(), env->value_string()});
+    LocalVector<Value> values(isolate,
+                              {Boolean::New(isolate, true), Null(isolate)});
+
+    DCHECK_EQ(keys.size(), values.size());
+    Local<Object> result = Object::New(
+        isolate, Null(isolate), keys.data(), values.data(), keys.size());
+    args.GetReturnValue().Set(result);
+    return;
+  }
+
+  LocalVector<Name> row_keys(isolate);
+  row_keys.reserve(num_cols);
+  LocalVector<Value> row_values(isolate);
+  row_values.reserve(num_cols);
+  for (int i = 0; i < num_cols; ++i) {
+    Local<Name> key;
+    if (!stmt->ColumnNameToName(i).ToLocal(&key)) return;
+    Local<Value> val;
+    if (!stmt->ColumnToValue(i).ToLocal(&val)) return;
+    row_keys.emplace_back(key);
+    row_values.emplace_back(val);
+  }
+
+  Local<Object> row = Object::New(
+      isolate, Null(isolate), row_keys.data(), row_values.data(), num_cols);
+
+  LocalVector<Name> keys(isolate, {env->done_string(), env->value_string()});
+  LocalVector<Value> values(isolate, {Boolean::New(isolate, false), row});
+
+  DCHECK_EQ(keys.size(), values.size());
+  Local<Object> result = Object::New(
+      isolate, Null(isolate), keys.data(), values.data(), keys.size());
+  args.GetReturnValue().Set(result);
+}
+
+void StatementSync::Iterate(const FunctionCallbackInfo<Value>& args) {
+  StatementSync* stmt;
+  ASSIGN_OR_RETURN_UNWRAP(&stmt, args.This());
+  Environment* env = Environment::GetCurrent(args);
+  THROW_AND_RETURN_ON_BAD_STATE(
+      env, stmt->IsFinalized(), "statement has been finalized");
+  auto isolate = env->isolate();
+  auto context = env->context();
+  int r = sqlite3_reset(stmt->statement_);
+  CHECK_ERROR_OR_THROW(
+      env->isolate(), stmt->db_->Connection(), r, SQLITE_OK, void());
+
+  if (!stmt->BindParams(args)) {
+    return;
+  }
+
+  Local<Function> next_func =
+      Function::New(context, StatementSync::IterateNextCallback)
+          .ToLocalChecked();
+  Local<Function> return_func =
+      Function::New(context, StatementSync::IterateReturnCallback)
+          .ToLocalChecked();
+
+  LocalVector<Name> keys(isolate, {env->next_string(), env->return_string()});
+  LocalVector<Value> values(isolate, {next_func, return_func});
+
+  Local<Object> global = context->Global();
+  Local<Value> js_iterator;
+  Local<Value> js_iterator_prototype;
+  if (!global->Get(context, env->iterator_string()).ToLocal(&js_iterator))
+    return;
+  if (!js_iterator.As<Object>()
+           ->Get(context, env->prototype_string())
+           .ToLocal(&js_iterator_prototype))
+    return;
+
+  DCHECK_EQ(keys.size(), values.size());
+  Local<Object> iterable_iterator = Object::New(
+      isolate, js_iterator_prototype, keys.data(), values.data(), keys.size());
+
+  auto num_cols_pd = v8::PropertyDescriptor(
+      v8::Integer::New(isolate, sqlite3_column_count(stmt->statement_)), false);
+  num_cols_pd.set_enumerable(false);
+  num_cols_pd.set_configurable(false);
+  iterable_iterator
+      ->DefineProperty(context, env->num_cols_string(), num_cols_pd)
+      .ToChecked();
+
+  auto stmt_pd =
+      v8::PropertyDescriptor(v8::External::New(isolate, stmt), false);
+  stmt_pd.set_enumerable(false);
+  stmt_pd.set_configurable(false);
+  iterable_iterator->DefineProperty(context, env->statement_string(), stmt_pd)
+      .ToChecked();
+
+  auto is_finished_pd =
+      v8::PropertyDescriptor(v8::Boolean::New(isolate, false), true);
+  is_finished_pd.set_enumerable(false);
+  is_finished_pd.set_configurable(false);
+  iterable_iterator
+      ->DefineProperty(context, env->isfinished_string(), is_finished_pd)
+      .ToChecked();
+
+  args.GetReturnValue().Set(iterable_iterator);
+}
+
 void StatementSync::Get(const FunctionCallbackInfo<Value>& args) {
   StatementSync* stmt;
   ASSIGN_OR_RETURN_UNWRAP(&stmt, args.This());
@@ -987,6 +1162,7 @@ Local<FunctionTemplate> StatementSync::GetConstructorTemplate(
   tmpl->SetClassName(FIXED_ONE_BYTE_STRING(isolate, "StatementSync"));
   tmpl->InstanceTemplate()->SetInternalFieldCount(
       StatementSync::kInternalFieldCount);
+  SetProtoMethod(isolate, tmpl, "iterate", StatementSync::Iterate);
   SetProtoMethod(isolate, tmpl, "all", StatementSync::All);
   SetProtoMethod(isolate,
tmpl, "get", StatementSync::Get); SetProtoMethod(isolate, tmpl, "run", StatementSync::Run); diff --git a/src/node_sqlite.h b/src/node_sqlite.h index d778d28e9ac289..256be0c6df9d40 100644 --- a/src/node_sqlite.h +++ b/src/node_sqlite.h @@ -93,6 +93,7 @@ class StatementSync : public BaseObject { DatabaseSync* db, sqlite3_stmt* stmt); static void All(const v8::FunctionCallbackInfo& args); + static void Iterate(const v8::FunctionCallbackInfo& args); static void Get(const v8::FunctionCallbackInfo& args); static void Run(const v8::FunctionCallbackInfo& args); static void SourceSQLGetter(const v8::FunctionCallbackInfo& args); @@ -118,6 +119,11 @@ class StatementSync : public BaseObject { bool BindValue(const v8::Local& value, const int index); v8::MaybeLocal ColumnToValue(const int column); v8::MaybeLocal ColumnNameToName(const int column); + + static void IterateNextCallback( + const v8::FunctionCallbackInfo& args); + static void IterateReturnCallback( + const v8::FunctionCallbackInfo& args); }; using Sqlite3ChangesetGenFunc = int (*)(sqlite3_session*, int*, void**); diff --git a/src/node_version.h b/src/node_version.h index 190b8a1f29d502..c6f24c00af8cf3 100644 --- a/src/node_version.h +++ b/src/node_version.h @@ -23,13 +23,13 @@ #define SRC_NODE_VERSION_H_ #define NODE_MAJOR_VERSION 23 -#define NODE_MINOR_VERSION 3 -#define NODE_PATCH_VERSION 1 +#define NODE_MINOR_VERSION 4 +#define NODE_PATCH_VERSION 0 #define NODE_VERSION_IS_LTS 0 #define NODE_VERSION_LTS_CODENAME "" -#define NODE_VERSION_IS_RELEASE 0 +#define NODE_VERSION_IS_RELEASE 1 #ifndef NODE_STRINGIFY #define NODE_STRINGIFY(n) NODE_STRINGIFY_HELPER(n) diff --git a/src/path.cc b/src/path.cc index 47ddba31443644..1f88e38a857171 100644 --- a/src/path.cc +++ b/src/path.cc @@ -114,7 +114,7 @@ std::string PathResolve(Environment* env, // a UNC path at this points, because UNC paths are always absolute. std::string resolvedDevicePath; const std::string envvar = "=" + resolvedDevice; - credentials::SafeGetenv(envvar.c_str(), &resolvedDevicePath); + credentials::SafeGetenv(envvar.c_str(), &resolvedDevicePath, env); path = resolvedDevicePath.empty() ? 
cwd : resolvedDevicePath; // Verify that a cwd was found and that it actually points diff --git a/src/process_wrap.cc b/src/process_wrap.cc index 14c9e99934e27b..27a294eb384960 100644 --- a/src/process_wrap.cc +++ b/src/process_wrap.cc @@ -314,6 +314,12 @@ class ProcessWrap : public HandleWrap { ProcessWrap* wrap; ASSIGN_OR_RETURN_UNWRAP(&wrap, args.This()); int signal = args[0]->Int32Value(env->context()).FromJust(); +#ifdef _WIN32 + if (signal != SIGKILL && signal != SIGTERM && signal != SIGINT && + signal != SIGQUIT) { + signal = SIGKILL; + } +#endif int err = uv_process_kill(&wrap->process_, signal); args.GetReturnValue().Set(err); } diff --git a/src/quic/session.cc b/src/quic/session.cc index b98ce4a132af60..4323c9268fdac2 100644 --- a/src/quic/session.cc +++ b/src/quic/session.cc @@ -1531,7 +1531,7 @@ void Session::EmitDatagram(Store&& datagram, DatagramReceivedFlags flag) { DCHECK(!is_destroyed()); if (!env()->can_call_into_js()) return; - CallbackScope cbv_scope(this); + CallbackScope cbv_scope(this); Local argv[] = {datagram.ToUint8Array(env()), v8::Boolean::New(env()->isolate(), flag.early)}; diff --git a/src/util.cc b/src/util.cc index a372eb4d88ca1d..8bf239db6e47d4 100644 --- a/src/util.cc +++ b/src/util.cc @@ -782,6 +782,16 @@ std::string DetermineSpecificErrorType(Environment* env, input.As()->GetConstructorName(); Utf8Value name(env->isolate(), constructor_name); return SPrintF("an instance of %s", name.out()); + } else if (input->IsSymbol()) { + v8::MaybeLocal str = + input.As()->ToDetailString(env->context()); + v8::Local js_str; + if (!str.ToLocal(&js_str)) { + return "Symbol"; + } + Utf8Value name(env->isolate(), js_str); + // Symbol(xxx) + return name.out(); } Utf8Value utf8_value(env->isolate(), diff --git a/src/zlib_version.h b/src/zlib_version.h index f9db56d4f51d37..3b53884aae2206 100644 --- a/src/zlib_version.h +++ b/src/zlib_version.h @@ -2,5 +2,5 @@ // Refer to tools/dep_updaters/update-zlib.sh #ifndef SRC_ZLIB_VERSION_H_ #define SRC_ZLIB_VERSION_H_ -#define ZLIB_VERSION "1.3.0.1-motley-71660e1" +#define ZLIB_VERSION "1.3.0.1-motley-82a5fec" #endif // SRC_ZLIB_VERSION_H_ diff --git a/test/cctest/test_encoding_binding.cc b/test/cctest/test_encoding_binding.cc new file mode 100644 index 00000000000000..06cc36d8f6ae34 --- /dev/null +++ b/test/cctest/test_encoding_binding.cc @@ -0,0 +1,155 @@ +#include "encoding_binding.h" +#include "env-inl.h" +#include "gtest/gtest.h" +#include "node_test_fixture.h" +#include "v8.h" + +namespace node { +namespace encoding_binding { + +bool RunDecodeLatin1(Environment* env, + Local args[], + bool ignore_bom, + bool has_fatal, + Local* result) { + Isolate* isolate = env->isolate(); + TryCatch try_catch(isolate); + + Local ignoreBOMValue = Boolean::New(isolate, ignore_bom); + Local fatalValue = Boolean::New(isolate, has_fatal); + + Local updatedArgs[] = {args[0], ignoreBOMValue, fatalValue}; + + BindingData::DecodeLatin1(FunctionCallbackInfo(updatedArgs)); + + if (try_catch.HasCaught()) { + return false; + } + + *result = try_catch.Exception(); + return true; +} + +class EncodingBindingTest : public NodeTestFixture {}; + +TEST_F(EncodingBindingTest, DecodeLatin1_ValidInput) { + Environment* env = CreateEnvironment(); + Isolate* isolate = env->isolate(); + HandleScope handle_scope(isolate); + + const uint8_t latin1_data[] = {0xC1, 0xE9, 0xF3}; + Local ab = ArrayBuffer::New(isolate, sizeof(latin1_data)); + memcpy(ab->GetBackingStore()->Data(), latin1_data, sizeof(latin1_data)); + + Local array = Uint8Array::New(ab, 0, 
+  Local<Value> args[] = {array};
+
+  Local<Value> result;
+  EXPECT_TRUE(RunDecodeLatin1(env, args, false, false, &result));
+
+  String::Utf8Value utf8_result(isolate, result);
+  EXPECT_STREQ(*utf8_result, "Áéó");
+}
+
+TEST_F(EncodingBindingTest, DecodeLatin1_EmptyInput) {
+  Environment* env = CreateEnvironment();
+  Isolate* isolate = env->isolate();
+  HandleScope handle_scope(isolate);
+
+  Local<ArrayBuffer> ab = ArrayBuffer::New(isolate, 0);
+  Local<Uint8Array> array = Uint8Array::New(ab, 0, 0);
+  Local<Value> args[] = {array};
+
+  Local<Value> result;
+  EXPECT_TRUE(RunDecodeLatin1(env, args, false, false, &result));
+
+  String::Utf8Value utf8_result(isolate, result);
+  EXPECT_STREQ(*utf8_result, "");
+}
+
+TEST_F(EncodingBindingTest, DecodeLatin1_InvalidInput) {
+  Environment* env = CreateEnvironment();
+  Isolate* isolate = env->isolate();
+  HandleScope handle_scope(isolate);
+
+  Local<Value> args[] = {String::NewFromUtf8Literal(isolate, "Invalid input")};
+
+  Local<Value> result;
+  EXPECT_FALSE(RunDecodeLatin1(env, args, false, false, &result));
+}
+
+TEST_F(EncodingBindingTest, DecodeLatin1_IgnoreBOM) {
+  Environment* env = CreateEnvironment();
+  Isolate* isolate = env->isolate();
+  HandleScope handle_scope(isolate);
+
+  const uint8_t latin1_data[] = {0xFE, 0xFF, 0xC1, 0xE9, 0xF3};
+  Local<ArrayBuffer> ab = ArrayBuffer::New(isolate, sizeof(latin1_data));
+  memcpy(ab->GetBackingStore()->Data(), latin1_data, sizeof(latin1_data));
+
+  Local<Uint8Array> array = Uint8Array::New(ab, 0, sizeof(latin1_data));
+  Local<Value> args[] = {array};
+
+  Local<Value> result;
+  EXPECT_TRUE(RunDecodeLatin1(env, args, true, false, &result));
+
+  String::Utf8Value utf8_result(isolate, result);
+  EXPECT_STREQ(*utf8_result, "Áéó");
+}
+
+TEST_F(EncodingBindingTest, DecodeLatin1_FatalInvalidInput) {
+  Environment* env = CreateEnvironment();
+  Isolate* isolate = env->isolate();
+  HandleScope handle_scope(isolate);
+
+  const uint8_t invalid_data[] = {0xFF, 0xFF, 0xFF};
+  Local<ArrayBuffer> ab = ArrayBuffer::New(isolate, sizeof(invalid_data));
+  memcpy(ab->GetBackingStore()->Data(), invalid_data, sizeof(invalid_data));
+
+  Local<Uint8Array> array = Uint8Array::New(ab, 0, sizeof(invalid_data));
+  Local<Value> args[] = {array};
+
+  Local<Value> result;
+  EXPECT_FALSE(RunDecodeLatin1(env, args, false, true, &result));
+}
+
+TEST_F(EncodingBindingTest, DecodeLatin1_IgnoreBOMAndFatal) {
+  Environment* env = CreateEnvironment();
+  Isolate* isolate = env->isolate();
+  HandleScope handle_scope(isolate);
+
+  const uint8_t latin1_data[] = {0xFE, 0xFF, 0xC1, 0xE9, 0xF3};
+  Local<ArrayBuffer> ab = ArrayBuffer::New(isolate, sizeof(latin1_data));
+  memcpy(ab->GetBackingStore()->Data(), latin1_data, sizeof(latin1_data));
+
+  Local<Uint8Array> array = Uint8Array::New(ab, 0, sizeof(latin1_data));
+  Local<Value> args[] = {array};
+
+  Local<Value> result;
+  EXPECT_TRUE(RunDecodeLatin1(env, args, true, true, &result));
+
+  String::Utf8Value utf8_result(isolate, result);
+  EXPECT_STREQ(*utf8_result, "Áéó");
+}
+
+TEST_F(EncodingBindingTest, DecodeLatin1_BOMPresent) {
+  Environment* env = CreateEnvironment();
+  Isolate* isolate = env->isolate();
+  HandleScope handle_scope(isolate);
+
+  const uint8_t latin1_data[] = {0xFF, 0xC1, 0xE9, 0xF3};
+  Local<ArrayBuffer> ab = ArrayBuffer::New(isolate, sizeof(latin1_data));
+  memcpy(ab->GetBackingStore()->Data(), latin1_data, sizeof(latin1_data));
+
+  Local<Uint8Array> array = Uint8Array::New(ab, 0, sizeof(latin1_data));
+  Local<Value> args[] = {array};
+
+  Local<Value> result;
+  EXPECT_TRUE(RunDecodeLatin1(env, args, true, false, &result));
+
+  String::Utf8Value utf8_result(isolate, result);
+  EXPECT_STREQ(*utf8_result, "Áéó");
+}
+
+} // namespace encoding_binding
+} // namespace node
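The cctests above pin down plain ISO-8859-1 byte-to-code-point decoding. The same mapping can be observed from JavaScript with the global TextDecoder; a minimal sketch, illustrative only and not part of the patch (note that per the WHATWG Encoding Standard the 'latin1' label resolves to windows-1252, which agrees with ISO-8859-1 for the bytes used here):

    // Each Latin-1 byte maps directly to the Unicode code point of the
    // same numeric value: 0xC1 -> 'Á', 0xE9 -> 'é', 0xF3 -> 'ó'.
    const decoder = new TextDecoder('latin1');
    console.log(decoder.decode(new Uint8Array([0xC1, 0xE9, 0xF3]))); // 'Áéó'
    console.log(decoder.decode(new Uint8Array(0)));                  // ''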
diff --git a/test/cctest/test_sockaddr.cc b/test/cctest/test_sockaddr.cc
index bd80d59f821e5d..68b8739f97e1fc 100644
--- a/test/cctest/test_sockaddr.cc
+++ b/test/cctest/test_sockaddr.cc
@@ -145,9 +145,9 @@ TEST(SocketAddress, Comparison) {
   SocketAddress addr5(reinterpret_cast<const sockaddr*>(&storage[4]));
   SocketAddress addr6(reinterpret_cast<const sockaddr*>(&storage[5]));
 
-  CHECK_EQ(addr1.compare(addr1), SocketAddress::CompareResult::SAME);
-  CHECK_EQ(addr1.compare(addr2), SocketAddress::CompareResult::LESS_THAN);
-  CHECK_EQ(addr2.compare(addr1), SocketAddress::CompareResult::GREATER_THAN);
+  CHECK_EQ(addr1.compare(addr1), std::partial_ordering::equivalent);
+  CHECK_EQ(addr1.compare(addr2), std::partial_ordering::less);
+  CHECK_EQ(addr2.compare(addr1), std::partial_ordering::greater);
   CHECK(addr1 <= addr1);
   CHECK(addr1 < addr2);
   CHECK(addr1 <= addr2);
@@ -155,9 +155,9 @@ TEST(SocketAddress, Comparison) {
   CHECK(addr2 > addr1);
   CHECK(addr2 >= addr1);
 
-  CHECK_EQ(addr3.compare(addr3), SocketAddress::CompareResult::SAME);
-  CHECK_EQ(addr3.compare(addr4), SocketAddress::CompareResult::LESS_THAN);
-  CHECK_EQ(addr4.compare(addr3), SocketAddress::CompareResult::GREATER_THAN);
+  CHECK_EQ(addr3.compare(addr3), std::partial_ordering::equivalent);
+  CHECK_EQ(addr3.compare(addr4), std::partial_ordering::less);
+  CHECK_EQ(addr4.compare(addr3), std::partial_ordering::greater);
   CHECK(addr3 <= addr3);
   CHECK(addr3 < addr4);
   CHECK(addr3 <= addr4);
@@ -166,8 +166,8 @@ TEST(SocketAddress, Comparison) {
   CHECK(addr4 >= addr3);
 
   // Not comparable
-  CHECK_EQ(addr1.compare(addr3), SocketAddress::CompareResult::NOT_COMPARABLE);
-  CHECK_EQ(addr3.compare(addr1), SocketAddress::CompareResult::NOT_COMPARABLE);
+  CHECK_EQ(addr1.compare(addr3), std::partial_ordering::unordered);
+  CHECK_EQ(addr3.compare(addr1), std::partial_ordering::unordered);
   CHECK(!(addr1 < addr3));
   CHECK(!(addr1 > addr3));
   CHECK(!(addr1 >= addr3));
@@ -178,10 +178,10 @@ TEST(SocketAddress, Comparison) {
   CHECK(!(addr3 <= addr1));
 
   // Comparable
-  CHECK_EQ(addr1.compare(addr5), SocketAddress::CompareResult::SAME);
-  CHECK_EQ(addr2.compare(addr6), SocketAddress::CompareResult::SAME);
-  CHECK_EQ(addr1.compare(addr6), SocketAddress::CompareResult::LESS_THAN);
-  CHECK_EQ(addr6.compare(addr1), SocketAddress::CompareResult::GREATER_THAN);
+  CHECK_EQ(addr1.compare(addr5), std::partial_ordering::equivalent);
+  CHECK_EQ(addr2.compare(addr6), std::partial_ordering::equivalent);
+  CHECK_EQ(addr1.compare(addr6), std::partial_ordering::less);
+  CHECK_EQ(addr6.compare(addr1), std::partial_ordering::greater);
   CHECK(addr1 <= addr5);
   CHECK(addr1 <= addr6);
   CHECK(addr1 < addr6);
diff --git a/test/es-module/test-esm-detect-ambiguous.mjs b/test/es-module/test-esm-detect-ambiguous.mjs
index 8da2fea8022a63..8b630b8cff6314 100644
--- a/test/es-module/test-esm-detect-ambiguous.mjs
+++ b/test/es-module/test-esm-detect-ambiguous.mjs
@@ -52,19 +52,6 @@ describe('Module syntax detection', { concurrency: !process.env.TEST_PARALLEL },
     strictEqual(signal, null);
   });
 
-  it('should be overridden by --experimental-default-type', async () => {
-    const { code, signal, stdout, stderr } = await spawnPromisified(process.execPath, [
-      '--experimental-default-type=commonjs',
-      '--eval',
-      'import.meta.url',
-    ]);
-
-    match(stderr, /SyntaxError: Cannot use 'import\.meta' outside a module/);
-    strictEqual(stdout, '');
-    strictEqual(code, 1);
-    strictEqual(signal, null);
-  });
-
   it('does not trigger detection via source code `eval()`', async () => {
     const { code, signal, stdout, stderr } = await
spawnPromisified(process.execPath, [ '--eval', diff --git a/test/es-module/test-esm-extensionless-esm-and-wasm.mjs b/test/es-module/test-esm-extensionless-esm-and-wasm.mjs index 17786b61291ab6..41fda57cfa4e30 100644 --- a/test/es-module/test-esm-extensionless-esm-and-wasm.mjs +++ b/test/es-module/test-esm-extensionless-esm-and-wasm.mjs @@ -77,7 +77,6 @@ describe('extensionless ES modules within no package scope', { concurrency: !pro }); describe('extensionless Wasm within no package scope', { concurrency: !process.env.TEST_PARALLEL }, () => { - // This succeeds with `--experimental-default-type=module` it('should error as the entry point', async () => { const { code, signal, stdout, stderr } = await spawnPromisified(process.execPath, [ '--experimental-wasm-modules', diff --git a/test/es-module/test-esm-type-flag-cli-entry.mjs b/test/es-module/test-esm-type-flag-cli-entry.mjs deleted file mode 100644 index 673a5f2da15ae0..00000000000000 --- a/test/es-module/test-esm-type-flag-cli-entry.mjs +++ /dev/null @@ -1,96 +0,0 @@ -import { spawnPromisified } from '../common/index.mjs'; -import * as fixtures from '../common/fixtures.mjs'; -import { describe, it } from 'node:test'; -import { match, strictEqual } from 'node:assert'; - -describe('--experimental-default-type=module should not support extension searching', { - concurrency: !process.env.TEST_PARALLEL, -}, () => { - it('should support extension searching under --experimental-default-type=commonjs', async () => { - const { code, signal, stdout, stderr } = await spawnPromisified(process.execPath, [ - '--experimental-default-type=commonjs', - 'index', - ], { - cwd: fixtures.path('es-modules/package-without-type'), - }); - - strictEqual(stdout, 'package-without-type\n'); - strictEqual(stderr, ''); - strictEqual(code, 0); - strictEqual(signal, null); - }); - - it('should error with implicit extension under --experimental-default-type=module', async () => { - const { code, signal, stdout, stderr } = await spawnPromisified(process.execPath, [ - '--experimental-default-type=module', - 'index', - ], { - cwd: fixtures.path('es-modules/package-without-type'), - }); - - match(stderr, /ENOENT.*Did you mean to import .*index\.js"\?/s); - strictEqual(stdout, ''); - strictEqual(code, 1); - strictEqual(signal, null); - }); -}); - -describe('--experimental-default-type=module should not parse paths as URLs', { - concurrency: !process.env.TEST_PARALLEL, -}, () => { - it('should not parse a `?` in a filename as starting a query string', async () => { - const { code, signal, stdout, stderr } = await spawnPromisified(process.execPath, [ - '--experimental-default-type=module', - 'file#1.js', - ], { - cwd: fixtures.path('es-modules/package-without-type'), - }); - - strictEqual(stderr, ''); - strictEqual(stdout, 'file#1\n'); - strictEqual(code, 0); - strictEqual(signal, null); - }); - - it('should resolve `..`', async () => { - const { code, signal, stdout, stderr } = await spawnPromisified(process.execPath, [ - '--experimental-default-type=module', - '../package-without-type/file#1.js', - ], { - cwd: fixtures.path('es-modules/package-without-type'), - }); - - strictEqual(stderr, ''); - strictEqual(stdout, 'file#1\n'); - strictEqual(code, 0); - strictEqual(signal, null); - }); - - it('should allow a leading `./`', async () => { - const { code, signal, stdout, stderr } = await spawnPromisified(process.execPath, [ - '--experimental-default-type=module', - './file#1.js', - ], { - cwd: fixtures.path('es-modules/package-without-type'), - }); - - strictEqual(stderr, ''); 
- strictEqual(stdout, 'file#1\n'); - strictEqual(code, 0); - strictEqual(signal, null); - }); - - it('should not require a leading `./`', async () => { - const { code, signal, stdout, stderr } = await spawnPromisified(process.execPath, [ - '--experimental-default-type=module', - 'file#1.js', - ], { - cwd: fixtures.path('es-modules/package-without-type'), - }); - - strictEqual(stderr, ''); - strictEqual(stdout, 'file#1\n'); - strictEqual(code, 0); - strictEqual(signal, null); - }); -}); diff --git a/test/es-module/test-esm-type-flag-errors.mjs b/test/es-module/test-esm-type-flag-errors.mjs deleted file mode 100644 index 2f7a1db35d2423..00000000000000 --- a/test/es-module/test-esm-type-flag-errors.mjs +++ /dev/null @@ -1,103 +0,0 @@ -import { spawnPromisified } from '../common/index.mjs'; -import * as fixtures from '../common/fixtures.mjs'; -import { describe, it } from 'node:test'; -import { deepStrictEqual, match, strictEqual } from 'node:assert'; - -describe('--experimental-default-type=module', { concurrency: !process.env.TEST_PARALLEL }, () => { - describe('should not affect the interpretation of files with unknown extensions', { - concurrency: !process.env.TEST_PARALLEL, - }, () => { - it('should error on an entry point with an unknown extension', async () => { - const { code, signal, stdout, stderr } = await spawnPromisified(process.execPath, [ - '--experimental-default-type=module', - fixtures.path('es-modules/package-type-module/extension.unknown'), - ]); - - match(stderr, /ERR_UNKNOWN_FILE_EXTENSION/); - strictEqual(stdout, ''); - strictEqual(code, 1); - strictEqual(signal, null); - }); - - it('should error on an import with an unknown extension', async () => { - const { code, signal, stdout, stderr } = await spawnPromisified(process.execPath, [ - '--experimental-default-type=module', - fixtures.path('es-modules/package-type-module/imports-unknownext.mjs'), - ]); - - match(stderr, /ERR_UNKNOWN_FILE_EXTENSION/); - strictEqual(stdout, ''); - strictEqual(code, 1); - strictEqual(signal, null); - }); - }); - - it('should affect CJS .js files (imported, required, entry points)', async () => { - const result = await spawnPromisified(process.execPath, [ - '--experimental-default-type=module', - fixtures.path('es-modules/package-type-commonjs/echo-require-cache.js'), - ]); - - deepStrictEqual(result, { - code: 0, - stderr: '', - stdout: 'undefined\n', - signal: null, - }); - }); - - it('should affect .cjs files that are imported', async () => { - const result = await spawnPromisified(process.execPath, [ - '--experimental-default-type=module', - '-e', - `import ${JSON.stringify(fixtures.fileURL('es-module-require-cache/echo.cjs'))}`, - ]); - - deepStrictEqual(result, { - code: 0, - stderr: '', - stdout: 'undefined\n', - signal: null, - }); - }); - - it('should affect entry point .cjs files (with no hooks)', async () => { - const { stderr, stdout, code } = await spawnPromisified(process.execPath, [ - '--experimental-default-type=module', - fixtures.path('es-module-require-cache/echo.cjs'), - ]); - - strictEqual(stderr, ''); - match(stdout, /^undefined\n$/); - strictEqual(code, 0); - }); - - it('should affect entry point .cjs files (when any hooks is registered)', async () => { - const result = await spawnPromisified(process.execPath, [ - '--experimental-default-type=module', - '--import', - 'data:text/javascript,import{register}from"node:module";register("data:text/javascript,");', - fixtures.path('es-module-require-cache/echo.cjs'), - ]); - - deepStrictEqual(result, { - code: 0, - stderr: 
'', - stdout: 'undefined\n', - signal: null, - }); - }); - - it('should not affect CJS from input-type', async () => { - const { stderr, stdout, code } = await spawnPromisified(process.execPath, [ - '--experimental-default-type=module', - '--input-type=commonjs', - '-p', - 'require.cache', - ]); - - strictEqual(stderr, ''); - match(stdout, /^\[Object: null prototype\] \{\}\n$/); - strictEqual(code, 0); - }); -}); diff --git a/test/es-module/test-esm-type-flag-loose-files.mjs b/test/es-module/test-esm-type-flag-loose-files.mjs deleted file mode 100644 index afa306099d5e6a..00000000000000 --- a/test/es-module/test-esm-type-flag-loose-files.mjs +++ /dev/null @@ -1,75 +0,0 @@ -// Flags: --experimental-default-type=module --experimental-wasm-modules -import { spawnPromisified } from '../common/index.mjs'; -import * as fixtures from '../common/fixtures.mjs'; -import { describe, it } from 'node:test'; -import { strictEqual } from 'node:assert'; - -describe('the type flag should change the interpretation of certain files outside of any package scope', - { concurrency: !process.env.TEST_PARALLEL }, () => { - it('should run as ESM a .js file that is outside of any package scope', async () => { - const { code, signal, stdout, stderr } = await spawnPromisified(process.execPath, [ - '--experimental-default-type=module', - fixtures.path('es-modules/loose.js'), - ]); - - strictEqual(stderr, ''); - strictEqual(stdout, 'executed\n'); - strictEqual(code, 0); - strictEqual(signal, null); - }); - - it('should run as ESM an extensionless JavaScript file that is outside of any package scope', async () => { - const { code, signal, stdout, stderr } = await spawnPromisified(process.execPath, [ - '--experimental-default-type=module', - fixtures.path('es-modules/noext-esm'), - ]); - - strictEqual(stderr, ''); - strictEqual(stdout, 'executed\n'); - strictEqual(code, 0); - strictEqual(signal, null); - }); - - it('should run as Wasm an extensionless Wasm file that is outside of any package scope', async () => { - const { code, signal, stdout, stderr } = await spawnPromisified(process.execPath, [ - '--experimental-default-type=module', - '--experimental-wasm-modules', - '--no-warnings', - fixtures.path('es-modules/noext-wasm'), - ]); - - strictEqual(stderr, ''); - strictEqual(stdout, ''); - strictEqual(code, 0); - strictEqual(signal, null); - }); - - it('should import as ESM a .js file that is outside of any package scope', async () => { - const { default: defaultExport } = await import(fixtures.fileURL('es-modules/loose.js')); - strictEqual(defaultExport, 'module'); - }); - - it('should import as ESM an extensionless JavaScript file that is outside of any package scope', - async () => { - const { default: defaultExport } = await import(fixtures.fileURL('es-modules/noext-esm')); - strictEqual(defaultExport, 'module'); - }); - - it('should import as Wasm an extensionless Wasm file that is outside of any package scope', async () => { - const { add } = await import(fixtures.fileURL('es-modules/noext-wasm')); - strictEqual(add(1, 2), 3); - }); - - it('should check as ESM input passed via --check', async () => { - const { code, signal, stdout, stderr } = await spawnPromisified(process.execPath, [ - '--experimental-default-type=module', - '--check', - fixtures.path('es-modules/loose.js'), - ]); - - strictEqual(stderr, ''); - strictEqual(stdout, ''); - strictEqual(code, 0); - strictEqual(signal, null); - }); - }); diff --git a/test/es-module/test-esm-type-flag-package-scopes.mjs 
b/test/es-module/test-esm-type-flag-package-scopes.mjs deleted file mode 100644 index 18d7d04f53a25b..00000000000000 --- a/test/es-module/test-esm-type-flag-package-scopes.mjs +++ /dev/null @@ -1,167 +0,0 @@ -// Flags: --experimental-default-type=module --experimental-wasm-modules -import { spawnPromisified } from '../common/index.mjs'; -import * as fixtures from '../common/fixtures.mjs'; -import { describe, it } from 'node:test'; -import { strictEqual } from 'node:assert'; - -describe('the type flag should change the interpretation of certain files within a "type": "module" package scope', - { concurrency: !process.env.TEST_PARALLEL }, () => { - it('should run as ESM an extensionless JavaScript file within a "type": "module" scope', async () => { - const { code, signal, stdout, stderr } = await spawnPromisified(process.execPath, [ - '--experimental-default-type=module', - fixtures.path('es-modules/package-type-module/noext-esm'), - ]); - - strictEqual(stderr, ''); - strictEqual(stdout, 'executed\n'); - strictEqual(code, 0); - strictEqual(signal, null); - }); - - it('should import an extensionless JavaScript file within a "type": "module" scope', async () => { - const { default: defaultExport } = - await import(fixtures.fileURL('es-modules/package-type-module/noext-esm')); - strictEqual(defaultExport, 'module'); - }); - - it('should import an extensionless JavaScript file within a "type": "module" scope under node_modules', - async () => { - const { default: defaultExport } = - await import(fixtures.fileURL( - 'es-modules/package-type-module/node_modules/dep-with-package-json-type-module/noext-esm')); - strictEqual(defaultExport, 'module'); - }); - - it('should run as Wasm an extensionless Wasm file within a "type": "module" scope', async () => { - const { code, signal, stdout, stderr } = await spawnPromisified(process.execPath, [ - '--experimental-default-type=module', - '--experimental-wasm-modules', - '--no-warnings', - fixtures.path('es-modules/package-type-module/noext-wasm'), - ]); - - strictEqual(stderr, ''); - strictEqual(stdout, 'executed\n'); - strictEqual(code, 0); - strictEqual(signal, null); - }); - - it('should import as Wasm an extensionless Wasm file within a "type": "module" scope', async () => { - const { add } = await import(fixtures.fileURL('es-modules/package-type-module/noext-wasm')); - strictEqual(add(1, 2), 3); - }); - - it('should import an extensionless Wasm file within a "type": "module" scope under node_modules', - async () => { - const { add } = await import(fixtures.fileURL( - 'es-modules/package-type-module/node_modules/dep-with-package-json-type-module/noext-wasm')); - strictEqual(add(1, 2), 3); - }); - }); - -describe(`the type flag should change the interpretation of certain files within a package scope that lacks a -"type" field and is not under node_modules`, { concurrency: !process.env.TEST_PARALLEL }, () => { - it('should run as ESM a .js file within package scope that has no defined "type" and is not under node_modules', - async () => { - const { code, signal, stdout, stderr } = await spawnPromisified(process.execPath, [ - '--experimental-default-type=module', - fixtures.path('es-modules/package-without-type/module.js'), - ]); - - strictEqual(stderr, ''); - strictEqual(stdout, 'executed\n'); - strictEqual(code, 0); - strictEqual(signal, null); - }); - - it(`should run as ESM an extensionless JavaScript file within a package scope that has no defined "type" and is not -under node_modules`, async () => { - const { code, signal, stdout, stderr } = await 
spawnPromisified(process.execPath, [ - '--experimental-default-type=module', - fixtures.path('es-modules/package-without-type/noext-esm'), - ]); - - strictEqual(stderr, ''); - strictEqual(stdout, 'executed\n'); - strictEqual(code, 0); - strictEqual(signal, null); - }); - - it(`should run as Wasm an extensionless Wasm file within a package scope that has no defined "type" and is not under - node_modules`, async () => { - const { code, signal, stdout, stderr } = await spawnPromisified(process.execPath, [ - '--experimental-default-type=module', - '--experimental-wasm-modules', - '--no-warnings', - fixtures.path('es-modules/noext-wasm'), - ]); - - strictEqual(stderr, ''); - strictEqual(stdout, ''); - strictEqual(code, 0); - strictEqual(signal, null); - }); - - it('should import as ESM a .js file within package scope that has no defined "type" and is not under node_modules', - async () => { - const { default: defaultExport } = await import(fixtures.fileURL('es-modules/package-without-type/module.js')); - strictEqual(defaultExport, 'module'); - }); - - it(`should import as ESM an extensionless JavaScript file within a package scope that has no defined "type" and is - not under node_modules`, async () => { - const { default: defaultExport } = await import(fixtures.fileURL('es-modules/package-without-type/noext-esm')); - strictEqual(defaultExport, 'module'); - }); - - it(`should import as Wasm an extensionless Wasm file within a package scope that has no defined "type" and is not - under node_modules`, async () => { - const { add } = await import(fixtures.fileURL('es-modules/noext-wasm')); - strictEqual(add(1, 2), 3); - }); -}); - -describe(`the type flag should NOT change the interpretation of certain files within a package scope that lacks a -"type" field and is under node_modules`, { concurrency: !process.env.TEST_PARALLEL }, () => { - it('should run as CommonJS a .js file within package scope that has no defined "type" and is under node_modules', - async () => { - const { code, signal, stdout, stderr } = await spawnPromisified(process.execPath, [ - '--experimental-default-type=module', - fixtures.path('es-modules/package-type-module/node_modules/dep-with-package-json-without-type/run.js'), - ]); - - strictEqual(stderr, ''); - strictEqual(stdout, 'executed\n'); - strictEqual(code, 0); - strictEqual(signal, null); - }); - - it(`should import as CommonJS a .js file within a package scope that has no defined "type" and is under - node_modules`, async () => { - const { default: defaultExport } = - await import(fixtures.fileURL( - 'es-modules/package-type-module/node_modules/dep-with-package-json-without-type/run.js')); - strictEqual(defaultExport, 42); - }); - - it(`should run as CommonJS an extensionless JavaScript file within a package scope that has no defined "type" and is - under node_modules`, async () => { - const { code, signal, stdout, stderr } = await spawnPromisified(process.execPath, [ - '--experimental-default-type=module', - fixtures.path('es-modules/package-type-module/node_modules/dep-with-package-json-without-type/noext-cjs'), - ]); - - strictEqual(stderr, ''); - strictEqual(stdout, 'executed\n'); - strictEqual(code, 0); - strictEqual(signal, null); - }); - - it(`should import as CommonJS an extensionless JavaScript file within a package scope that has no defined "type" and - is under node_modules`, async () => { - const { default: defaultExport } = - await import(fixtures.fileURL( - 'es-modules/package-type-module/node_modules/dep-with-package-json-without-type/noext-cjs')); - 
strictEqual(defaultExport, 42);
-  });
-});
diff --git a/test/es-module/test-esm-type-flag-string-input.mjs b/test/es-module/test-esm-type-flag-string-input.mjs
deleted file mode 100644
index 5581aaa426b56f..00000000000000
--- a/test/es-module/test-esm-type-flag-string-input.mjs
+++ /dev/null
@@ -1,46 +0,0 @@
-import { spawnPromisified } from '../common/index.mjs';
-import { spawn } from 'node:child_process';
-import { describe, it } from 'node:test';
-import { strictEqual, match } from 'node:assert';
-
-describe('the type flag should change the interpretation of string input', {
-  concurrency: !process.env.TEST_PARALLEL,
-}, () => {
-  it('should run as ESM input passed via --eval', async () => {
-    const { code, signal, stdout, stderr } = await spawnPromisified(process.execPath, [
-      '--experimental-default-type=module',
-      '--eval',
-      'import "data:text/javascript,console.log(42)"',
-    ]);
-
-    strictEqual(stderr, '');
-    strictEqual(stdout, '42\n');
-    strictEqual(code, 0);
-    strictEqual(signal, null);
-  });
-
-  // ESM is unsupported for --print via --input-type=module
-
-  it('should run as ESM input passed via STDIN', async () => {
-    const child = spawn(process.execPath, [
-      '--experimental-default-type=module',
-    ]);
-    child.stdin.end('console.log(typeof import.meta.resolve)');
-
-    match((await child.stdout.toArray()).toString(), /^function\r?\n$/);
-  });
-
-  it('should be overridden by --input-type', async () => {
-    const { code, signal, stdout, stderr } = await spawnPromisified(process.execPath, [
-      '--experimental-default-type=module',
-      '--input-type=commonjs',
-      '--eval',
-      'console.log(require("process").version)',
-    ]);
-
-    strictEqual(stderr, '');
-    strictEqual(stdout, `${process.version}\n`);
-    strictEqual(code, 0);
-    strictEqual(signal, null);
-  });
-});
diff --git a/test/es-module/test-require-module-error-catching.js b/test/es-module/test-require-module-error-catching.js
new file mode 100644
index 00000000000000..c314513d9bbf04
--- /dev/null
+++ b/test/es-module/test-require-module-error-catching.js
@@ -0,0 +1,21 @@
+// This tests that synchronous errors in ESM from require(esm) can be caught.
+
+'use strict';
+
+require('../common');
+const assert = require('assert');
+
+// Runtime errors from throw should be caught.
+assert.throws(() => {
+  require('../fixtures/es-modules/runtime-error-esm.js');
+}, {
+  message: 'hello'
+});
+
+// Reference errors should be caught too.
+assert.throws(() => {
+  require('../fixtures/es-modules/reference-error-esm.js');
+}, {
+  name: 'ReferenceError',
+  message: 'exports is not defined'
+});
diff --git a/test/es-module/test-require-module-synchronous-rejection-handling.js b/test/es-module/test-require-module-synchronous-rejection-handling.js
new file mode 100644
index 00000000000000..76a25e742bb900
--- /dev/null
+++ b/test/es-module/test-require-module-synchronous-rejection-handling.js
@@ -0,0 +1,13 @@
+// This tests that synchronous rejections from require(esm) still go to the
+// unhandled rejection handler.
+ +'use strict'; + +const common = require('../common'); +const assert = require('assert'); + +process.on('unhandledRejection', common.mustCall((reason, promise) => { + assert.strictEqual(reason, 'reject!'); +})); + +require('../fixtures/es-modules/synchronous-rejection-esm.js'); diff --git a/test/es-module/test-require-node-modules-warning.js b/test/es-module/test-require-node-modules-warning.js new file mode 100644 index 00000000000000..837f174fd28950 --- /dev/null +++ b/test/es-module/test-require-node-modules-warning.js @@ -0,0 +1,59 @@ +'use strict'; + +// This checks the experimental warning for require(esm) is disabled when the +// require() comes from node_modules. +require('../common'); +const { spawnSyncAndAssert } = require('../common/child_process'); +const fixtures = require('../common/fixtures'); + +const warningRE = /Support for loading ES Module in require\(\)/; + +// The fixtures are placed in a directory that includes "node_modules" in its name +// to check false negatives. + +// require() in non-node_modules -> esm in node_modules should warn. +spawnSyncAndAssert( + process.execPath, + [fixtures.path('es-modules', 'test_node_modules', 'require-esm.js')], + { + trim: true, + stderr: warningRE, + stdout: 'world', + } +); + +// require() in non-node_modules -> require() in node_modules -> esm in node_modules +// should not warn. +spawnSyncAndAssert( + process.execPath, + [fixtures.path('es-modules', 'test_node_modules', 'require-require-esm.js')], + { + trim: true, + stderr: '', + stdout: 'world', + } +); + +// Import in non-node_modules -> require() in node_modules -> esm in node_modules +// should not warn. +spawnSyncAndAssert( + process.execPath, + [fixtures.path('es-modules', 'test_node_modules', 'import-require-esm.mjs')], + { + trim: true, + stderr: '', + stdout: 'world', + } +); + +// Import in non-node_modules -> import in node_modules -> +// require() in node_modules -> esm in node_modules should not warn. 
+spawnSyncAndAssert( + process.execPath, + [fixtures.path('es-modules', 'test_node_modules', 'import-import-require-esm.mjs')], + { + trim: true, + stderr: '', + stdout: 'world', + } +); diff --git a/test/es-module/test-typescript-commonjs.mjs b/test/es-module/test-typescript-commonjs.mjs index c24576c4a0c392..bbaaa7a414bc1d 100644 --- a/test/es-module/test-typescript-commonjs.mjs +++ b/test/es-module/test-typescript-commonjs.mjs @@ -121,19 +121,6 @@ test('execute a .cts file importing a .mts file export', async () => { strictEqual(result.code, 0); }); -test('execute a .cts file with default type module', async () => { - const result = await spawnPromisified(process.execPath, [ - '--experimental-strip-types', - '--experimental-default-type=module', // Keeps working with commonjs - '--no-warnings', - fixtures.path('typescript/cts/test-require-commonjs.cts'), - ]); - - strictEqual(result.stderr, ''); - match(result.stdout, /Hello, TypeScript!/); - strictEqual(result.code, 0); -}); - test('expect failure of a .cts file in node_modules', async () => { const result = await spawnPromisified(process.execPath, [ '--experimental-strip-types', diff --git a/test/es-module/test-typescript-module.mjs b/test/es-module/test-typescript-module.mjs index 6b6b0e68f251f6..8a99c2e9576add 100644 --- a/test/es-module/test-typescript-module.mjs +++ b/test/es-module/test-typescript-module.mjs @@ -39,19 +39,6 @@ test('execute an .mts file importing a .ts file', async () => { strictEqual(result.code, 0); }); -test('execute an .mts file importing a .ts file with default-type module', async () => { - const result = await spawnPromisified(process.execPath, [ - '--experimental-strip-types', - '--experimental-default-type=module', - '--no-warnings', - fixtures.path('typescript/mts/test-import-ts-file.mts'), - ]); - - strictEqual(result.stderr, ''); - match(result.stdout, /Hello, TypeScript!/); - strictEqual(result.code, 0); -}); - test('execute an .mts file importing a .cts file', async () => { const result = await spawnPromisified(process.execPath, [ '--experimental-strip-types', @@ -64,31 +51,6 @@ test('execute an .mts file importing a .cts file', async () => { strictEqual(result.code, 0); }); -test('execute an .mts file with wrong default module', async () => { - const result = await spawnPromisified(process.execPath, [ - '--experimental-strip-types', - '--no-experimental-require-module', - '--experimental-default-type=commonjs', - fixtures.path('typescript/mts/test-import-module.mts'), - ]); - - strictEqual(result.stdout, ''); - match(result.stderr, /Error \[ERR_REQUIRE_ESM\]: require\(\) of ES Module/); - strictEqual(result.code, 1); -}); - -test('execute an .mts file with wrong default module', async () => { - const result = await spawnPromisified(process.execPath, [ - '--experimental-strip-types', - '--experimental-require-module', - '--experimental-default-type=commonjs', - fixtures.path('typescript/mts/test-import-module.mts'), - ]); - - match(result.stdout, /Hello, TypeScript!/); - strictEqual(result.code, 0); -}); - test('execute an .mts file from node_modules', async () => { const result = await spawnPromisified(process.execPath, [ '--experimental-strip-types', diff --git a/test/es-module/test-typescript.mjs b/test/es-module/test-typescript.mjs index a7ca6d70dd5e10..00cc80e0ce93ff 100644 --- a/test/es-module/test-typescript.mjs +++ b/test/es-module/test-typescript.mjs @@ -65,19 +65,6 @@ test('execute a TypeScript file with imports', async () => { strictEqual(result.code, 0); }); -test('execute a TypeScript 
file with imports with default-type module', async () => { - const result = await spawnPromisified(process.execPath, [ - '--experimental-strip-types', - '--experimental-default-type=module', - '--no-warnings', - fixtures.path('typescript/ts/test-import-foo.ts'), - ]); - - strictEqual(result.stderr, ''); - match(result.stdout, /Hello, TypeScript!/); - strictEqual(result.code, 0); -}); - test('execute a TypeScript file with node_modules', async () => { const result = await spawnPromisified(process.execPath, [ '--experimental-strip-types', @@ -90,19 +77,6 @@ test('execute a TypeScript file with node_modules', async () => { strictEqual(result.code, 0); }); -test('execute a TypeScript file with node_modules with default-type module', async () => { - const result = await spawnPromisified(process.execPath, [ - '--experimental-strip-types', - '--experimental-default-type=module', - '--no-warnings', - fixtures.path('typescript/ts/test-typescript-node-modules.ts'), - ]); - - strictEqual(result.stderr, ''); - match(result.stdout, /Hello, TypeScript!/); - strictEqual(result.code, 0); -}); - test('expect error when executing a TypeScript file with imports with no extensions', async () => { const result = await spawnPromisified(process.execPath, [ '--experimental-strip-types', @@ -114,19 +88,6 @@ test('expect error when executing a TypeScript file with imports with no extensi strictEqual(result.code, 1); }); -test('expect error when executing a TypeScript file with imports with no extensions with default-type module', - async () => { - const result = await spawnPromisified(process.execPath, [ - '--experimental-strip-types', - '--experimental-default-type=module', - fixtures.path('typescript/ts/test-import-no-extension.ts'), - ]); - - match(result.stderr, /Error \[ERR_MODULE_NOT_FOUND\]:/); - strictEqual(result.stdout, ''); - strictEqual(result.code, 1); - }); - test('expect error when executing a TypeScript file with enum', async () => { const result = await spawnPromisified(process.execPath, [ '--experimental-strip-types', @@ -184,18 +145,6 @@ test('execute a TypeScript file with type definition but no type keyword', async strictEqual(result.code, 1); }); -test('execute a TypeScript file with type definition but no type keyword with default-type modue', async () => { - const result = await spawnPromisified(process.execPath, [ - '--experimental-strip-types', - '--experimental-default-type=module', - fixtures.path('typescript/ts/test-import-no-type-keyword.ts'), - ]); - - match(result.stderr, /does not provide an export named 'MyType'/); - strictEqual(result.stdout, ''); - strictEqual(result.code, 1); -}); - test('execute a TypeScript file with CommonJS syntax', async () => { const result = await spawnPromisified(process.execPath, [ '--experimental-strip-types', @@ -219,19 +168,6 @@ test('execute a TypeScript file with ES module syntax', async () => { strictEqual(result.code, 0); }); -test('execute a TypeScript file with ES module syntax with default-type module', async () => { - const result = await spawnPromisified(process.execPath, [ - '--experimental-strip-types', - '--experimental-default-type=module', - '--no-warnings', - fixtures.path('typescript/ts/test-module-typescript.ts'), - ]); - - strictEqual(result.stderr, ''); - match(result.stdout, /Hello, TypeScript!/); - strictEqual(result.code, 0); -}); - test('expect failure of a TypeScript file requiring ES module syntax', async () => { const result = await spawnPromisified(process.execPath, [ '--experimental-strip-types', @@ -266,30 +202,6 @@ 
test('execute CommonJS TypeScript file from node_modules with require-module', a strictEqual(result.code, 1); }); -test('execute CommonJS TypeScript file from node_modules with require-module and default-type module', - async () => { - const result = await spawnPromisified(process.execPath, [ - '--experimental-strip-types', - '--experimental-default-type=module', - fixtures.path('typescript/ts/test-import-ts-node-modules.ts'), - ]); - - match(result.stderr, /ERR_UNSUPPORTED_NODE_MODULES_TYPE_STRIPPING/); - strictEqual(result.stdout, ''); - strictEqual(result.code, 1); - }); - -test('execute a TypeScript file with CommonJS syntax but default type module', async () => { - const result = await spawnPromisified(process.execPath, [ - '--experimental-strip-types', - '--experimental-default-type=module', - fixtures.path('typescript/ts/test-commonjs-parsing.ts'), - ]); - strictEqual(result.stdout, ''); - match(result.stderr, /require is not defined in ES module scope, you can use import instead/); - strictEqual(result.code, 1); -}); - test('execute a TypeScript file with CommonJS syntax requiring .cts', async () => { const result = await spawnPromisified(process.execPath, [ '--experimental-strip-types', @@ -336,11 +248,10 @@ test('execute a TypeScript file with CommonJS syntax requiring .cts using common strictEqual(result.code, 0); }); -test('execute a TypeScript file with CommonJS syntax requiring .mts with require-module with default-type commonjs', +test('execute a TypeScript file with CommonJS syntax requiring .mts with require-module', async () => { const result = await spawnPromisified(process.execPath, [ '--experimental-strip-types', - '--experimental-default-type=commonjs', '--no-warnings', fixtures.path('typescript/ts/test-require-cts.ts'), ]); diff --git a/test/fixtures/dotenv/no-final-newline-single-quotes.env b/test/fixtures/dotenv/no-final-newline-single-quotes.env new file mode 100644 index 00000000000000..4f1b37d7741e29 --- /dev/null +++ b/test/fixtures/dotenv/no-final-newline-single-quotes.env @@ -0,0 +1 @@ +BASIC='basic' \ No newline at end of file diff --git a/test/fixtures/dotenv/no-final-newline.env b/test/fixtures/dotenv/no-final-newline.env new file mode 100644 index 00000000000000..ef996552bd9c90 --- /dev/null +++ b/test/fixtures/dotenv/no-final-newline.env @@ -0,0 +1 @@ +BASIC="basic" \ No newline at end of file diff --git a/test/fixtures/es-modules/loose.js b/test/fixtures/es-modules/loose.js index c0d85f7eab9a2b..251d6e538a1fcf 100644 --- a/test/fixtures/es-modules/loose.js +++ b/test/fixtures/es-modules/loose.js @@ -1,5 +1,2 @@ -// This file can be run or imported only if `--experimental-default-type=module` is set -// or `--experimental-detect-module` is not disabled. If it's loaded by -// require(), then `--experimental-require-module` must not be disabled. export default 'module'; console.log('executed'); diff --git a/test/fixtures/es-modules/package-without-type/noext-esm b/test/fixtures/es-modules/package-without-type/noext-esm index 69147a3b8ca027..251d6e538a1fcf 100644 --- a/test/fixtures/es-modules/package-without-type/noext-esm +++ b/test/fixtures/es-modules/package-without-type/noext-esm @@ -1,3 +1,2 @@ -// This file can be run or imported only if `--experimental-default-type=module` is set. 
export default 'module'; console.log('executed'); diff --git a/test/fixtures/es-modules/reference-error-esm.js b/test/fixtures/es-modules/reference-error-esm.js new file mode 100644 index 00000000000000..baf773c78970ad --- /dev/null +++ b/test/fixtures/es-modules/reference-error-esm.js @@ -0,0 +1,5 @@ +// This module is invalid in both ESM and CJS, because +// 'exports' are not defined in ESM, while require cannot be +// redeclared in CJS. +Object.defineProperty(exports, "__esModule", { value: true }); +const require = () => {}; diff --git a/test/fixtures/es-modules/runtime-error-esm.js b/test/fixtures/es-modules/runtime-error-esm.js new file mode 100644 index 00000000000000..1df3cc469adbfc --- /dev/null +++ b/test/fixtures/es-modules/runtime-error-esm.js @@ -0,0 +1,2 @@ +import 'node:fs'; // Forces it to be recognized as ESM. +throw new Error('hello'); diff --git a/test/fixtures/es-modules/synchronous-rejection-esm.js b/test/fixtures/es-modules/synchronous-rejection-esm.js new file mode 100644 index 00000000000000..34d066037e2140 --- /dev/null +++ b/test/fixtures/es-modules/synchronous-rejection-esm.js @@ -0,0 +1,2 @@ +import 'node:fs'; // Forces it to be recognized as ESM. +Promise.reject('reject!'); diff --git a/test/fixtures/es-modules/test_node_modules/import-import-require-esm.mjs b/test/fixtures/es-modules/test_node_modules/import-import-require-esm.mjs new file mode 100644 index 00000000000000..d470088c38caee --- /dev/null +++ b/test/fixtures/es-modules/test_node_modules/import-import-require-esm.mjs @@ -0,0 +1,2 @@ +import mod from 'import-require-esm'; +console.log(mod.hello); diff --git a/test/fixtures/es-modules/test_node_modules/import-require-esm.mjs b/test/fixtures/es-modules/test_node_modules/import-require-esm.mjs new file mode 100644 index 00000000000000..2ad346de9e841c --- /dev/null +++ b/test/fixtures/es-modules/test_node_modules/import-require-esm.mjs @@ -0,0 +1,2 @@ +import mod from 'require-esm'; +console.log(mod.hello); diff --git a/test/fixtures/es-modules/test_node_modules/node_modules/esm/index.js b/test/fixtures/es-modules/test_node_modules/node_modules/esm/index.js new file mode 100644 index 00000000000000..35f468bf4856d8 --- /dev/null +++ b/test/fixtures/es-modules/test_node_modules/node_modules/esm/index.js @@ -0,0 +1 @@ +export const hello = 'world'; diff --git a/test/fixtures/es-modules/test_node_modules/node_modules/esm/package.json b/test/fixtures/es-modules/test_node_modules/node_modules/esm/package.json new file mode 100644 index 00000000000000..07aec65d5a4f31 --- /dev/null +++ b/test/fixtures/es-modules/test_node_modules/node_modules/esm/package.json @@ -0,0 +1,4 @@ +{ + "type": "module", + "main": "index.js" +} diff --git a/test/fixtures/es-modules/test_node_modules/node_modules/import-require-esm/index.js b/test/fixtures/es-modules/test_node_modules/node_modules/import-require-esm/index.js new file mode 100644 index 00000000000000..918bb5d5597e34 --- /dev/null +++ b/test/fixtures/es-modules/test_node_modules/node_modules/import-require-esm/index.js @@ -0,0 +1,2 @@ +import mod from 'require-esm'; +export default mod; diff --git a/test/fixtures/es-modules/test_node_modules/node_modules/import-require-esm/package.json b/test/fixtures/es-modules/test_node_modules/node_modules/import-require-esm/package.json new file mode 100644 index 00000000000000..07aec65d5a4f31 --- /dev/null +++ b/test/fixtures/es-modules/test_node_modules/node_modules/import-require-esm/package.json @@ -0,0 +1,4 @@ +{ + "type": "module", + "main": "index.js" +} diff --git 
a/test/fixtures/es-modules/test_node_modules/node_modules/require-esm/index.js b/test/fixtures/es-modules/test_node_modules/node_modules/require-esm/index.js new file mode 100644 index 00000000000000..ca6f0c264ad1fc --- /dev/null +++ b/test/fixtures/es-modules/test_node_modules/node_modules/require-esm/index.js @@ -0,0 +1,2 @@ +module.exports = require('esm'); + diff --git a/test/fixtures/es-modules/test_node_modules/require-esm.js b/test/fixtures/es-modules/test_node_modules/require-esm.js new file mode 100644 index 00000000000000..60ad3f7fff60c6 --- /dev/null +++ b/test/fixtures/es-modules/test_node_modules/require-esm.js @@ -0,0 +1,2 @@ +const { hello } = require('esm'); +console.log(hello); diff --git a/test/fixtures/es-modules/test_node_modules/require-require-esm.js b/test/fixtures/es-modules/test_node_modules/require-require-esm.js new file mode 100644 index 00000000000000..9fe255dce258a6 --- /dev/null +++ b/test/fixtures/es-modules/test_node_modules/require-require-esm.js @@ -0,0 +1,2 @@ +const { hello } = require('require-esm'); +console.log(hello); diff --git a/test/fixtures/permission/fs-read.js b/test/fixtures/permission/fs-read.js index 186117a6b768dd..fa4ea1207f50bd 100644 --- a/test/fixtures/permission/fs-read.js +++ b/test/fixtures/permission/fs-read.js @@ -289,6 +289,11 @@ const regularFile = __filename; permission: 'FileSystemRead', resource: path.toNamespacedPath(blockedFolder), })); + fs.readdir(blockedFolder, { recursive: true }, common.expectsError({ + code: 'ERR_ACCESS_DENIED', + permission: 'FileSystemRead', + resource: path.toNamespacedPath(blockedFolder), + })); assert.throws(() => { fs.readdirSync(blockedFolder); }, common.expectsError({ diff --git a/test/fixtures/process-env/define.js b/test/fixtures/process-env/define.js new file mode 100644 index 00000000000000..59d5744157696a --- /dev/null +++ b/test/fixtures/process-env/define.js @@ -0,0 +1,6 @@ +Object.defineProperty(process.env, 'FOO', { + configurable: true, + enumerable: true, + writable: true, + value: 'FOO', +}); diff --git a/test/fixtures/process-env/delete.js b/test/fixtures/process-env/delete.js new file mode 100644 index 00000000000000..19ea5160513f08 --- /dev/null +++ b/test/fixtures/process-env/delete.js @@ -0,0 +1 @@ +delete process.env.FOO; diff --git a/test/fixtures/process-env/enumerate.js b/test/fixtures/process-env/enumerate.js new file mode 100644 index 00000000000000..ea6f3972bf9470 --- /dev/null +++ b/test/fixtures/process-env/enumerate.js @@ -0,0 +1,3 @@ +Object.keys(process.env); + +const env = { ...process.env }; diff --git a/test/fixtures/process-env/get.js b/test/fixtures/process-env/get.js new file mode 100644 index 00000000000000..e0257a022be4eb --- /dev/null +++ b/test/fixtures/process-env/get.js @@ -0,0 +1,2 @@ +const foo = process.env.FOO; +const bar = process.env.BAR; diff --git a/test/fixtures/process-env/query.js b/test/fixtures/process-env/query.js new file mode 100644 index 00000000000000..1e3fc9b79c3d04 --- /dev/null +++ b/test/fixtures/process-env/query.js @@ -0,0 +1,3 @@ +const foo = 'FOO' in process.env; +const bar = Object.hasOwn(process.env, 'BAR'); +const baz = process.env.hasOwnProperty('BAZ'); diff --git a/test/fixtures/process-env/set.js b/test/fixtures/process-env/set.js new file mode 100644 index 00000000000000..a9863742d187e2 --- /dev/null +++ b/test/fixtures/process-env/set.js @@ -0,0 +1 @@ +process.env.FOO = "FOO"; diff --git a/test/fixtures/test-runner/output/abort_hooks.snapshot b/test/fixtures/test-runner/output/abort_hooks.snapshot index 
278b5e5fd36ca5..e318e36d9d56a4 100644 --- a/test/fixtures/test-runner/output/abort_hooks.snapshot +++ b/test/fixtures/test-runner/output/abort_hooks.snapshot @@ -101,7 +101,7 @@ not ok 2 - 2 after describe * * * - async Promise.all (index 0) + * ... # Subtest: test 2 not ok 2 - test 2 @@ -122,7 +122,7 @@ not ok 2 - 2 after describe * * * - async Promise.all (index 0) + * ... 1..2 not ok 3 - 3 beforeEach describe diff --git a/test/fixtures/test-runner/output/global_after_should_fail_the_test.snapshot b/test/fixtures/test-runner/output/global_after_should_fail_the_test.snapshot index 3196f377b3d4bf..ee4d5f71072ba5 100644 --- a/test/fixtures/test-runner/output/global_after_should_fail_the_test.snapshot +++ b/test/fixtures/test-runner/output/global_after_should_fail_the_test.snapshot @@ -21,7 +21,6 @@ not ok 2 - /test/fixtures/test-runner/output/global_after_should_fail_the_test.j * * * - * ... 1..1 # tests 1 diff --git a/test/fixtures/test-runner/output/hooks.snapshot b/test/fixtures/test-runner/output/hooks.snapshot index 6b9d6d26a90e39..be8d1b210c60e4 100644 --- a/test/fixtures/test-runner/output/hooks.snapshot +++ b/test/fixtures/test-runner/output/hooks.snapshot @@ -77,7 +77,6 @@ not ok 3 - before throws * * * - * ... # Subtest: before throws - no subtests not ok 4 - before throws - no subtests @@ -97,7 +96,6 @@ not ok 4 - before throws - no subtests * * * - * ... # Subtest: after throws # Subtest: 1 @@ -129,6 +127,7 @@ not ok 5 - after throws * * * + * ... # Subtest: after throws - no subtests not ok 6 - after throws - no subtests @@ -149,6 +148,7 @@ not ok 6 - after throws - no subtests * * * + * ... # Subtest: beforeEach throws # Subtest: 1 @@ -167,9 +167,9 @@ not ok 6 - after throws - no subtests * * * - async Promise.all (index 0) * * + new Promise () ... # Subtest: 2 not ok 2 - 2 @@ -188,6 +188,8 @@ not ok 6 - after throws - no subtests * * * + * + async Promise.all (index 0) ... 1..2 not ok 7 - beforeEach throws @@ -482,7 +484,6 @@ not ok 15 - t.after throws * * * - * ... # Subtest: t.after throws - no subtests not ok 16 - t.after throws - no subtests @@ -502,7 +503,6 @@ not ok 16 - t.after throws - no subtests * * * - * ... # Subtest: t.beforeEach throws # Subtest: 1 @@ -765,7 +765,6 @@ not ok 24 - run after when before throws * * * - * ... 
# Subtest: test hooks - async # Subtest: 1 diff --git a/test/fixtures/test-runner/output/hooks_spec_reporter.snapshot b/test/fixtures/test-runner/output/hooks_spec_reporter.snapshot index b5c5ab7d1965e5..ea916c2ee754c4 100644 --- a/test/fixtures/test-runner/output/hooks_spec_reporter.snapshot +++ b/test/fixtures/test-runner/output/hooks_spec_reporter.snapshot @@ -26,7 +26,6 @@ * * * - * before throws - no subtests (*ms) Error: before @@ -38,7 +37,6 @@ * * * - * after throws 1 (*ms) @@ -55,6 +53,7 @@ * * * + * after throws - no subtests (*ms) Error: after @@ -67,6 +66,7 @@ * * * + * beforeEach throws 1 (*ms) @@ -78,9 +78,9 @@ * * * - at async Promise.all (index 0) * * + at new Promise () 2 (*ms) Error: beforeEach @@ -92,6 +92,8 @@ * * * + * + at async Promise.all (index 0) beforeEach throws (*ms) afterEach throws @@ -242,7 +244,6 @@ * * * - * t.after throws - no subtests (*ms) Error: after @@ -255,7 +256,6 @@ * * * - * t.beforeEach throws 1 (*ms) @@ -390,7 +390,6 @@ * * * - * test hooks - async 1 (*ms) @@ -430,7 +429,6 @@ * * * - * * before throws - no subtests (*ms) @@ -443,7 +441,6 @@ * * * - * * after throws (*ms) @@ -457,6 +454,7 @@ * * * + * * after throws - no subtests (*ms) @@ -470,6 +468,7 @@ * * * + * * 1 (*ms) @@ -481,9 +480,9 @@ * * * - at async Promise.all (index 0) * * + at new Promise () * 2 (*ms) @@ -496,6 +495,8 @@ * * * + * + at async Promise.all (index 0) * 1 (*ms) @@ -633,7 +634,6 @@ * * * - * * t.after throws - no subtests (*ms) @@ -647,7 +647,6 @@ * * * - * * 1 (*ms) @@ -776,4 +775,3 @@ * * * - * diff --git a/test/fixtures/wpt/README.md b/test/fixtures/wpt/README.md index 3f684bf1b32c5e..406e89b820e9f7 100644 --- a/test/fixtures/wpt/README.md +++ b/test/fixtures/wpt/README.md @@ -28,11 +28,11 @@ Last update: - resource-timing: https://github.com/web-platform-tests/wpt/tree/22d38586d0/resource-timing - resources: https://github.com/web-platform-tests/wpt/tree/1e140d63ec/resources - streams: https://github.com/web-platform-tests/wpt/tree/2bd26e124c/streams -- url: https://github.com/web-platform-tests/wpt/tree/6a39784534/url +- url: https://github.com/web-platform-tests/wpt/tree/67880a4eb8/url - user-timing: https://github.com/web-platform-tests/wpt/tree/5ae85bf826/user-timing - wasm/jsapi: https://github.com/web-platform-tests/wpt/tree/cde25e7e3c/wasm/jsapi - wasm/webapi: https://github.com/web-platform-tests/wpt/tree/fd1b23eeaa/wasm/webapi -- WebCryptoAPI: https://github.com/web-platform-tests/wpt/tree/b81831169b/WebCryptoAPI +- WebCryptoAPI: https://github.com/web-platform-tests/wpt/tree/3e3374efde/WebCryptoAPI - webidl/ecmascript-binding/es-exceptions: https://github.com/web-platform-tests/wpt/tree/a370aad338/webidl/ecmascript-binding/es-exceptions - webmessaging/broadcastchannel: https://github.com/web-platform-tests/wpt/tree/6495c91853/webmessaging/broadcastchannel - webstorage: https://github.com/web-platform-tests/wpt/tree/9dafa89214/webstorage diff --git a/test/fixtures/wpt/WebCryptoAPI/crypto_key_cached_slots.https.any.js b/test/fixtures/wpt/WebCryptoAPI/crypto_key_cached_slots.https.any.js new file mode 100644 index 00000000000000..f573fac1c96606 --- /dev/null +++ b/test/fixtures/wpt/WebCryptoAPI/crypto_key_cached_slots.https.any.js @@ -0,0 +1,44 @@ +// META: title=WebCryptoAPI: CryptoKey cached ECMAScript objects + +// https://w3c.github.io/webcrypto/#dom-cryptokey-algorithm +// https://github.com/servo/servo/issues/33908 + +promise_test(function() { + return self.crypto.subtle.generateKey( + { + name: "AES-CTR", + length: 256, + }, + true, + ["encrypt"], + 
).then( + function(key) { + let a = key.algorithm; + let b = key.algorithm; + assert_true(a === b); + }, + function(err) { + assert_unreached("generateKey threw an unexpected error: " + err.toString()); + } + ); +}, "CryptoKey.algorithm getter returns cached object"); + +promise_test(function() { + return self.crypto.subtle.generateKey( + { + name: "AES-CTR", + length: 256, + }, + true, + ["encrypt"], + ).then( + function(key) { + let a = key.usages; + let b = key.usages; + assert_true(a === b); + }, + function(err) { + assert_unreached("generateKey threw an unexpected error: " + err.toString()); + } + ); +}, "CryptoKey.usages getter returns cached object"); diff --git a/test/fixtures/wpt/WebCryptoAPI/cryptokey_algorithm_returns_cached_object.https.any.js b/test/fixtures/wpt/WebCryptoAPI/cryptokey_algorithm_returns_cached_object.https.any.js deleted file mode 100644 index b2d73fbab78d64..00000000000000 --- a/test/fixtures/wpt/WebCryptoAPI/cryptokey_algorithm_returns_cached_object.https.any.js +++ /dev/null @@ -1,24 +0,0 @@ -// META: title=WebCryptoAPI: CryptoKey.algorithm getter returns cached object - -// https://w3c.github.io/webcrypto/#dom-cryptokey-algorithm -// https://github.com/servo/servo/issues/33908 - -promise_test(function() { - return self.crypto.subtle.generateKey( - { - name: "AES-CTR", - length: 256, - }, - true, - ["encrypt"], - ).then( - function(key) { - let a = key.algorithm; - let b = key.algorithm; - assert_true(a === b); - }, - function(err) { - assert_unreached("generateKey threw an unexpected error: " + err.toString()); - } - ); -}, "CryptoKey.algorithm getter returns cached object"); \ No newline at end of file diff --git a/test/fixtures/wpt/WebCryptoAPI/digest/digest.https.any.js b/test/fixtures/wpt/WebCryptoAPI/digest/digest.https.any.js index 379d9311f30247..3b0972b1f2bf7d 100644 --- a/test/fixtures/wpt/WebCryptoAPI/digest/digest.https.any.js +++ b/test/fixtures/wpt/WebCryptoAPI/digest/digest.https.any.js @@ -118,6 +118,20 @@ }); }); + // Call digest() with empty algorithm object + Object.keys(sourceData).forEach(function(size) { + promise_test(function(test) { + var promise = subtle.digest({}, sourceData[size]) + .then(function(result) { + assert_unreached("digest() with missing algorithm name should have thrown a TypeError"); + }, function(err) { + assert_equals(err.name, "TypeError", "Missing algorithm name should cause TypeError") + }); + + return promise; + }, "empty algorithm object with " + size); + }); + done(); diff --git a/test/fixtures/wpt/WebCryptoAPI/generateKey/failures.js b/test/fixtures/wpt/WebCryptoAPI/generateKey/failures.js index e0f0279a69bb88..deaac636a99be5 100644 --- a/test/fixtures/wpt/WebCryptoAPI/generateKey/failures.js +++ b/test/fixtures/wpt/WebCryptoAPI/generateKey/failures.js @@ -166,6 +166,14 @@ function run_test(algorithmNames) { }); }); + // Empty algorithm should fail with TypeError + allValidUsages(["decrypt", "sign", "deriveBits"], true, []) // Small search space, shouldn't matter because should fail before used + .forEach(function(usages) { + [false, true, "RED", 7].forEach(function(extractable){ + testError({}, extractable, usages, "TypeError", "Empty algorithm"); + }); + }); + // Algorithms normalize okay, but usages bad (though not empty). // It shouldn't matter what other extractable is. 
Should fail
diff --git a/test/fixtures/wpt/WebCryptoAPI/import_export/ec_importKey_failures_fixtures.js b/test/fixtures/wpt/WebCryptoAPI/import_export/ec_importKey_failures_fixtures.js
index 796db364c2e2dc..a2d25e816cbd73 100644
--- a/test/fixtures/wpt/WebCryptoAPI/import_export/ec_importKey_failures_fixtures.js
+++ b/test/fixtures/wpt/WebCryptoAPI/import_export/ec_importKey_failures_fixtures.js
@@ -19,6 +19,14 @@ function getMismatchedJWKKeyData(algorithm) {
     return [];
 }
 
+function getMismatchedKtyField(algorithm) {
+    return mismatchedKtyField[algorithm.name];
+}
+
+function getMismatchedCrvField(algorithm) {
+    return mismatchedCrvField[algorithm.name];
+}
+
 var validKeyData = {
     "P-521": [
         {
@@ -201,3 +209,17 @@ var missingJWKFieldKeyData = {
         }
     ]
 };
+
+// The 'kty' field doesn't match the key algorithm.
+var mismatchedKtyField = {
+    "P-521": "OKP",
+    "P-256": "OKP",
+    "P-384": "OKP",
+}
+
+// The 'crv' field doesn't match the key algorithm.
+var mismatchedCrvField = {
+    "P-521": "P-256",
+    "P-256": "P-384",
+    "P-384": "P-521",
+}
diff --git a/test/fixtures/wpt/WebCryptoAPI/import_export/importKey_failures.js b/test/fixtures/wpt/WebCryptoAPI/import_export/importKey_failures.js
index bba48401e616a5..453461a8771f51 100644
--- a/test/fixtures/wpt/WebCryptoAPI/import_export/importKey_failures.js
+++ b/test/fixtures/wpt/WebCryptoAPI/import_export/importKey_failures.js
@@ -192,4 +192,79 @@ function run_test(algorithmNames) {
             });
         });
     });
+
+    // Missing mandatory "name" field on algorithm
+    testVectors.forEach(function(vector) {
+        var name = vector.name;
+        // We just need *some* valid keydata, so pick the first available algorithm.
+        var algorithm = allAlgorithmSpecifiersFor(name)[0];
+        getValidKeyData(algorithm).forEach(function(test) {
+            validUsages(vector, test.format, test.data).forEach(function(usages) {
+                [true, false].forEach(function(extractable) {
+                    testError(test.format, {}, test.data, name, usages, extractable, "TypeError", "Missing algorithm name");
+                });
+            });
+        });
+    });
+
+    // The 'kty' field is not correct.
+    testVectors.forEach(function(vector) {
+        var name = vector.name;
+        allAlgorithmSpecifiersFor(name).forEach(function(algorithm) {
+            getValidKeyData(algorithm).forEach(function(test) {
+                if (test.format === "jwk") {
+                    var data = {crv: test.data.crv, kty: test.data.kty, d: test.data.d, x: test.data.x, d: test.data.d};
+                    data.kty = getMismatchedKtyField(algorithm);
+                    var usages = validUsages(vector, 'jwk', test.data);
+                    testError('jwk', algorithm, data, name, usages, true, "DataError", "Invalid 'kty' field");
+                }
+            });
+        });
+    });
+
+    // The 'ext' field is not correct.
+    testVectors.forEach(function(vector) {
+        var name = vector.name;
+        allAlgorithmSpecifiersFor(name).forEach(function(algorithm) {
+            getValidKeyData(algorithm).forEach(function(test) {
+                if (test.format === "jwk") {
+                    var data = {crv: test.data.crv, kty: test.data.kty, d: test.data.d, x: test.data.x, d: test.data.d};
+                    data.ext = false;
+                    var usages = validUsages(vector, 'jwk', test.data);
+                    testError('jwk', algorithm, data, name, usages, true, "DataError", "Import from a non-extractable");
+                }
+            });
+        });
+    });
+
+    // The 'use' field is incorrect.
+ testVectors.forEach(function(vector) { + var name = vector.name; + allAlgorithmSpecifiersFor(name).forEach(function(algorithm) { + getValidKeyData(algorithm).forEach(function(test) { + if (test.format === "jwk") { + var data = {crv: test.data.crv, kty: test.data.kty, d: test.data.d, x: test.data.x}; + data.use = "invalid"; + var usages = validUsages(vector, 'jwk', test.data); + if (usages.length !== 0) + testError('jwk', algorithm, data, name, usages, true, "DataError", "Invalid 'use' field"); + } + }); + }); + }); + + // The 'crv' field is incorrect. + testVectors.forEach(function(vector) { + var name = vector.name; + allAlgorithmSpecifiersFor(name).forEach(function(algorithm) { + getValidKeyData(algorithm).forEach(function(test) { + if (test.format === "jwk") { + var data = {crv: test.data.crv, kty: test.data.kty, d: test.data.d, x: test.data.x}; + data.crv = getMismatchedCrvField(algorithm); + var usages = validUsages(vector, 'jwk', test.data); + testError('jwk', algorithm, data, name, usages, true, "DataError", "Invalid 'crv' field"); + } + }); + }); + }); } diff --git a/test/fixtures/wpt/WebCryptoAPI/import_export/okp_importKey_failures_fixtures.js b/test/fixtures/wpt/WebCryptoAPI/import_export/okp_importKey_failures_fixtures.js index 88f552291cdf6b..9bedddc5c5944a 100644 --- a/test/fixtures/wpt/WebCryptoAPI/import_export/okp_importKey_failures_fixtures.js +++ b/test/fixtures/wpt/WebCryptoAPI/import_export/okp_importKey_failures_fixtures.js @@ -17,6 +17,14 @@ function getMismatchedJWKKeyData(algorithm) { return mismatchedJWKKeyData[algorithm.name]; } +function getMismatchedKtyField(algorithm) { + return mismatchedKtyField[algorithm.name]; +} + +function getMismatchedCrvField(algorithm) { + return mismatchedCrvField[algorithm.name]; +} + var validKeyData = { "Ed25519": [ { @@ -412,3 +420,19 @@ var mismatchedJWKKeyData = { }, ], } + +// The 'kty' field doesn't match the key algorithm. +var mismatchedKtyField = { + "Ed25519": "EC", + "X25519": "EC", + "Ed448": "EC", + "X448": "EC", +} + +// The 'crv' field doesn't match the key algorithm. +var mismatchedCrvField = { + "Ed25519": "X25519", + "X25519": "Ed448", + "Ed448": "X25519", + "X448": "Ed25519", +} diff --git a/test/fixtures/wpt/WebCryptoAPI/wrapKey_unwrapKey/wrapKey_unwrapKey.https.any.js b/test/fixtures/wpt/WebCryptoAPI/wrapKey_unwrapKey/wrapKey_unwrapKey.https.any.js index edb67d9e30fdba..9dd837b3bf60a9 100644 --- a/test/fixtures/wpt/WebCryptoAPI/wrapKey_unwrapKey/wrapKey_unwrapKey.https.any.js +++ b/test/fixtures/wpt/WebCryptoAPI/wrapKey_unwrapKey/wrapKey_unwrapKey.https.any.js @@ -1,238 +1,256 @@ // META: title=WebCryptoAPI: wrapKey() and unwrapKey() // META: timeout=long // META: script=../util/helpers.js +// META: script=wrapKey_unwrapKey_vectors.js // Tests for wrapKey and unwrapKey round tripping var subtle = self.crypto.subtle; - var wrappers = []; // Things we wrap (and upwrap) keys with - var keys = []; // Things to wrap and unwrap - - // Generate all the keys needed, then iterate over all combinations + var wrappers = {}; // Things we wrap (and unwrap) keys with + var keys = {}; // Things to wrap and unwrap + + // There are five algorithms that can be used for wrapKey/unwrapKey. + // Generate one key with typical parameters for each kind. + // + // Note: we don't need cryptographically strong parameters for things + // like IV - just any legal value will do.
+ var wrappingKeysParameters = [ + { + name: "RSA-OAEP", + importParameters: {name: "RSA-OAEP", hash: "SHA-256"}, + wrapParameters: {name: "RSA-OAEP", label: new Uint8Array(8)} + }, + { + name: "AES-CTR", + importParameters: {name: "AES-CTR", length: 128}, + wrapParameters: {name: "AES-CTR", counter: new Uint8Array(16), length: 64} + }, + { + name: "AES-CBC", + importParameters: {name: "AES-CBC", length: 128}, + wrapParameters: {name: "AES-CBC", iv: new Uint8Array(16)} + }, + { + name: "AES-GCM", + importParameters: {name: "AES-GCM", length: 128}, + wrapParameters: {name: "AES-GCM", iv: new Uint8Array(16), additionalData: new Uint8Array(16), tagLength: 128} + }, + { + name: "AES-KW", + importParameters: {name: "AES-KW", length: 128}, + wrapParameters: {name: "AES-KW"} + } + ]; + + var keysToWrapParameters = [ + {algorithm: {name: "RSASSA-PKCS1-v1_5", hash: "SHA-256"}, privateUsages: ["sign"], publicUsages: ["verify"]}, + {algorithm: {name: "RSA-PSS", hash: "SHA-256"}, privateUsages: ["sign"], publicUsages: ["verify"]}, + {algorithm: {name: "RSA-OAEP", hash: "SHA-256"}, privateUsages: ["decrypt"], publicUsages: ["encrypt"]}, + {algorithm: {name: "ECDSA", namedCurve: "P-256"}, privateUsages: ["sign"], publicUsages: ["verify"]}, + {algorithm: {name: "ECDH", namedCurve: "P-256"}, privateUsages: ["deriveBits"], publicUsages: []}, + {algorithm: {name: "Ed25519" }, privateUsages: ["sign"], publicUsages: ["verify"]}, + {algorithm: {name: "Ed448" }, privateUsages: ["sign"], publicUsages: ["verify"]}, + {algorithm: {name: "X25519" }, privateUsages: ["deriveBits"], publicUsages: []}, + {algorithm: {name: "X448" }, privateUsages: ["deriveBits"], publicUsages: []}, + {algorithm: {name: "AES-CTR", length: 128}, usages: ["encrypt", "decrypt"]}, + {algorithm: {name: "AES-CBC", length: 128}, usages: ["encrypt", "decrypt"]}, + {algorithm: {name: "AES-GCM", length: 128}, usages: ["encrypt", "decrypt"]}, + {algorithm: {name: "AES-KW", length: 128}, usages: ["wrapKey", "unwrapKey"]}, + {algorithm: {name: "HMAC", length: 128, hash: "SHA-256"}, usages: ["sign", "verify"]} + ]; + + // Import all the keys needed, then iterate over all combinations // to test wrapping and unwrapping. 
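+  // For orientation (a minimal sketch, not part of the test vectors): every
+  // combination exercised below reduces to one wrap/unwrap round trip like the
+  // helper here, assuming `kwKey` is an imported AES-KW wrapping key and
+  // `hmacKey` is an extractable 128-bit HMAC key (both names illustrative only).
+  async function exampleRoundTrip(kwKey, hmacKey) {
+    // Wrap the raw HMAC key bytes under AES-KW (16 bytes, a multiple of 8)...
+    const wrapped = await subtle.wrapKey("raw", hmacKey, kwKey, {name: "AES-KW"});
+    // ...then recover an equivalent CryptoKey from the wrapped bytes.
+    return subtle.unwrapKey("raw", wrapped, kwKey, {name: "AES-KW"},
+                            {name: "HMAC", length: 128, hash: "SHA-256"}, true, ["sign", "verify"]);
+  }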
promise_test(function() { - return Promise.all([generateWrappingKeys(), generateKeysToWrap()]) + return Promise.all([importWrappingKeys(), importKeysToWrap()]) .then(function(results) { - var promises = []; - wrappers.forEach(function(wrapper) { - keys.forEach(function(key) { - promises.push(testWrapping(wrapper, key)); - }) + wrappingKeysParameters.filter((param) => Object.keys(wrappers).includes(param.name)).forEach(function(wrapperParam) { + var wrapper = wrappers[wrapperParam.name]; + keysToWrapParameters.filter((param) => Object.keys(keys).includes(param.algorithm.name)).forEach(function(toWrapParam) { + var keyData = keys[toWrapParam.algorithm.name]; + ["raw", "spki", "pkcs8"].filter((fmt) => Object.keys(keyData).includes(fmt)).forEach(function(keyDataFormat) { + var toWrap = keyData[keyDataFormat]; + [keyDataFormat, "jwk"].forEach(function(format) { + if (wrappingIsPossible(toWrap.originalExport[format], wrapper.parameters.name)) { + testWrapping(wrapper, toWrap, format); + if (canCompareNonExtractableKeys(toWrap.key)) { + testWrappingNonExtractable(wrapper, toWrap, format); + if (format === "jwk") { + testWrappingNonExtractableAsExtractable(wrapper, toWrap); + } + } + } + }); + }); + }); }); - return Promise.allSettled(promises); + return Promise.resolve("setup done"); + }, function(err) { + return Promise.reject("setup failed: " + err.name + ': "' + err.message + '"'); }); }, "setup"); - function generateWrappingKeys() { - // There are five algorithms that can be used for wrapKey/unwrapKey. - // Generate one key with typical parameters for each kind. - // - // Note: we don't need cryptographically strong parameters for things - // like IV - just any legal value will do. - var parameters = [ - { - name: "RSA-OAEP", - generateParameters: {name: "RSA-OAEP", modulusLength: 4096, publicExponent: new Uint8Array([1,0,1]), hash: "SHA-256"}, - wrapParameters: {name: "RSA-OAEP", label: new Uint8Array(8)} - }, - { - name: "AES-CTR", - generateParameters: {name: "AES-CTR", length: 128}, - wrapParameters: {name: "AES-CTR", counter: new Uint8Array(16), length: 64} - }, - { - name: "AES-CBC", - generateParameters: {name: "AES-CBC", length: 128}, - wrapParameters: {name: "AES-CBC", iv: new Uint8Array(16)} - }, - { - name: "AES-GCM", - generateParameters: {name: "AES-GCM", length: 128}, - wrapParameters: {name: "AES-GCM", iv: new Uint8Array(16), additionalData: new Uint8Array(16), tagLength: 128} - }, - { - name: "AES-KW", - generateParameters: {name: "AES-KW", length: 128}, - wrapParameters: {name: "AES-KW"} + function importWrappingKeys() { + // Using allSettled to skip unsupported test cases. 
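+    // Note: unlike Promise.all(), Promise.allSettled() never rejects; each
+    // element settles to {status: "fulfilled", value} or {status: "rejected",
+    // reason}. A wrapping algorithm the implementation does not support
+    // therefore just leaves its entry out of `wrappers`, and the remaining
+    // combinations still run.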
+ var promises = []; + wrappingKeysParameters.forEach(function(params) { + if (params.name === "RSA-OAEP") { // we have a key pair, not just a key + var algorithm = {name: "RSA-OAEP", hash: "SHA-256"}; + wrappers[params.name] = {wrappingKey: undefined, unwrappingKey: undefined, parameters: params}; + promises.push(subtle.importKey("spki", wrappingKeyData["RSA"].spki, algorithm, true, ["wrapKey"]) + .then(function(key) { + wrappers["RSA-OAEP"].wrappingKey = key; + })); + promises.push(subtle.importKey("pkcs8", wrappingKeyData["RSA"].pkcs8, algorithm, true, ["unwrapKey"]) + .then(function(key) { + wrappers["RSA-OAEP"].unwrappingKey = key; + })); + } else { + var algorithm = {name: params.name}; + promises.push(subtle.importKey("raw", wrappingKeyData["SYMMETRIC"].raw, algorithm, true, ["wrapKey", "unwrapKey"]) + .then(function(key) { + wrappers[params.name] = {wrappingKey: key, unwrappingKey: key, parameters: params}; + })); } - ]; - + }); // Using allSettled to skip unsupported test cases. - return Promise.allSettled(parameters.map(function(params) { - return subtle.generateKey(params.generateParameters, true, ["wrapKey", "unwrapKey"]) - .then(function(key) { - var wrapper; - if (params.name === "RSA-OAEP") { // we have a key pair, not just a key - wrapper = {wrappingKey: key.publicKey, unwrappingKey: key.privateKey, parameters: params}; - } else { - wrapper = {wrappingKey: key, unwrappingKey: key, parameters: params}; - } - wrappers.push(wrapper); - return true; - }) - })); + return Promise.allSettled(promises); } + async function importAndExport(format, keyData, algorithm, keyUsages, keyType) { + var importedKey; + try { + importedKey = await subtle.importKey(format, keyData, algorithm, true, keyUsages); + keys[algorithm.name][format] = { name: algorithm.name + " " + keyType, algorithm: algorithm, usages: keyUsages, key: importedKey, originalExport: {} }; + } catch (err) { + delete keys[algorithm.name][format]; + throw("Error importing " + algorithm.name + " " + keyType + " key in '" + format + "' -" + err.name + ': "' + err.message + '"'); + }; + try { + var exportedKey = await subtle.exportKey(format, importedKey); + keys[algorithm.name][format].originalExport[format] = exportedKey; + } catch (err) { + delete keys[algorithm.name][format]; + throw("Error exporting " + algorithm.name + " '" + format + "' key -" + err.name + ': "' + err.message + '"'); + }; + try { + var jwkExportedKey = await subtle.exportKey("jwk", importedKey); + keys[algorithm.name][format].originalExport["jwk"] = jwkExportedKey; + } catch (err) { + delete keys[algorithm.name][format]; + throw("Error exporting " + algorithm.name + " '" + format + "' key to 'jwk' -" + err.name + ': "' + err.message + '"'); + }; + } - function generateKeysToWrap() { - var parameters = [ - {algorithm: {name: "RSASSA-PKCS1-v1_5", modulusLength: 1024, publicExponent: new Uint8Array([1,0,1]), hash: "SHA-256"}, privateUsages: ["sign"], publicUsages: ["verify"]}, - {algorithm: {name: "RSA-PSS", modulusLength: 1024, publicExponent: new Uint8Array([1,0,1]), hash: "SHA-256"}, privateUsages: ["sign"], publicUsages: ["verify"]}, - {algorithm: {name: "RSA-OAEP", modulusLength: 1024, publicExponent: new Uint8Array([1,0,1]), hash: "SHA-256"}, privateUsages: ["decrypt"], publicUsages: ["encrypt"]}, - {algorithm: {name: "ECDSA", namedCurve: "P-256"}, privateUsages: ["sign"], publicUsages: ["verify"]}, - {algorithm: {name: "ECDH", namedCurve: "P-256"}, privateUsages: ["deriveBits"], publicUsages: []}, - {algorithm: {name: "Ed25519" }, privateUsages: 
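+  // For reference, after importAndExport() succeeds for a symmetric key the
+  // stored record is shaped roughly like this (illustrative values):
+  //
+  //   keys["AES-CBC"]["raw"] = {
+  //     name: "AES-CBC ",                            // keyType is "" for symmetric keys
+  //     algorithm: {name: "AES-CBC", length: 128},
+  //     usages: ["encrypt", "decrypt"],
+  //     key: [object CryptoKey],
+  //     originalExport: {raw: ArrayBuffer, jwk: {...}}
+  //   };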
["sign"], publicUsages: ["verify"]}, - {algorithm: {name: "Ed448" }, privateUsages: ["sign"], publicUsages: ["verify"]}, - {algorithm: {name: "X25519" }, privateUsages: ["deriveBits"], publicUsages: []}, - {algorithm: {name: "X448" }, privateUsages: ["deriveBits"], publicUsages: []}, - {algorithm: {name: "AES-CTR", length: 128}, usages: ["encrypt", "decrypt"]}, - {algorithm: {name: "AES-CBC", length: 128}, usages: ["encrypt", "decrypt"]}, - {algorithm: {name: "AES-GCM", length: 128}, usages: ["encrypt", "decrypt"]}, - {algorithm: {name: "AES-KW", length: 128}, usages: ["wrapKey", "unwrapKey"]}, - {algorithm: {name: "HMAC", length: 128, hash: "SHA-256"}, usages: ["sign", "verify"]} - ]; - - // Using allSettled to skip unsupported test cases. - return Promise.allSettled(parameters.map(function(params) { - var usages; - if ("usages" in params) { - usages = params.usages; + function importKeysToWrap() { + var promises = []; + keysToWrapParameters.forEach(function(params) { + if ("publicUsages" in params) { + keys[params.algorithm.name] = {}; + var keyData = toWrapKeyDataFromAlg(params.algorithm.name); + promises.push(importAndExport("spki", keyData.spki, params.algorithm, params.publicUsages, "public key ")); + promises.push(importAndExport("pkcs8", keyData.pkcs8, params.algorithm, params.privateUsages, "private key ")); } else { - usages = params.publicUsages.concat(params.privateUsages); + keys[params.algorithm.name] = {}; + promises.push(importAndExport("raw", toWrapKeyData["SYMMETRIC"].raw, params.algorithm, params.usages, "")); } - - return subtle.generateKey(params.algorithm, true, usages) - .then(function(result) { - if (result.constructor === CryptoKey) { - keys.push({name: params.algorithm.name, algorithm: params.algorithm, usages: params.usages, key: result}); - } else { - keys.push({name: params.algorithm.name + " public key", algorithm: params.algorithm, usages: params.publicUsages, key: result.publicKey}); - keys.push({name: params.algorithm.name + " private key", algorithm: params.algorithm, usages: params.privateUsages, key: result.privateKey}); - } - return true; - }); - })); + }); + // Using allSettled to skip unsupported test cases. + return Promise.allSettled(promises); } // Can we successfully "round-trip" (wrap, then unwrap, a key)? 
- function testWrapping(wrapper, toWrap) { - var formats; + function testWrapping(wrapper, toWrap, fmt) { + promise_test(async() => { + try { + var wrappedResult = await subtle.wrapKey(fmt, toWrap.key, wrapper.wrappingKey, wrapper.parameters.wrapParameters); + var unwrappedResult = await subtle.unwrapKey(fmt, wrappedResult, wrapper.unwrappingKey, wrapper.parameters.wrapParameters, toWrap.algorithm, true, toWrap.usages); + assert_goodCryptoKey(unwrappedResult, toWrap.algorithm, true, toWrap.usages, toWrap.key.type); + var roundTripExport = await subtle.exportKey(fmt, unwrappedResult); + assert_true(equalExport(toWrap.originalExport[fmt], roundTripExport), "Post-wrap export matches original export"); + } catch (err) { + if (err instanceof AssertionError) { + throw err; + } + assert_unreached("Round trip for extractable key threw an error - " + err.name + ': "' + err.message + '"'); + } + }, "Can wrap and unwrap " + toWrap.name + "keys using " + fmt + " and " + wrapper.parameters.name); + } - if (toWrap.name.includes("private")) { - formats = ["pkcs8", "jwk"]; - } else if (toWrap.name.includes("public")) { - formats = ["spki", "jwk"] - } else { - formats = ["raw", "jwk"] - } + function testWrappingNonExtractable(wrapper, toWrap, fmt) { + promise_test(async() => { + try { + var wrappedResult = await subtle.wrapKey(fmt, toWrap.key, wrapper.wrappingKey, wrapper.parameters.wrapParameters); + var unwrappedResult = await subtle.unwrapKey(fmt, wrappedResult, wrapper.unwrappingKey, wrapper.parameters.wrapParameters, toWrap.algorithm, false, toWrap.usages); + assert_goodCryptoKey(unwrappedResult, toWrap.algorithm, false, toWrap.usages, toWrap.key.type); + var result = await equalKeys(toWrap.key, unwrappedResult); + assert_true(result, "Unwrapped key matches original"); + } catch (err) { + if (err instanceof AssertionError) { + throw err; + } + assert_unreached("Round trip for key unwrapped non-extractable threw an error - " + err.name + ': "' + err.message + '"'); + }; + }, "Can wrap and unwrap " + toWrap.name + "keys as non-extractable using " + fmt + " and " + wrapper.parameters.name); + } - return Promise.all(formats.map(function(fmt) { - var originalExport; - return subtle.exportKey(fmt, toWrap.key).then(function(exportedKey) { - originalExport = exportedKey; - const isPossible = wrappingIsPossible(originalExport, wrapper.parameters.name); - promise_test(function(test) { - if (!isPossible) { - return Promise.resolve().then(() => { - assert_false(false, "Wrapping is not possible"); - }) - } - return subtle.wrapKey(fmt, toWrap.key, wrapper.wrappingKey, wrapper.parameters.wrapParameters) - .then(function(wrappedResult) { - return subtle.unwrapKey(fmt, wrappedResult, wrapper.unwrappingKey, wrapper.parameters.wrapParameters, toWrap.algorithm, true, toWrap.usages); - }).then(function(unwrappedResult) { - assert_goodCryptoKey(unwrappedResult, toWrap.algorithm, true, toWrap.usages, toWrap.key.type); - return subtle.exportKey(fmt, unwrappedResult) - }).then(function(roundTripExport) { - assert_true(equalExport(originalExport, roundTripExport), "Post-wrap export matches original export"); - }, function(err) { - assert_unreached("Round trip for extractable key threw an error - " + err.name + ': "' + err.message + '"'); - }); - }, "Can wrap and unwrap " + toWrap.name + " keys using " + fmt + " and " + wrapper.parameters.name); - - if (canCompareNonExtractableKeys(toWrap.key)) { - promise_test(function(test){ - if (!isPossible) { - return Promise.resolve().then(() => { - assert_false(false, "Wrapping is not 
possible"); - }) - } - return subtle.wrapKey(fmt, toWrap.key, wrapper.wrappingKey, wrapper.parameters.wrapParameters) - .then(function(wrappedResult) { - return subtle.unwrapKey(fmt, wrappedResult, wrapper.unwrappingKey, wrapper.parameters.wrapParameters, toWrap.algorithm, false, toWrap.usages); - }).then(function(unwrappedResult){ - assert_goodCryptoKey(unwrappedResult, toWrap.algorithm, false, toWrap.usages, toWrap.key.type); - return equalKeys(toWrap.key, unwrappedResult); - }).then(function(result){ - assert_true(result, "Unwrapped key matches original"); - }).catch(function(err){ - assert_unreached("Round trip for key unwrapped non-extractable threw an error - " + err.name + ': "' + err.message + '"'); - }); - }, "Can wrap and unwrap " + toWrap.name + " keys as non-extractable using " + fmt + " and " + wrapper.parameters.name); - - if (fmt === "jwk") { - promise_test(function(test){ - if (!isPossible) { - return Promise.resolve().then(() => { - assert_false(false, "Wrapping is not possible"); - }) - } - var wrappedKey; - return wrapAsNonExtractableJwk(toWrap.key,wrapper).then(function(wrappedResult){ - wrappedKey = wrappedResult; - return subtle.unwrapKey("jwk", wrappedKey, wrapper.unwrappingKey, wrapper.parameters.wrapParameters, toWrap.algorithm, false, toWrap.usages); - }).then(function(unwrappedResult){ - assert_false(unwrappedResult.extractable, "Unwrapped key is non-extractable"); - return equalKeys(toWrap.key,unwrappedResult); - }).then(function(result){ - assert_true(result, "Unwrapped key matches original"); - }).catch(function(err){ - assert_unreached("Round trip for non-extractable key threw an error - " + err.name + ': "' + err.message + '"'); - }).then(function(){ - return subtle.unwrapKey("jwk", wrappedKey, wrapper.unwrappingKey, wrapper.parameters.wrapParameters, toWrap.algorithm, true, toWrap.usages); - }).then(function(unwrappedResult){ - assert_unreached("Unwrapping a non-extractable JWK as extractable should fail"); - }).catch(function(err){ - assert_equals(err.name, "DataError", "Unwrapping a non-extractable JWK as extractable fails with DataError"); - }); - }, "Can unwrap " + toWrap.name + " non-extractable keys using jwk and " + wrapper.parameters.name); - } + function testWrappingNonExtractableAsExtractable(wrapper, toWrap) { + promise_test(async() => { + var wrappedKey; + try { + var wrappedResult = await wrapAsNonExtractableJwk(toWrap.key,wrapper); + wrappedKey = wrappedResult; + var unwrappedResult = await subtle.unwrapKey("jwk", wrappedKey, wrapper.unwrappingKey, wrapper.parameters.wrapParameters, toWrap.algorithm, false, toWrap.usages); + assert_false(unwrappedResult.extractable, "Unwrapped key is non-extractable"); + var result = await equalKeys(toWrap.key,unwrappedResult); + assert_true(result, "Unwrapped key matches original"); + } catch (err) { + if (err instanceof AssertionError) { + throw err; } - }); - })); + assert_unreached("Round trip for non-extractable key threw an error - " + err.name + ': "' + err.message + '"'); + }; + try { + var unwrappedResult = await subtle.unwrapKey("jwk", wrappedKey, wrapper.unwrappingKey, wrapper.parameters.wrapParameters, toWrap.algorithm, true, toWrap.usages); + assert_unreached("Unwrapping a non-extractable JWK as extractable should fail"); + } catch (err) { + if (err instanceof AssertionError) { + throw err; + } + assert_equals(err.name, "DataError", "Unwrapping a non-extractable JWK as extractable fails with DataError"); + } + }, "Can unwrap " + toWrap.name + "non-extractable keys using jwk and " + 
wrapper.parameters.name); } // Implement key wrapping by hand to wrap a key as non-extractable JWK - function wrapAsNonExtractableJwk(key, wrapper){ + async function wrapAsNonExtractableJwk(key, wrapper) { var wrappingKey = wrapper.wrappingKey, encryptKey; - return subtle.exportKey("jwk",wrappingKey) - .then(function(jwkWrappingKey){ - // Update the key generation parameters to work as key import parameters - var params = Object.create(wrapper.parameters.generateParameters); - if(params.name === "AES-KW") { - params.name = "AES-CBC"; - jwkWrappingKey.alg = "A"+params.length+"CBC"; - } else if (params.name === "RSA-OAEP") { - params.modulusLength = undefined; - params.publicExponent = undefined; - } - jwkWrappingKey.key_ops = ["encrypt"]; - return subtle.importKey("jwk", jwkWrappingKey, params, true, ["encrypt"]); - }).then(function(importedWrappingKey){ - encryptKey = importedWrappingKey; - return subtle.exportKey("jwk",key); - }).then(function(exportedKey){ - exportedKey.ext = false; - var jwk = JSON.stringify(exportedKey) - if (wrappingKey.algorithm.name === "AES-KW") { - return aeskw(encryptKey, str2ab(jwk.slice(0,-1) + " ".repeat(jwk.length%8 ? 8-jwk.length%8 : 0) + "}")); - } else { - return subtle.encrypt(wrapper.parameters.wrapParameters,encryptKey,str2ab(jwk)); - } - }); + var jwkWrappingKey = await subtle.exportKey("jwk",wrappingKey); + // Update the key generation parameters to work as key import parameters + var params = Object.create(wrapper.parameters.importParameters); + if(params.name === "AES-KW") { + params.name = "AES-CBC"; + jwkWrappingKey.alg = "A"+params.length+"CBC"; + } else if (params.name === "RSA-OAEP") { + params.modulusLength = undefined; + params.publicExponent = undefined; + } + jwkWrappingKey.key_ops = ["encrypt"]; + var importedWrappingKey = await subtle.importKey("jwk", jwkWrappingKey, params, true, ["encrypt"]); + encryptKey = importedWrappingKey; + var exportedKey = await subtle.exportKey("jwk",key); + exportedKey.ext = false; + var jwk = JSON.stringify(exportedKey) + var result; + if (wrappingKey.algorithm.name === "AES-KW") { + result = await aeskw(encryptKey, str2ab(jwk.slice(0,-1) + " ".repeat(jwk.length%8 ? 
8-jwk.length%8 : 0) + "}")); + } else { + result = await subtle.encrypt(wrapper.parameters.wrapParameters,encryptKey,str2ab(jwk)); + } + return result; } @@ -365,9 +383,9 @@ } // Compare two keys by using them (works for non-extractable keys) - function equalKeys(expected, got){ + async function equalKeys(expected, got){ if ( expected.algorithm.name !== got.algorithm.name ) { - return Promise.resolve(false); + return false; } var cryptParams, signParams, wrapParams, deriveParams; @@ -419,75 +437,60 @@ } if (cryptParams) { - return subtle.exportKey("jwk",expected) - .then(function(jwkExpectedKey){ - if (expected.algorithm.name === "RSA-OAEP") { - ["d","p","q","dp","dq","qi","oth"].forEach(function(field){ delete jwkExpectedKey[field]; }); - } - jwkExpectedKey.key_ops = ["encrypt"]; - return subtle.importKey("jwk", jwkExpectedKey, expected.algorithm, true, ["encrypt"]); - }).then(function(expectedEncryptKey){ - return subtle.encrypt(cryptParams, expectedEncryptKey, new Uint8Array(32)); - }).then(function(encryptedData){ - return subtle.decrypt(cryptParams, got, encryptedData); - }).then(function(decryptedData){ - var result = new Uint8Array(decryptedData); - return !result.some(x => x); - }); + var jwkExpectedKey = await subtle.exportKey("jwk", expected); + if (expected.algorithm.name === "RSA-OAEP") { + ["d","p","q","dp","dq","qi","oth"].forEach(function(field){ delete jwkExpectedKey[field]; }); + } + jwkExpectedKey.key_ops = ["encrypt"]; + var expectedEncryptKey = await subtle.importKey("jwk", jwkExpectedKey, expected.algorithm, true, ["encrypt"]); + var encryptedData = await subtle.encrypt(cryptParams, expectedEncryptKey, new Uint8Array(32)); + var decryptedData = await subtle.decrypt(cryptParams, got, encryptedData); + var result = new Uint8Array(decryptedData); + return !result.some(x => x); } else if (signParams) { var verifyKey; - return subtle.exportKey("jwk",expected) - .then(function(jwkExpectedKey){ - if (expected.algorithm.name === "RSA-PSS" || expected.algorithm.name === "RSASSA-PKCS1-v1_5") { - ["d","p","q","dp","dq","qi","oth"].forEach(function(field){ delete jwkExpectedKey[field]; }); - } - if (expected.algorithm.name === "ECDSA" || expected.algorithm.name.startsWith("Ed")) { - delete jwkExpectedKey["d"]; - } - jwkExpectedKey.key_ops = ["verify"]; - return subtle.importKey("jwk", jwkExpectedKey, expected.algorithm, true, ["verify"]); - }).then(function(expectedVerifyKey){ - verifyKey = expectedVerifyKey; - return subtle.sign(signParams, got, new Uint8Array(32)); - }).then(function(signature){ - return subtle.verify(signParams, verifyKey, signature, new Uint8Array(32)); - }); + var jwkExpectedKey = await subtle.exportKey("jwk",expected); + if (expected.algorithm.name === "RSA-PSS" || expected.algorithm.name === "RSASSA-PKCS1-v1_5") { + ["d","p","q","dp","dq","qi","oth"].forEach(function(field){ delete jwkExpectedKey[field]; }); + } + if (expected.algorithm.name === "ECDSA" || expected.algorithm.name.startsWith("Ed")) { + delete jwkExpectedKey["d"]; + } + jwkExpectedKey.key_ops = ["verify"]; + var expectedVerifyKey = await subtle.importKey("jwk", jwkExpectedKey, expected.algorithm, true, ["verify"]); + verifyKey = expectedVerifyKey; + var signature = await subtle.sign(signParams, got, new Uint8Array(32)); + var result = await subtle.verify(signParams, verifyKey, signature, new Uint8Array(32)); + return result; } else if (wrapParams) { var aKeyToWrap, wrappedWithExpected; - return subtle.importKey("raw", new Uint8Array(16), "AES-CBC", true, ["encrypt"]) - .then(function(key){ - 
aKeyToWrap = key; - return subtle.wrapKey("raw", aKeyToWrap, expected, wrapParams); - }).then(function(wrapResult){ - wrappedWithExpected = Array.from((new Uint8Array(wrapResult)).values()); - return subtle.wrapKey("raw", aKeyToWrap, got, wrapParams); - }).then(function(wrapResult){ - var wrappedWithGot = Array.from((new Uint8Array(wrapResult)).values()); - return wrappedWithGot.every((x,i) => x === wrappedWithExpected[i]); - }); + var key = await subtle.importKey("raw",new Uint8Array(16), "AES-CBC", true, ["encrypt"]) + aKeyToWrap = key; + var wrapResult = await subtle.wrapKey("raw", aKeyToWrap, expected, wrapParams); + wrappedWithExpected = Array.from((new Uint8Array(wrapResult)).values()); + wrapResult = await subtle.wrapKey("raw", aKeyToWrap, got, wrapParams); + var wrappedWithGot = Array.from((new Uint8Array(wrapResult)).values()); + return wrappedWithGot.every((x,i) => x === wrappedWithExpected[i]); } else if (deriveParams) { var expectedDerivedBits; - return subtle.generateKey(expected.algorithm, true, ['deriveBits']).then(({ publicKey }) => { - deriveParams.public = publicKey; - return subtle.deriveBits(deriveParams, expected, 128) - }) - .then(function(result){ - expectedDerivedBits = Array.from((new Uint8Array(result)).values()); - return subtle.deriveBits(deriveParams, got, 128); - }).then(function(result){ - var gotDerivedBits = Array.from((new Uint8Array(result)).values()); - return gotDerivedBits.every((x,i) => x === expectedDerivedBits[i]); - }); + var key = await subtle.generateKey(expected.algorithm, true, ['deriveBits']); + deriveParams.public = key.publicKey; + var result = await subtle.deriveBits(deriveParams, expected, 128); + expectedDerivedBits = Array.from((new Uint8Array(result)).values()); + result = await subtle.deriveBits(deriveParams, got, 128); + var gotDerivedBits = Array.from((new Uint8Array(result)).values()); + return gotDerivedBits.every((x,i) => x === expectedDerivedBits[i]); } } // Raw AES encryption - function aes( k, p ) { - return subtle.encrypt({name: "AES-CBC", iv: new Uint8Array(16) }, k, p).then(function(ciphertext){return ciphertext.slice(0,16);}); + async function aes(k, p) { + const ciphertext = await subtle.encrypt({ name: "AES-CBC", iv: new Uint8Array(16) }, k, p); + return ciphertext.slice(0, 16); } // AES Key Wrap - function aeskw(key, data) { + async function aeskw(key, data) { if (data.byteLength % 8 !== 0) { throw new Error("AES Key Wrap data must be a multiple of 8 bytes in length"); } @@ -501,7 +504,7 @@ R.push(new Uint8Array(data.slice(i,i+8))); } - function aeskw_step(j, i, final, B) { + async function aeskw_step(j, i, final, B) { A.set(new Uint8Array(B.slice(0,8))); Av.setUint32(4,Av.getUint32(4) ^ (n*j+i+1)); R[i] = new Uint8Array(B.slice(8,16)); @@ -516,18 +519,16 @@ } } - var p = new Promise(function(resolve){ - A.set(R[0],8); - resolve(aes(key,A)); - }); + A.set(R[0], 8); + let B = await aes(key, A); for(var j=0;j<6;++j) { for(var i=0;i - + diff --git a/test/fixtures/wpt/url/a-element-origin.html b/test/fixtures/wpt/url/a-element-origin.html index 9cc8e94cbed060..7015f853f01a1d 100644 --- a/test/fixtures/wpt/url/a-element-origin.html +++ b/test/fixtures/wpt/url/a-element-origin.html @@ -5,4 +5,8 @@
    - + diff --git a/test/fixtures/wpt/url/a-element-xhtml.xhtml b/test/fixtures/wpt/url/a-element-xhtml.xhtml index 05bec4ce4b2f1e..610481a7819d62 100644 --- a/test/fixtures/wpt/url/a-element-xhtml.xhtml +++ b/test/fixtures/wpt/url/a-element-xhtml.xhtml @@ -17,4 +17,8 @@ - + diff --git a/test/fixtures/wpt/url/a-element.html b/test/fixtures/wpt/url/a-element.html index 3428fa00574c4d..a7621d2ded76c4 100644 --- a/test/fixtures/wpt/url/a-element.html +++ b/test/fixtures/wpt/url/a-element.html @@ -10,7 +10,11 @@
    - + Link with embedded \n is parsed correctly diff --git a/test/fixtures/wpt/url/failure.html b/test/fixtures/wpt/url/failure.html index e61f462f97456f..d95b1d52d67237 100644 --- a/test/fixtures/wpt/url/failure.html +++ b/test/fixtures/wpt/url/failure.html @@ -6,7 +6,10 @@