diff --git a/CHANGELOG.md b/CHANGELOG.md
index d1dd26e106b91a..3c83abdaa7a603 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -27,7 +27,8 @@ release.
-8.11.1
+8.11.2
+8.11.1
 8.11.0
 8.10.0
 8.9.4
diff --git a/COLLABORATOR_GUIDE.md b/COLLABORATOR_GUIDE.md index 584e598c2ff38c..08b506f45683b8 100644 --- a/COLLABORATOR_GUIDE.md +++ b/COLLABORATOR_GUIDE.md @@ -593,20 +593,20 @@ Validate that the commit message is properly formatted using $ git rev-list upstream/master...HEAD | xargs core-validate-commit ``` +Optional: When landing your own commits, force push the amended commit to the +branch you used to open the pull request. If your branch is called `bugfix`, +then the command would be `git push --force-with-lease origin master:bugfix`. +When the pull request is closed, this will cause the pull request to +show the purple merged status rather than the red closed status that is +usually used for pull requests that weren't merged. + Time to push it: ```text $ git push upstream master ``` -* Optional: Force push the amended commit to the branch you used to -open the pull request. If your branch is called `bugfix`, then the -command would be `git push --force-with-lease origin master:bugfix`. -When the pull request is closed, this will cause the pull request to -show the purple merged status rather than the red closed status that is -usually used for pull requests that weren't merged. Only do this when -landing your own contributions. -* Close the pull request with a "Landed in ``" comment. If +Close the pull request with a "Landed in ``" comment. If your pull request shows the purple merged status then you should still add the "Landed in .." comment if you added multiple commits. diff --git a/Makefile b/Makefile index 9ea385e46a362d..2c62bf29cd2334 100644 --- a/Makefile +++ b/Makefile @@ -558,7 +558,7 @@ doc-only: $(apidoc_dirs) $(apiassets) if [ ! -d doc/api/assets ]; then \ $(MAKE) tools/doc/node_modules/js-yaml/package.json; \ fi; - @$(MAKE) -s $(apidocs_html) $(apidocs_json) + @$(MAKE) $(apidocs_html) $(apidocs_json) doc: $(NODE_EXE) doc-only @@ -1008,26 +1008,31 @@ lint-md-clean: lint-md-build: @if [ ! -d tools/remark-cli/node_modules ]; then \ echo "Markdown linter: installing remark-cli into tools/"; \ - cd tools/remark-cli && ../../$(NODE) ../../$(NPM) install; fi + cd tools/remark-cli && $(call available-node,$(run-npm-install)) fi @if [ ! -d tools/remark-preset-lint-node/node_modules ]; then \ echo "Markdown linter: installing remark-preset-lint-node into tools/"; \ - cd tools/remark-preset-lint-node && ../../$(NODE) ../../$(NPM) install; fi + cd tools/remark-preset-lint-node && $(call available-node,$(run-npm-install)) fi + ifneq ("","$(wildcard tools/remark-cli/node_modules/)") -LINT_MD_TARGETS = src lib benchmark tools/doc tools/icu -LINT_MD_ROOT_DOCS := $(wildcard *.md) -LINT_MD_FILES := $(shell find $(LINT_MD_TARGETS) -type f \ - -not -path '*node_modules*' -name '*.md') $(LINT_MD_ROOT_DOCS) -LINT_DOC_MD_FILES = $(shell ls doc/**/*.md) -tools/.docmdlintstamp: $(LINT_DOC_MD_FILES) +LINT_MD_DOC_FILES = $(shell ls doc/**/*.md) +run-lint-doc-md = tools/remark-cli/cli.js -q -f $(LINT_MD_DOC_FILES) +# Lint all changed markdown files under doc/ +tools/.docmdlintstamp: $(LINT_MD_DOC_FILES) @echo "Running Markdown linter on docs..." 
- @$(NODE) tools/remark-cli/cli.js -q -f $(LINT_DOC_MD_FILES) + @$(call available-node,$(run-lint-doc-md)) @touch $@ -tools/.miscmdlintstamp: $(LINT_MD_FILES) +LINT_MD_TARGETS = src lib benchmark tools/doc tools/icu +LINT_MD_ROOT_DOCS := $(wildcard *.md) +LINT_MD_MISC_FILES := $(shell find $(LINT_MD_TARGETS) -type f \ + -not -path '*node_modules*' -name '*.md') $(LINT_MD_ROOT_DOCS) +run-lint-misc-md = tools/remark-cli/cli.js -q -f $(LINT_MD_MISC_FILES) +# Lint other changed markdown files maintained by us +tools/.miscmdlintstamp: $(LINT_MD_MISC_FILES) @echo "Running Markdown linter on misc docs..." - @$(NODE) tools/remark-cli/cli.js -q -f $(LINT_MD_FILES) + @$(call available-node,$(run-lint-misc-md)) @touch $@ tools/.mdlintstamp: tools/.miscmdlintstamp tools/.docmdlintstamp @@ -1040,37 +1045,29 @@ lint-md: endif LINT_JS_TARGETS = benchmark doc lib test tools -LINT_JS_CMD = tools/eslint/bin/eslint.js --cache \ - --rulesdir=tools/eslint-rules --ext=.js,.mjs,.md \ - $(LINT_JS_TARGETS) + +run-lint-js = tools/eslint/bin/eslint.js --cache \ + --rulesdir=tools/eslint-rules --ext=.js,.mjs,.md $(LINT_JS_TARGETS) +run-lint-js-fix = $(run-lint-js) --fix lint-js-fix: - @if [ -x $(NODE) ]; then \ - $(NODE) $(LINT_JS_CMD) --fix; \ - else \ - node $(LINT_JS_CMD) --fix; \ - fi + @$(call available-node,$(run-lint-js-fix)) lint-js: @echo "Running JS linter..." - @if [ -x $(NODE) ]; then \ - $(NODE) $(LINT_JS_CMD); \ - else \ - node $(LINT_JS_CMD); \ - fi + @$(call available-node,$(run-lint-js)) jslint: lint-js @echo "Please use lint-js instead of jslint" +run-lint-js-ci = tools/lint-js.js $(PARALLEL_ARGS) -f tap -o test-eslint.tap \ + $(LINT_JS_TARGETS) + +.PHONY: lint-js-ci +# On the CI the output is emitted in the TAP format. lint-js-ci: @echo "Running JS linter..." - @if [ -x $(NODE) ]; then \ - $(NODE) tools/lint-js.js $(PARALLEL_ARGS) -f tap -o test-eslint.tap \ - $(LINT_JS_TARGETS); \ - else \ - node tools/lint-js.js $(PARALLEL_ARGS) -f tap -o test-eslint.tap \ - $(LINT_JS_TARGETS); \ - fi + @$(call available-node,$(run-lint-js-ci)) jslint-ci: lint-js-ci @echo "Please use lint-js-ci instead of jslint-ci" @@ -1128,6 +1125,7 @@ lint: ## Run JS, C++, MD and doc linters. $(MAKE) lint-cpp || EXIT_STATUS=$$? ; \ $(MAKE) lint-md || EXIT_STATUS=$$? ; \ $(MAKE) lint-addon-docs || EXIT_STATUS=$$? ; \ + $(MAKE) lint-md || EXIT_STATUS=$$? ; \ exit $$EXIT_STATUS CONFLICT_RE=^>>>>>>> [0-9A-Fa-f]+|^<<<<<<< [A-Za-z]+ lint-ci: lint-js-ci lint-cpp lint-md lint-addon-docs diff --git a/README.md b/README.md index 89f9e3998a2827..e537407c8685bd 100644 --- a/README.md +++ b/README.md @@ -7,10 +7,9 @@

-Node.js is a JavaScript runtime built on Chrome's V8 JavaScript engine. Node.js -uses an event-driven, non-blocking I/O model that makes it lightweight and -efficient. The Node.js package ecosystem, [npm][], is the largest ecosystem of -open source libraries in the world. +Node.js is a JavaScript runtime built on Chrome's V8 JavaScript engine. For +more information on using Node.js, see the +[Node.js Website][]. The Node.js project is supported by the [Node.js Foundation](https://nodejs.org/en/foundation/). Contributions, @@ -245,8 +244,8 @@ For more information about the governance of the Node.js project, see **Franziska Hinkelmann** <franziska.hinkelmann@gmail.com> (she/her) * [Fishrock123](https://github.com/Fishrock123) - **Jeremiah Senkpiel** <fishrock123@rocketmail.com> -* [indutny](https://github.com/indutny) - -**Fedor Indutny** <fedor.indutny@gmail.com> +* [gibfahn](https://github.com/gibfahn) - +**Gibson Fahnestock** <gibfahn@gmail.com> (he/him) * [jasnell](https://github.com/jasnell) - **James M Snell** <jasnell@gmail.com> (he/him) * [joyeecheung](https://github.com/joyeecheung) - @@ -255,8 +254,6 @@ For more information about the governance of the Node.js project, see **Matteo Collina** <matteo.collina@gmail.com> (he/him) * [mhdawson](https://github.com/mhdawson) - **Michael Dawson** <michael_dawson@ca.ibm.com> (he/him) -* [mscdex](https://github.com/mscdex) - -**Brian White** <mscdex@mscdex.net> * [MylesBorins](https://github.com/MylesBorins) - **Myles Borins** <myles.borins@gmail.com> (he/him) * [ofrobots](https://github.com/ofrobots) - @@ -278,10 +275,14 @@ For more information about the governance of the Node.js project, see **Ben Noordhuis** <info@bnoordhuis.nl> * [chrisdickinson](https://github.com/chrisdickinson) - **Chris Dickinson** <christopher.s.dickinson@gmail.com> +* [indutny](https://github.com/indutny) - +**Fedor Indutny** <fedor.indutny@gmail.com> * [isaacs](https://github.com/isaacs) - **Isaac Z. 
Schlueter** <i@izs.me> * [joshgav](https://github.com/joshgav) - **Josh Gavant** <josh.gavant@outlook.com> +* [mscdex](https://github.com/mscdex) - +**Brian White** <mscdex@mscdex.net> * [nebrius](https://github.com/nebrius) - **Bryan Hughes** <bryan@nebri.us> * [orangemocha](https://github.com/orangemocha) - @@ -339,6 +340,8 @@ For more information about the governance of the Node.js project, see **Daniel Bevenius** <daniel.bevenius@gmail.com> * [DavidCai1993](https://github.com/DavidCai1993) - **David Cai** <davidcai1993@yahoo.com> (he/him) +* [devsnek](https://github.com/devsnek) - +**Gus Caplan** <me@gus.host> (he/him) * [edsadr](https://github.com/edsadr) - **Adrian Estrada** <edsadr@gmail.com> (he/him) * [eljefedelrodeodeljefe](https://github.com/eljefedelrodeodeljefe) - @@ -423,6 +426,8 @@ For more information about the governance of the Node.js project, see **Mikeal Rogers** <mikeal.rogers@gmail.com> * [misterdjules](https://github.com/misterdjules) - **Julien Gilli** <jgilli@nodejs.org> +* [mmarchini](https://github.com/mmarchini) - +**Matheus Marchini** <matheus@sthima.com> * [mscdex](https://github.com/mscdex) - **Brian White** <mscdex@mscdex.net> * [MylesBorins](https://github.com/MylesBorins) - @@ -590,12 +595,13 @@ Previous releases may also have been signed with one of the following GPG keys: * [Contributing to the project][] * [Working Groups][] +* [Strategic Initiatives][] -[npm]: https://www.npmjs.com [Code of Conduct]: https://github.com/nodejs/admin/blob/master/CODE_OF_CONDUCT.md [Contributing to the project]: CONTRIBUTING.md [Node.js Help]: https://github.com/nodejs/help [Node.js Website]: https://nodejs.org/en/ [Questions tagged 'node.js' on StackOverflow]: https://stackoverflow.com/questions/tagged/node.js [Working Groups]: https://github.com/nodejs/TSC/blob/master/WORKING_GROUPS.md +[Strategic Initiatives]: https://github.com/nodejs/TSC/blob/master/Strategic-Initiatives.md [#node.js channel on chat.freenode.net]: https://webchat.freenode.net?channels=node.js&uio=d4 diff --git a/benchmark/compare.js b/benchmark/compare.js index 6b51a70eb9a41b..e7866b60e36418 100644 --- a/benchmark/compare.js +++ b/benchmark/compare.js @@ -1,6 +1,7 @@ 'use strict'; -const fork = require('child_process').fork; +const { fork } = require('child_process'); +const { inspect } = require('util'); const path = require('path'); const CLI = require('./_cli.js'); const BenchmarkProgress = require('./_benchmark_progress.js'); @@ -76,7 +77,7 @@ if (showProgress) { // Construct configuration string, " A=a, B=b, ..." 
let conf = ''; for (const key of Object.keys(data.conf)) { - conf += ` ${key}=${JSON.stringify(data.conf[key])}`; + conf += ` ${key}=${inspect(data.conf[key])}`; } conf = conf.slice(1); // Escape quotes (") for correct csv formatting diff --git a/benchmark/http/http_server_for_chunky_client.js b/benchmark/http/http_server_for_chunky_client.js index f079544e03d48e..1e5a4583669c0f 100644 --- a/benchmark/http/http_server_for_chunky_client.js +++ b/benchmark/http/http_server_for_chunky_client.js @@ -2,22 +2,15 @@ const assert = require('assert'); const http = require('http'); -const fs = require('fs'); const { fork } = require('child_process'); const common = require('../common.js'); -const { PIPE, tmpDir } = require('../../test/common'); +const { PIPE } = require('../../test/common'); +const tmpdir = require('../../test/common/tmpdir'); process.env.PIPE_NAME = PIPE; -try { - fs.accessSync(tmpDir, fs.F_OK); -} catch (e) { - fs.mkdirSync(tmpDir); -} +tmpdir.refresh(); var server; -try { - fs.unlinkSync(process.env.PIPE_NAME); -} catch (e) { /* ignore */ } server = http.createServer(function(req, res) { const headers = { diff --git a/benchmark/misc/punycode.js b/benchmark/misc/punycode.js index 630aea3195f098..40bcd70302003c 100644 --- a/benchmark/misc/punycode.js +++ b/benchmark/misc/punycode.js @@ -1,11 +1,14 @@ 'use strict'; const common = require('../common.js'); -const icu = process.binding('icu'); +let icu; +try { + icu = process.binding('icu'); +} catch (err) {} const punycode = require('punycode'); const bench = common.createBenchmark(main, { - method: ['punycode', 'icu'], + method: ['punycode'].concat(icu !== undefined ? ['icu'] : []), n: [1024], val: [ 'افغانستا.icom.museum', @@ -69,8 +72,11 @@ function main(conf) { runPunycode(n, val); break; case 'icu': - runICU(n, val); - break; + if (icu !== undefined) { + runICU(n, val); + break; + } + // fallthrough default: throw new Error('Unexpected method'); } diff --git a/benchmark/module/module-loader.js b/benchmark/module/module-loader.js index cca5fc2c229038..a0b8f7b6892633 100644 --- a/benchmark/module/module-loader.js +++ b/benchmark/module/module-loader.js @@ -3,8 +3,8 @@ const fs = require('fs'); const path = require('path'); const common = require('../common.js'); -const { refreshTmpDir, tmpDir } = require('../../test/common'); -const benchmarkDirectory = path.join(tmpDir, 'nodejs-benchmark-module'); +const tmpdir = require('../../test/common/tmpdir'); +const benchmarkDirectory = path.join(tmpdir.path, 'nodejs-benchmark-module'); const bench = common.createBenchmark(main, { thousands: [50], @@ -15,7 +15,7 @@ const bench = common.createBenchmark(main, { function main(conf) { const n = +conf.thousands * 1e3; - refreshTmpDir(); + tmpdir.refresh(); try { fs.mkdirSync(benchmarkDirectory); } catch (e) {} for (var i = 0; i <= n; i++) { @@ -35,7 +35,7 @@ function main(conf) { else measureDir(n, conf.useCache === 'true'); - refreshTmpDir(); + tmpdir.refresh(); } function measureFull(n, useCache) { diff --git a/benchmark/tls/throughput.js b/benchmark/tls/throughput.js index 51feb85cbaccc1..52907a3343fb1f 100644 --- a/benchmark/tls/throughput.js +++ b/benchmark/tls/throughput.js @@ -45,11 +45,11 @@ function main(conf) { }; server = tls.createServer(options, onConnection); - setTimeout(done, dur * 1000); var conn; server.listen(common.PORT, function() { const opt = { port: common.PORT, rejectUnauthorized: false }; conn = tls.connect(opt, function() { + setTimeout(done, dur * 1000); bench.start(); conn.on('drain', write); write(); diff --git 
a/common.gypi b/common.gypi index 99351610eb447b..b89a8afff03a63 100644 --- a/common.gypi +++ b/common.gypi @@ -282,7 +282,7 @@ ], }], [ 'OS in "linux freebsd openbsd solaris aix"', { - 'cflags': [ '-pthread', ], + 'cflags': [ '-pthread' ], 'ldflags': [ '-pthread' ], }], [ 'OS in "linux freebsd openbsd solaris android aix"', { @@ -295,6 +295,7 @@ 'standalone_static_library': 1, }], ['OS=="openbsd"', { + 'cflags': [ '-I/usr/local/include' ], 'ldflags': [ '-Wl,-z,wxneeded' ], }], ], diff --git a/configure b/configure index 5a242a1afeec0e..e6b55c6da06f31 100755 --- a/configure +++ b/configure @@ -61,7 +61,7 @@ parser = optparse.OptionParser() valid_os = ('win', 'mac', 'solaris', 'freebsd', 'openbsd', 'linux', 'android', 'aix') valid_arch = ('arm', 'arm64', 'ia32', 'mips', 'mipsel', 'mips64el', 'ppc', - 'ppc64', 'x32','x64', 'x86', 's390', 's390x') + 'ppc64', 'x32','x64', 'x86', 'x86_64', 's390', 's390x') valid_arm_float_abi = ('soft', 'softfp', 'hard') valid_arm_fpu = ('vfp', 'vfpv3', 'vfpv3-d16', 'neon') valid_mips_arch = ('loongson', 'r1', 'r2', 'r6', 'rx') @@ -861,6 +861,9 @@ def configure_node(o): # the Makefile resets this to x86 afterward if target_arch == 'x86': target_arch = 'ia32' + # x86_64 is common across linuxes, allow it as an alias for x64 + if target_arch == 'x86_64': + target_arch = 'x64' o['variables']['host_arch'] = host_arch o['variables']['target_arch'] = target_arch o['variables']['node_byteorder'] = sys.byteorder @@ -878,7 +881,6 @@ def configure_node(o): configure_mips(o) if flavor == 'aix': - o['variables']['node_core_target_name'] = 'node_base' o['variables']['node_target_type'] = 'static_library' if target_arch in ('x86', 'x64', 'ia32', 'x32'): @@ -988,6 +990,13 @@ def configure_node(o): else: o['variables']['coverage'] = 'false' + if options.shared: + o['variables']['node_target_type'] = 'shared_library' + elif options.enable_static: + o['variables']['node_target_type'] = 'static_library' + else: + o['variables']['node_target_type'] = 'executable' + def configure_library(lib, output): shared_lib = 'shared_' + lib output['variables']['node_' + shared_lib] = b(getattr(options, shared_lib)) @@ -1484,8 +1493,7 @@ config = { 'BUILDTYPE': 'Debug' if options.debug else 'Release', 'USE_XCODE': str(int(options.use_xcode or 0)), 'PYTHON': sys.executable, - 'NODE_TARGET_TYPE': variables['node_target_type'] if options.enable_static \ - else '', + 'NODE_TARGET_TYPE': variables['node_target_type'], } if options.prefix: diff --git a/deps/nghttp2/lib/CMakeLists.txt b/deps/nghttp2/lib/CMakeLists.txt index 7ef37ed85cc628..0846d06789a0f1 100644 --- a/deps/nghttp2/lib/CMakeLists.txt +++ b/deps/nghttp2/lib/CMakeLists.txt @@ -44,6 +44,10 @@ set_target_properties(nghttp2 PROPERTIES VERSION ${LT_VERSION} SOVERSION ${LT_SOVERSION} C_VISIBILITY_PRESET hidden ) +target_include_directories(nghttp2 INTERFACE + "${CMAKE_CURRENT_BINARY_DIR}/includes" + "${CMAKE_CURRENT_SOURCE_DIR}/includes" + ) if(HAVE_CUNIT) # Static library (for unittests because of symbol visibility) diff --git a/deps/nghttp2/lib/includes/config.h b/deps/nghttp2/lib/includes/config.h index 0346e0614fdb8d..242bbcfb62ff7a 100644 --- a/deps/nghttp2/lib/includes/config.h +++ b/deps/nghttp2/lib/includes/config.h @@ -1,8 +1,18 @@ /* Hint to the compiler that a function never returns */ #define NGHTTP2_NORETURN -/* Define to `int' if does not define. */ -#define ssize_t int +/* Edited to match src/node.h. 
*/ +#include + +#ifdef _WIN32 +#if !defined(_SSIZE_T_) && !defined(_SSIZE_T_DEFINED) +typedef intptr_t ssize_t; +# define _SSIZE_T_ +# define _SSIZE_T_DEFINED +#endif +#else // !_WIN32 +# include // size_t, ssize_t +#endif // _WIN32 /* Define to 1 if you have the `std::map::emplace`. */ #define HAVE_STD_MAP_EMPLACE 1 diff --git a/deps/nghttp2/lib/includes/nghttp2/nghttp2.h b/deps/nghttp2/lib/includes/nghttp2/nghttp2.h index 5696a2ef633653..13cda9f29e28f5 100644 --- a/deps/nghttp2/lib/includes/nghttp2/nghttp2.h +++ b/deps/nghttp2/lib/includes/nghttp2/nghttp2.h @@ -387,6 +387,11 @@ typedef enum { * Indicates that a processing was canceled. */ NGHTTP2_ERR_CANCEL = -535, + /** + * When a local endpoint expects to receive SETTINGS frame, it + * receives an other type of frame. + */ + NGHTTP2_ERR_SETTINGS_EXPECTED = -536, /** * The errors < :enum:`NGHTTP2_ERR_FATAL` mean that the library is * under unexpected condition and processing was terminated (e.g., @@ -1987,6 +1992,9 @@ typedef ssize_t (*nghttp2_pack_extension_callback)(nghttp2_session *session, * of length |len|. |len| does not include the sentinel NULL * character. * + * This function is deprecated. The new application should use + * :type:`nghttp2_error_callback2`. + * * The format of error message may change between nghttp2 library * versions. The application should not depend on the particular * format. @@ -2003,6 +2011,33 @@ typedef ssize_t (*nghttp2_pack_extension_callback)(nghttp2_session *session, typedef int (*nghttp2_error_callback)(nghttp2_session *session, const char *msg, size_t len, void *user_data); +/** + * @functypedef + * + * Callback function invoked when library provides the error code, and + * message. This callback is solely for debugging purpose. + * |lib_error_code| is one of error code defined in + * :enum:`nghttp2_error`. The |msg| is typically NULL-terminated + * string of length |len|, and intended for human consumption. |len| + * does not include the sentinel NULL character. + * + * The format of error message may change between nghttp2 library + * versions. The application should not depend on the particular + * format. + * + * Normally, application should return 0 from this callback. If fatal + * error occurred while doing something in this callback, application + * should return :enum:`NGHTTP2_ERR_CALLBACK_FAILURE`. In this case, + * library will return immediately with return value + * :enum:`NGHTTP2_ERR_CALLBACK_FAILURE`. Currently, if nonzero value + * is returned from this callback, they are treated as + * :enum:`NGHTTP2_ERR_CALLBACK_FAILURE`, but application should not + * rely on this details. + */ +typedef int (*nghttp2_error_callback2)(nghttp2_session *session, + int lib_error_code, const char *msg, + size_t len, void *user_data); + struct nghttp2_session_callbacks; /** @@ -2267,10 +2302,30 @@ nghttp2_session_callbacks_set_on_extension_chunk_recv_callback( * * Sets callback function invoked when library tells error message to * the application. + * + * This function is deprecated. The new application should use + * `nghttp2_session_callbacks_set_error_callback2()`. + * + * If both :type:`nghttp2_error_callback` and + * :type:`nghttp2_error_callback2` are set, the latter takes + * precedence. */ NGHTTP2_EXTERN void nghttp2_session_callbacks_set_error_callback( nghttp2_session_callbacks *cbs, nghttp2_error_callback error_callback); +/** + * @function + * + * Sets callback function invoked when library tells error code, and + * message to the application. 
+ * + * If both :type:`nghttp2_error_callback` and + * :type:`nghttp2_error_callback2` are set, the latter takes + * precedence. + */ +NGHTTP2_EXTERN void nghttp2_session_callbacks_set_error_callback2( + nghttp2_session_callbacks *cbs, nghttp2_error_callback2 error_callback2); + /** * @functypedef * @@ -4702,8 +4757,8 @@ nghttp2_hd_deflate_change_table_size(nghttp2_hd_deflater *deflater, * * After this function returns, it is safe to delete the |nva|. * - * This function returns 0 if it succeeds, or one of the following - * negative error codes: + * This function returns the number of bytes written to |buf| if it + * succeeds, or one of the following negative error codes: * * :enum:`NGHTTP2_ERR_NOMEM` * Out of memory. @@ -4734,8 +4789,8 @@ NGHTTP2_EXTERN ssize_t nghttp2_hd_deflate_hd(nghttp2_hd_deflater *deflater, * * After this function returns, it is safe to delete the |nva|. * - * This function returns 0 if it succeeds, or one of the following - * negative error codes: + * This function returns the number of bytes written to |vec| if it + * succeeds, or one of the following negative error codes: * * :enum:`NGHTTP2_ERR_NOMEM` * Out of memory. diff --git a/deps/nghttp2/lib/includes/nghttp2/nghttp2ver.h b/deps/nghttp2/lib/includes/nghttp2/nghttp2ver.h index 38c48bf041f1e8..455706a5868b3a 100644 --- a/deps/nghttp2/lib/includes/nghttp2/nghttp2ver.h +++ b/deps/nghttp2/lib/includes/nghttp2/nghttp2ver.h @@ -29,7 +29,7 @@ * @macro * Version number of the nghttp2 library release */ -#define NGHTTP2_VERSION "1.25.0" +#define NGHTTP2_VERSION "1.29.0" /** * @macro @@ -37,6 +37,6 @@ * release. This is a 24 bit number with 8 bits for major number, 8 bits * for minor and 8 bits for patch. Version 1.2.3 becomes 0x010203. */ -#define NGHTTP2_VERSION_NUM 0x011900 +#define NGHTTP2_VERSION_NUM 0x011d00 #endif /* NGHTTP2VER_H */ diff --git a/deps/nghttp2/lib/nghttp2_buf.h b/deps/nghttp2/lib/nghttp2_buf.h index 06ab1e4c630cc3..9f484a221acb5f 100644 --- a/deps/nghttp2/lib/nghttp2_buf.h +++ b/deps/nghttp2/lib/nghttp2_buf.h @@ -398,7 +398,7 @@ int nghttp2_bufs_advance(nghttp2_bufs *bufs); void nghttp2_bufs_seek_last_present(nghttp2_bufs *bufs); /* - * Returns nonzero if bufs->cur->next is not emtpy. + * Returns nonzero if bufs->cur->next is not empty. 
*/ int nghttp2_bufs_next_present(nghttp2_bufs *bufs); diff --git a/deps/nghttp2/lib/nghttp2_callbacks.c b/deps/nghttp2/lib/nghttp2_callbacks.c index b6cf5957f01b59..3c38214859b17a 100644 --- a/deps/nghttp2/lib/nghttp2_callbacks.c +++ b/deps/nghttp2/lib/nghttp2_callbacks.c @@ -168,3 +168,8 @@ void nghttp2_session_callbacks_set_error_callback( nghttp2_session_callbacks *cbs, nghttp2_error_callback error_callback) { cbs->error_callback = error_callback; } + +void nghttp2_session_callbacks_set_error_callback2( + nghttp2_session_callbacks *cbs, nghttp2_error_callback2 error_callback2) { + cbs->error_callback2 = error_callback2; +} diff --git a/deps/nghttp2/lib/nghttp2_callbacks.h b/deps/nghttp2/lib/nghttp2_callbacks.h index 5967524e0c6493..b607bbb58b8e3d 100644 --- a/deps/nghttp2/lib/nghttp2_callbacks.h +++ b/deps/nghttp2/lib/nghttp2_callbacks.h @@ -119,6 +119,7 @@ struct nghttp2_session_callbacks { nghttp2_unpack_extension_callback unpack_extension_callback; nghttp2_on_extension_chunk_recv_callback on_extension_chunk_recv_callback; nghttp2_error_callback error_callback; + nghttp2_error_callback2 error_callback2; }; #endif /* NGHTTP2_CALLBACKS_H */ diff --git a/deps/nghttp2/lib/nghttp2_frame.h b/deps/nghttp2/lib/nghttp2_frame.h index 891289f61bf5e7..35ca214a4a7a59 100644 --- a/deps/nghttp2/lib/nghttp2_frame.h +++ b/deps/nghttp2/lib/nghttp2_frame.h @@ -70,7 +70,9 @@ #define NGHTTP2_MAX_PADLEN 256 /* Union of extension frame payload */ -typedef union { nghttp2_ext_altsvc altsvc; } nghttp2_ext_frame_payload; +typedef union { + nghttp2_ext_altsvc altsvc; +} nghttp2_ext_frame_payload; void nghttp2_frame_pack_frame_hd(uint8_t *buf, const nghttp2_frame_hd *hd); diff --git a/deps/nghttp2/lib/nghttp2_hd.h b/deps/nghttp2/lib/nghttp2_hd.h index 458edafe4d5847..760bfbc357efdc 100644 --- a/deps/nghttp2/lib/nghttp2_hd.h +++ b/deps/nghttp2/lib/nghttp2_hd.h @@ -211,7 +211,9 @@ typedef struct { #define HD_MAP_SIZE 128 -typedef struct { nghttp2_hd_entry *table[HD_MAP_SIZE]; } nghttp2_hd_map; +typedef struct { + nghttp2_hd_entry *table[HD_MAP_SIZE]; +} nghttp2_hd_map; struct nghttp2_hd_deflater { nghttp2_hd_context ctx; @@ -313,7 +315,7 @@ void nghttp2_hd_deflate_free(nghttp2_hd_deflater *deflater); * * This function expands |bufs| as necessary to store the result. If * buffers is full and the process still requires more space, this - * funtion fails and returns NGHTTP2_ERR_HEADER_COMP. + * function fails and returns NGHTTP2_ERR_HEADER_COMP. * * After this function returns, it is safe to delete the |nva|. * diff --git a/deps/nghttp2/lib/nghttp2_helper.c b/deps/nghttp2/lib/nghttp2_helper.c index b00c9073a92a13..3b282c7301f95b 100644 --- a/deps/nghttp2/lib/nghttp2_helper.c +++ b/deps/nghttp2/lib/nghttp2_helper.c @@ -322,6 +322,9 @@ const char *nghttp2_strerror(int error_code) { return "Internal error"; case NGHTTP2_ERR_CANCEL: return "Cancel"; + case NGHTTP2_ERR_SETTINGS_EXPECTED: + return "When a local endpoint expects to receive SETTINGS frame, it " + "receives an other type of frame"; case NGHTTP2_ERR_NOMEM: return "Out of memory"; case NGHTTP2_ERR_CALLBACK_FAILURE: diff --git a/deps/nghttp2/lib/nghttp2_outbound_item.h b/deps/nghttp2/lib/nghttp2_outbound_item.h index 8bda776bfe2728..89a8a92668dd5c 100644 --- a/deps/nghttp2/lib/nghttp2_outbound_item.h +++ b/deps/nghttp2/lib/nghttp2_outbound_item.h @@ -112,7 +112,7 @@ struct nghttp2_outbound_item { nghttp2_ext_frame_payload ext_frame_payload; nghttp2_aux_data aux_data; /* The priority used in priority comparion. Smaller is served - ealier. 
For PING, SETTINGS and non-DATA frames (excluding + earlier. For PING, SETTINGS and non-DATA frames (excluding response HEADERS frame) have dedicated cycle value defined above. For DATA frame, cycle is computed by taking into account of effective weight and frame payload length previously sent, so diff --git a/deps/nghttp2/lib/nghttp2_pq.h b/deps/nghttp2/lib/nghttp2_pq.h index 1426bef760132c..71cf96a14e0c77 100644 --- a/deps/nghttp2/lib/nghttp2_pq.h +++ b/deps/nghttp2/lib/nghttp2_pq.h @@ -35,7 +35,9 @@ /* Implementation of priority queue */ -typedef struct { size_t index; } nghttp2_pq_entry; +typedef struct { + size_t index; +} nghttp2_pq_entry; typedef struct { /* The pointer to the pointer to the item stored */ @@ -71,7 +73,7 @@ void nghttp2_pq_free(nghttp2_pq *pq); /* * Adds |item| to the priority queue |pq|. * - * This function returns 0 if it succeds, or one of the following + * This function returns 0 if it succeeds, or one of the following * negative error codes: * * NGHTTP2_ERR_NOMEM diff --git a/deps/nghttp2/lib/nghttp2_queue.h b/deps/nghttp2/lib/nghttp2_queue.h index d872b07bde961c..c7eb753ca92182 100644 --- a/deps/nghttp2/lib/nghttp2_queue.h +++ b/deps/nghttp2/lib/nghttp2_queue.h @@ -36,7 +36,9 @@ typedef struct nghttp2_queue_cell { struct nghttp2_queue_cell *next; } nghttp2_queue_cell; -typedef struct { nghttp2_queue_cell *front, *back; } nghttp2_queue; +typedef struct { + nghttp2_queue_cell *front, *back; +} nghttp2_queue; void nghttp2_queue_init(nghttp2_queue *queue); void nghttp2_queue_free(nghttp2_queue *queue); diff --git a/deps/nghttp2/lib/nghttp2_session.c b/deps/nghttp2/lib/nghttp2_session.c index 4bc94cbb1982ad..b14ed77a25c293 100644 --- a/deps/nghttp2/lib/nghttp2_session.c +++ b/deps/nghttp2/lib/nghttp2_session.c @@ -148,14 +148,16 @@ static int check_ext_type_set(const uint8_t *ext_types, uint8_t type) { } static int session_call_error_callback(nghttp2_session *session, - const char *fmt, ...) { + int lib_error_code, const char *fmt, + ...) { size_t bufsize; va_list ap; char *buf; int rv; nghttp2_mem *mem; - if (!session->callbacks.error_callback) { + if (!session->callbacks.error_callback && + !session->callbacks.error_callback2) { return 0; } @@ -189,8 +191,13 @@ static int session_call_error_callback(nghttp2_session *session, return 0; } - rv = session->callbacks.error_callback(session, buf, (size_t)rv, - session->user_data); + if (session->callbacks.error_callback2) { + rv = session->callbacks.error_callback2(session, lib_error_code, buf, + (size_t)rv, session->user_data); + } else { + rv = session->callbacks.error_callback(session, buf, (size_t)rv, + session->user_data); + } nghttp2_mem_free(mem, buf); @@ -541,9 +548,8 @@ static int session_new(nghttp2_session **session_ptr, if (nghttp2_enable_strict_preface) { nghttp2_inbound_frame *iframe = &(*session_ptr)->iframe; - if (server && - ((*session_ptr)->opt_flags & NGHTTP2_OPTMASK_NO_RECV_CLIENT_MAGIC) == - 0) { + if (server && ((*session_ptr)->opt_flags & + NGHTTP2_OPTMASK_NO_RECV_CLIENT_MAGIC) == 0) { iframe->state = NGHTTP2_IB_READ_CLIENT_MAGIC; iframe->payloadleft = NGHTTP2_CLIENT_MAGIC_LEN; } else { @@ -2183,7 +2189,7 @@ static int session_prep_frame(nghttp2_session *session, closed. */ stream = nghttp2_session_get_stream(session, frame->hd.stream_id); - /* predicte should fail if stream is NULL. */ + /* predicate should fail if stream is NULL. 
*/ rv = session_predicate_push_promise_send(session, stream); if (rv != 0) { return rv; @@ -2411,19 +2417,16 @@ static int session_close_stream_on_goaway(nghttp2_session *session, nghttp2_stream *stream, *next_stream; nghttp2_close_stream_on_goaway_arg arg = {session, NULL, last_stream_id, incoming}; - uint32_t error_code; rv = nghttp2_map_each(&session->streams, find_stream_on_goaway_func, &arg); assert(rv == 0); - error_code = - session->server && incoming ? NGHTTP2_REFUSED_STREAM : NGHTTP2_CANCEL; - stream = arg.head; while (stream) { next_stream = stream->closed_next; stream->closed_next = NULL; - rv = nghttp2_session_close_stream(session, stream->stream_id, error_code); + rv = nghttp2_session_close_stream(session, stream->stream_id, + NGHTTP2_REFUSED_STREAM); /* stream may be deleted here */ @@ -3608,7 +3611,7 @@ static int inflate_header_block(nghttp2_session *session, nghttp2_frame *frame, nv.name->base, (int)nv.value->len, nv.value->base); rv2 = session_call_error_callback( - session, + session, NGHTTP2_ERR_HTTP_HEADER, "Ignoring received invalid HTTP header field: frame type: " "%u, stream: %d, name: [%.*s], value: [%.*s]", frame->hd.type, frame->hd.stream_id, (int)nv.name->len, @@ -3626,8 +3629,9 @@ static int inflate_header_block(nghttp2_session *session, nghttp2_frame *frame, nv.name->base, (int)nv.value->len, nv.value->base); rv = session_call_error_callback( - session, "Invalid HTTP header field was received: frame type: " - "%u, stream: %d, name: [%.*s], value: [%.*s]", + session, NGHTTP2_ERR_HTTP_HEADER, + "Invalid HTTP header field was received: frame type: " + "%u, stream: %d, name: [%.*s], value: [%.*s]", frame->hd.type, frame->hd.stream_id, (int)nv.name->len, nv.name->base, (int)nv.value->len, nv.value->base); @@ -3781,7 +3785,7 @@ int nghttp2_session_on_request_headers_received(nghttp2_session *session, session, frame, NGHTTP2_ERR_PROTO, "request HEADERS: stream_id == 0"); } - /* If client recieves idle stream from server, it is invalid + /* If client receives idle stream from server, it is invalid regardless stream ID is even or odd. This is because client is not expected to receive request from server. */ if (!session->server) { @@ -5345,9 +5349,10 @@ ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in, iframe->state = NGHTTP2_IB_IGN_ALL; rv = session_call_error_callback( - session, "Remote peer returned unexpected data while we expected " - "SETTINGS frame. Perhaps, peer does not support HTTP/2 " - "properly."); + session, NGHTTP2_ERR_SETTINGS_EXPECTED, + "Remote peer returned unexpected data while we expected " + "SETTINGS frame. Perhaps, peer does not support HTTP/2 " + "properly."); if (nghttp2_is_fatal(rv)) { return rv; @@ -5588,13 +5593,13 @@ ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in, if (iframe->payloadleft) { nghttp2_settings_entry *min_header_table_size_entry; - /* We allocate iv with addtional one entry, to store the + /* We allocate iv with additional one entry, to store the minimum header table size. 
*/ iframe->max_niv = iframe->frame.hd.length / NGHTTP2_FRAME_SETTINGS_ENTRY_LENGTH + 1; - iframe->iv = nghttp2_mem_malloc( - mem, sizeof(nghttp2_settings_entry) * iframe->max_niv); + iframe->iv = nghttp2_mem_malloc(mem, sizeof(nghttp2_settings_entry) * + iframe->max_niv); if (!iframe->iv) { return NGHTTP2_ERR_NOMEM; diff --git a/deps/nghttp2/lib/nghttp2_session.h b/deps/nghttp2/lib/nghttp2_session.h index 3e1467f6a356d7..c7cb27d77c1e25 100644 --- a/deps/nghttp2/lib/nghttp2_session.h +++ b/deps/nghttp2/lib/nghttp2_session.h @@ -319,7 +319,7 @@ struct nghttp2_session { uint8_t pending_enable_push; /* Nonzero if the session is server side. */ uint8_t server; - /* Flags indicating GOAWAY is sent and/or recieved. The flags are + /* Flags indicating GOAWAY is sent and/or received. The flags are composed by bitwise OR-ing nghttp2_goaway_flag. */ uint8_t goaway_flags; /* This flag is used to reduce excessive queuing of WINDOW_UPDATE to @@ -722,7 +722,7 @@ int nghttp2_session_on_goaway_received(nghttp2_session *session, nghttp2_frame *frame); /* - * Called when WINDOW_UPDATE is recieved, assuming |frame| is properly + * Called when WINDOW_UPDATE is received, assuming |frame| is properly * initialized. * * This function returns 0 if it succeeds, or one of the following @@ -737,7 +737,7 @@ int nghttp2_session_on_window_update_received(nghttp2_session *session, nghttp2_frame *frame); /* - * Called when ALTSVC is recieved, assuming |frame| is properly + * Called when ALTSVC is received, assuming |frame| is properly * initialized. * * This function returns 0 if it succeeds, or one of the following diff --git a/deps/nghttp2/lib/nghttp2_stream.c b/deps/nghttp2/lib/nghttp2_stream.c index 8dee6ef660983c..eccd3174ef7bda 100644 --- a/deps/nghttp2/lib/nghttp2_stream.c +++ b/deps/nghttp2/lib/nghttp2_stream.c @@ -366,8 +366,9 @@ static void check_queued(nghttp2_stream *stream) { } } if (queued == 0) { - fprintf(stderr, "stream(%p)=%d, stream->queued == 1, and " - "!stream_active(), but no descendants is queued\n", + fprintf(stderr, + "stream(%p)=%d, stream->queued == 1, and " + "!stream_active(), but no descendants is queued\n", stream, stream->stream_id); assert(0); } @@ -378,9 +379,10 @@ static void check_queued(nghttp2_stream *stream) { } } else { if (stream_active(stream) || !nghttp2_pq_empty(&stream->obq)) { - fprintf(stderr, "stream(%p) = %d, stream->queued == 0, but " - "stream_active(stream) == %d and " - "nghttp2_pq_size(&stream->obq) = %zu\n", + fprintf(stderr, + "stream(%p) = %d, stream->queued == 0, but " + "stream_active(stream) == %d and " + "nghttp2_pq_size(&stream->obq) = %zu\n", stream, stream->stream_id, stream_active(stream), nghttp2_pq_size(&stream->obq)); assert(0); diff --git a/deps/node-inspect/CHANGELOG.md b/deps/node-inspect/CHANGELOG.md index 41ed928e781ff6..0db3a7842eb15d 100644 --- a/deps/node-inspect/CHANGELOG.md +++ b/deps/node-inspect/CHANGELOG.md @@ -1,3 +1,12 @@ +### 1.11.3 + +* [`93caa0f`](https://github.com/nodejs/node-inspect/commit/93caa0f5267c7ab452b258d3b03329a0bb5ac7f7) **docs:** Add missing oc in protocol +* [`2d87cbe`](https://github.com/nodejs/node-inspect/commit/2d87cbe76aa968dfc1ac69d9571af1be81abd8e0) **fix:** Make --inspect-port=0 work +* [`ebfd02e`](https://github.com/nodejs/node-inspect/commit/ebfd02ece9b642586023f7791da71defeb13d746) **chore:** Bump tap to 10.7 +* [`c07adb1`](https://github.com/nodejs/node-inspect/commit/c07adb17b164c1cf3da8d38659ea9f5d7ff42e9c) **test:** Use useful break location +* 
[`94f0bf9`](https://github.com/nodejs/node-inspect/commit/94f0bf97d24c376baf3ecced2088d81715a73464) **fix:** Fix `takeHeapSnapshot()` truncation bug + + ### 1.11.2 * [`42e0cd1`](https://github.com/nodejs/node-inspect/commit/42e0cd111d89ed09faba1c0ec45089b0b44de011) **fix:** look for generic hint text diff --git a/deps/node-inspect/README.md b/deps/node-inspect/README.md index ecd939b3ea26a8..b52cc188a62f5b 100644 --- a/deps/node-inspect/README.md +++ b/deps/node-inspect/README.md @@ -10,7 +10,7 @@ node has two options: 1. `node --debug `: Start `file` with remote debugging enabled. 2. `node debug `: Start an interactive CLI debugger for ``. -But for the Chrome inspector protol, +But for the Chrome inspector protocol, there's only one: `node --inspect `. This project tries to provide the missing second option diff --git a/deps/node-inspect/lib/_inspect.js b/deps/node-inspect/lib/_inspect.js index 26912274cdaec4..d846efbe6a4a52 100644 --- a/deps/node-inspect/lib/_inspect.js +++ b/deps/node-inspect/lib/_inspect.js @@ -42,18 +42,9 @@ const [ InspectClient, createRepl ] = const debuglog = util.debuglog('inspect'); -const DEBUG_PORT_PATTERN = /^--(?:debug|inspect)(?:-port|-brk)?=(\d{1,5})$/; -function getDefaultPort() { - for (const arg of process.execArgv) { - const match = arg.match(DEBUG_PORT_PATTERN); - if (match) { - return +match[1]; - } - } - return 9229; -} - function portIsFree(host, port, timeout = 2000) { + if (port === 0) return Promise.resolve(); // Binding to a random port. + const retryDelay = 150; let didTimeOut = false; @@ -110,9 +101,11 @@ function runScript(script, scriptArgs, inspectHost, inspectPort, childPrint) { let output = ''; function waitForListenHint(text) { output += text; - if (/Debugger listening on/.test(output)) { + if (/Debugger listening on ws:\/\/\[?(.+?)\]?:(\d+)\//.test(output)) { + const host = RegExp.$1; + const port = Number.parseInt(RegExp.$2); child.stderr.removeListener('data', waitForListenHint); - resolve(child); + resolve([child, port, host]); } } @@ -160,10 +153,11 @@ class NodeInspector { options.port, this.childPrint.bind(this)); } else { - this._runScript = () => Promise.resolve(null); + this._runScript = + () => Promise.resolve([null, options.port, options.host]); } - this.client = new InspectClient(options.port, options.host); + this.client = new InspectClient(); this.domainNames = ['Debugger', 'HeapProfiler', 'Profiler', 'Runtime']; this.domainNames.forEach((domain) => { @@ -223,9 +217,8 @@ class NodeInspector { run() { this.killChild(); - const { host, port } = this.options; - return this._runScript().then((child) => { + return this._runScript().then(([child, port, host]) => { this.child = child; let connectionAttempts = 0; @@ -233,7 +226,7 @@ class NodeInspector { ++connectionAttempts; debuglog('connection attempt #%d', connectionAttempts); this.stdout.write('.'); - return this.client.connect() + return this.client.connect(port, host) .then(() => { debuglog('connection established'); this.stdout.write(' ok'); @@ -288,7 +281,7 @@ class NodeInspector { function parseArgv([target, ...args]) { let host = '127.0.0.1'; - let port = getDefaultPort(); + let port = 9229; let isRemote = false; let script = target; let scriptArgs = args; diff --git a/deps/node-inspect/lib/internal/inspect_client.js b/deps/node-inspect/lib/internal/inspect_client.js index c247e2add87706..9b8529de21aae2 100644 --- a/deps/node-inspect/lib/internal/inspect_client.js +++ b/deps/node-inspect/lib/internal/inspect_client.js @@ -164,12 +164,12 @@ function 
decodeFrameHybi17(data) { } class Client extends EventEmitter { - constructor(port, host) { + constructor() { super(); this.handleChunk = this._handleChunk.bind(this); - this._port = port; - this._host = host; + this._port = undefined; + this._host = undefined; this.reset(); } @@ -284,7 +284,9 @@ class Client extends EventEmitter { }); } - connect() { + connect(port, host) { + this._port = port; + this._host = host; return this._discoverWebsocketPath() .then((urlPath) => this._connectWebsocket(urlPath)); } diff --git a/deps/node-inspect/lib/internal/inspect_repl.js b/deps/node-inspect/lib/internal/inspect_repl.js index 937c1843d3a3ee..38fe4684cf6d71 100644 --- a/deps/node-inspect/lib/internal/inspect_repl.js +++ b/deps/node-inspect/lib/internal/inspect_repl.js @@ -900,10 +900,8 @@ function createRepl(inspector) { return new Promise((resolve, reject) => { const absoluteFile = Path.resolve(filename); const writer = FS.createWriteStream(absoluteFile); - let totalSize; let sizeWritten = 0; function onProgress({ done, total, finished }) { - totalSize = total; if (finished) { print('Heap snaphost prepared.'); } else { @@ -913,13 +911,18 @@ function createRepl(inspector) { function onChunk({ chunk }) { sizeWritten += chunk.length; writer.write(chunk); - print(`Writing snapshot: ${sizeWritten}/${totalSize}`, true); - if (sizeWritten >= totalSize) { - writer.end(); + print(`Writing snapshot: ${sizeWritten}`, true); + } + function onResolve() { + writer.end(() => { teardown(); print(`Wrote snapshot: ${absoluteFile}`); resolve(); - } + }); + } + function onReject(error) { + teardown(); + reject(error); } function teardown() { HeapProfiler.removeListener( @@ -932,10 +935,7 @@ function createRepl(inspector) { print('Heap snapshot: 0/0', true); HeapProfiler.takeHeapSnapshot({ reportProgress: true }) - .then(null, (error) => { - teardown(); - reject(error); - }); + .then(onResolve, onReject); }); }, diff --git a/deps/node-inspect/package.json b/deps/node-inspect/package.json index 070abfa8fe51be..d25376b5d4bb96 100644 --- a/deps/node-inspect/package.json +++ b/deps/node-inspect/package.json @@ -1,6 +1,6 @@ { "name": "node-inspect", - "version": "1.11.2", + "version": "1.11.3", "description": "Node Inspect", "license": "MIT", "main": "lib/_inspect.js", @@ -29,7 +29,7 @@ "devDependencies": { "eslint": "^3.10.2", "nlm": "^3.0.0", - "tap": "^7.1.2" + "tap": "^10.7.0" }, "author": { "name": "Jan Krems", diff --git a/deps/node-inspect/test/cli/break.test.js b/deps/node-inspect/test/cli/break.test.js index 59b12cde388c01..ce8c8d6d7d99bd 100644 --- a/deps/node-inspect/test/cli/break.test.js +++ b/deps/node-inspect/test/cli/break.test.js @@ -134,7 +134,7 @@ test('sb before loading file', (t) => { return cli.waitForInitialBreak() .then(() => cli.waitForPrompt()) - .then(() => cli.command('sb("other.js", 3)')) + .then(() => cli.command('sb("other.js", 2)')) .then(() => { t.match( cli.output, @@ -145,7 +145,7 @@ test('sb before loading file', (t) => { .then(() => { t.match( cli.output, - `break in ${otherScript}:3`, + `break in ${otherScript}:2`, 'found breakpoint in file that was not loaded yet'); }) .then(() => cli.quit()) diff --git a/deps/node-inspect/test/cli/heap-profiler.test.js b/deps/node-inspect/test/cli/heap-profiler.test.js new file mode 100644 index 00000000000000..ebd734e03cb06d --- /dev/null +++ b/deps/node-inspect/test/cli/heap-profiler.test.js @@ -0,0 +1,34 @@ +'use strict'; +const { test } = require('tap'); +const { readFileSync, unlinkSync } = require('fs'); + +const startCLI = 
require('./start-cli'); +const filename = 'node.heapsnapshot'; + +function cleanup() { + try { + unlinkSync(filename); + } catch (_) { + // Ignore. + } +} + +cleanup(); + +test('Heap profiler take snapshot', (t) => { + const cli = startCLI(['examples/empty.js']); + + function onFatal(error) { + cli.quit(); + throw error; + } + + // Check that the snapshot is valid JSON. + return cli.waitForInitialBreak() + .then(() => cli.waitForPrompt()) + .then(() => cli.command('takeHeapSnapshot()')) + .then(() => JSON.parse(readFileSync(filename, 'utf8'))) + .then(() => cleanup()) + .then(() => cli.quit()) + .then(null, onFatal); +}); diff --git a/deps/node-inspect/test/cli/launch.test.js b/deps/node-inspect/test/cli/launch.test.js index f7efc6eb3f2139..8808d47a08b900 100644 --- a/deps/node-inspect/test/cli/launch.test.js +++ b/deps/node-inspect/test/cli/launch.test.js @@ -26,6 +26,46 @@ test('custom port', (t) => { }); }); +test('random port', (t) => { + const script = Path.join('examples', 'three-lines.js'); + + const cli = startCLI(['--port=0', script]); + + return cli.waitForInitialBreak() + .then(() => cli.waitForPrompt()) + .then(() => { + t.match(cli.output, 'debug>', 'prints a prompt'); + t.match( + cli.output, + /< Debugger listening on /, + 'forwards child output'); + }) + .then(() => cli.quit()) + .then((code) => { + t.equal(code, 0, 'exits with success'); + }); +}); + +test('random port with --inspect-port=0', (t) => { + const script = Path.join('examples', 'three-lines.js'); + + const cli = startCLI([script], ['--inspect-port=0']); + + return cli.waitForInitialBreak() + .then(() => cli.waitForPrompt()) + .then(() => { + t.match(cli.output, 'debug>', 'prints a prompt'); + t.match( + cli.output, + /< Debugger listening on /, + 'forwards child output'); + }) + .then(() => cli.quit()) + .then((code) => { + t.equal(code, 0, 'exits with success'); + }); +}); + test('examples/three-lines.js', (t) => { const script = Path.join('examples', 'three-lines.js'); const cli = startCLI([script]); diff --git a/deps/node-inspect/test/cli/start-cli.js b/deps/node-inspect/test/cli/start-cli.js index ae904308e02270..b086dcd8ba218d 100644 --- a/deps/node-inspect/test/cli/start-cli.js +++ b/deps/node-inspect/test/cli/start-cli.js @@ -16,8 +16,8 @@ const BREAK_MESSAGE = new RegExp('(?:' + [ 'exception', 'other', 'promiseRejection', ].join('|') + ') in', 'i'); -function startCLI(args) { - const child = spawn(process.execPath, [CLI, ...args]); +function startCLI(args, flags = []) { + const child = spawn(process.execPath, [...flags, CLI, ...args]); let isFirstStdoutChunk = true; const outputBuffer = []; diff --git a/deps/v8/include/v8-version.h b/deps/v8/include/v8-version.h index 25ce5d071e71c3..84c6717e6b486d 100644 --- a/deps/v8/include/v8-version.h +++ b/deps/v8/include/v8-version.h @@ -11,7 +11,7 @@ #define V8_MAJOR_VERSION 6 #define V8_MINOR_VERSION 2 #define V8_BUILD_NUMBER 414 -#define V8_PATCH_LEVEL 50 +#define V8_PATCH_LEVEL 54 // Use 1 for candidates and 0 otherwise. // (Boolean macro values are not supported by all preprocessors.) 
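Note on the `start-cli.js` change above: `startCLI(args, flags = [])` now spreads `flags` before the CLI entry point in the `spawn()` call, so tests can hand flags such as `--inspect-port=0` to the node executable itself while `args` still reach the debugger CLI. A minimal sketch of that spawning pattern, assuming a placeholder `cli.js` path standing in for the real `deps/node-inspect/cli.js`:

```js
'use strict';
const { spawn } = require('child_process');
const path = require('path');

// Hypothetical location; the real test helper resolves deps/node-inspect/cli.js.
const CLI = path.join(__dirname, 'cli.js');

// Mirrors the updated startCLI(args, flags = []): entries in `flags` go to the
// node executable (e.g. --inspect-port=0), while `args` follow the CLI script
// and are passed through to the debugger as its own arguments.
function startCLI(args, flags = []) {
  return spawn(process.execPath, [...flags, CLI, ...args]);
}

// Usage as exercised by the new "random port with --inspect-port=0" launch test.
const child = startCLI(['examples/three-lines.js'], ['--inspect-port=0']);
child.stderr.on('data', (chunk) => process.stderr.write(chunk));
```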
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc index 078796f5dbb4f2..76b095eb422712 100644 --- a/deps/v8/src/api.cc +++ b/deps/v8/src/api.cc @@ -5190,6 +5190,7 @@ Local v8::Object::CreationContext() { int v8::Object::GetIdentityHash() { + i::DisallowHeapAllocation no_gc; auto isolate = Utils::OpenHandle(this)->GetIsolate(); i::HandleScope scope(isolate); auto self = Utils::OpenHandle(this); diff --git a/deps/v8/src/code-stub-assembler.cc b/deps/v8/src/code-stub-assembler.cc index 35261955db2576..915f507b12689c 100644 --- a/deps/v8/src/code-stub-assembler.cc +++ b/deps/v8/src/code-stub-assembler.cc @@ -1187,8 +1187,8 @@ TNode CodeStubAssembler::LoadHashForJSObject( { Node* length_and_hash_int32 = LoadAndUntagToWord32ObjectField( properties_or_hash, PropertyArray::kLengthAndHashOffset); - var_hash.Bind(Word32And(length_and_hash_int32, - Int32Constant(PropertyArray::kHashMask))); + var_hash.Bind( + DecodeWord32(length_and_hash_int32)); Goto(&done); } @@ -2508,7 +2508,8 @@ void CodeStubAssembler::InitializePropertyArrayLength(Node* property_array, CSA_ASSERT( this, IntPtrOrSmiLessThanOrEqual( - length, IntPtrOrSmiConstant(PropertyArray::kMaxLength, mode), mode)); + length, IntPtrOrSmiConstant(PropertyArray::LengthField::kMax, mode), + mode)); StoreObjectFieldNoWriteBarrier( property_array, PropertyArray::kLengthAndHashOffset, ParameterToTagged(length, mode), MachineRepresentation::kTaggedSigned); diff --git a/deps/v8/src/compiler/js-native-context-specialization.cc b/deps/v8/src/compiler/js-native-context-specialization.cc index dbe3fc9608216b..57d23589ff4f53 100644 --- a/deps/v8/src/compiler/js-native-context-specialization.cc +++ b/deps/v8/src/compiler/js-native-context-specialization.cc @@ -2255,14 +2255,18 @@ Node* JSNativeContextSpecialization::BuildExtendPropertiesBackingStore( jsgraph()->SmiConstant(PropertyArray::kNoHashSentinel)); hash = graph()->NewNode(common()->TypeGuard(Type::SignedSmall()), hash, control); + hash = + graph()->NewNode(simplified()->NumberShiftLeft(), hash, + jsgraph()->Constant(PropertyArray::HashField::kShift)); } else { hash = effect = graph()->NewNode( simplified()->LoadField(AccessBuilder::ForPropertyArrayLengthAndHash()), properties, effect, control); effect = graph()->NewNode( common()->BeginRegion(RegionObservability::kNotObservable), effect); - hash = graph()->NewNode(simplified()->NumberBitwiseAnd(), hash, - jsgraph()->Constant(JSReceiver::kHashMask)); + hash = + graph()->NewNode(simplified()->NumberBitwiseAnd(), hash, + jsgraph()->Constant(PropertyArray::HashField::kMask)); } Node* new_length_and_hash = graph()->NewNode( diff --git a/deps/v8/src/deoptimizer.cc b/deps/v8/src/deoptimizer.cc index 93a21a7b3adf5c..21b36ba49c9149 100644 --- a/deps/v8/src/deoptimizer.cc +++ b/deps/v8/src/deoptimizer.cc @@ -144,6 +144,50 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm, generator.Generate(); } +namespace { +class ActivationsFinder : public ThreadVisitor { + public: + explicit ActivationsFinder(std::set* codes, + Code* topmost_optimized_code, + bool safe_to_deopt_topmost_optimized_code) + : codes_(codes) { +#ifdef DEBUG + topmost_ = topmost_optimized_code; + safe_to_deopt_ = safe_to_deopt_topmost_optimized_code; +#endif + } + + // Find the frames with activations of codes marked for deoptimization, search + // for the trampoline to the deoptimizer call respective to each code, and use + // it to replace the current pc on the stack. 
+ void VisitThread(Isolate* isolate, ThreadLocalTop* top) { + for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) { + if (it.frame()->type() == StackFrame::OPTIMIZED) { + Code* code = it.frame()->LookupCode(); + if (code->kind() == Code::OPTIMIZED_FUNCTION && + code->marked_for_deoptimization()) { + codes_->erase(code); + // Obtain the trampoline to the deoptimizer call. + SafepointEntry safepoint = code->GetSafepointEntry(it.frame()->pc()); + int trampoline_pc = safepoint.trampoline_pc(); + DCHECK_IMPLIES(code == topmost_, safe_to_deopt_); + // Replace the current pc on the stack with the trampoline. + it.frame()->set_pc(code->instruction_start() + trampoline_pc); + } + } + } + } + + private: + std::set* codes_; + +#ifdef DEBUG + Code* topmost_; + bool safe_to_deopt_; +#endif +}; +} // namespace + void Deoptimizer::VisitAllOptimizedFunctionsForContext( Context* context, OptimizedFunctionVisitor* visitor) { DisallowHeapAllocation no_allocation; @@ -264,9 +308,9 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) { VisitAllOptimizedFunctionsForContext(context, &unlinker); Isolate* isolate = context->GetHeap()->isolate(); -#ifdef DEBUG Code* topmost_optimized_code = NULL; bool safe_to_deopt_topmost_optimized_code = false; +#ifdef DEBUG // Make sure all activations of optimized code can deopt at their current PC. // The topmost optimized code has special handling because it cannot be // deoptimized due to weak object dependency. @@ -304,6 +348,10 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) { } #endif + // We will use this set to mark those Code objects that are marked for + // deoptimization and have not been found in stack frames. + std::set codes; + // Move marked code from the optimized code list to the deoptimized // code list. // Walk over all optimized code objects in this native context. @@ -335,25 +383,14 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) { element = next; } - // Finds the with activations of codes marked for deoptimization, search for - // the trampoline to the deoptimizer call respective to each code, and use it - // to replace the current pc on the stack. - for (StackFrameIterator it(isolate, isolate->thread_local_top()); !it.done(); - it.Advance()) { - if (it.frame()->type() == StackFrame::OPTIMIZED) { - Code* code = it.frame()->LookupCode(); - if (code->kind() == Code::OPTIMIZED_FUNCTION && - code->marked_for_deoptimization()) { - // Obtain the trampoline to the deoptimizer call. - SafepointEntry safepoint = code->GetSafepointEntry(it.frame()->pc()); - int trampoline_pc = safepoint.trampoline_pc(); - DCHECK_IMPLIES(code == topmost_optimized_code, - safe_to_deopt_topmost_optimized_code); - // Replace the current pc on the stack with the trampoline. - it.frame()->set_pc(code->instruction_start() + trampoline_pc); - } - } - } + ActivationsFinder visitor(&codes, topmost_optimized_code, + safe_to_deopt_topmost_optimized_code); + // Iterate over the stack of this thread. + visitor.VisitThread(isolate, isolate->thread_local_top()); + // In addition to iterate over the stack of this thread, we also + // need to consider all the other threads as they may also use + // the code currently beings deoptimized. 
+ isolate->thread_manager()->IterateArchivedThreads(&visitor); } diff --git a/deps/v8/src/ic/accessor-assembler.cc b/deps/v8/src/ic/accessor-assembler.cc index 1dee1303366e74..8985bad15e9f64 100644 --- a/deps/v8/src/ic/accessor-assembler.cc +++ b/deps/v8/src/ic/accessor-assembler.cc @@ -1091,7 +1091,7 @@ void AccessorAssembler::ExtendPropertiesBackingStore(Node* object, // TODO(gsathya): Clean up the type conversions by creating smarter // helpers that do the correct op based on the mode. VARIABLE(var_properties, MachineRepresentation::kTaggedPointer); - VARIABLE(var_hash, MachineRepresentation::kWord32); + VARIABLE(var_encoded_hash, MachineRepresentation::kWord32); VARIABLE(var_length, ParameterRepresentation(mode)); Node* properties = LoadObjectField(object, JSObject::kPropertiesOrHashOffset); @@ -1102,7 +1102,10 @@ void AccessorAssembler::ExtendPropertiesBackingStore(Node* object, BIND(&if_smi_hash); { - var_hash.Bind(SmiToWord32(properties)); + Node* hash = SmiToWord32(properties); + Node* encoded_hash = + Word32Shl(hash, Int32Constant(PropertyArray::HashField::kShift)); + var_encoded_hash.Bind(encoded_hash); var_length.Bind(IntPtrOrSmiConstant(0, mode)); var_properties.Bind(EmptyFixedArrayConstant()); Goto(&extend_store); @@ -1112,10 +1115,11 @@ void AccessorAssembler::ExtendPropertiesBackingStore(Node* object, { Node* length_and_hash_int32 = LoadAndUntagToWord32ObjectField( var_properties.value(), PropertyArray::kLengthAndHashOffset); - var_hash.Bind(Word32And(length_and_hash_int32, - Int32Constant(PropertyArray::kHashMask))); - Node* length_intptr = ChangeInt32ToIntPtr(Word32And( - length_and_hash_int32, Int32Constant(PropertyArray::kLengthMask))); + var_encoded_hash.Bind(Word32And( + length_and_hash_int32, Int32Constant(PropertyArray::HashField::kMask))); + Node* length_intptr = ChangeInt32ToIntPtr( + Word32And(length_and_hash_int32, + Int32Constant(PropertyArray::LengthField::kMask))); Node* length = WordToParameter(length_intptr, mode); var_length.Bind(length); Goto(&extend_store); @@ -1161,7 +1165,7 @@ void AccessorAssembler::ExtendPropertiesBackingStore(Node* object, Node* new_capacity_int32 = TruncateWordToWord32(ParameterToWord(new_capacity, mode)); Node* new_length_and_hash_int32 = - Word32Or(var_hash.value(), new_capacity_int32); + Word32Or(var_encoded_hash.value(), new_capacity_int32); StoreObjectField(new_properties, PropertyArray::kLengthAndHashOffset, SmiFromWord32(new_length_and_hash_int32)); StoreObjectField(object, JSObject::kPropertiesOrHashOffset, new_properties); diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h index 82b7eb05a6e7b4..91fd08a2dd58c8 100644 --- a/deps/v8/src/objects-inl.h +++ b/deps/v8/src/objects-inl.h @@ -2679,33 +2679,31 @@ SYNCHRONIZED_SMI_ACCESSORS(FixedArrayBase, length, kLengthOffset) int PropertyArray::length() const { Object* value_obj = READ_FIELD(this, kLengthAndHashOffset); int value = Smi::ToInt(value_obj); - return value & kLengthMask; + return LengthField::decode(value); } void PropertyArray::initialize_length(int len) { SLOW_DCHECK(len >= 0); - SLOW_DCHECK(len < kMaxLength); + SLOW_DCHECK(len < LengthField::kMax); WRITE_FIELD(this, kLengthAndHashOffset, Smi::FromInt(len)); } int PropertyArray::synchronized_length() const { Object* value_obj = ACQUIRE_READ_FIELD(this, kLengthAndHashOffset); int value = Smi::ToInt(value_obj); - return value & kLengthMask; + return LengthField::decode(value); } int PropertyArray::Hash() const { Object* value_obj = READ_FIELD(this, kLengthAndHashOffset); int value = 
Smi::ToInt(value_obj); - int hash = value & kHashMask; - return hash; + return HashField::decode(value); } -void PropertyArray::SetHash(int masked_hash) { - DCHECK_EQ(masked_hash & JSReceiver::kHashMask, masked_hash); +void PropertyArray::SetHash(int hash) { Object* value_obj = READ_FIELD(this, kLengthAndHashOffset); int value = Smi::ToInt(value_obj); - value = (value & kLengthMask) | masked_hash; + value = HashField::update(value, hash); WRITE_FIELD(this, kLengthAndHashOffset, Smi::FromInt(value)); } diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc index 28c1cd681ffd46..b2b23c1f68f216 100644 --- a/deps/v8/src/objects.cc +++ b/deps/v8/src/objects.cc @@ -2301,6 +2301,7 @@ namespace { // objects. This avoids a double lookup in the cases where we know we will // add the hash to the JSObject if it does not already exist. Object* GetSimpleHash(Object* object) { + DisallowHeapAllocation no_gc; // The object is either a Smi, a HeapNumber, a name, an odd-ball, a real JS // object, or a Harmony proxy. if (object->IsSmi()) { @@ -2333,10 +2334,10 @@ Object* GetSimpleHash(Object* object) { } // namespace Object* Object::GetHash() { + DisallowHeapAllocation no_gc; Object* hash = GetSimpleHash(this); if (hash->IsSmi()) return hash; - DisallowHeapAllocation no_gc; DCHECK(IsJSReceiver()); JSReceiver* receiver = JSReceiver::cast(this); Isolate* isolate = receiver->GetIsolate(); @@ -2345,10 +2346,12 @@ Object* Object::GetHash() { // static Smi* Object::GetOrCreateHash(Isolate* isolate, Object* key) { + DisallowHeapAllocation no_gc; return key->GetOrCreateHash(isolate); } Smi* Object::GetOrCreateHash(Isolate* isolate) { + DisallowHeapAllocation no_gc; Object* hash = GetSimpleHash(this); if (hash->IsSmi()) return Smi::cast(hash); @@ -6266,26 +6269,27 @@ Handle JSObject::NormalizeElements( namespace { -Object* SetHashAndUpdateProperties(HeapObject* properties, int masked_hash) { - DCHECK_NE(PropertyArray::kNoHashSentinel, masked_hash); - DCHECK_EQ(masked_hash & JSReceiver::kHashMask, masked_hash); +Object* SetHashAndUpdateProperties(HeapObject* properties, int hash) { + DCHECK_NE(PropertyArray::kNoHashSentinel, hash); + DCHECK(PropertyArray::HashField::is_valid(hash)); if (properties == properties->GetHeap()->empty_fixed_array() || properties == properties->GetHeap()->empty_property_dictionary()) { - return Smi::FromInt(masked_hash); + return Smi::FromInt(hash); } if (properties->IsPropertyArray()) { - PropertyArray::cast(properties)->SetHash(masked_hash); + PropertyArray::cast(properties)->SetHash(hash); return properties; } DCHECK(properties->IsDictionary()); - NameDictionary::cast(properties)->SetHash(masked_hash); + NameDictionary::cast(properties)->SetHash(hash); return properties; } int GetIdentityHashHelper(Isolate* isolate, JSReceiver* object) { + DisallowHeapAllocation no_gc; Object* properties = object->raw_properties_or_hash(); if (properties->IsSmi()) { return Smi::ToInt(properties); @@ -6311,17 +6315,19 @@ int GetIdentityHashHelper(Isolate* isolate, JSReceiver* object) { } } // namespace -void JSReceiver::SetIdentityHash(int masked_hash) { - DCHECK_NE(PropertyArray::kNoHashSentinel, masked_hash); - DCHECK_EQ(masked_hash & JSReceiver::kHashMask, masked_hash); +void JSReceiver::SetIdentityHash(int hash) { + DisallowHeapAllocation no_gc; + DCHECK_NE(PropertyArray::kNoHashSentinel, hash); + DCHECK(PropertyArray::HashField::is_valid(hash)); HeapObject* existing_properties = HeapObject::cast(raw_properties_or_hash()); Object* new_properties = - SetHashAndUpdateProperties(existing_properties, 
masked_hash); + SetHashAndUpdateProperties(existing_properties, hash); set_raw_properties_or_hash(new_properties); } void JSReceiver::SetProperties(HeapObject* properties) { + DisallowHeapAllocation no_gc; Isolate* isolate = properties->GetIsolate(); int hash = GetIdentityHashHelper(isolate, this); Object* new_properties = properties; @@ -6337,6 +6343,7 @@ void JSReceiver::SetProperties(HeapObject* properties) { template Smi* GetOrCreateIdentityHashHelper(Isolate* isolate, ProxyType* proxy) { + DisallowHeapAllocation no_gc; Object* maybe_hash = proxy->hash(); if (maybe_hash->IsSmi()) return Smi::cast(maybe_hash); @@ -6346,6 +6353,7 @@ Smi* GetOrCreateIdentityHashHelper(Isolate* isolate, ProxyType* proxy) { } Object* JSObject::GetIdentityHash(Isolate* isolate) { + DisallowHeapAllocation no_gc; if (IsJSGlobalProxy()) { return JSGlobalProxy::cast(this)->hash(); } @@ -6359,6 +6367,7 @@ Object* JSObject::GetIdentityHash(Isolate* isolate) { } Smi* JSObject::GetOrCreateIdentityHash(Isolate* isolate) { + DisallowHeapAllocation no_gc; if (IsJSGlobalProxy()) { return GetOrCreateIdentityHashHelper(isolate, JSGlobalProxy::cast(this)); } @@ -6368,16 +6377,11 @@ Smi* JSObject::GetOrCreateIdentityHash(Isolate* isolate) { return Smi::cast(hash_obj); } - int masked_hash; - // TODO(gsathya): Remove the loop and pass kHashMask directly to - // GenerateIdentityHash. - do { - int hash = isolate->GenerateIdentityHash(Smi::kMaxValue); - masked_hash = hash & JSReceiver::kHashMask; - } while (masked_hash == PropertyArray::kNoHashSentinel); + int hash = isolate->GenerateIdentityHash(PropertyArray::HashField::kMax); + DCHECK_NE(PropertyArray::kNoHashSentinel, hash); - SetIdentityHash(masked_hash); - return Smi::FromInt(masked_hash); + SetIdentityHash(hash); + return Smi::FromInt(hash); } Object* JSProxy::GetIdentityHash() { return hash(); } diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h index f9987c2837c466..00a8d0da02220e 100644 --- a/deps/v8/src/objects.h +++ b/deps/v8/src/objects.h @@ -1953,12 +1953,10 @@ class PropertyArray : public HeapObject { // No weak fields. typedef BodyDescriptor BodyDescriptorWeak; - static const int kLengthMask = 0x3ff; - static const int kHashMask = 0x7ffffc00; - STATIC_ASSERT(kLengthMask + kHashMask == 0x7fffffff); - - static const int kMaxLength = kLengthMask; - STATIC_ASSERT(kMaxLength > kMaxNumberOfDescriptors); + static const int kLengthFieldSize = 10; + class LengthField : public BitField {}; + class HashField : public BitField {}; static const int kNoHashSentinel = 0; @@ -2185,7 +2183,7 @@ class JSReceiver: public HeapObject { MUST_USE_RESULT static MaybeHandle GetOwnEntries( Handle object, PropertyFilter filter); - static const int kHashMask = PropertyArray::kHashMask; + static const int kHashMask = PropertyArray::HashField::kMask; // Layout description. 
static const int kPropertiesOrHashOffset = HeapObject::kHeaderSize; diff --git a/deps/v8/src/objects/dictionary.h b/deps/v8/src/objects/dictionary.h index a989c8fc8a2e53..09fdb9eb0764a2 100644 --- a/deps/v8/src/objects/dictionary.h +++ b/deps/v8/src/objects/dictionary.h @@ -138,14 +138,16 @@ class BaseNameDictionary : public Dictionary { return Smi::ToInt(this->get(kNextEnumerationIndexIndex)); } - void SetHash(int masked_hash) { - DCHECK_EQ(masked_hash & JSReceiver::kHashMask, masked_hash); - this->set(kObjectHashIndex, Smi::FromInt(masked_hash)); + void SetHash(int hash) { + DCHECK(PropertyArray::HashField::is_valid(hash)); + this->set(kObjectHashIndex, Smi::FromInt(hash)); } int Hash() const { Object* hash_obj = this->get(kObjectHashIndex); - return Smi::ToInt(hash_obj); + int hash = Smi::ToInt(hash_obj); + DCHECK(PropertyArray::HashField::is_valid(hash)); + return hash; } // Creates a new dictionary. diff --git a/deps/v8/src/profiler/profiler-listener.cc b/deps/v8/src/profiler/profiler-listener.cc index 540d93002459fb..b90f5a4894fc76 100644 --- a/deps/v8/src/profiler/profiler-listener.cc +++ b/deps/v8/src/profiler/profiler-listener.cc @@ -226,11 +226,18 @@ void ProfilerListener::RecordInliningInfo(CodeEntry* entry, SharedFunctionInfo* shared_info = SharedFunctionInfo::cast( deopt_input_data->LiteralArray()->get(shared_info_id)); if (!depth++) continue; // Skip the current function itself. - CodeEntry* inline_entry = new CodeEntry( - entry->tag(), GetFunctionName(shared_info->DebugName()), - CodeEntry::kEmptyNamePrefix, entry->resource_name(), - CpuProfileNode::kNoLineNumberInfo, - CpuProfileNode::kNoColumnNumberInfo, NULL, code->instruction_start()); + const char* resource_name = + (shared_info->script()->IsScript() && + Script::cast(shared_info->script())->name()->IsName()) + ? GetName(Name::cast(Script::cast(shared_info->script())->name())) + : CodeEntry::kEmptyResourceName; + + CodeEntry* inline_entry = + new CodeEntry(entry->tag(), GetFunctionName(shared_info->DebugName()), + CodeEntry::kEmptyNamePrefix, resource_name, + CpuProfileNode::kNoLineNumberInfo, + CpuProfileNode::kNoColumnNumberInfo, nullptr, + code->instruction_start()); inline_entry->FillFunctionInfo(shared_info); inline_stack.push_back(inline_entry); } diff --git a/deps/v8/src/v8.gyp b/deps/v8/src/v8.gyp index bf7635ee339e2a..1c56842f81a548 100644 --- a/deps/v8/src/v8.gyp +++ b/deps/v8/src/v8.gyp @@ -2044,9 +2044,10 @@ '-L/usr/local/lib -lexecinfo', ]}, 'sources': [ + 'base/debug/stack_trace_posix.cc', 'base/platform/platform-openbsd.cc', 'base/platform/platform-posix.h', - 'base/platform/platform-posix.cc' + 'base/platform/platform-posix.cc', 'base/platform/platform-posix-time.h', 'base/platform/platform-posix-time.cc', ], diff --git a/deps/v8/test/cctest/test-cpu-profiler.cc b/deps/v8/test/cctest/test-cpu-profiler.cc index f22a42a977d5be..b441d04fdd31db 100644 --- a/deps/v8/test/cctest/test-cpu-profiler.cc +++ b/deps/v8/test/cctest/test-cpu-profiler.cc @@ -1745,6 +1745,85 @@ TEST(FunctionDetails) { script_a->GetUnboundScript()->GetId(), 5, 14); } +TEST(FunctionDetailsInlining) { + if (!CcTest::i_isolate()->use_optimizer() || i::FLAG_always_opt) return; + i::FLAG_allow_natives_syntax = true; + v8::HandleScope scope(CcTest::isolate()); + v8::Local env = CcTest::NewContext(PROFILER_EXTENSION); + v8::Context::Scope context_scope(env); + ProfilerHelper helper(env); + + // alpha is in a_script, beta in b_script. beta is + // inlined in alpha, but it should be attributed to b_script. 
+ + v8::Local script_b = CompileWithOrigin( + "function beta(k) {\n" + " let sum = 2;\n" + " for(let i = 0; i < k; i ++) {\n" + " sum += i;\n" + " sum = sum + 'a';\n" + " }\n" + " return sum;\n" + "}\n" + "\n", + "script_b"); + + v8::Local script_a = CompileWithOrigin( + "function alpha(p) {\n" + " let res = beta(p);\n" + " res = res + res;\n" + " return res;\n" + "}\n" + "let p = 2;\n" + "\n" + "\n" + "// Warm up before profiling or the inlining doesn't happen.\n" + "p = alpha(p);\n" + "p = alpha(p);\n" + "%OptimizeFunctionOnNextCall(alpha);\n" + "p = alpha(p);\n" + "\n" + "\n" + "startProfiling();\n" + "for(let i = 0; i < 10000; i++) {\n" + " p = alpha(p);\n" + "}\n" + "stopProfiling();\n" + "\n" + "\n", + "script_a"); + + script_b->Run(env).ToLocalChecked(); + script_a->Run(env).ToLocalChecked(); + + const v8::CpuProfile* profile = i::ProfilerExtension::last_profile; + const v8::CpuProfileNode* current = profile->GetTopDownRoot(); + reinterpret_cast(const_cast(current)) + ->Print(0); + // The tree should look like this: + // 0 (root) 0 #1 + // 5 (program) 0 #6 + // 2 14 #2 script_a:1 + // ;;; deopted at script_id: 14 position: 299 with reason 'Insufficient + // type feedback for call'. + // 1 alpha 14 #4 script_a:1 + // 9 beta 13 #5 script_b:0 + // 0 startProfiling 0 #3 + + const v8::CpuProfileNode* root = profile->GetTopDownRoot(); + const v8::CpuProfileNode* script = GetChild(env, root, ""); + CheckFunctionDetails(env->GetIsolate(), script, "", "script_a", + script_a->GetUnboundScript()->GetId(), 1, 1); + const v8::CpuProfileNode* alpha = FindChild(env, script, "alpha"); + // Return early if profiling didn't sample alpha. + if (!alpha) return; + CheckFunctionDetails(env->GetIsolate(), alpha, "alpha", "script_a", + script_a->GetUnboundScript()->GetId(), 1, 15); + const v8::CpuProfileNode* beta = FindChild(env, alpha, "beta"); + if (!beta) return; + CheckFunctionDetails(env->GetIsolate(), beta, "beta", "script_b", + script_b->GetUnboundScript()->GetId(), 0, 0); +} TEST(DontStopOnFinishedProfileDelete) { v8::HandleScope scope(CcTest::isolate()); diff --git a/deps/v8/test/cctest/test-lockers.cc b/deps/v8/test/cctest/test-lockers.cc index a310bfd68456c4..36a9f11ee27773 100644 --- a/deps/v8/test/cctest/test-lockers.cc +++ b/deps/v8/test/cctest/test-lockers.cc @@ -55,6 +55,244 @@ using ::v8::Value; using ::v8::V8; +namespace { + +class DeoptimizeCodeThread : public v8::base::Thread { + public: + DeoptimizeCodeThread(v8::Isolate* isolate, v8::Local context, + const char* trigger) + : Thread(Options("DeoptimizeCodeThread")), + isolate_(isolate), + context_(isolate, context), + source_(trigger) {} + + void Run() { + v8::Locker locker(isolate_); + isolate_->Enter(); + v8::HandleScope handle_scope(isolate_); + v8::Local context = + v8::Local::New(isolate_, context_); + v8::Context::Scope context_scope(context); + CHECK_EQ(isolate_, v8::Isolate::GetCurrent()); + // This code triggers deoptimization of some function that will be + // used in a different thread. + CompileRun(source_); + isolate_->Exit(); + } + + private: + v8::Isolate* isolate_; + Persistent context_; + // The code that triggers the deoptimization. + const char* source_; +}; + +void UnlockForDeoptimization(const v8::FunctionCallbackInfo& args) { + v8::Isolate* isolate = v8::Isolate::GetCurrent(); + // Gets the pointer to the thread that will trigger the deoptimization of the + // code. + DeoptimizeCodeThread* deoptimizer = + reinterpret_cast(isolate->GetData(0)); + { + // Exits and unlocks the isolate. 
+ isolate->Exit(); + v8::Unlocker unlocker(isolate); + // Starts the deoptimizing thread. + deoptimizer->Start(); + // Waits for deoptimization to finish. + deoptimizer->Join(); + } + // The deoptimizing thread has finished its work, and the isolate + // will now be used by the current thread. + isolate->Enter(); +} + +void UnlockForDeoptimizationIfReady( + const v8::FunctionCallbackInfo& args) { + v8::Isolate* isolate = v8::Isolate::GetCurrent(); + bool* ready_to_deoptimize = reinterpret_cast(isolate->GetData(1)); + if (*ready_to_deoptimize) { + // The test should enter here only once, so put the flag back to false. + *ready_to_deoptimize = false; + // Gets the pointer to the thread that will trigger the deoptimization of + // the code. + DeoptimizeCodeThread* deoptimizer = + reinterpret_cast(isolate->GetData(0)); + { + // Exits and unlocks the thread. + isolate->Exit(); + v8::Unlocker unlocker(isolate); + // Starts the thread that deoptimizes the function. + deoptimizer->Start(); + // Waits for the deoptimizing thread to finish. + deoptimizer->Join(); + } + // The deoptimizing thread has finished its work, and the isolate + // will now be used by the current thread. + isolate->Enter(); + } +} +} // namespace + +TEST(LazyDeoptimizationMultithread) { + i::FLAG_allow_natives_syntax = true; + v8::Isolate::CreateParams create_params; + create_params.array_buffer_allocator = CcTest::array_buffer_allocator(); + v8::Isolate* isolate = v8::Isolate::New(create_params); + { + v8::Locker locker(isolate); + v8::Isolate::Scope isolate_scope(isolate); + v8::HandleScope scope(isolate); + v8::Local context = v8::Context::New(isolate); + const char* trigger_deopt = "obj = { y: 0, x: 1 };"; + + // We use the isolate to pass arguments to the UnlockForDeoptimization + // function. Namely, we pass a pointer to the deoptimizing thread. + DeoptimizeCodeThread deoptimize_thread(isolate, context, trigger_deopt); + isolate->SetData(0, &deoptimize_thread); + v8::Context::Scope context_scope(context); + + // Create the function templace for C++ code that is invoked from + // JavaScript code. + Local fun_templ = + v8::FunctionTemplate::New(isolate, UnlockForDeoptimization); + Local fun = fun_templ->GetFunction(context).ToLocalChecked(); + CHECK(context->Global() + ->Set(context, v8_str("unlock_for_deoptimization"), fun) + .FromJust()); + + // Optimizes a function f, which will be deoptimized in another + // thread. + CompileRun( + "var b = false; var obj = { x: 1 };" + "function f() { g(); return obj.x; }" + "function g() { if (b) { unlock_for_deoptimization(); } }" + "%NeverOptimizeFunction(g);" + "f(); f(); %OptimizeFunctionOnNextCall(f);" + "f();"); + + // Trigger the unlocking. + Local v = CompileRun("b = true; f();"); + + // Once the isolate has been unlocked, the thread will wait for the + // other thread to finish its task. Once this happens, this thread + // continues with its execution, that is, with the execution of the + // function g, which then returns to f. The function f should have + // also been deoptimized. If the replacement did not happen on this + // thread's stack, then the test will fail here. 
+ CHECK(v->IsNumber()); + CHECK_EQ(1, static_cast(v->NumberValue(context).FromJust())); + } + isolate->Dispose(); +} + +TEST(LazyDeoptimizationMultithreadWithNatives) { + i::FLAG_allow_natives_syntax = true; + v8::Isolate::CreateParams create_params; + create_params.array_buffer_allocator = CcTest::array_buffer_allocator(); + v8::Isolate* isolate = v8::Isolate::New(create_params); + { + v8::Locker locker(isolate); + v8::Isolate::Scope isolate_scope(isolate); + v8::HandleScope scope(isolate); + v8::Local context = v8::Context::New(isolate); + const char* trigger_deopt = "%DeoptimizeFunction(f);"; + + // We use the isolate to pass arguments to the UnlockForDeoptimization + // function. Namely, we pass a pointer to the deoptimizing thread. + DeoptimizeCodeThread deoptimize_thread(isolate, context, trigger_deopt); + isolate->SetData(0, &deoptimize_thread); + bool ready_to_deopt = false; + isolate->SetData(1, &ready_to_deopt); + v8::Context::Scope context_scope(context); + + // Create the function templace for C++ code that is invoked from + // JavaScript code. + Local fun_templ = + v8::FunctionTemplate::New(isolate, UnlockForDeoptimizationIfReady); + Local fun = fun_templ->GetFunction(context).ToLocalChecked(); + CHECK(context->Global() + ->Set(context, v8_str("unlock_for_deoptimization"), fun) + .FromJust()); + + // Optimizes a function f, which will be deoptimized in another + // thread. + CompileRun( + "var obj = { x: 1 };" + "function f() { g(); return obj.x;}" + "function g() { " + " unlock_for_deoptimization(); }" + "%NeverOptimizeFunction(g);" + "f(); f(); %OptimizeFunctionOnNextCall(f);"); + + // Trigger the unlocking. + ready_to_deopt = true; + isolate->SetData(1, &ready_to_deopt); + Local v = CompileRun("f();"); + + // Once the isolate has been unlocked, the thread will wait for the + // other thread to finish its task. Once this happens, this thread + // continues with its execution, that is, with the execution of the + // function g, which then returns to f. The function f should have + // also been deoptimized. Otherwise, the test will fail here. + CHECK(v->IsNumber()); + CHECK_EQ(1, static_cast(v->NumberValue(context).FromJust())); + } + isolate->Dispose(); +} + +TEST(EagerDeoptimizationMultithread) { + i::FLAG_allow_natives_syntax = true; + v8::Isolate::CreateParams create_params; + create_params.array_buffer_allocator = CcTest::array_buffer_allocator(); + v8::Isolate* isolate = v8::Isolate::New(create_params); + { + v8::Locker locker(isolate); + v8::Isolate::Scope isolate_scope(isolate); + v8::HandleScope scope(isolate); + v8::Local context = v8::Context::New(isolate); + const char* trigger_deopt = "f({y: 0, x: 1});"; + + // We use the isolate to pass arguments to the UnlockForDeoptimization + // function. Namely, we pass a pointer to the deoptimizing thread. + DeoptimizeCodeThread deoptimize_thread(isolate, context, trigger_deopt); + isolate->SetData(0, &deoptimize_thread); + bool ready_to_deopt = false; + isolate->SetData(1, &ready_to_deopt); + v8::Context::Scope context_scope(context); + + // Create the function templace for C++ code that is invoked from + // JavaScript code. + Local fun_templ = + v8::FunctionTemplate::New(isolate, UnlockForDeoptimizationIfReady); + Local fun = fun_templ->GetFunction(context).ToLocalChecked(); + CHECK(context->Global() + ->Set(context, v8_str("unlock_for_deoptimization"), fun) + .FromJust()); + + // Optimizes a function f, which will be deoptimized by another thread. 
+ CompileRun( + "function f(obj) { unlock_for_deoptimization(); return obj.x; }" + "f({x: 1}); f({x: 1});" + "%OptimizeFunctionOnNextCall(f);" + "f({x: 1});"); + + // Trigger the unlocking. + ready_to_deopt = true; + isolate->SetData(1, &ready_to_deopt); + Local v = CompileRun("f({x: 1});"); + + // Once the isolate has been unlocked, the thread will wait for the + // other thread to finish its task. Once this happens, this thread + // continues with its execution, that is, with the execution of the + // function g, which then returns to f. The function f should have + // also been deoptimized. Otherwise, the test will fail here. + CHECK(v->IsNumber()); + CHECK_EQ(1, static_cast(v->NumberValue(context).FromJust())); + } + isolate->Dispose(); +} + // Migrating an isolate class KangarooThread : public v8::base::Thread { public: diff --git a/deps/v8/test/cctest/test-weakmaps.cc b/deps/v8/test/cctest/test-weakmaps.cc index 13a0c538d9b82f..f645234144c092 100644 --- a/deps/v8/test/cctest/test-weakmaps.cc +++ b/deps/v8/test/cctest/test-weakmaps.cc @@ -191,7 +191,7 @@ TEST(Regress2060a) { Handle object = factory->NewJSObject(function, TENURED); CHECK(!heap->InNewSpace(*object)); CHECK(!first_page->Contains(object->address())); - int32_t hash = object->GetOrCreateHash(isolate)->value(); + int32_t hash = key->GetOrCreateHash(isolate)->value(); JSWeakCollection::Set(weakmap, key, object, hash); } } diff --git a/doc/api/addons.md b/doc/api/addons.md index c6802530f6dc67..03feb8ec619ed5 100644 --- a/doc/api/addons.md +++ b/doc/api/addons.md @@ -101,7 +101,7 @@ Addon module name is `addon`. Once the source code has been written, it must be compiled into the binary `addon.node` file. To do so, create a file called `binding.gyp` in the top-level of the project describing the build configuration of the module -using a JSON-like format. This file is used by [node-gyp][] -- a tool written +using a JSON-like format. This file is used by [node-gyp][] — a tool written specifically to compile Node.js Addons. ```json diff --git a/doc/api/assert.md b/doc/api/assert.md index 561d59bdc2932f..a859408a8be99e 100644 --- a/doc/api/assert.md +++ b/doc/api/assert.md @@ -276,7 +276,7 @@ added: v0.1.21 * `expected` {any} * `message` {any} * `operator` {string} **Default:** '!=' -* `stackStartFunction` {function} **Default:** `assert.fail` +* `stackStartFunction` {Function} **Default:** `assert.fail` Throws an `AssertionError`. If `message` is falsy, the error message is set as the values of `actual` and `expected` separated by the provided `operator`. @@ -631,10 +631,10 @@ For more information, see [MDN's guide on equality comparisons and sameness][mdn-equality-guide]. 
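For context on the `assert.fail()` parameters touched in the hunk above, a minimal sketch of the documented behaviour: when `message` is falsy, the thrown `AssertionError` message is built from `actual` and `expected` joined by `operator`. The concrete values below are illustrative only.

```js
const assert = require('assert');

try {
  // No message given, so the error message is composed as "actual operator expected".
  assert.fail(1, 2, undefined, '>');
} catch (err) {
  console.log(err instanceof assert.AssertionError); // true
  console.log(err.message);                          // '1 > 2'
}
```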
[`Error.captureStackTrace`]: errors.html#errors_error_capturestacktrace_targetobject_constructoropt -[`Map`]: https://developer.mozilla.org/en/docs/Web/JavaScript/Reference/Global_Objects/Map +[`Map`]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Map [`Object.is()`]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Object/is [`RegExp`]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions -[`Set`]: https://developer.mozilla.org/en/docs/Web/JavaScript/Reference/Global_Objects/Set +[`Set`]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Set [`TypeError`]: errors.html#errors_class_typeerror [`assert.deepEqual()`]: #assert_assert_deepequal_actual_expected_message [`assert.deepStrictEqual()`]: #assert_assert_deepstrictequal_actual_expected_message diff --git a/doc/api/async_hooks.md b/doc/api/async_hooks.md index 4fa23f28d116b0..56640060bf7616 100644 --- a/doc/api/async_hooks.md +++ b/doc/api/async_hooks.md @@ -86,7 +86,7 @@ added: v8.1.0 * `before` {Function} The [`before` callback][]. * `after` {Function} The [`after` callback][]. * `destroy` {Function} The [`destroy` callback][]. -* Returns: `{AsyncHook}` Instance used for disabling and enabling hooks +* Returns: {AsyncHook} Instance used for disabling and enabling hooks Registers functions to be called for different lifetime events of each async operation. @@ -509,6 +509,9 @@ const server = net.createServer(function onConnection(conn) { }); ``` +Note that promise contexts may not get precise executionAsyncIds by default. +See the section on [promise execution tracking][]. + #### `async_hooks.triggerAsyncId()` * Returns: {number} The ID of the resource responsible for calling the callback @@ -531,6 +534,57 @@ const server = net.createServer((conn) => { }); ``` +Note that promise contexts may not get valid triggerAsyncIds by default. See +the section on [promise execution tracking][]. + +## Promise execution tracking + +By default, promise executions are not assigned asyncIds due to the relatively +expensive nature of the [promise introspection API][PromiseHooks] provided by +V8. This means that programs using promises or `async`/`await` will not get +correct execution and trigger ids for promise callback contexts by default. + +Here's an example: + +```js +const ah = require('async_hooks'); +Promise.resolve(1729).then(() => { + console.log(`eid ${ah.executionAsyncId()} tid ${ah.triggerAsyncId()}`); +}); +// produces: +// eid 1 tid 0 +``` + +Observe that the `then` callback claims to have executed in the context of the +outer scope even though there was an asynchronous hop involved. Also note that +the triggerAsyncId value is 0, which means that we are missing context about the +resource that caused (triggered) the `then` callback to be executed. + +Installing async hooks via `async_hooks.createHook` enables promise execution +tracking. Example: + +```js +const ah = require('async_hooks'); +ah.createHook({ init() {} }).enable(); // forces PromiseHooks to be enabled. +Promise.resolve(1729).then(() => { + console.log(`eid ${ah.executionAsyncId()} tid ${ah.triggerAsyncId()}`); +}); +// produces: +// eid 7 tid 6 +``` + +In this example, adding any actual hook function enabled the tracking of +promises. There are two promises in the example above; the promise created by +`Promise.resolve()` and the promise returned by the call to `then`. 
In the +example above, the first promise got the asyncId 6 and the latter got asyncId 7. +During the execution of the `then` callback, we are executing in the context of +promise with asyncId 7. This promise was triggered by async resource 6. + +Another subtlety with promises is that `before` and `after` callbacks are run +only on chained promises. That means promises not created by `then`/`catch` will +not have the `before` and `after` callbacks fired on them. For more details see +the details of the V8 [PromiseHooks][] API. + ## JavaScript Embedder API Library developers that handle their own asynchronous resources performing tasks @@ -582,7 +636,7 @@ asyncResource.triggerAsyncId(); * `type` {string} The type of async event. * `options` {Object} * `triggerAsyncId` {number} The ID of the execution context that created this - async event. **Default:** `executionAsyncId()` + async event. **Default:** `executionAsyncId()` * `requireManualDestroy` {boolean} Disables automatic `emitDestroy` when the object is garbage collected. This usually does not need to be set (even if `emitDestroy` is called manually), unless the resource's asyncId is retrieved @@ -655,3 +709,5 @@ constructor. [`destroy` callback]: #async_hooks_destroy_asyncid [`init` callback]: #async_hooks_init_asyncid_type_triggerasyncid_resource [Hook Callbacks]: #async_hooks_hook_callbacks +[PromiseHooks]: https://docs.google.com/document/d/1rda3yKGHimKIhg5YeoAmCOtyURgsbTH_qaYR79FELlk +[promise execution tracking]: #async_hooks_promise_execution_tracking diff --git a/doc/api/buffer.md b/doc/api/buffer.md index 3f827088ce5676..8ab7b472605ba6 100644 --- a/doc/api/buffer.md +++ b/doc/api/buffer.md @@ -449,7 +449,7 @@ changes: * `size` {integer} The desired length of the new `Buffer`. -Allocates a new `Buffer` of `size` bytes. If the `size` is larger than +Allocates a new `Buffer` of `size` bytes. If the `size` is larger than [`buffer.constants.MAX_LENGTH`] or smaller than 0, a [`RangeError`] will be thrown. A zero-length `Buffer` will be created if `size` is 0. @@ -535,7 +535,7 @@ const buf = Buffer.alloc(5); console.log(buf); ``` -Allocates a new `Buffer` of `size` bytes. If the `size` is larger than +Allocates a new `Buffer` of `size` bytes. If the `size` is larger than [`buffer.constants.MAX_LENGTH`] or smaller than 0, a [`RangeError`] will be thrown. A zero-length `Buffer` will be created if `size` is 0. @@ -580,7 +580,7 @@ changes: * `size` {integer} The desired length of the new `Buffer`. -Allocates a new `Buffer` of `size` bytes. If the `size` is larger than +Allocates a new `Buffer` of `size` bytes. If the `size` is larger than [`buffer.constants.MAX_LENGTH`] or smaller than 0, a [`RangeError`] will be thrown. A zero-length `Buffer` will be created if `size` is 0. @@ -626,7 +626,7 @@ added: v5.12.0 * `size` {integer} The desired length of the new `Buffer`. -Allocates a new `Buffer` of `size` bytes. If the `size` is larger than +Allocates a new `Buffer` of `size` bytes. If the `size` is larger than [`buffer.constants.MAX_LENGTH`] or smaller than 0, a [`RangeError`] will be thrown. A zero-length `Buffer` will be created if `size` is 0. @@ -2660,7 +2660,7 @@ deprecated: v6.0.0 * `size` {integer} The desired length of the new `SlowBuffer`. -Allocates a new `Buffer` of `size` bytes. If the `size` is larger than +Allocates a new `Buffer` of `size` bytes. If the `size` is larger than [`buffer.constants.MAX_LENGTH`] or smaller than 0, a [`RangeError`] will be thrown. A zero-length `Buffer` will be created if `size` is 0. 
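The `buffer.md` wording above repeats the size bounds for the allocation APIs; a short sketch of that behaviour (exact error messages vary by Node.js version, so only the documented `RangeError` contract is checked):

```js
const buffer = require('buffer');

console.log(Buffer.alloc(0).length);          // 0: a zero-length Buffer is created for size 0
console.log(buffer.constants.MAX_LENGTH > 0); // true: the documented upper bound for `size`

try {
  Buffer.alloc(-1);                           // smaller than 0 is rejected
} catch (err) {
  console.log(err instanceof RangeError);     // true
}
```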
diff --git a/doc/api/child_process.md b/doc/api/child_process.md index 2aeae73c728228..620719111c48c8 100755 --- a/doc/api/child_process.md +++ b/doc/api/child_process.md @@ -38,13 +38,13 @@ the event loop until the spawned process either exits or is terminated. For convenience, the `child_process` module provides a handful of synchronous and asynchronous alternatives to [`child_process.spawn()`][] and -[`child_process.spawnSync()`][]. *Note that each of these alternatives are +[`child_process.spawnSync()`][]. *Note that each of these alternatives are implemented on top of [`child_process.spawn()`][] or [`child_process.spawnSync()`][].* * [`child_process.exec()`][]: spawns a shell and runs a command within that shell, passing the `stdout` and `stderr` to a callback function when complete. * [`child_process.execFile()`][]: similar to [`child_process.exec()`][] except that - it spawns the command directly without first spawning a shell. + it spawns the command directly without first spawning a shell by default. * [`child_process.fork()`][]: spawns a new Node.js process and invokes a specified module with an IPC communication channel established that allows sending messages between parent and child. @@ -78,7 +78,7 @@ when the child process terminates. The importance of the distinction between [`child_process.exec()`][] and [`child_process.execFile()`][] can vary based on platform. On Unix-type operating systems (Unix, Linux, macOS) [`child_process.execFile()`][] can be more efficient -because it does not spawn a shell. On Windows, however, `.bat` and `.cmd` +because it does not spawn a shell by default. On Windows, however, `.bat` and `.cmd` files are not executable on their own without a terminal, and therefore cannot be launched using [`child_process.execFile()`][]. When running on Windows, `.bat` and `.cmd` files can be invoked using [`child_process.spawn()`][] with the `shell` @@ -143,8 +143,8 @@ changes: [Shell Requirements][] and [Default Windows Shell][]. * `timeout` {number} **Default:** `0` * `maxBuffer` {number} Largest amount of data in bytes allowed on stdout or - stderr. **Default:** `200*1024`. If exceeded, the child process is terminated. - See caveat at [`maxBuffer` and Unicode][]. + stderr. **Default:** `200 * 1024`. If exceeded, the child process is + terminated. See caveat at [`maxBuffer` and Unicode][]. * `killSignal` {string|integer} **Default:** `'SIGTERM'` * `uid` {number} Sets the user identity of the process (see setuid(2)). * `gid` {number} Sets the group identity of the process (see setgid(2)). @@ -187,7 +187,7 @@ exec('cat *.js bad_file | wc -l', (error, stdout, stderr) => { ``` If a `callback` function is provided, it is called with the arguments -`(error, stdout, stderr)`. On success, `error` will be `null`. On error, +`(error, stdout, stderr)`. On success, `error` will be `null`. On error, `error` will be an instance of [`Error`][]. The `error.code` property will be the exit code of the child process while `error.signal` will be set to the signal that terminated the process. Any exit code other than `0` is considered @@ -257,8 +257,8 @@ changes: * `encoding` {string} **Default:** `'utf8'` * `timeout` {number} **Default:** `0` * `maxBuffer` {number} Largest amount of data in bytes allowed on stdout or - stderr. **Default:** `200*1024` If exceeded, the child process is terminated. - See caveat at [`maxBuffer` and Unicode][]. + stderr. **Default:** `200 * 1024` If exceeded, the child process is + terminated. See caveat at [`maxBuffer` and Unicode][]. 
* `killSignal` {string|integer} **Default:** `'SIGTERM'` * `uid` {number} Sets the user identity of the process (see setuid(2)). * `gid` {number} Sets the group identity of the process (see setgid(2)). @@ -266,6 +266,10 @@ changes: normally be created on Windows systems. **Default:** `false`. * `windowsVerbatimArguments` {boolean} No quoting or escaping of arguments is done on Windows. Ignored on Unix. **Default:** `false`. + * `shell` {boolean|string} If `true`, runs `command` inside of a shell. Uses + `'/bin/sh'` on UNIX, and `process.env.ComSpec` on Windows. A different + shell can be specified as a string. See [Shell Requirements][] and + [Default Windows Shell][]. **Default:** `false` (no shell). * `callback` {Function} Called with the output when process terminates. * `error` {Error} * `stdout` {string|Buffer} @@ -273,7 +277,7 @@ changes: * Returns: {ChildProcess} The `child_process.execFile()` function is similar to [`child_process.exec()`][] -except that it does not spawn a shell. Rather, the specified executable `file` +except that it does not spawn a shell by default. Rather, the specified executable `file` is spawned directly as a new process making it slightly more efficient than [`child_process.exec()`][]. @@ -312,6 +316,10 @@ async function getVersion() { getVersion(); ``` +*Note*: If the `shell` option is enabled, do not pass unsanitized user input +to this function. Any input containing shell metacharacters may be used to +trigger arbitrary command execution. + ### child_process.fork(modulePath[, args][, options]) -* Returns: {Worker} A reference to `worker`. +* Returns: {cluster.Worker} A reference to `worker`. In a worker, this function will close all servers, wait for the `'close'` event on those servers, and then disconnect the IPC channel. @@ -499,7 +499,7 @@ Emitted after the worker IPC channel has disconnected. This can occur when a worker exits gracefully, is killed, or is disconnected manually (such as with worker.disconnect()). -There may be a delay between the `'disconnect'` and `'exit'` events. These events +There may be a delay between the `'disconnect'` and `'exit'` events. These events can be used to detect if the process is stuck in a cleanup or if there are long-living connections. @@ -590,7 +590,7 @@ The `addressType` is one of: * `4` (TCPv4) * `6` (TCPv6) * `-1` (unix domain socket) -* `"udp4"` or `"udp6"` (UDP v4 or v6) +* `'udp4'` or `'udp6'` (UDP v4 or v6) ## Event: 'message' - `algorithm` {string} - `key` {string | Buffer | TypedArray | DataView} - `iv` {string | Buffer | TypedArray | DataView} @@ -1281,8 +1292,8 @@ recent OpenSSL releases, `openssl list-cipher-algorithms` will display the available cipher algorithms. The `key` is the raw key used by the `algorithm` and `iv` is an -[initialization vector][]. Both arguments must be `'utf8'` encoded strings or -[buffers][`Buffer`]. +[initialization vector][]. Both arguments must be `'utf8'` encoded strings, +[Buffers][`Buffer`], `TypedArray`, or `DataView`s. ### crypto.createDiffieHellman(prime[, primeEncoding][, generator][, generatorEncoding]) -* Returns {number} the `SO_RCVBUF` socket receive buffer size in bytes. +* Returns: {number} the `SO_RCVBUF` socket receive buffer size in bytes. ### socket.getSendBufferSize() -* Returns {number} the `SO_SNDBUF` socket send buffer size in bytes. +* Returns: {number} the `SO_SNDBUF` socket send buffer size in bytes. ### socket.ref() Sometimes, the domain in use is not the one that ought to be used for a -specific event emitter. 
Or, the event emitter could have been created +specific event emitter. Or, the event emitter could have been created in the context of one domain, but ought to instead be bound to some other domain. @@ -280,7 +280,7 @@ Returns a new Domain object. The Domain class encapsulates the functionality of routing errors and uncaught exceptions to the active Domain object. -Domain is a child class of [`EventEmitter`][]. To handle the errors that it +Domain is a child class of [`EventEmitter`][]. To handle the errors that it catches, listen to its `'error'` event. ### domain.members @@ -294,13 +294,13 @@ to the domain. * `emitter` {EventEmitter|Timer} emitter or timer to be added to the domain -Explicitly adds an emitter to the domain. If any event handlers called by +Explicitly adds an emitter to the domain. If any event handlers called by the emitter throw an error, or if the emitter emits an `'error'` event, it will be routed to the domain's `'error'` event, just like with implicit binding. This also works with timers that are returned from [`setInterval()`][] and -[`setTimeout()`][]. If their callback function throws, it will be caught by +[`setTimeout()`][]. If their callback function throws, it will be caught by the domain 'error' handler. If the Timer or EventEmitter was already bound to a domain, it is removed @@ -312,7 +312,7 @@ from that one, and bound to this one instead. * Returns: {Function} The bound function The returned function will be a wrapper around the supplied callback -function. When the returned function is called, any errors that are +function. When the returned function is called, any errors that are thrown will be routed to the domain's `'error'` event. #### Example @@ -336,7 +336,7 @@ d.on('error', (er) => { ### domain.dispose() -> Stability: 0 - Deprecated. Please recover from failed IO actions +> Stability: 0 - Deprecated. Please recover from failed IO actions > explicitly via error event handlers set on the domain. Once `dispose` has been called, the domain will no longer be used by callbacks @@ -382,7 +382,7 @@ without exiting the domain. * `callback` {Function} The callback function * Returns: {Function} The intercepted function -This method is almost identical to [`domain.bind(callback)`][]. However, in +This method is almost identical to [`domain.bind(callback)`][]. However, in addition to catching thrown errors, it will also intercept [`Error`][] objects sent as the first argument to the function. @@ -419,7 +419,7 @@ d.on('error', (er) => { * `emitter` {EventEmitter|Timer} emitter or timer to be removed from the domain -The opposite of [`domain.add(emitter)`][]. Removes domain handling from the +The opposite of [`domain.add(emitter)`][]. Removes domain handling from the specified emitter. ### domain.run(fn[, ...args]) diff --git a/doc/api/errors.md b/doc/api/errors.md index b43a3395bff615..1c5836e553df1a 100755 --- a/doc/api/errors.md +++ b/doc/api/errors.md @@ -107,7 +107,7 @@ pass or fail). For *all* `EventEmitter` objects, if an `'error'` event handler is not provided, the error will be thrown, causing the Node.js process to report an -unhandled exception and crash unless either: The [`domain`][domains] module is +unhandled exception and crash unless either: The [`domain`][domains] module is used appropriately or a handler has been registered for the [`process.on('uncaughtException')`][] event. @@ -133,7 +133,7 @@ exactly how errors raised by those methods are propagated. 
Most asynchronous methods exposed by the Node.js core API follow an idiomatic -pattern referred to as an _error-first callback_ (sometimes referred to as +pattern referred to as an _error-first callback_ (sometimes referred to as a _Node.js style callback_). With this pattern, a callback function is passed to the method as an argument. When the operation either completes or an error is raised, the callback function is called with @@ -156,7 +156,7 @@ fs.readFile('/some/file/that/does-exist', errorFirstCallback); ``` The JavaScript `try / catch` mechanism **cannot** be used to intercept errors -generated by asynchronous APIs. A common mistake for beginners is to try to +generated by asynchronous APIs. A common mistake for beginners is to try to use `throw` inside an error-first callback: ```js @@ -209,7 +209,7 @@ provided text message. If an object is passed as `message`, the text message is generated by calling `message.toString()`. The `error.stack` property will represent the point in the code at which `new Error()` was called. Stack traces are dependent on [V8's stack trace API][]. Stack traces extend only to either -(a) the beginning of *synchronous code execution*, or (b) the number of frames +(a) the beginning of *synchronous code execution*, or (b) the number of frames given by the property `Error.stackTraceLimit`, whichever is smaller. ### Error.captureStackTrace(targetObject[, constructorOpt]) @@ -526,7 +526,7 @@ found [here][online]. - `EACCES` (Permission denied): An attempt was made to access a file in a way forbidden by its file access permissions. -- `EADDRINUSE` (Address already in use): An attempt to bind a server +- `EADDRINUSE` (Address already in use): An attempt to bind a server ([`net`][], [`http`][], or [`https`][]) to a local address failed due to another server on the local system already occupying that address. @@ -554,14 +554,14 @@ found [here][online]. `ulimit -n 2048` in the same shell that will run the Node.js process. - `ENOENT` (No such file or directory): Commonly raised by [`fs`][] operations - to indicate that a component of the specified pathname does not exist -- no + to indicate that a component of the specified pathname does not exist — no entity (file or directory) could be found by the given path. - `ENOTDIR` (Not a directory): A component of the given pathname existed, but was not a directory as expected. Commonly raised by [`fs.readdir`][]. - `ENOTEMPTY` (Directory not empty): A directory with entries was the target - of an operation that requires an empty directory -- usually [`fs.unlink`][]. + of an operation that requires an empty directory — usually [`fs.unlink`][]. - `EPERM` (Operation not permitted): An attempt was made to perform an operation that requires elevated privileges. @@ -573,7 +573,7 @@ found [here][online]. - `ETIMEDOUT` (Operation timed out): A connect or send request failed because the connected party did not properly respond after a period of time. Usually - encountered by [`http`][] or [`net`][] -- often a sign that a `socket.end()` + encountered by [`http`][] or [`net`][] — often a sign that a `socket.end()` was not properly called. @@ -638,6 +638,21 @@ Status code was outside the regular status code range (100-999). The `Trailer` header was set even though the transfer encoding does not support that. + +### ERR_HTTP2_ALREADY_SHUTDOWN + +Occurs with multiple attempts to shutdown an HTTP/2 session. + + +### ERR_HTTP2_ALTSVC_INVALID_ORIGIN + +HTTP/2 ALTSVC frames require a valid origin. 
+ + +### ERR_HTTP2_ALTSVC_LENGTH + +HTTP/2 ALTSVC frames are limited to a maximum of 16,382 payload bytes. + ### ERR_HTTP2_CONNECT_AUTHORITY @@ -661,6 +676,12 @@ forbidden. A failure occurred sending an individual frame on the HTTP/2 session. + +### ERR_HTTP2_GOAWAY_SESSION + +New HTTP/2 Streams may not be opened after the `Http2Session` has received a +`GOAWAY` frame from the connected peer. + ### ERR_HTTP2_HEADER_REQUIRED @@ -749,7 +770,7 @@ An operation was performed on a stream that had already been destroyed. ### ERR_HTTP2_MAX_PENDING_SETTINGS_ACK Whenever an HTTP/2 `SETTINGS` frame is sent to a connected peer, the peer is -required to send an acknowledgement that it has received and applied the new +required to send an acknowledgment that it has received and applied the new `SETTINGS`. By default, a maximum number of unacknowledged `SETTINGS` frames may be sent at any given time. This error code is used when that limit has been reached. @@ -775,7 +796,7 @@ forbidden. ### ERR_HTTP2_PING_CANCEL -An HTTP/2 ping was cancelled. +An HTTP/2 ping was canceled. ### ERR_HTTP2_PING_LENGTH @@ -800,6 +821,11 @@ client. An attempt was made to use the `Http2Stream.prototype.responseWithFile()` API to send something other than a regular file. + +### ERR_HTTP2_SESSION_ERROR + +The `Http2Session` closed with a non-zero error code. + ### ERR_HTTP2_SOCKET_BOUND @@ -817,10 +843,11 @@ Use of the `101` Informational status code is forbidden in HTTP/2. An invalid HTTP status code has been specified. Status codes must be an integer between `100` and `599` (inclusive). - -### ERR_HTTP2_STREAM_CLOSED + +### ERR_HTTP2_STREAM_CANCEL -An action was performed on an HTTP/2 Stream that had already been closed. +An `Http2Stream` was destroyed before any data was transmitted to the connected +peer. ### ERR_HTTP2_STREAM_ERROR @@ -981,6 +1008,37 @@ strict compliance with the API specification (which in some cases may accept `func(undefined)` and `func()` are treated identically, and the [`ERR_INVALID_ARG_TYPE`][] error code may be used instead. + +### ERR_MISSING_DYNAMIC_INSTANTIATE_HOOK + +> Stability: 1 - Experimental + +Used when an [ES6 module][] loader hook specifies `format: 'dynamic` but does +not provide a `dynamicInstantiate` hook. + + +### ERR_MISSING_MODULE + +> Stability: 1 - Experimental + +Used when an [ES6 module][] cannot be resolved. + + +### ERR_MODULE_RESOLUTION_LEGACY + +> Stability: 1 - Experimental + +Used when a failure occurred resolving imports in an [ES6 module][]. + + +### ERR_MULTIPLE_CALLBACK + +A callback was called more than once. + +*Note*: A callback is almost always meant to only be called once as the query +can either be fulfilled or rejected but not both at the same time. The latter +would be possible by calling a callback more than once. + ### ERR_NAPI_CONS_FUNCTION @@ -991,17 +1049,30 @@ While using `N-API`, a constructor passed was not a function. While using `N-API`, `Constructor.prototype` was not an object. + +### ERR_NAPI_INVALID_DATAVIEW_ARGS + +While calling `napi_create_dataview()`, a given `offset` was outside the bounds +of the dataview or `offset + length` was larger than a length of given `buffer`. + + +### ERR_NAPI_INVALID_TYPEDARRAY_ALIGNMENT + +While calling `napi_create_typedarray()`, the provided `offset` was not a +multiple of the element size. + + +### ERR_NAPI_INVALID_TYPEDARRAY_LENGTH + +While calling `napi_create_typedarray()`, `(length * size_of_element) + +byte_offset` was larger than the length of given `buffer`. 
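Looking back at the `ERR_MULTIPLE_CALLBACK` entry above, a minimal sketch of the anti-pattern it describes; the helper names are made up for illustration and the plain `Error` stands in for the internal error code.

```js
// A hypothetical async helper that misuses its callback: it both fulfills and rejects.
function fetchValue(callback) {
  callback(null, 42);              // fulfilled...
  callback(new Error('too late')); // ...and then "rejected" as well: one call too many.
}

// A simplified guard of the kind used before raising ERR_MULTIPLE_CALLBACK.
function once(callback) {
  let called = false;
  return (...args) => {
    if (called) throw new Error('callback called more than once');
    called = true;
    callback(...args);
  };
}

try {
  fetchValue(once((err, value) => {
    if (err) throw err;
    console.log(value); // 42
  }));
} catch (err) {
  console.log(err.message); // 'callback called more than once'
}
```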
+ ### ERR_NO_ICU An attempt was made to use features that require [ICU][], but Node.js was not compiled with ICU support. - -### ERR_OUTOFMEMORY - -An operation caused an out-of-memory condition. - ### ERR_SOCKET_ALREADY_BOUND diff --git a/doc/api/esm.md b/doc/api/esm.md index b90927c0d57cec..926555fc771484 100644 --- a/doc/api/esm.md +++ b/doc/api/esm.md @@ -132,12 +132,12 @@ module. This can be one of the following: | `format` | Description | | --- | --- | -| `"esm"` | Load a standard JavaScript module | -| `"commonjs"` | Load a node-style CommonJS module | -| `"builtin"` | Load a node builtin CommonJS module | -| `"json"` | Load a JSON file | -| `"addon"` | Load a [C++ Addon][addons] | -| `"dynamic"` | Use a [dynamic instantiate hook][] | +| `'esm'` | Load a standard JavaScript module | +| `'commonjs'` | Load a node-style CommonJS module | +| `'builtin'` | Load a node builtin CommonJS module | +| `'json'` | Load a JSON file | +| `'addon'` | Load a [C++ Addon][addons] | +| `'dynamic'` | Use a [dynamic instantiate hook][] | For example, a dummy loader to load JavaScript restricted to browser resolution rules with only JS file extension and Node builtin modules support could @@ -191,7 +191,7 @@ would load the module `x.js` as an ES module with relative resolution support To create a custom dynamic module that doesn't correspond to one of the existing `format` interpretations, the `dynamicInstantiate` hook can be used. -This hook is called only for modules that return `format: "dynamic"` from +This hook is called only for modules that return `format: 'dynamic'` from the `resolve` hook. ```js diff --git a/doc/api/fs.md b/doc/api/fs.md index 1eaae690190c70..93744100057c57 100644 --- a/doc/api/fs.md +++ b/doc/api/fs.md @@ -6,7 +6,7 @@ -File I/O is provided by simple wrappers around standard POSIX functions. To +File I/O is provided by simple wrappers around standard POSIX functions. To use this module do `require('fs')`. All the methods have asynchronous and synchronous forms. @@ -68,7 +68,7 @@ fs.rename('/tmp/hello', '/tmp/world', (err) => { In busy processes, the programmer is _strongly encouraged_ to use the asynchronous versions of these calls. The synchronous versions will block -the entire process until they complete--halting all connections. +the entire process until they complete — halting all connections. The relative path to a filename can be used. Remember, however, that this path will be relative to `process.cwd()`. @@ -369,16 +369,16 @@ value, will not be reflected in the corresponding alternate representation. The times in the stat object have the following semantics: -* `atime` "Access Time" - Time when file data last accessed. Changed +* `atime` "Access Time" - Time when file data last accessed. Changed by the mknod(2), utimes(2), and read(2) system calls. * `mtime` "Modified Time" - Time when file data last modified. Changed by the mknod(2), utimes(2), and write(2) system calls. * `ctime` "Change Time" - Time when file status was last changed - (inode data modification). Changed by the chmod(2), chown(2), + (inode data modification). Changed by the chmod(2), chown(2), link(2), mknod(2), rename(2), unlink(2), utimes(2), read(2), and write(2) system calls. -* `birthtime` "Birth Time" - Time of file creation. Set once when the - file is created. On filesystems where birthtime is not available, +* `birthtime` "Birth Time" - Time of file creation. Set once when the + file is created. 
On filesystems where birthtime is not available, this field may instead hold either the `ctime` or `1970-01-01T00:00Z` (ie, unix epoch timestamp `0`). Note that this value may be greater than `atime` or `mtime` in this case. On Darwin @@ -387,7 +387,7 @@ The times in the stat object have the following semantics: utimes(2) system call. Prior to Node v0.12, the `ctime` held the `birthtime` on Windows -systems. Note that as of v0.12, `ctime` is not "creation time", and +systems. Note that as of v0.12, `ctime` is not "creation time", and on Unix systems, it never was. ## Class: fs.WriteStream @@ -577,7 +577,7 @@ changes: * `path` {string|Buffer|URL} * `mode` {integer} **Default:** `fs.constants.F_OK` -* Returns: `undefined` +* Returns: {undefined} Synchronously tests a user's permissions for the file or directory specified by `path`. The `mode` argument is an optional integer that specifies the @@ -862,7 +862,7 @@ changes: * `callback` {Function} * `err` {Error} -Asynchronous close(2). No arguments other than a possible exception are given +Asynchronous close(2). No arguments other than a possible exception are given to the completion callback. ## fs.closeSync(fd) @@ -1009,7 +1009,7 @@ const defaults = { ``` `options` can include `start` and `end` values to read a range of bytes from -the file instead of the entire file. Both `start` and `end` are inclusive and +the file instead of the entire file. Both `start` and `end` are inclusive and start counting at 0. If `fd` is specified and `start` is omitted or `undefined`, `fs.createReadStream()` reads sequentially from the current file position. The `encoding` can be any one of those accepted by [`Buffer`][]. @@ -1079,7 +1079,7 @@ const defaults = { ``` `options` may also include a `start` option to allow writing data at -some position past the beginning of the file. Modifying a file rather +some position past the beginning of the file. Modifying a file rather than replacing it may require a `flags` mode of `r+` rather than the default mode `w`. The `encoding` can be any one of those accepted by [`Buffer`][]. @@ -1115,7 +1115,7 @@ deprecated: v1.0.0 * `exists` {boolean} Test whether or not the given path exists by checking with the file system. -Then call the `callback` argument with either true or false. Example: +Then call the `callback` argument with either true or false. Example: ```js fs.exists('/etc/passwd', (exists) => { @@ -1809,7 +1809,7 @@ to a non-existent file. The exclusive flag may or may not work with network file systems. `flags` can also be a number as documented by open(2); commonly used constants -are available from `fs.constants`. On Windows, flags are translated to +are available from `fs.constants`. On Windows, flags are translated to their equivalent ones where applicable, e.g. `O_WRONLY` to `FILE_GENERIC_WRITE`, or `O_EXCL|O_CREAT` to `CREATE_NEW`, as accepted by CreateFileW. @@ -1923,7 +1923,7 @@ changes: * `err` {Error} * `files` {string[]|Buffer[]} -Asynchronous readdir(3). Reads the contents of a directory. +Asynchronous readdir(3). Reads the contents of a directory. The callback gets two arguments `(err, files)` where `files` is an array of the names of the files in the directory excluding `'.'` and `'..'`. @@ -2468,7 +2468,7 @@ Calling `fs.unwatchFile()` with a filename that is not being watched is a no-op, not an error. *Note*: [`fs.watch()`][] is more efficient than `fs.watchFile()` and -`fs.unwatchFile()`. `fs.watch()` should be used instead of `fs.watchFile()` +`fs.unwatchFile()`. 
`fs.watch()` should be used instead of `fs.watchFile()` and `fs.unwatchFile()` when possible. ## fs.utimes(path, atime, mtime, callback) @@ -2559,12 +2559,12 @@ changes: * `filename` {string|Buffer} Watch for changes on `filename`, where `filename` is either a file or a -directory. The returned object is a [`fs.FSWatcher`][]. +directory. The returned object is a [`fs.FSWatcher`][]. The second argument is optional. If `options` is provided as a string, it specifies the `encoding`. Otherwise `options` should be passed as an object. -The listener callback gets two arguments `(eventType, filename)`. `eventType` is either +The listener callback gets two arguments `(eventType, filename)`. `eventType` is either `'rename'` or `'change'`, and `filename` is the name of the file which triggered the event. @@ -2626,7 +2626,7 @@ content, and one for truncation). Providing `filename` argument in the callback is only supported on Linux, -macOS, Windows, and AIX. Even on supported platforms, `filename` is not always +macOS, Windows, and AIX. Even on supported platforms, `filename` is not always guaranteed to be provided. Therefore, don't assume that `filename` argument is always provided in the callback, and have some fallback logic if it is null. @@ -2775,7 +2775,7 @@ changes: * `written` {integer} * `string` {string} -Write `string` to the file specified by `fd`. If `string` is not a string, then +Write `string` to the file specified by `fd`. If `string` is not a string, then the value will be coerced to one. `position` refers to the offset from the beginning of the file where this data @@ -3177,7 +3177,7 @@ The following constants are meant for use with the [`fs.Stats`][] object's [Caveats]: #fs_caveats [Common System Errors]: errors.html#errors_common_system_errors [FS Constants]: #fs_fs_constants_1 -[MDN-Date]: https://developer.mozilla.org/en/JavaScript/Reference/Global_Objects/Date +[MDN-Date]: https://developer.mozilla.org/en-US/JavaScript/Reference/Global_Objects/Date [MDN-Number]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Data_structures#Number_type [MSDN-Rel-Path]: https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247.aspx#fully_qualified_vs._relative_paths [Readable Stream]: stream.html#stream_class_stream_readable diff --git a/doc/api/http.md b/doc/api/http.md index dea6c08ef35d6f..7331d8bc5d969d 100644 --- a/doc/api/http.md +++ b/doc/api/http.md @@ -9,7 +9,7 @@ To use the HTTP server and client one must `require('http')`. The HTTP interfaces in Node.js are designed to support many features of the protocol which have been traditionally difficult to use. In particular, large, possibly chunk-encoded, messages. The interface is -careful to never buffer entire requests or responses--the +careful to never buffer entire requests or responses — the user is able to stream data. HTTP message headers are represented by an object like this: @@ -33,7 +33,7 @@ parse the actual headers or the body. See [`message.headers`][] for details on how duplicate headers are handled. The raw headers as they were received are retained in the `rawHeaders` -property, which is an array of `[key, value, key2, value2, ...]`. For +property, which is an array of `[key, value, key2, value2, ...]`. For example, the previous message header object might have a `rawHeaders` list like the following: @@ -122,9 +122,9 @@ added: v0.3.4 for TCP Keep-Alive packets. Ignored when the `keepAlive` option is `false` or `undefined`. Defaults to `1000`. 
* `maxSockets` {number} Maximum number of sockets to allow per - host. Defaults to `Infinity`. + host. Defaults to `Infinity`. * `maxFreeSockets` {number} Maximum number of sockets to leave open - in a free state. Only relevant if `keepAlive` is set to `true`. + in a free state. Only relevant if `keepAlive` is set to `true`. Defaults to `256`. The default [`http.globalAgent`][] that is used by [`http.request()`][] has all @@ -203,9 +203,9 @@ added: v0.11.4 Destroy any sockets that are currently in use by the agent. -It is usually not necessary to do this. However, if using an +It is usually not necessary to do this. However, if using an agent with `keepAlive` enabled, then it is best to explicitly shut down -the agent when it will no longer be used. Otherwise, +the agent when it will no longer be used. Otherwise, sockets may hang open for quite a long time before the server terminates them. @@ -217,7 +217,7 @@ added: v0.11.4 * {Object} An object which contains arrays of sockets currently awaiting use by -the agent when `keepAlive` is enabled. Do not modify. +the agent when `keepAlive` is enabled. Do not modify. ### agent.getName(options) -This object is created internally and returned from [`http.request()`][]. It -represents an _in-progress_ request whose header has already been queued. The +This object is created internally and returned from [`http.request()`][]. It +represents an _in-progress_ request whose header has already been queued. The header is still mutable using the [`setHeader(name, value)`][], - [`getHeader(name)`][], [`removeHeader(name)`][] API. The actual header will + [`getHeader(name)`][], [`removeHeader(name)`][] API. The actual header will be sent along with the first data chunk or when calling [`request.end()`][]. To get the response, add a listener for [`'response'`][] to the request object. [`'response'`][] will be emitted from the request object when the response -headers have been received. The [`'response'`][] event is executed with one +headers have been received. The [`'response'`][] event is executed with one argument which is an instance of [`http.IncomingMessage`][]. During the [`'response'`][] event, one can add listeners to the response object; particularly to listen for the `'data'` event. If no [`'response'`][] handler is added, then the response will be -entirely discarded. However, if a [`'response'`][] event handler is added, +entirely discarded. However, if a [`'response'`][] event handler is added, then the data from the response object **must** be consumed, either by calling `response.read()` whenever there is a `'readable'` event, or by adding a `'data'` handler, or by calling the `.resume()` method. -Until the data is consumed, the `'end'` event will not fire. Also, until +Until the data is consumed, the `'end'` event will not fire. Also, until the data is read it will consume memory that can eventually lead to a 'process out of memory' error. @@ -541,7 +541,7 @@ For efficiency reasons, Node.js normally buffers the request headers until then tries to pack the request headers and data into a single TCP packet. That's usually desired (it saves a TCP round-trip), but not when the first -data is not sent until possibly much later. `request.flushHeaders()` bypasses +data is not sent until possibly much later. `request.flushHeaders()` bypasses the optimization and kickstarts the request. ### request.getHeader(name) @@ -669,9 +669,9 @@ added: v0.1.29 * `encoding` {string} * `callback` {Function} -Sends a chunk of the body. 
By calling this method +Sends a chunk of the body. By calling this method many times, a request body can be sent to a -server--in that case it is suggested to use the +server — in that case it is suggested to use the `['Transfer-Encoding', 'chunked']` header line when creating the request. @@ -681,7 +681,9 @@ Defaults to `'utf8'`. The `callback` argument is optional and will be called when this chunk of data is flushed. -Returns `request`. +Returns `true` if the entire data was flushed successfully to the kernel +buffer. Returns `false` if all or part of the data was queued in user memory. +`'drain'` will be emitted when the buffer is free again. ## Class: http.Server -This object is created internally by an HTTP server--not by the user. It is +This object is created internally by an HTTP server — not by the user. It is passed as the second parameter to the [`'request'`][] event. The response implements, but does not inherit from, the [Writable Stream][] @@ -1161,8 +1163,8 @@ added: v0.4.0 * `name` {string} * `value` {string | string[]} -Sets a single header value for implicit headers. If this header already exists -in the to-be-sent headers, its value will be replaced. Use an array of strings +Sets a single header value for implicit headers. If this header already exists +in the to-be-sent headers, its value will be replaced. Use an array of strings here to send multiple headers with the same name. Example: @@ -1202,12 +1204,12 @@ added: v0.9.12 * `msecs` {number} * `callback` {Function} -Sets the Socket's timeout value to `msecs`. If a callback is +Sets the Socket's timeout value to `msecs`. If a callback is provided, then it is added as a listener on the `'timeout'` event on the response object. If no `'timeout'` listener is added to the request, the response, or -the server, then sockets are destroyed when they time out. If a handler is +the server, then sockets are destroyed when they time out. If a handler is assigned to the request, the response, or the server's `'timeout'` events, timed out sockets must be handled explicitly. @@ -1487,8 +1489,8 @@ added: v0.11.6 The raw request/response headers list exactly as they were received. -Note that the keys and values are in the same list. It is *not* a -list of tuples. So, the even-numbered offsets are key values, and the +Note that the keys and values are in the same list. It is *not* a +list of tuples. So, the even-numbered offsets are key values, and the odd-numbered offsets are the associated values. Header names are not lowercased, and duplicates are not merged. @@ -1515,7 +1517,7 @@ added: v0.11.6 * {Array} The raw request/response trailer keys and values exactly as they were -received. Only populated at the `'end'` event. +received. Only populated at the `'end'` event. ### message.setTimeout(msecs, callback) @@ -19,13 +19,16 @@ compatibility with the existing [HTTP/1][] module API. However, the [Compatibility API][] is. The `http2` Core API is much more symmetric between client and server than the -`http` API. For instance, most events, like `error` and `socketError`, can be -emitted either by client-side code or server-side code. +`http` API. For instance, most events, like `error`, `connect` and `stream`, can +be emitted either by client-side code or server-side code. ### Server-side example -The following illustrates a simple, plain-text HTTP/2 server using the -Core API: +The following illustrates a simple HTTP/2 server using the Core API. 
+Since there are no browsers known that support +[unencrypted HTTP/2][HTTP/2 Unencrypted], the use of +[`http2.createSecureServer()`][] is necessary when communicating +with browser clients. ```js const http2 = require('http2'); @@ -36,7 +39,6 @@ const server = http2.createSecureServer({ cert: fs.readFileSync('localhost-cert.pem') }); server.on('error', (err) => console.error(err)); -server.on('socketError', (err) => console.error(err)); server.on('stream', (stream, headers) => { // stream is a Duplex @@ -67,7 +69,6 @@ const fs = require('fs'); const client = http2.connect('https://localhost:8443', { ca: fs.readFileSync('localhost-cert.pem') }); -client.on('socketError', (err) => console.error(err)); client.on('error', (err) => console.error(err)); const req = client.request({ ':path': '/' }); @@ -83,7 +84,7 @@ let data = ''; req.on('data', (chunk) => { data += chunk; }); req.on('end', () => { console.log(`\n${data}`); - client.destroy(); + client.close(); }); req.end(); ``` @@ -127,7 +128,7 @@ solely on the API of the `Http2Session`. added: v8.4.0 --> -The `'close'` event is emitted once the `Http2Session` has been terminated. +The `'close'` event is emitted once the `Http2Session` has been destroyed. #### Event: 'connect' -The `'localSettings'` event is emitted when an acknowledgement SETTINGS frame +The `'localSettings'` event is emitted when an acknowledgment SETTINGS frame has been received. When invoked, the handler function will receive a copy of the local settings. @@ -229,24 +230,18 @@ added: v8.4.0 The `'stream'` event is emitted when a new `Http2Stream` is created. When invoked, the handler function will receive a reference to the `Http2Stream` -object, a [Headers Object][], and numeric flags associated with the creation -of the stream. +object, a [HTTP/2 Headers Object][], and numeric flags associated with the +creation of the stream. ```js const http2 = require('http2'); -const { - HTTP2_HEADER_METHOD, - HTTP2_HEADER_PATH, - HTTP2_HEADER_STATUS, - HTTP2_HEADER_CONTENT_TYPE -} = http2.constants; session.on('stream', (stream, headers, flags) => { - const method = headers[HTTP2_HEADER_METHOD]; - const path = headers[HTTP2_HEADER_PATH]; + const method = headers[':method']; + const path = headers[':path']; // ... stream.respond({ - [HTTP2_HEADER_STATUS]: 200, - [HTTP2_HEADER_CONTENT_TYPE]: 'text/plain' + ':status': 200, + 'content-type': 'text/plain' }); stream.write('hello '); stream.end('world'); @@ -261,7 +256,7 @@ and would instead register a handler for the `'stream'` event emitted by the ```js const http2 = require('http2'); -// Create a plain-text HTTP/2 server +// Create an unencrypted HTTP/2 server const server = http2.createServer(); server.on('stream', (stream, headers) => { @@ -275,19 +270,6 @@ server.on('stream', (stream, headers) => { server.listen(80); ``` -#### Event: 'socketError' - - -The `'socketError'` event is emitted when an `'error'` is emitted on the -`Socket` instance bound to the `Http2Session`. If this event is not handled, -the `'error'` event will be re-emitted on the `Socket`. - -For `ServerHttp2Session` instances, a `'socketError'` event listener is always -registered that will, by default, forward the event on to the owning -`Http2Server` instance if no additional handlers are registered. 
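A minimal sketch of the `'localSettings'` acknowledgment described above, assuming a plain server as in the earlier examples (the port and the chosen setting are illustrative): values proposed with `http2session.settings()` only become effective once `'localSettings'` is emitted.

```js
const http2 = require('http2');

const server = http2.createServer();
server.on('session', (session) => {
  // Propose new local settings; they remain pending until the peer
  // acknowledges them with a SETTINGS acknowledgment frame.
  session.settings({ enablePush: false });

  session.on('localSettings', (settings) => {
    // The acknowledgment has arrived; the settings are now in effect.
    console.log('enablePush is now', settings.enablePush);
  });
});
server.listen(80);
```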
- #### Event: 'timeout' + +* Value: {string|undefined} + +Value will be `undefined` if the `Http2Session` is not yet connected to a +socket, `h2c` if the `Http2Session` is not connected to a `TLSSocket`, or +will return the value of the connected `TLSSocket`'s own `alpnProtocol` +property. + +#### http2session.close([callback]) + + +* `callback` {Function} + +Gracefully closes the `Http2Session`, allowing any existing streams to +complete on their own and preventing new `Http2Stream` instances from being +created. Once closed, `http2session.destroy()` *might* be called if there +are no open `Http2Stream` instances. + +If specified, the `callback` function is registered as a handler for the +`'close'` event. + +#### http2session.closed + + +* Value: {boolean} + +Will be `true` if this `Http2Session` instance has been closed, otherwise +`false`. + +#### http2session.connecting + + +* {boolean} + +Will be `true` if this `Http2Session` instance is still connecting, will be set +to `false` before emitting `connect` event and/or calling the `http2.connect` +callback. + +#### http2session.destroy([error,][code]) +* `error` {Error} An `Error` object if the `Http2Session` is being destroyed + due to an error. +* `code` {number} The HTTP/2 error code to send in the final `GOAWAY` frame. + If unspecified, and `error` is not undefined, the default is `INTERNAL_ERROR`, + otherwise defaults to `NO_ERROR`. * Returns: {undefined} Immediately terminates the `Http2Session` and the associated `net.Socket` or `tls.TLSSocket`. +Once destroyed, the `Http2Session` will emit the `'close'` event. If `error` +is not undefined, an `'error'` event will be emitted immediately after the +`'close'` event. + +If there are any remaining open `Http2Streams` associated with the +`Http2Session`, those will also be destroyed. + #### http2session.destroyed + +* Value: {boolean|undefined} + +Value is `undefined` if the `Http2Session` session socket has not yet been +connected, `true` if the `Http2Session` is connected with a `TLSSocket`, +and `false` if the `Http2Session` is connected to any other kind of socket +or stream. + +#### http2session.goaway([code, [lastStreamID, [opaqueData]]]) + + +* `code` {number} An HTTP/2 error code +* `lastStreamID` {number} The numeric ID of the last processed `Http2Stream` +* `opaqueData` {Buffer|TypedArray|DataView} A `TypedArray` or `DataView` + instance containing additional data to be carried within the GOAWAY frame. + +Transmits a `GOAWAY` frame to the connected peer *without* shutting down the +`Http2Session`. + #### http2session.localSettings -* Value: {[Settings Object][]} +* Value: {HTTP/2 Settings Object} A prototype-less object describing the current local settings of this `Http2Session`. The local settings are local to *this* `Http2Session` instance. +#### http2session.originSet + + +* Value: {string[]|undefined} + +If the `Http2Session` is connected to a `TLSSocket`, the `originSet` property +will return an Array of origins for which the `Http2Session` may be +considered authoritative. + #### http2session.pendingSettingsAck -* Value: {[Settings Object][]} - -A prototype-less object describing the current remote settings of this -`Http2Session`. The remote settings are set by the *connected* HTTP/2 peer. +Calls [`ref()`][`net.Socket.prototype.ref`] on this `Http2Session` +instance's underlying [`net.Socket`]. 
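A minimal sketch of the graceful teardown described above, assuming a plain `http2.createServer()` server; the `sessions` set and `shutdown()` helper are illustrative, not part of the API. `http2session.close()` lets existing streams finish, while `http2session.destroy()` ends the session and its socket immediately.

```js
const http2 = require('http2');

const server = http2.createServer();
const sessions = new Set();

server.on('session', (session) => {
  sessions.add(session);
  session.on('close', () => sessions.delete(session));
});
server.listen(80);

// On shutdown, stop accepting new connections and close each session
// gracefully; streams that are already open may still complete.
function shutdown() {
  server.close();
  for (const session of sessions) {
    session.close(() => console.log('session closed'));
    // session.destroy() would instead tear the session down immediately.
  }
}
process.on('SIGTERM', shutdown);
```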
-#### http2session.request(headers[, options]) +#### http2session.remoteSettings -* `headers` {[Headers Object][]} -* `options` {Object} - * `endStream` {boolean} `true` if the `Http2Stream` *writable* side should - be closed initially, such as when sending a `GET` request that should not - expect a payload body. - * `exclusive` {boolean} When `true` and `parent` identifies a parent Stream, - the created stream is made the sole direct dependency of the parent, with - all other existing dependents made a dependent of the newly created stream. - **Default:** `false` - * `parent` {number} Specifies the numeric identifier of a stream the newly - created stream is dependent on. - * `weight` {number} Specifies the relative dependency of a stream in relation - to other streams with the same `parent`. The value is a number between `1` - and `256` (inclusive). - * `getTrailers` {Function} Callback function invoked to collect trailer - headers. - -* Returns: {ClientHttp2Stream} - -For HTTP/2 Client `Http2Session` instances only, the `http2session.request()` -creates and returns an `Http2Stream` instance that can be used to send an -HTTP/2 request to the connected server. - -This method is only available if `http2session.type` is equal to -`http2.constants.NGHTTP2_SESSION_CLIENT`. - -```js -const http2 = require('http2'); -const clientSession = http2.connect('https://localhost:1234'); -const { - HTTP2_HEADER_PATH, - HTTP2_HEADER_STATUS -} = http2.constants; - -const req = clientSession.request({ [HTTP2_HEADER_PATH]: '/' }); -req.on('response', (headers) => { - console.log(headers[HTTP2_HEADER_STATUS]); - req.on('data', (chunk) => { /** .. **/ }); - req.on('end', () => { /** .. **/ }); -}); -``` +* Value: {HTTP/2 Settings Object} -When set, the `options.getTrailers()` function is called immediately after -queuing the last chunk of payload data to be sent. The callback is passed a -single object (with a `null` prototype) that the listener may used to specify -the trailing header fields to send to the peer. - -*Note*: The HTTP/1 specification forbids trailers from containing HTTP/2 -"pseudo-header" fields (e.g. `':method'`, `':path'`, etc). An `'error'` event -will be emitted if the `getTrailers` callback attempts to set such header -fields. +A prototype-less object describing the current remote settings of this +`Http2Session`. The remote settings are set by the *connected* HTTP/2 peer. #### http2session.setTimeout(msecs, callback) - -* `options` {Object} - * `graceful` {boolean} `true` to attempt a polite shutdown of the - `Http2Session`. - * `errorCode` {number} The HTTP/2 [error code][] to return. Note that this is - *not* the same thing as an HTTP Response Status Code. **Default:** `0x00` - (No Error). - * `lastStreamID` {number} The Stream ID of the last successfully processed - `Http2Stream` on this `Http2Session`. - * `opaqueData` {Buffer|Uint8Array} A `Buffer` or `Uint8Array` instance - containing arbitrary additional data to send to the peer upon disconnection. - This is used, typically, to provide additional data for debugging failures, - if necessary. -* `callback` {Function} A callback that is invoked after the session shutdown - has been completed. -* Returns: {undefined} - -Attempts to shutdown this `Http2Session` using HTTP/2 defined procedures. -If specified, the given `callback` function will be invoked once the shutdown -process has completed. - -Note that calling `http2session.shutdown()` does *not* destroy the session or -tear down the `Socket` connection. 
It merely prompts both sessions to begin -preparing to cease activity. - -During a "graceful" shutdown, the session will first send a `GOAWAY` frame to -the connected peer identifying the last processed stream as 232-1. -Then, on the next tick of the event loop, a second `GOAWAY` frame identifying -the most recently processed stream identifier is sent. This process allows the -remote peer to begin preparing for the connection to be terminated. - -```js -session.shutdown({ - graceful: true, - opaqueData: Buffer.from('add some debugging data here') -}, () => session.destroy()); -``` - #### http2session.socket -* `settings` {[Settings Object][]} -* Returns {undefined} +* `settings` {HTTP/2 Settings Object} Updates the current local settings for this `Http2Session` and sends a new `SETTINGS` frame to the connected HTTP/2 peer. @@ -568,8 +554,8 @@ while the session is waiting for the remote peer to acknowledge the new settings. *Note*: The new settings will not become effective until the SETTINGS -acknowledgement is received and the `'localSettings'` event is emitted. It -is possible to send multiple SETTINGS frames while acknowledgement is still +acknowledgment is received and the `'localSettings'` event is emitted. It +is possible to send multiple SETTINGS frames while acknowledgment is still pending. #### http2session.type @@ -584,12 +570,184 @@ The `http2session.type` will be equal to server, and `http2.constants.NGHTTP2_SESSION_CLIENT` if the instance is a client. +#### http2session.unref() + + +Calls [`unref()`][`net.Socket.prototype.unref`] on this `Http2Session` +instance's underlying [`net.Socket`]. + +### Class: ServerHttp2Session + + +#### serverhttp2session.altsvc(alt, originOrStream) + + +* `alt` {string} A description of the alternative service configuration as + defined by [RFC 7838][]. +* `originOrStream` {number|string|URL|Object} Either a URL string specifying + the origin (or an Object with an `origin` property) or the numeric identifier + of an active `Http2Stream` as given by the `http2stream.id` property. + +Submits an `ALTSVC` frame (as defined by [RFC 7838][]) to the connected client. + +```js +const http2 = require('http2'); + +const server = http2.createServer(); +server.on('session', (session) => { + // Set altsvc for origin https://example.org:80 + session.altsvc('h2=":8000"', 'https://example.org:80'); +}); + +server.on('stream', (stream) => { + // Set altsvc for a specific stream + stream.session.altsvc('h2=":8000"', stream.id); +}); +``` + +Sending an `ALTSVC` frame with a specific stream ID indicates that the alternate +service is associated with the origin of the given `Http2Stream`. + +The `alt` and origin string *must* contain only ASCII bytes and are +strictly interpreted as a sequence of ASCII bytes. The special value `'clear'` +may be passed to clear any previously set alternative service for a given +domain. + +When a string is passed for the `originOrStream` argument, it will be parsed as +a URL and the origin will be derived. For instance, the origin for the +HTTP URL `'https://example.org/foo/bar'` is the ASCII string +`'https://example.org'`. An error will be thrown if either the given string +cannot be parsed as a URL or if a valid origin cannot be derived. + +A `URL` object, or any object with an `origin` property, may be passed as +`originOrStream`, in which case the value of the `origin` property will be +used. The value of the `origin` property *must* be a properly serialized +ASCII origin. 
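A brief sketch of the `URL`-object form described above, assuming the `session` from the earlier `'session'` handler; the host, port, and path are illustrative:

```js
const { URL } = require('url');

// The origin derived from this URL is 'https://example.org', so this is
// equivalent to session.altsvc('h2=":8000"', 'https://example.org').
session.altsvc('h2=":8000"', new URL('https://example.org/foo/bar'));
```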
+ +#### Specifying alternative services + +The format of the `alt` parameter is strictly defined by [RFC 7838][] as an +ASCII string containing a comma-delimited list of "alternative" protocols +associated with a specific host and port. + +For example, the value `'h2="example.org:81"'` indicates that the HTTP/2 +protocol is available on the host `'example.org'` on TCP/IP port 81. The +host and port *must* be contained within the quote (`"`) characters. + +Multiple alternatives may be specified, for instance: `'h2="example.org:81", +h2=":82"'` + +The protocol identifier (`'h2'` in the examples) may be any valid +[ALPN Protocol ID][]. + +The syntax of these values is not validated by the Node.js implementation and +are passed through as provided by the user or received from the peer. + +### Class: ClientHttp2Session + + +#### Event: 'altsvc' + + +* `alt`: {string} +* `origin`: {string} +* `streamId`: {number} + +The `'altsvc'` event is emitted whenever an `ALTSVC` frame is received by +the client. The event is emitted with the `ALTSVC` value, origin, and stream +ID. If no `origin` is provided in the `ALTSVC` frame, `origin` will +be an empty string. + +```js +const http2 = require('http2'); +const client = http2.connect('https://example.org'); + +client.on('altsvc', (alt, origin, streamId) => { + console.log(alt); + console.log(origin); + console.log(streamId); +}); +``` + +#### clienthttp2session.request(headers[, options]) + + +* `headers` {HTTP/2 Headers Object} +* `options` {Object} + * `endStream` {boolean} `true` if the `Http2Stream` *writable* side should + be closed initially, such as when sending a `GET` request that should not + expect a payload body. + * `exclusive` {boolean} When `true` and `parent` identifies a parent Stream, + the created stream is made the sole direct dependency of the parent, with + all other existing dependents made a dependent of the newly created stream. + **Default:** `false` + * `parent` {number} Specifies the numeric identifier of a stream the newly + created stream is dependent on. + * `weight` {number} Specifies the relative dependency of a stream in relation + to other streams with the same `parent`. The value is a number between `1` + and `256` (inclusive). + * `getTrailers` {Function} Callback function invoked to collect trailer + headers. + +* Returns: {ClientHttp2Stream} + +For HTTP/2 Client `Http2Session` instances only, the `http2session.request()` +creates and returns an `Http2Stream` instance that can be used to send an +HTTP/2 request to the connected server. + +This method is only available if `http2session.type` is equal to +`http2.constants.NGHTTP2_SESSION_CLIENT`. + +```js +const http2 = require('http2'); +const clientSession = http2.connect('https://localhost:1234'); +const { + HTTP2_HEADER_PATH, + HTTP2_HEADER_STATUS +} = http2.constants; + +const req = clientSession.request({ [HTTP2_HEADER_PATH]: '/' }); +req.on('response', (headers) => { + console.log(headers[HTTP2_HEADER_STATUS]); + req.on('data', (chunk) => { /** .. **/ }); + req.on('end', () => { /** .. **/ }); +}); +``` + +When set, the `options.getTrailers()` function is called immediately after +queuing the last chunk of payload data to be sent. The callback is passed a +single object (with a `null` prototype) that the listener may use to specify +the trailing header fields to send to the peer. + +*Note*: The HTTP/1 specification forbids trailers from containing HTTP/2 +pseudo-header fields (e.g. `':method'`, `':path'`, etc). 
An `'error'` event +will be emitted if the `getTrailers` callback attempts to set such header +fields. + +The `:method` and `:path` pseudo-headers are not specified within `headers`, +they respectively default to: + +* `:method` = `'GET'` +* `:path` = `/` + ### Class: Http2Stream -* Extends: {Duplex} +* Extends: {stream.Duplex} Each instance of the `Http2Stream` class represents a bidirectional HTTP/2 communications stream over an `Http2Session` instance. Any single `Http2Session` @@ -605,7 +763,7 @@ On the client, `Http2Stream` instances are created and returned when either the `'push'` event. *Note*: The `Http2Stream` class is a base for the [`ServerHttp2Stream`][] and -[`ClientHttp2Stream`][] classes, each of which are used specifically by either +[`ClientHttp2Stream`][] classes, each of which is used specifically by either the Server or Client side, respectively. All `Http2Stream` instances are [`Duplex`][] streams. The `Writable` side of the @@ -629,7 +787,7 @@ On the client side, instances of [`ClientHttp2Stream`][] are created when the `http2session.request()` may not be immediately ready for use if the parent `Http2Session` has not yet been fully established. In such cases, operations called on the `Http2Stream` will be buffered until the `'ready'` event is -emitted. User code should rarely, if ever, have need to handle the `'ready'` +emitted. User code should rarely, if ever, need to handle the `'ready'` event directly. The ready status of an `Http2Stream` can be determined by checking the value of `http2stream.id`. If the value is `undefined`, the stream is not yet ready for use. @@ -639,7 +797,7 @@ is not yet ready for use. All [`Http2Stream`][] instances are destroyed either when: * An `RST_STREAM` frame for the stream is received by the connected peer. -* The `http2stream.rstStream()` methods is called. +* The `http2stream.close()` method is called. * The `http2stream.destroy()` or `http2session.destroy()` methods are called. When an `Http2Stream` instance is destroyed, an attempt will be made to send an @@ -704,7 +862,7 @@ added: v8.4.0 --> The `'timeout'` event is emitted after no activity is received for this -`'Http2Stream'` within the number of millseconds set using +`'Http2Stream'` within the number of milliseconds set using `http2stream.setTimeout()`. #### Event: 'trailers' @@ -714,7 +872,7 @@ added: v8.4.0 The `'trailers'` event is emitted when a block of headers associated with trailing header fields is received. The listener callback is passed the -[Headers Object][] and flags associated with the headers. +[HTTP/2 Headers Object][] and flags associated with the headers. ```js stream.on('trailers', (headers, flags) => { @@ -732,6 +890,29 @@ added: v8.4.0 Set to `true` if the `Http2Stream` instance was aborted abnormally. When set, the `'aborted'` event will have been emitted. +#### http2stream.close(code[, callback]) + + +* code {number} Unsigned 32-bit integer identifying the error code. **Default:** + `http2.constants.NGHTTP2_NO_ERROR` (`0x00`) +* `callback` {Function} An optional function registered to listen for the + `'close'` event. +* Returns: {undefined} + +Closes the `Http2Stream` instance by sending an `RST_STREAM` frame to the +connected HTTP/2 peer. + +#### http2stream.closed + + +* Value: {boolean} + +Set to `true` if the `Http2Stream` instance has been closed. + #### http2stream.destroyed + +* Value: {boolean} + +Set to `true` if the `Http2Stream` instance has not yet been assigned a +numeric stream identifier. 
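A minimal sketch of `http2stream.close()` as described above, assuming a plain server; the method check used to decide when to refuse the stream is purely illustrative.

```js
const http2 = require('http2');
const { NGHTTP2_REFUSED_STREAM } = http2.constants;

const server = http2.createServer();
server.on('stream', (stream, headers) => {
  if (headers[':method'] !== 'GET') {
    // Send an RST_STREAM frame to the peer; the optional callback is
    // registered as a listener for the stream's 'close' event.
    stream.close(NGHTTP2_REFUSED_STREAM, () => console.log('stream closed'));
    return;
  }
  stream.respond({ ':status': 200 });
  stream.end('ok');
});
server.listen(80);
```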
+ #### http2stream.priority(options) - -* code {number} Unsigned 32-bit integer identifying the error code. **Default:** - `http2.constant.NGHTTP2_NO_ERROR` (`0x00`) -* Returns: {undefined} - -Sends an `RST_STREAM` frame to the connected HTTP/2 peer, causing this -`Http2Stream` to be closed on both sides using [error code][] `code`. - -#### http2stream.rstWithNoError() - - -* Returns: {undefined} - -Shortcut for `http2stream.rstStream()` using error code `0x00` (No Error). - -#### http2stream.rstWithProtocolError() +#### http2stream.sentHeaders -* Returns: {undefined} - -Shortcut for `http2stream.rstStream()` using error code `0x01` (Protocol Error). - -#### http2stream.rstWithCancel() - - -* Returns: {undefined} +* Value: {HTTP/2 Headers Object} -Shortcut for `http2stream.rstStream()` using error code `0x08` (Cancel). +An object containing the outbound headers sent for this `Http2Stream`. -#### http2stream.rstWithRefuse() +#### http2stream.sentInfoHeaders -* Returns: {undefined} +* Value: {HTTP/2 Headers Object[]} -Shortcut for `http2stream.rstStream()` using error code `0x07` (Refused Stream). +An array of objects containing the outbound informational (additional) headers +sent for this `Http2Stream`. -#### http2stream.rstWithInternalError() +#### http2stream.sentTrailers -* Returns: {undefined} +* Value: {HTTP/2 Headers Object} -Shortcut for `http2stream.rstStream()` using error code `0x02` (Internal Error). +An object containing the outbound trailers sent for this this `HttpStream`. #### http2stream.session The `'headers'` event is emitted when an additional block of headers is received -for a stream, such as when a block of `1xx` informational headers are received. -The listener callback is passed the [Headers Object][] and flags associated with -the headers. +for a stream, such as when a block of `1xx` informational headers is received. +The listener callback is passed the [HTTP/2 Headers Object][] and flags +associated with the headers. ```js stream.on('headers', (headers, flags) => { @@ -926,8 +1088,8 @@ added: v8.4.0 --> The `'push'` event is emitted when response headers for a Server Push stream -are received. The listener callback is passed the [Headers Object][] and flags -associated with the headers. +are received. The listener callback is passed the [HTTP/2 Headers Object][] and +flags associated with the headers. ```js stream.on('push', (headers, flags) => { @@ -943,7 +1105,7 @@ added: v8.4.0 The `'response'` event is emitted when a response `HEADERS` frame has been received for this stream from the connected HTTP/2 server. The listener is invoked with two arguments: an Object containing the received -[Headers Object][], and flags associated with the headers. +[HTTP/2 Headers Object][], and flags associated with the headers. For example: @@ -973,8 +1135,7 @@ provide additional methods such as `http2stream.pushStream()` and added: v8.4.0 --> -* `headers` {[Headers Object][]} -* Returns: {undefined} +* `headers` {HTTP/2 Headers Object} Sends an additional informational `HEADERS` frame to the connected HTTP/2 peer. @@ -1004,7 +1165,7 @@ accepts push streams, `false` otherwise. Settings are the same for every added: v8.4.0 --> -* `headers` {[Headers Object][]} +* `headers` {HTTP/2 Headers Object} * `options` {Object} * `exclusive` {boolean} When `true` and `parent` identifies a parent Stream, the created stream is made the sole direct dependency of the parent, with @@ -1014,17 +1175,23 @@ added: v8.4.0 created stream is dependent on. 
* `callback` {Function} Callback that is called once the push stream has been initiated. + * `err` {Error} + * `pushStream` {ServerHttp2Stream} The returned pushStream object. + * `headers` {HTTP/2 Headers Object} Headers object the pushStream was + initiated with. * Returns: {undefined} Initiates a push stream. The callback is invoked with the new `Http2Stream` -instance created for the push stream. +instance created for the push stream passed as the second argument, or an +`Error` passed as the first argument. ```js const http2 = require('http2'); const server = http2.createServer(); server.on('stream', (stream) => { stream.respond({ ':status': 200 }); - stream.pushStream({ ':path': '/' }, (pushStream) => { + stream.pushStream({ ':path': '/' }, (err, pushStream, headers) => { + if (err) throw err; pushStream.respond({ ':status': 200 }); pushStream.end('some pushed data'); }); @@ -1041,11 +1208,11 @@ a `weight` value to `http2stream.priority` with the `silent` option set to added: v8.4.0 --> -* `headers` {[Headers Object][]} +* `headers` {HTTP/2 Headers Object} * `options` {Object} * `endStream` {boolean} Set to `true` to indicate that the response will not include payload data. - * `getTrailers` {function} Callback function invoked to collect trailer + * `getTrailers` {Function} Callback function invoked to collect trailer headers. * Returns: {undefined} @@ -1060,7 +1227,7 @@ server.on('stream', (stream) => { When set, the `options.getTrailers()` function is called immediately after queuing the last chunk of payload data to be sent. The callback is passed a -single object (with a `null` prototype) that the listener may used to specify +single object (with a `null` prototype) that the listener may use to specify the trailing header fields to send to the peer. ```js @@ -1077,7 +1244,7 @@ server.on('stream', (stream) => { ``` *Note*: The HTTP/1 specification forbids trailers from containing HTTP/2 -"pseudo-header" fields (e.g. `':status'`, `':path'`, etc). An `'error'` event +pseudo-header fields (e.g. `':status'`, `':path'`, etc). An `'error'` event will be emitted if the `getTrailers` callback attempts to set such header fields. @@ -1087,7 +1254,7 @@ added: v8.4.0 --> * `fd` {number} A readable file descriptor. -* `headers` {[Headers Object][]} +* `headers` {HTTP/2 Headers Object} * `options` {Object} * `statCheck` {Function} * `getTrailers` {Function} Callback function invoked to collect trailer @@ -1107,10 +1274,10 @@ automatically. const http2 = require('http2'); const fs = require('fs'); -const fd = fs.openSync('/some/file', 'r'); - const server = http2.createServer(); server.on('stream', (stream) => { + const fd = fs.openSync('/some/file', 'r'); + const stat = fs.fstatSync(fd); const headers = { 'content-length': stat.size, @@ -1118,8 +1285,8 @@ server.on('stream', (stream) => { 'content-type': 'text/plain' }; stream.respondWithFD(fd, headers); + stream.on('close', () => fs.closeSync(fd)); }); -server.on('close', () => fs.closeSync(fd)); ``` The optional `options.statCheck` function may be specified to give user code @@ -1132,19 +1299,25 @@ The `offset` and `length` options may be used to limit the response to a specific range subset. This can be used, for instance, to support HTTP Range requests. +The file descriptor is not closed when the stream is closed, so it will need +to be closed manually once it is no longer needed. +Note that using the same file descriptor concurrently for multiple streams +is not supported and may result in data loss. 
Re-using a file descriptor +after a stream has finished is supported. + When set, the `options.getTrailers()` function is called immediately after queuing the last chunk of payload data to be sent. The callback is passed a -single object (with a `null` prototype) that the listener may used to specify +single object (with a `null` prototype) that the listener may use to specify the trailing header fields to send to the peer. ```js const http2 = require('http2'); const fs = require('fs'); -const fd = fs.openSync('/some/file', 'r'); - const server = http2.createServer(); server.on('stream', (stream) => { + const fd = fs.openSync('/some/file', 'r'); + const stat = fs.fstatSync(fd); const headers = { 'content-length': stat.size, @@ -1156,12 +1329,13 @@ server.on('stream', (stream) => { trailers['ABC'] = 'some value to send'; } }); + + stream.on('close', () => fs.closeSync(fd)); }); -server.on('close', () => fs.closeSync(fd)); ``` *Note*: The HTTP/1 specification forbids trailers from containing HTTP/2 -"pseudo-header" fields (e.g. `':status'`, `':path'`, etc). An `'error'` event +pseudo-header fields (e.g. `':status'`, `':path'`, etc). An `'error'` event will be emitted if the `getTrailers` callback attempts to set such header fields. @@ -1171,7 +1345,7 @@ added: v8.4.0 --> * `path` {string|Buffer|URL} -* `headers` {[Headers Object][]} +* `headers` {HTTP/2 Headers Object} * `options` {Object} * `statCheck` {Function} * `onError` {Function} Callback function invoked in the case of an @@ -1193,7 +1367,7 @@ of the given file: If an error occurs while attempting to read the file data, the `Http2Stream` will be closed using an `RST_STREAM` frame using the standard `INTERNAL_ERROR` -code. If the `onError` callback is defined it will be called, otherwise +code. If the `onError` callback is defined, then it will be called. Otherwise the stream will be destroyed. Example using a file path: @@ -1253,7 +1427,7 @@ default behavior is to destroy the stream. When set, the `options.getTrailers()` function is called immediately after queuing the last chunk of payload data to be sent. The callback is passed a -single object (with a `null` prototype) that the listener may used to specify +single object (with a `null` prototype) that the listener may use to specify the trailing header fields to send to the peer. ```js @@ -1270,7 +1444,7 @@ server.on('stream', (stream) => { ``` *Note*: The HTTP/1 specification forbids trailers from containing HTTP/2 -"pseudo-header" fields (e.g. `':status'`, `':path'`, etc). An `'error'` event +pseudo-header fields (e.g. `':status'`, `':path'`, etc). An `'error'` event will be emitted if the `getTrailers` callback attempts to set such header fields. @@ -1281,18 +1455,50 @@ added: v8.4.0 * Extends: {net.Server} -In `Http2Server`, there is no `'clientError'` event as there is in -HTTP1. However, there are `'socketError'`, `'sessionError'`, and -`'streamError'`, for error happened on the socket, session, or stream -respectively. +In `Http2Server`, there are no `'clientError'` events as there are in +HTTP1. However, there are `'sessionError'`, and `'streamError'` events for +errors emitted on the socket, or from `Http2Session` or `Http2Stream` instances. + +#### Event: 'checkContinue' + + +* `request` {http2.Http2ServerRequest} +* `response` {http2.Http2ServerResponse} + +If a [`'request'`][] listener is registered or [`http2.createServer()`][] is +supplied a callback function, the `'checkContinue'` event is emitted each time +a request with an HTTP `Expect: 100-continue` is received. 
If this event is +not listened for, the server will automatically respond with a status +`100 Continue` as appropriate. + +Handling this event involves calling [`response.writeContinue()`][] if the client +should continue to send the request body, or generating an appropriate HTTP +response (e.g. 400 Bad Request) if the client should not continue to send the +request body. + +Note that when this event is emitted and handled, the [`'request'`][] event will +not be emitted. -#### Event: 'socketError' +#### Event: 'request' -The `'socketError'` event is emitted when a `'socketError'` event is emitted by -an `Http2Session` associated with the server. +* `request` {http2.Http2ServerRequest} +* `response` {http2.Http2ServerResponse} + +Emitted each time there is a request. Note that there may be multiple requests +per session. See the [Compatibility API][]. + +#### Event: 'session' + + +The `'session'` event is emitted when a new `Http2Session` is created by the +`Http2Server`. #### Event: 'sessionError' The `'sessionError'` event is emitted when an `'error'` event is emitted by -an `Http2Session` object. If no listener is registered for this event, an -`'error'` event is emitted. +an `Http2Session` object associated with the `Http2Server`. #### Event: 'streamError' -* `socket` {http2.ServerHttp2Stream} - -If an `ServerHttp2Stream` emits an `'error'` event, it will be forwarded here. +If a `ServerHttp2Stream` emits an `'error'` event, it will be forwarded here. The stream will already be destroyed when this event is triggered. #### Event: 'stream' @@ -1344,24 +1547,32 @@ server.on('stream', (stream, headers, flags) => { }); ``` -#### Event: 'request' +#### Event: 'timeout' -* `request` {http2.Http2ServerRequest} -* `response` {http2.Http2ServerResponse} +The `'timeout'` event is emitted when there is no activity on the Server for +a given number of milliseconds set using `http2server.setTimeout()`. -Emitted each time there is a request. Note that there may be multiple requests -per session. See the [Compatibility API][]. +#### server.close([callback]) + +- `callback` {Function} -#### Event: 'timeout' +Stops the server from accepting new connections. See [`net.Server.close()`][]. + +Note that this is not analogous to restricting new requests since HTTP/2 +connections are persistent. To achieve a similar graceful shutdown behavior, +consider also using [`http2session.close()`] on active sessions. + +### Class: Http2SecureServer -The `'timeout'` event is emitted when there is no activity on the Server for -a given number of milliseconds set using `http2server.setTimeout()`. +* Extends: {tls.Server} #### Event: 'checkContinue' -* Extends: {tls.Server} - -#### Event: 'sessionError' - +* `request` {http2.Http2ServerRequest} +* `response` {http2.Http2ServerResponse} -The `'sessionError'` event is emitted when an `'error'` event is emitted by -an `Http2Session` object. If no listener is registered for this event, an -`'error'` event is emitted on the `Http2Session` instance instead. +Emitted each time there is a request. Note that there may be multiple requests +per session. See the [Compatibility API][]. -#### Event: 'socketError' +#### Event: 'session' -The `'socketError'` event is emitted when a `'socketError'` event is emitted by -an `Http2Session` associated with the server. +The `'session'` event is emitted when a new `Http2Session` is created by the +`Http2SecureServer`. 
-#### Event: 'unknownProtocol' +#### Event: 'sessionError' -The `'unknownProtocol'` event is emitted when a connecting client fails to -negotiate an allowed protocol (i.e. HTTP/2 or HTTP/1.1). The event handler -receives the socket for handling. If no listener is registered for this event, -the connection is terminated. See the [Compatibility API][]. +The `'sessionError'` event is emitted when an `'error'` event is emitted by +an `Http2Session` object associated with the `Http2SecureServer`. #### Event: 'stream' -* `request` {http2.Http2ServerRequest} -* `response` {http2.Http2ServerResponse} - -Emitted each time there is a request. Note that there may be multiple requests -per session. See the [Compatibility API][]. +The `'timeout'` event is emitted when there is no activity on the Server for +a given number of milliseconds set using `http2secureServer.setTimeout()`. -#### Event: 'timeout' +#### Event: 'unknownProtocol' -#### Event: 'checkContinue' +The `'unknownProtocol'` event is emitted when a connecting client fails to +negotiate an allowed protocol (i.e. HTTP/2 or HTTP/1.1). The event handler +receives the socket for handling. If no listener is registered for this event, +the connection is terminated. See the [Compatibility API][]. + +#### server.close([callback]) +- `callback` {Function} -* `request` {http2.Http2ServerRequest} -* `response` {http2.Http2ServerResponse} - -If a [`'request'`][] listener is registered or [`http2.createSecureServer()`][] -is supplied a callback function, the `'checkContinue'` event is emitted each -time a request with an HTTP `Expect: 100-continue` is received. If this event -is not listened for, the server will automatically respond with a status -`100 Continue` as appropriate. +Stops the server from accepting new connections. See [`tls.Server.close()`][]. -Handling this event involves calling [`response.writeContinue()`][] if the client -should continue to send the request body, or generating an appropriate HTTP -response (e.g. 400 Bad Request) if the client should not continue to send the -request body. - -Note that when this event is emitted and handled, the [`'request'`][] event will -not be emitted. +Note that this is not analogous to restricting new requests since HTTP/2 +connections are persistent. To achieve a similar graceful shutdown behavior, +consider also using [`http2session.close()`] on active sessions. ### http2.createServer(options[, onRequestHandler]) -* Returns: {[Settings Object][]} +* Returns: {HTTP/2 Settings Object} Returns an object containing the default settings for an `Http2Session` instance. This method returns a new object instance every time it is called @@ -1748,7 +2000,7 @@ so instances returned may be safely modified for use. added: v8.4.0 --> -* `settings` {[Settings Object][]} +* `settings` {HTTP/2 Settings Object} * Returns: {Buffer} Returns a `Buffer` instance containing serialized representation of the given @@ -1770,10 +2022,10 @@ added: v8.4.0 --> * `buf` {Buffer|Uint8Array} The packed settings. -* Returns: {[Settings Object][]} +* Returns: {HTTP/2 Settings Object} -Returns a [Settings Object][] containing the deserialized settings from the -given `Buffer` as generated by `http2.getPackedSettings()`. +Returns a [HTTP/2 Settings Object][] containing the deserialized settings from +the given `Buffer` as generated by `http2.getPackedSettings()`. ### Headers Object @@ -1838,8 +2090,8 @@ properties. * `maxConcurrentStreams` {number} Specifies the maximum number of concurrent streams permitted on an `Http2Session`. 
There is no default value which implies, at least theoretically, 231-1 streams may be open - concurrently at any given time in an `Http2Session`. The minimum value is - 0. The maximum allowed value is 231-1. + concurrently at any given time in an `Http2Session`. The minimum value + is 0. The maximum allowed value is 231-1. * `maxHeaderListSize` {number} Specifies the maximum size (uncompressed octets) of header list that will be accepted. The minimum allowed value is 0. The maximum allowed value is 232-1. **Default:** 65535. @@ -1957,6 +2209,7 @@ An HTTP/2 CONNECT proxy: ```js const http2 = require('http2'); +const { NGHTTP2_REFUSED_STREAM } = http2.constants; const net = require('net'); const { URL } = require('url'); @@ -1964,7 +2217,7 @@ const proxy = http2.createServer(); proxy.on('stream', (stream, headers) => { if (headers[':method'] !== 'CONNECT') { // Only accept CONNECT requests - stream.rstWithRefused(); + stream.close(NGHTTP2_REFUSED_STREAM); return; } const auth = new URL(`tcp://${headers[':authority']}`); @@ -1976,7 +2229,7 @@ proxy.on('stream', (stream, headers) => { stream.pipe(socket); }); socket.on('error', (error) => { - stream.rstStream(http2.constants.NGHTTP2_CONNECT_ERROR); + stream.close(http2.constants.NGHTTP2_CONNECT_ERROR); }); }); @@ -2005,7 +2258,7 @@ req.setEncoding('utf8'); req.on('data', (chunk) => data += chunk); req.on('end', () => { console.log(`The server says: ${data}`); - client.destroy(); + client.close(); }); req.end('Jane'); ``` @@ -2014,8 +2267,8 @@ req.end('Jane'); The Compatibility API has the goal of providing a similar developer experience of HTTP/1 when using HTTP/2, making it possible to develop applications -that supports both [HTTP/1][] and HTTP/2. This API targets only the -**public API** of the [HTTP/1][], however many modules uses internal +that support both [HTTP/1][] and HTTP/2. This API targets only the +**public API** of the [HTTP/1][]. However many modules use internal methods or state, and those _are not supported_ as it is a completely different implementation. @@ -2036,14 +2289,14 @@ In order to create a mixed [HTTPS][] and HTTP/2 server, refer to the [ALPN negotiation][] section. Upgrading from non-tls HTTP/1 servers is not supported. -The HTTP2 compatibility API is composed of [`Http2ServerRequest`]() and +The HTTP/2 compatibility API is composed of [`Http2ServerRequest`]() and [`Http2ServerResponse`](). They aim at API compatibility with HTTP/1, but they do not hide the differences between the protocols. As an example, the status message for HTTP codes is ignored. ### ALPN negotiation -ALPN negotiation allows to support both [HTTPS][] and HTTP/2 over +ALPN negotiation allows supporting both [HTTPS][] and HTTP/2 over the same socket. The `req` and `res` objects can be either HTTP/1 or HTTP/2, and an application **must** restrict itself to the public API of [HTTP/1][], and detect if it is possible to use the more advanced @@ -2085,7 +2338,7 @@ added: v8.4.0 A `Http2ServerRequest` object is created by [`http2.Server`][] or [`http2.SecureServer`][] and passed as the first argument to the -[`'request'`][] event. It may be used to access a request status, headers and +[`'request'`][] event. It may be used to access a request status, headers, and data. It implements the [Readable Stream][] interface, as well as the @@ -2144,9 +2397,9 @@ Example: console.log(request.headers); ``` -See [Headers Object][]. +See [HTTP/2 Headers Object][]. 
-*Note*: In HTTP/2, the request path, host name, protocol, and method are +*Note*: In HTTP/2, the request path, hostname, protocol, and method are represented as special headers prefixed with the `:` character (e.g. `':path'`). These special headers will be included in the `request.headers` object. Care must be taken not to inadvertently modify these special headers or errors may @@ -2179,7 +2432,7 @@ added: v8.4.0 * {string} -The request method as a string. Read only. Example: +The request method as a string. Read-only. Example: `'GET'`, `'DELETE'`. #### request.rawHeaders @@ -2191,8 +2444,8 @@ added: v8.4.0 The raw request/response headers list exactly as they were received. -Note that the keys and values are in the same list. It is *not* a -list of tuples. So, the even-numbered offsets are key values, and the +Note that the keys and values are in the same list. It is *not* a +list of tuples. So, the even-numbered offsets are key values, and the odd-numbered offsets are the associated values. Header names are not lowercased, and duplicates are not merged. @@ -2219,7 +2472,7 @@ added: v8.4.0 * {Array} The raw request/response trailer keys and values exactly as they were -received. Only populated at the `'end'` event. +received. Only populated at the `'end'` event. #### request.setTimeout(msecs, callback) -This object is created internally by an HTTP server--not by the user. It is +This object is created internally by an HTTP server — not by the user. It is passed as the second parameter to the [`'request'`][] event. The response implements, but does not inherit from, the [Writable Stream][] @@ -2560,8 +2813,8 @@ added: v8.4.0 * `name` {string} * `value` {string|string[]} -Sets a single header value for implicit headers. If this header already exists -in the to-be-sent headers, its value will be replaced. Use an array of strings +Sets a single header value for implicit headers. If this header already exists +in the to-be-sent headers, its value will be replaced. Use an array of strings here to send multiple headers with the same name. Example: @@ -2601,7 +2854,7 @@ added: v8.4.0 * `msecs` {number} * `callback` {Function} -Sets the [`Http2Stream`]()'s timeout value to `msecs`. If a callback is +Sets the [`Http2Stream`]()'s timeout value to `msecs`. If a callback is provided, then it is added as a listener on the `'timeout'` event on the response object. @@ -2760,7 +3013,7 @@ response.writeHead(200, { ``` Note that Content-Length is given in bytes not characters. The -`Buffer.byteLength()` API may be used to determine the number of bytes in a +`Buffer.byteLength()` API may be used to determine the number of bytes in a given encoding. On outbound messages, Node.js does not check if Content-Length and the length of the body being transmitted are equal or not. However, when receiving messages, Node.js will automatically reject messages when the @@ -2800,15 +3053,80 @@ given newly created [`Http2Stream`] on `Http2ServerRespose`. The callback will be called with an error with code `ERR_HTTP2_STREAM_CLOSED` if the stream is closed. +## Collecting HTTP/2 Performance Metrics + +The [Performance Observer][] API can be used to collect basic performance +metrics for each `Http2Session` and `Http2Stream` instance. 
+ +```js +const { PerformanceObserver } = require('perf_hooks'); + +const obs = new PerformanceObserver((items) => { + const entry = items.getEntries()[0]; + console.log(entry.entryType); // prints 'http2' + if (entry.name === 'Http2Session') { + // entry contains statistics about the Http2Session + } else if (entry.name === 'Http2Stream') { + // entry contains statistics about the Http2Stream + } +}); +obs.observe({ entryTypes: ['http2'] }); +``` + +The `entryType` property of the `PerformanceEntry` will be equal to `'http2'`. + +The `name` property of the `PerformanceEntry` will be equal to either +`'Http2Stream'` or `'Http2Session'`. + +If `name` is equal to `Http2Stream`, the `PerformanceEntry` will contain the +following additional properties: + +* `bytesRead` {number} The number of DATA frame bytes received for this + `Http2Stream`. +* `bytesWritten` {number} The number of DATA frame bytes sent for this + `Http2Stream`. +* `id` {number} The identifier of the associated `Http2Stream` +* `timeToFirstByte` {number} The number of milliseconds elapsed between the + `PerformanceEntry` `startTime` and the reception of the first `DATA` frame. +* `timeToFirstByteSent` {number} The number of milliseconds elapsed between + the `PerformanceEntry` `startTime` and sending of the first `DATA` frame. +* `timeToFirstHeader` {number} The number of milliseconds elapsed between the + `PerformanceEntry` `startTime` and the reception of the first header. + +If `name` is equal to `Http2Session`, the `PerformanceEntry` will contain the +following additional properties: + +* `bytesRead` {number} The number of bytes received for this `Http2Session`. +* `bytesWritten` {number} The number of bytes sent for this `Http2Session`. +* `framesReceived` {number} The number of HTTP/2 frames received by the + `Http2Session`. +* `framesSent` {number} The number of HTTP/2 frames sent by the `Http2Session`. +* `maxConcurrentStreams` {number} The maximum number of streams concurrently + open during the lifetime of the `Http2Session`. +* `pingRTT` {number} The number of milliseconds elapsed since the transmission + of a `PING` frame and the reception of its acknowledgment. Only present if + a `PING` frame has been sent on the `Http2Session`. +* `streamAverageDuration` {number} The average duration (in milliseconds) for + all `Http2Stream` instances. +* `streamCount` {number} The number of `Http2Stream` instances processed by + the `Http2Session`. +* `type` {string} Either `'server'` or `'client'` to identify the type of + `Http2Session`. + + [ALPN negotiation]: #http2_alpn_negotiation +[ALPN Protocol ID]: https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids [Compatibility API]: #http2_compatibility_api [HTTP/1]: http.html [HTTP/2]: https://tools.ietf.org/html/rfc7540 +[HTTP/2 Unencrypted]: https://http2.github.io/faq/#does-http2-require-encryption +[HTTP/2 Headers Object]: #http2_headers_object +[HTTP/2 Settings Object]: #http2_settings_object [HTTPS]: https.html -[Headers Object]: #http2_headers_object [Http2Session and Sockets]: #http2_http2session_and_sockets +[Performance Observer]: perf_hooks.html [Readable Stream]: stream.html#stream_class_stream_readable -[Settings Object]: #http2_settings_object +[RFC 7838]: https://tools.ietf.org/html/rfc7838 [Using options.selectPadding]: #http2_using_options_selectpadding [Writable Stream]: stream.html#stream_writable_streams [`'checkContinue'`]: #http2_event_checkcontinue @@ -2825,8 +3143,12 @@ if the stream is closed. 
[`http2.createSecureServer()`]: #http2_http2_createsecureserver_options_onrequesthandler [`http2.Server`]: #http2_class_http2server [`http2.createServer()`]: #http2_http2_createserver_options_onrequesthandler +[`http2session.close()`]: #http2_http2session_close_callback [`http2stream.pushStream()`]: #http2_http2stream_pushstream_headers_options_callback +[`net.Server.close()`]: net.html#net_server_close_callback [`net.Socket`]: net.html#net_class_net_socket +[`net.Socket.prototype.ref`]: net.html#net_socket_ref +[`net.Socket.prototype.unref`]: net.html#net_socket_unref [`net.connect()`]: net.html#net_net_connect [`request.socket.getPeerCertificate()`]: tls.html#tls_tlssocket_getpeercertificate_detailed [`response.end()`]: #http2_response_end_data_encoding_callback @@ -2836,6 +3158,7 @@ if the stream is closed. [`response.write(data, encoding)`]: http.html#http_response_write_chunk_encoding_callback [`response.writeContinue()`]: #http2_response_writecontinue [`response.writeHead()`]: #http2_response_writehead_statuscode_statusmessage_headers +[`tls.Server.close()`]: tls.html#tls_server_close_callback [`tls.TLSSocket`]: tls.html#tls_class_tls_tlssocket [`tls.connect()`]: tls.html#tls_tls_connect_options_callback [`tls.createServer()`]: tls.html#tls_tls_createserver_options_secureconnectionlistener diff --git a/doc/api/https.md b/doc/api/https.md index 4740986170b39a..daf10ac4a2bb94 100644 --- a/doc/api/https.md +++ b/doc/api/https.md @@ -12,7 +12,7 @@ separate module. added: v0.4.5 --> -An Agent object for HTTPS similar to [`http.Agent`][]. See [`https.request()`][] +An Agent object for HTTPS similar to [`http.Agent`][]. See [`https.request()`][] for more information. ## Class: https.Server diff --git a/doc/api/intl.md b/doc/api/intl.md index 224c00a62fe888..83a9947dc21238 100644 --- a/doc/api/intl.md +++ b/doc/api/intl.md @@ -190,17 +190,17 @@ to be helpful: ["ICU Data"]: http://userguide.icu-project.org/icudata [`--icu-data-dir`]: cli.html#cli_icu_data_dir_file -[`Date.prototype.toLocaleString()`]: https://developer.mozilla.org/en/docs/Web/JavaScript/Reference/Global_Objects/Date/toLocaleString -[`Intl`]: https://developer.mozilla.org/en/docs/Web/JavaScript/Reference/Global_Objects/Intl +[`Date.prototype.toLocaleString()`]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toLocaleString +[`Intl`]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Intl [`Intl.DateTimeFormat`]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/DateTimeFormat [`NODE_ICU_DATA`]: cli.html#cli_node_icu_data_file -[`Number.prototype.toLocaleString()`]: https://developer.mozilla.org/en/docs/Web/JavaScript/Reference/Global_Objects/Number/toLocaleString +[`Number.prototype.toLocaleString()`]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Number/toLocaleString [`require('buffer').transcode()`]: buffer.html#buffer_buffer_transcode_source_fromenc_toenc [`require('util').TextDecoder`]: util.html#util_class_util_textdecoder -[`String.prototype.localeCompare()`]: https://developer.mozilla.org/en/docs/Web/JavaScript/Reference/Global_Objects/String/localeCompare -[`String.prototype.normalize()`]: https://developer.mozilla.org/en/docs/Web/JavaScript/Reference/Global_Objects/String/normalize -[`String.prototype.toLowerCase()`]: https://developer.mozilla.org/en/docs/Web/JavaScript/Reference/Global_Objects/String/toLowerCase -[`String.prototype.toUpperCase()`]: 
https://developer.mozilla.org/en/docs/Web/JavaScript/Reference/Global_Objects/String/toUpperCase +[`String.prototype.localeCompare()`]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/localeCompare +[`String.prototype.normalize()`]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/normalize +[`String.prototype.toLowerCase()`]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/toLowerCase +[`String.prototype.toUpperCase()`]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/toUpperCase [BUILDING.md]: https://github.com/nodejs/node/blob/master/BUILDING.md [BUILDING.md#full-icu]: https://github.com/nodejs/node/blob/master/BUILDING.md#build-with-full-icu-support-all-locales-supported-by-icu [ECMA-262]: https://tc39.github.io/ecma262/ diff --git a/doc/api/modules.md b/doc/api/modules.md index 07c469c11408ad..6bac6a8b23144b 100644 --- a/doc/api/modules.md +++ b/doc/api/modules.md @@ -123,12 +123,12 @@ the version that is symlinked into Furthermore, to make the module lookup process even more optimal, rather than putting packages directly in `/usr/lib/node`, we could put them in -`/usr/lib/node_modules//`. Then Node.js will not bother +`/usr/lib/node_modules//`. Then Node.js will not bother looking for missing dependencies in `/usr/node_modules` or `/node_modules`. In order to make modules available to the Node.js REPL, it might be useful to also add the `/usr/lib/node_modules` folder to the `$NODE_PATH` environment -variable. Since the module lookups using `node_modules` folders are all +variable. Since the module lookups using `node_modules` folders are all relative, and based on the real path of the files making the calls to `require()`, the packages themselves can be anywhere. @@ -196,12 +196,12 @@ NODE_MODULES_PATHS(START) -Modules are cached after the first time they are loaded. This means +Modules are cached after the first time they are loaded. This means (among other things) that every call to `require('foo')` will get exactly the same object returned, if it would resolve to the same file. Multiple calls to `require('foo')` may not cause the module code to be -executed multiple times. This is an important feature. With it, +executed multiple times. This is an important feature. With it, "partially done" objects can be returned, thus allowing transitive dependencies to be loaded even when they would cause cycles. @@ -212,7 +212,7 @@ that function. -Modules are cached based on their resolved filename. Since modules may +Modules are cached based on their resolved filename. Since modules may resolve to a different filename based on the location of the calling module (loading from `node_modules` folders), it is not a *guarantee* that `require('foo')` will always return the exact same object, if it @@ -228,14 +228,14 @@ irrespective of whether or not `./foo` and `./FOO` are the same file. -Node.js has several modules compiled into the binary. These modules are +Node.js has several modules compiled into the binary. These modules are described in greater detail elsewhere in this documentation. The core modules are defined within Node.js's source and are located in the `lib/` folder. Core modules are always preferentially loaded if their identifier is -passed to `require()`. For instance, `require('http')` will always +passed to `require()`. 
For instance, `require('http')` will always return the built in HTTP module, even if there is a file by that name. ## Cycles @@ -275,13 +275,13 @@ console.log('b done'); console.log('main starting'); const a = require('./a.js'); const b = require('./b.js'); -console.log('in main, a.done=%j, b.done=%j', a.done, b.done); +console.log('in main, a.done = %j, b.done = %j', a.done, b.done); ``` -When `main.js` loads `a.js`, then `a.js` in turn loads `b.js`. At that -point, `b.js` tries to load `a.js`. In order to prevent an infinite +When `main.js` loads `a.js`, then `a.js` in turn loads `b.js`. At that +point, `b.js` tries to load `a.js`. In order to prevent an infinite loop, an **unfinished copy** of the `a.js` exports object is returned to the -`b.js` module. `b.js` then finishes loading, and its `exports` object is +`b.js` module. `b.js` then finishes loading, and its `exports` object is provided to the `a.js` module. By the time `main.js` has loaded both modules, they're both finished. @@ -296,7 +296,7 @@ in b, a.done = false b done in a, b.done = true a done -in main, a.done=true, b.done=true +in main, a.done = true, b.done = true ``` Careful planning is required to allow cyclic module dependencies to work @@ -314,7 +314,7 @@ required filename with the added extensions: `.js`, `.json`, and finally parsed as JSON text files. `.node` files are interpreted as compiled addon modules loaded with `dlopen`. -A required module prefixed with `'/'` is an absolute path to the file. For +A required module prefixed with `'/'` is an absolute path to the file. For example, `require('/home/marco/foo.js')` will load the file at `/home/marco/foo.js`. @@ -338,7 +338,7 @@ There are three ways in which a folder may be passed to `require()` as an argument. The first is to create a `package.json` file in the root of the folder, -which specifies a `main` module. An example package.json file might +which specifies a `main` module. An example package.json file might look like this: ```json @@ -352,7 +352,7 @@ If this was in a folder at `./some-library`, then This is the extent of Node.js's awareness of package.json files. -*Note*: If the file specified by the `"main"` entry of `package.json` is +*Note*: If the file specified by the `'main'` entry of `package.json` is missing and can not be resolved, Node.js will report the entire module as missing with the default error: @@ -362,7 +362,7 @@ Error: Cannot find module 'some-library' If there is no package.json file present in the directory, then Node.js will attempt to load an `index.js` or `index.node` file out of that -directory. For example, if there was no package.json file in the above +directory. For example, if there was no package.json file in the above example, then `require('./some-library')` would attempt to load: * `./some-library/index.js` @@ -415,7 +415,7 @@ varying paths before the current [module resolution][] algorithm was frozen. `NODE_PATH` is still supported, but is less necessary now that the Node.js ecosystem has settled on a convention for locating dependent modules. Sometimes deployments that rely on `NODE_PATH` show surprising behavior -when people are unaware that `NODE_PATH` must be set. Sometimes a +when people are unaware that `NODE_PATH` must be set. Sometimes a module's dependencies change, causing a different version (or even a different module) to be loaded as the `NODE_PATH` is searched. 
@@ -583,14 +583,14 @@ Process files with the extension `.sjs` as `.js`: require.extensions['.sjs'] = require.extensions['.js']; ``` -**Deprecated** In the past, this list has been used to load +**Deprecated** In the past, this list has been used to load non-JavaScript modules into Node.js by compiling them on-demand. However, in practice, there are much better ways to do this, such as loading modules via some other Node.js program, or compiling them to JavaScript ahead of time. Since the module system is locked, this feature will probably never go -away. However, it may have subtle bugs and complexities that are best +away. However, it may have subtle bugs and complexities that are best left untouched. Note that the number of file system operations that the module system @@ -643,7 +643,7 @@ added: v0.1.16 * {Object} In each module, the `module` free variable is a reference to the object -representing the current module. For convenience, `module.exports` is +representing the current module. For convenience, `module.exports` is also accessible via the `exports` module-global. `module` is not actually a global but rather local to each module. @@ -694,7 +694,7 @@ a.on('ready', () => { Note that assignment to `module.exports` must be done immediately. It cannot be -done in any callbacks. This does not work: +done in any callbacks. This does not work: x.js: @@ -774,7 +774,7 @@ added: v0.1.16 * {string} -The identifier for the module. Typically this is the fully resolved +The identifier for the module. Typically this is the fully resolved filename. ### module.loaded @@ -817,7 +817,7 @@ The `module.require` method provides a way to load a module as if `require()` was called from the original module. *Note*: In order to do this, it is necessary to get a reference to the -`module` object. Since `require()` returns the `module.exports`, and the +`module` object. Since `require()` returns the `module.exports`, and the `module` is typically *only* available within a specific module's code, it must be explicitly exported in order to be used. @@ -830,7 +830,7 @@ added: v0.3.7 * {Object} Provides general utility methods when interacting with instances of -`Module` -- the `module` variable often seen in file modules. Accessed +`Module` — the `module` variable often seen in file modules. Accessed via `require('module')`. ### module.builtinModules @@ -840,7 +840,7 @@ added: v8.10.0 * {string[]} -A list of the names of all modules provided by Node.js. Can be used to verify +A list of the names of all modules provided by Node.js. Can be used to verify if a module is maintained by a third-party module or not. [`__dirname`]: #modules_dirname diff --git a/doc/api/n-api.md b/doc/api/n-api.md index 689536c227aafd..e3e3242ef321bb 100644 --- a/doc/api/n-api.md +++ b/doc/api/n-api.md @@ -14,7 +14,7 @@ compiled for one version to run on later versions of Node.js without recompilation. Addons are built/packaged with the same approach/tools -outlined in the section titled [C++ Addons](addons.html). +outlined in the section titled [C++ Addons](addons.html). The only difference is the set of APIs that are used by the native code. Instead of using the V8 or [Native Abstractions for Node.js][] APIs, the functions available in the N-API are used. @@ -71,7 +71,7 @@ N-API exposes the following fundamental datatypes as abstractions that are consumed by the various APIs. These APIs should be treated as opaque, introspectable only with other N-API calls. 
-### *napi_status* +### napi_status Integral status code indicating the success or failure of a N-API call. Currently, the following status codes are supported. ```C @@ -94,7 +94,7 @@ typedef enum { If additional information is required upon an API returning a failed status, it can be obtained by calling `napi_get_last_error_info`. -### *napi_extended_error_info* +### napi_extended_error_info ```C typedef struct { const char* error_message; @@ -114,7 +114,7 @@ typedef struct { See the [Error Handling][] section for additional information. -### *napi_env* +### napi_env `napi_env` is used to represent a context that the underlying N-API implementation can use to persist VM-specific state. This structure is passed to native functions when they're invoked, and it must be passed back when @@ -123,11 +123,11 @@ the initial native function was called must be passed to any subsequent nested N-API calls. Caching the `napi_env` for the purpose of general reuse is not allowed. -### *napi_value* +### napi_value This is an opaque pointer that is used to represent a JavaScript value. ### N-API Memory Management types -#### *napi_handle_scope* +#### napi_handle_scope This is an abstraction used to control and modify the lifetime of objects created within a particular scope. In general, N-API values are created within the context of a handle scope. When a native method is called from @@ -145,11 +145,11 @@ referenced from the current stack frame. For more details, review the [Object Lifetime Management][]. -#### *napi_escapable_handle_scope* +#### napi_escapable_handle_scope Escapable handle scopes are a special type of handle scope to return values created within a particular handle scope to a parent scope. -#### *napi_ref* +#### napi_ref This is the abstraction to use to reference a `napi_value`. This allows for users to manage the lifetimes of JavaScript values, including defining their minimum lifetimes explicitly. @@ -157,12 +157,12 @@ minimum lifetimes explicitly. For more details, review the [Object Lifetime Management][]. ### N-API Callback types -#### *napi_callback_info* +#### napi_callback_info Opaque datatype that is passed to a callback function. It can be used for getting additional information about the context in which the callback was invoked. -#### *napi_callback* +#### napi_callback Function pointer type for user-provided native functions which are to be exposed to JavaScript via N-API. Callback functions should satisfy the following signature: @@ -170,7 +170,7 @@ following signature: typedef napi_value (*napi_callback)(napi_env, napi_callback_info); ``` -#### *napi_finalize* +#### napi_finalize Function pointer type for add-on provided functions that allow the user to be notified when externally-owned data is ready to be cleaned up because the object with which it was associated with, has been garbage-collected. The user @@ -279,6 +279,8 @@ valid up until an n-api function is called on the same `env`. information as it is not subject to SemVer and may change at any time. It is intended only for logging purposes. +This API can be called even if there is a pending JavaScript exception. + ### Exceptions Any N-API function call may result in a pending JavaScript exception. This is @@ -308,7 +310,7 @@ where the native code can catch the exception, take the appropriate action, and then continue. This is only recommended in specific cases where it is known that the exception can be safely handled. 
In these cases [`napi_get_and_clear_last_exception`][] can be used to get and -clear the exception. On success, result will contain the handle to +clear the exception. On success, result will contain the handle to the last JavaScript Object thrown. If it is determined, after retrieving the exception, the exception cannot be handled after all it can be re-thrown it with [`napi_throw`][] where error is the @@ -316,7 +318,7 @@ JavaScript Error object to be thrown. The following utility functions are also available in case native code needs to throw an exception or determine if a `napi_value` is an instance -of a JavaScript `Error` object: [`napi_throw_error`][], +of a JavaScript `Error` object: [`napi_throw_error`][], [`napi_throw_type_error`][], [`napi_throw_range_error`][] and [`napi_is_error`][]. @@ -327,7 +329,7 @@ where result is the napi_value that refers to the newly created JavaScript Error object. The Node.js project is adding error codes to all of the errors -generated internally. The goal is for applications to use these +generated internally. The goal is for applications to use these error codes for all error checking. The associated error messages will remain, but will only be meant to be used for logging and display with the expectation that the message can change without @@ -335,7 +337,7 @@ SemVer applying. In order to support this model with N-API, both in internal functionality and for module specific functionality (as its good practice), the `throw_` and `create_` functions take an optional code parameter which is the string for the code -to be added to the error object. If the optional parameter is NULL +to be added to the error object. If the optional parameter is NULL then no code will be associated with the error. If a code is provided, the name associated with the error is also updated to be: @@ -344,7 +346,7 @@ originalName [code] ``` where originalName is the original name associated with the error -and code is the code that was provided. For example if the code +and code is the code that was provided. For example if the code is 'ERR_ERROR_1' and a TypeError is being created the name will be: ```text @@ -504,7 +506,6 @@ Returns `napi_ok` if the API succeeded. This API returns a JavaScript RangeError with the text provided. - #### napi_get_and_clear_last_exception +```C +napi_status napi_fatal_exception(napi_env env, napi_value err); +``` + +- `[in] env`: The environment that the API is invoked under. +- `[in] err`: The error you want to pass to `uncaughtException`. + +Trigger an `uncaughtException` in JavaScript. Useful if an async +callback throws an exception with no way to recover. + ### Fatal Errors In the event of an unrecoverable error in a native module, a fatal error can be @@ -554,14 +573,16 @@ NAPI_NO_RETURN void napi_fatal_error(const char* location, - `[in] location`: Optional location at which the error occurred. - `[in] location_len`: The length of the location in bytes, or -NAPI_AUTO_LENGTH if it is null-terminated. +`NAPI_AUTO_LENGTH` if it is null-terminated. - `[in] message`: The message associated with the error. - `[in] message_len`: The length of the message in bytes, or -NAPI_AUTO_LENGTH if it is +`NAPI_AUTO_LENGTH` if it is null-terminated. The function call does not return, the process will be terminated. +This API can be called even if there is a pending JavaScript exception. 
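As a rough illustration of the exception-handling flow described above, the following sketch reads a property whose JavaScript getter may throw, then inspects the pending exception and rethrows it if it cannot be handled. The function name `ReadValue`, the property name `value`, and the recovery decision are assumptions made purely for this example.

```C
#include <node_api.h>
#include <stdbool.h>

// A sketch of the exception-handling flow: a failed N-API call may leave a
// JavaScript exception pending. Retrieve it, and rethrow it if it cannot be
// handled in native code after all.
static napi_value ReadValue(napi_env env, napi_callback_info info) {
  size_t argc = 1;
  napi_value argv[1];
  if (napi_get_cb_info(env, info, &argc, argv, NULL, NULL) != napi_ok)
    return NULL;

  napi_value value;
  napi_status status = napi_get_named_property(env, argv[0], "value", &value);
  if (status != napi_ok) {
    bool pending = false;
    if (napi_is_exception_pending(env, &pending) == napi_ok && pending) {
      napi_value err;
      // Take ownership of the pending exception so it can be inspected.
      if (napi_get_and_clear_last_exception(env, &err) == napi_ok) {
        // If the exception cannot be handled here, rethrow it so that it
        // propagates to JavaScript as usual.
        napi_throw(env, err);
      }
    }
    return NULL;
  }
  return value;
}
```

For errors that cannot be recovered from at all, `napi_fatal_error()` described above terminates the process instead of returning.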
+ ## Object Lifetime management As N-API calls are made, handles to objects in the heap for the underlying @@ -589,7 +610,7 @@ that has a loop which iterates through the elements in a large array: ```C for (int i = 0; i < 1000000; i++) { napi_value result; - napi_status status = napi_get_element(e object, i, &result); + napi_status status = napi_get_element(e, object, i, &result); if (status != napi_ok) { break; } @@ -608,7 +629,7 @@ are no longer required, the scope can be 'closed' and any handles associated with the scope are invalidated. The methods available to open/close scopes are [`napi_open_handle_scope`][] and [`napi_close_handle_scope`][]. -N-API only supports a single nested hiearchy of scopes. There is only one +N-API only supports a single nested hierarchy of scopes. There is only one active scope at any time, and all new handles will be associated with that scope while it is active. Scopes must be closed in the reverse order from which they are opened. In addition, all scopes created within a native method @@ -626,7 +647,7 @@ for (int i = 0; i < 1000000; i++) { break; } napi_value result; - status = napi_get_element(e object, i, &result); + status = napi_get_element(e, object, i, &result); if (status != napi_ok) { break; } @@ -682,6 +703,8 @@ Returns `napi_ok` if the API succeeded. This API closes the scope passed in. Scopes must be closed in the reverse order from which they were created. +This API can be called even if there is a pending JavaScript exception. + #### napi_open_escapable_handle_scope @@ -1038,7 +1066,7 @@ JavaScript arrays are described in [Section 22.1](https://tc39.github.io/ecma262/#sec-array-objects) of the ECMAScript Language Specification. -#### *napi_create_array_with_length* +#### napi_create_array_with_length @@ -1067,7 +1095,7 @@ JavaScript arrays are described in [Section 22.1](https://tc39.github.io/ecma262/#sec-array-objects) of the ECMAScript Language Specification. -#### *napi_create_arraybuffer* +#### napi_create_arraybuffer @@ -1099,7 +1127,7 @@ JavaScript ArrayBuffer objects are described in [Section 24.1](https://tc39.github.io/ecma262/#sec-arraybuffer-objects) of the ECMAScript Language Specification. -#### *napi_create_buffer* +#### napi_create_buffer @@ -1120,7 +1148,7 @@ Returns `napi_ok` if the API succeeded. This API allocates a `node::Buffer` object. While this is still a fully-supported data structure, in most cases using a TypedArray will suffice. -#### *napi_create_buffer_copy* +#### napi_create_buffer_copy @@ -1145,7 +1173,7 @@ This API allocates a `node::Buffer` object and initializes it with data copied from the passed-in buffer. While this is still a fully-supported data structure, in most cases using a TypedArray will suffice. -#### *napi_create_external* +#### napi_create_external @@ -1212,7 +1240,7 @@ JavaScript ArrayBuffers are described in [Section 24.1](https://tc39.github.io/ecma262/#sec-arraybuffer-objects) of the ECMAScript Language Specification. -#### *napi_create_external_buffer* +#### napi_create_external_buffer @@ -1243,7 +1271,7 @@ structure, in most cases using a TypedArray will suffice. *Note*: For Node.js >=4 `Buffers` are Uint8Arrays. -#### *napi_create_function* +#### napi_create_function @@ -1260,7 +1288,7 @@ napi_status napi_create_function(napi_env env, - `[in] utf8name`: A string representing the name of the function encoded as UTF8. - `[in] length`: The length of the utf8name in bytes, or -NAPI_AUTO_LENGTH if it is null-terminated. +`NAPI_AUTO_LENGTH` if it is null-terminated. 
- `[in] cb`: A function pointer to the native function to be invoked when the created function is invoked from JavaScript. - `[in] data`: Optional arbitrary context data to be passed into the native @@ -1276,7 +1304,7 @@ JavaScript Functions are described in [Section 19.2](https://tc39.github.io/ecma262/#sec-function-objects) of the ECMAScript Language Specification. -#### *napi_create_object* +#### napi_create_object @@ -1296,7 +1324,7 @@ The JavaScript Object type is described in [Section 6.1.7](https://tc39.github.io/ecma262/#sec-object-type) of the ECMAScript Language Specification. -#### *napi_create_symbol* +#### napi_create_symbol @@ -1319,7 +1347,7 @@ The JavaScript Symbol type is described in [Section 19.4](https://tc39.github.io/ecma262/#sec-symbol-objects) of the ECMAScript Language Specification. -#### *napi_create_typedarray* +#### napi_create_typedarray @@ -1355,7 +1383,7 @@ JavaScript TypedArray Objects are described in of the ECMAScript Language Specification. -#### *napi_create_dataview* +#### napi_create_dataview @@ -1389,7 +1417,7 @@ JavaScript DataView Objects are described in [Section 24.3][] of the ECMAScript Language Specification. ### Functions to convert from C types to N-API -#### *napi_create_int32* +#### napi_create_int32 @@ -1410,7 +1438,7 @@ The JavaScript Number type is described in [Section 6.1.6](https://tc39.github.io/ecma262/#sec-ecmascript-language-types-number-type) of the ECMAScript Language Specification. -#### *napi_create_uint32* +#### napi_create_uint32 @@ -1431,7 +1459,7 @@ The JavaScript Number type is described in [Section 6.1.6](https://tc39.github.io/ecma262/#sec-ecmascript-language-types-number-type) of the ECMAScript Language Specification. -#### *napi_create_int64* +#### napi_create_int64 @@ -1458,7 +1486,7 @@ outside the range of [`Number.MAX_SAFE_INTEGER`](https://tc39.github.io/ecma262/#sec-number.max_safe_integer) (2^53 - 1) will lose precision. -#### *napi_create_double* +#### napi_create_double @@ -1479,7 +1507,7 @@ The JavaScript Number type is described in [Section 6.1.6](https://tc39.github.io/ecma262/#sec-ecmascript-language-types-number-type) of the ECMAScript Language Specification. -#### *napi_create_string_latin1* +#### napi_create_string_latin1 @@ -1493,7 +1521,7 @@ napi_status napi_create_string_latin1(napi_env env, - `[in] env`: The environment that the API is invoked under. - `[in] str`: Character buffer representing a ISO-8859-1-encoded string. - `[in] length`: The length of the string in bytes, or -NAPI_AUTO_LENGTH if it is null-terminated. +`NAPI_AUTO_LENGTH` if it is null-terminated. - `[out] result`: A `napi_value` representing a JavaScript String. Returns `napi_ok` if the API succeeded. @@ -1504,7 +1532,7 @@ The JavaScript String type is described in [Section 6.1.4](https://tc39.github.io/ecma262/#sec-ecmascript-language-types-string-type) of the ECMAScript Language Specification. -#### *napi_create_string_utf16* +#### napi_create_string_utf16 @@ -1518,7 +1546,7 @@ napi_status napi_create_string_utf16(napi_env env, - `[in] env`: The environment that the API is invoked under. - `[in] str`: Character buffer representing a UTF16-LE-encoded string. - `[in] length`: The length of the string in two-byte code units, or -NAPI_AUTO_LENGTH if it is null-terminated. +`NAPI_AUTO_LENGTH` if it is null-terminated. - `[out] result`: A `napi_value` representing a JavaScript String. Returns `napi_ok` if the API succeeded. 
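As a small illustration of the C-to-JavaScript conversion functions covered in this part of the document, the sketch below builds an object holding a string and a number. The function name `MakeResult` and the property names are assumptions made for the example, and `napi_set_named_property` (described later in this document) is used to attach the created values.

```C
#include <node_api.h>

// A sketch of value creation: build the object { message: 'hello from C',
// answer: 42 } from C data inside a native callback.
static napi_value MakeResult(napi_env env, napi_callback_info info) {
  napi_value result, message, answer;

  if (napi_create_object(env, &result) != napi_ok) return NULL;
  // NAPI_AUTO_LENGTH may be used because the C string is null-terminated.
  if (napi_create_string_utf8(env, "hello from C", NAPI_AUTO_LENGTH,
                              &message) != napi_ok) return NULL;
  if (napi_create_int32(env, 42, &answer) != napi_ok) return NULL;

  if (napi_set_named_property(env, result, "message", message) != napi_ok)
    return NULL;
  if (napi_set_named_property(env, result, "answer", answer) != napi_ok)
    return NULL;

  return result;
}
```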
@@ -1529,7 +1557,7 @@ The JavaScript String type is described in [Section 6.1.4](https://tc39.github.io/ecma262/#sec-ecmascript-language-types-string-type) of the ECMAScript Language Specification. -#### *napi_create_string_utf8* +#### napi_create_string_utf8 @@ -1542,7 +1570,7 @@ napi_status napi_create_string_utf8(napi_env env, - `[in] env`: The environment that the API is invoked under. - `[in] str`: Character buffer representing a UTF8-encoded string. -- `[in] length`: The length of the string in bytes, or NAPI_AUTO_LENGTH +- `[in] length`: The length of the string in bytes, or `NAPI_AUTO_LENGTH` if it is null-terminated. - `[out] result`: A `napi_value` representing a JavaScript String. @@ -1555,7 +1583,7 @@ The JavaScript String type is described in of the ECMAScript Language Specification. ### Functions to convert from N-API to C types -#### *napi_get_array_length* +#### napi_get_array_length @@ -1578,7 +1606,7 @@ Array length is described in [Section 22.1.4.1](https://tc39.github.io/ecma262/#sec-properties-of-array-instances-length) of the ECMAScript Language Specification. -#### *napi_get_arraybuffer_info* +#### napi_get_arraybuffer_info @@ -1606,7 +1634,7 @@ which can be used to guarantee control over the lifetime of the ArrayBuffer. It's also safe to use the returned data buffer within the same callback as long as there are no calls to other APIs that might trigger a GC. -#### *napi_get_buffer_info* +#### napi_get_buffer_info @@ -1630,7 +1658,7 @@ and it's length. *Warning*: Use caution while using this API since the underlying data buffer's lifetime is not guaranteed if it's managed by the VM. -#### *napi_get_prototype* +#### napi_get_prototype @@ -1648,7 +1676,7 @@ not the same as the function's `prototype` property). Returns `napi_ok` if the API succeeded. -#### *napi_get_typedarray_info* +#### napi_get_typedarray_info @@ -1678,9 +1706,7 @@ This API returns various properties of a typed array. *Warning*: Use caution while using this API since the underlying data buffer is managed by the VM - - -#### *napi_get_dataview_info* +#### napi_get_dataview_info @@ -1707,8 +1733,7 @@ Returns `napi_ok` if the API succeeded. This API returns various properties of a DataView. - -#### *napi_get_value_bool* +#### napi_get_value_bool @@ -1727,7 +1752,7 @@ passed in it returns `napi_boolean_expected`. This API returns the C boolean primitive equivalent of the given JavaScript Boolean. -#### *napi_get_value_double* +#### napi_get_value_double @@ -1748,8 +1773,7 @@ in it returns `napi_number_expected`. This API returns the C double primitive equivalent of the given JavaScript Number. - -#### *napi_get_value_external* +#### napi_get_value_external @@ -1769,7 +1793,7 @@ passed in it returns `napi_invalid_arg`. This API retrieves the external data pointer that was previously passed to `napi_create_external()`. -#### *napi_get_value_int32* +#### napi_get_value_int32 @@ -1784,15 +1808,19 @@ napi_status napi_get_value_int32(napi_env env, - `[out] result`: C int32 primitive equivalent of the given JavaScript Number. Returns `napi_ok` if the API succeeded. If a non-number `napi_value` -is passed in `napi_number_expected . +is passed in `napi_number_expected`. This API returns the C int32 primitive equivalent -of the given JavaScript Number. If the number exceeds the range of the -32 bit integer, then the result is truncated to the equivalent of the -bottom 32 bits. This can result in a large positive number becoming -a negative number if the value is > 2^31 -1. +of the given JavaScript Number. 
-#### *napi_get_value_int64* +If the number exceeds the range of the 32 bit integer, then the result is +truncated to the equivalent of the bottom 32 bits. This can result in a large +positive number becoming a negative number if the value is > 2^31 -1. + +Non-finite number values (NaN, positive infinity, or negative infinity) set the +result to zero. + +#### napi_get_value_int64 @@ -1809,10 +1837,19 @@ napi_status napi_get_value_int64(napi_env env, Returns `napi_ok` if the API succeeded. If a non-number `napi_value` is passed in it returns `napi_number_expected`. -This API returns the C int64 primitive equivalent of the given -JavaScript Number +This API returns the C int64 primitive equivalent of the given JavaScript +Number. -#### *napi_get_value_string_latin1* +Number values outside the range of +[`Number.MIN_SAFE_INTEGER`](https://tc39.github.io/ecma262/#sec-number.min_safe_integer) +-(2^53 - 1) - +[`Number.MAX_SAFE_INTEGER`](https://tc39.github.io/ecma262/#sec-number.max_safe_integer) +(2^53 - 1) will lose precision. + +Non-finite number values (NaN, positive infinity, or negative infinity) set the +result to zero. + +#### napi_get_value_string_latin1 @@ -1839,7 +1876,7 @@ is passed in it returns `napi_string_expected`. This API returns the ISO-8859-1-encoded string corresponding the value passed in. -#### *napi_get_value_string_utf8* +#### napi_get_value_string_utf8 @@ -1865,7 +1902,7 @@ is passed in it returns `napi_string_expected`. This API returns the UTF8-encoded string corresponding the value passed in. -#### *napi_get_value_string_utf16* +#### napi_get_value_string_utf16 @@ -1891,7 +1928,7 @@ is passed in it returns `napi_string_expected`. This API returns the UTF16-encoded string corresponding the value passed in. -#### *napi_get_value_uint32* +#### napi_get_value_uint32 @@ -1913,7 +1950,7 @@ This API returns the C primitive equivalent of the given `napi_value` as a `uint32_t`. ### Functions to get global instances -#### *napi_get_boolean* +#### napi_get_boolean @@ -1931,7 +1968,7 @@ Returns `napi_ok` if the API succeeded. This API is used to return the JavaScript singleton object that is used to represent the given boolean value -#### *napi_get_global* +#### napi_get_global @@ -1946,7 +1983,7 @@ Returns `napi_ok` if the API succeeded. This API returns the global Object. -#### *napi_get_null* +#### napi_get_null @@ -1961,7 +1998,7 @@ Returns `napi_ok` if the API succeeded. This API returns the null Object. -#### *napi_get_undefined* +#### napi_get_undefined @@ -1989,7 +2026,7 @@ These APIs support doing one of the following: 2. Check the type of a JavaScript value 3. Check for equality between two JavaScript values -### *napi_coerce_to_bool* +### napi_coerce_to_bool @@ -2010,7 +2047,7 @@ This API implements the abstract operation ToBoolean as defined in of the ECMAScript Language Specification. This API can be re-entrant if getters are defined on the passed-in Object. -### *napi_coerce_to_number* +### napi_coerce_to_number @@ -2031,7 +2068,7 @@ This API implements the abstract operation ToNumber as defined in of the ECMAScript Language Specification. This API can be re-entrant if getters are defined on the passed-in Object. -### *napi_coerce_to_object* +### napi_coerce_to_object @@ -2052,7 +2089,7 @@ This API implements the abstract operation ToObject as defined in of the ECMAScript Language Specification. This API can be re-entrant if getters are defined on the passed-in Object. 
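The `napi_get_value_string_*` functions above are commonly used in a two-call pattern when the required buffer size is not known in advance: the first call passes a `NULL` buffer to obtain the length, and the second copies the bytes. The sketch below assumes the first argument is already a JavaScript String (otherwise one of the coercion functions could be applied first); the function name `PrintArg` is an assumption made for the example.

```C
#include <node_api.h>
#include <stdio.h>
#include <stdlib.h>

// A sketch of the usual two-call pattern for extracting a string:
// first query the length, then copy the bytes into a C buffer.
static napi_value PrintArg(napi_env env, napi_callback_info info) {
  size_t argc = 1;
  napi_value argv[1];
  if (napi_get_cb_info(env, info, &argc, argv, NULL, NULL) != napi_ok)
    return NULL;

  // With a NULL buffer, the length in bytes (excluding the terminating
  // null byte) is written to `len`.
  size_t len = 0;
  if (napi_get_value_string_utf8(env, argv[0], NULL, 0, &len) != napi_ok)
    return NULL;

  char* buf = malloc(len + 1);
  if (buf == NULL) return NULL;

  size_t copied = 0;
  if (napi_get_value_string_utf8(env, argv[0], buf, len + 1, &copied) ==
      napi_ok) {
    printf("received: %s\n", buf);
  }

  free(buf);
  return NULL;
}
```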
-### *napi_coerce_to_string* +### napi_coerce_to_string @@ -2073,7 +2110,7 @@ This API implements the abstract operation ToString as defined in of the ECMAScript Language Specification. This API can be re-entrant if getters are defined on the passed-in Object. -### *napi_typeof* +### napi_typeof @@ -2094,7 +2131,7 @@ the object as defined in [Section 12.5.5][] of the ECMAScript Language Specification. However, it has support for detecting an External value. If `value` has a type that is invalid, an error is returned. -### *napi_instanceof* +### napi_instanceof @@ -2119,7 +2156,7 @@ defined in [Section 12.10.4](https://tc39.github.io/ecma262/#sec-instanceofoperator) of the ECMAScript Language Specification. -### *napi_is_array* +### napi_is_array @@ -2137,7 +2174,7 @@ This API represents invoking the `IsArray` operation on the object as defined in [Section 7.2.2](https://tc39.github.io/ecma262/#sec-isarray) of the ECMAScript Language Specification. -### *napi_is_arraybuffer* +### napi_is_arraybuffer @@ -2151,9 +2188,9 @@ napi_status napi_is_arraybuffer(napi_env env, napi_value value, bool* result) Returns `napi_ok` if the API succeeded. -This API checks if the Object passsed in is an array buffer. +This API checks if the Object passed in is an array buffer. -### *napi_is_buffer* +### napi_is_buffer @@ -2168,9 +2205,9 @@ object. Returns `napi_ok` if the API succeeded. -This API checks if the Object passsed in is a buffer. +This API checks if the Object passed in is a buffer. -### *napi_is_error* +### napi_is_error @@ -2184,9 +2221,9 @@ napi_status napi_is_error(napi_env env, napi_value value, bool* result) Returns `napi_ok` if the API succeeded. -This API checks if the Object passsed in is an Error. +This API checks if the Object passed in is an Error. -### *napi_is_typedarray* +### napi_is_typedarray @@ -2200,11 +2237,9 @@ napi_status napi_is_typedarray(napi_env env, napi_value value, bool* result) Returns `napi_ok` if the API succeeded. -This API checks if the Object passsed in is a typed array. - +This API checks if the Object passed in is a typed array. - -### *napi_is_dataview* +### napi_is_dataview @@ -2221,7 +2256,7 @@ Returns `napi_ok` if the API succeeded. This API checks if the Object passed in is a DataView. -### *napi_strict_equals* +### napi_strict_equals @@ -2364,8 +2399,8 @@ if (status != napi_ok) return status; // Set the properties napi_property_descriptor descriptors[] = { - { "foo", nullptr, 0, 0, 0, fooValue, napi_default, 0 }, - { "bar", nullptr, 0, 0, 0, barValue, napi_default, 0 } + { "foo", NULL, 0, 0, 0, fooValue, napi_default, 0 }, + { "bar", NULL, 0, 0, 0, barValue, napi_default, 0 } } status = napi_define_properties(env, obj, @@ -2375,7 +2410,7 @@ if (status != napi_ok) return status; ``` ### Structures -#### *napi_property_attributes* +#### napi_property_attributes ```C typedef enum { napi_default = 0, @@ -2398,7 +2433,7 @@ They can be one or more of the following bitflags: - `napi_default` - Used to indicate that no explicit attributes are set on the given property. By default, a property is read only, not enumerable and not configurable. -- `napi_writable` - Used to indicate that a given property is writable. +- `napi_writable` - Used to indicate that a given property is writable. - `napi_enumerable` - Used to indicate that a given property is enumerable. - `napi_configurable` - Used to indicate that a given property is configurable, as defined in @@ -2409,7 +2444,7 @@ a static property on a class as opposed to an instance property, which is the default. 
This is used only by [`napi_define_class`][]. It is ignored by `napi_define_properties`. -#### *napi_property_descriptor* +#### napi_property_descriptor ```C typedef struct { // One of utf8name or name should be NULL. @@ -2430,7 +2465,7 @@ typedef struct { encoded as UTF8. One of `utf8name` or `name` must be provided for the property. - `name`: Optional napi_value that points to a JavaScript string or symbol -to be used as the key for the property. One of `utf8name` or `name` must +to be used as the key for the property. One of `utf8name` or `name` must be provided for the property. - `value`: The value that's retrieved by a get access of the property if the property is a data property. If this is passed in, set `getter`, `setter`, @@ -2455,7 +2490,7 @@ this function is invoked. See [`napi_property_attributes`](#n_api_napi_property_attributes). ### Functions -#### *napi_get_property_names* +#### napi_get_property_names @@ -2474,9 +2509,9 @@ and [`napi_get_element`][]. Returns `napi_ok` if the API succeeded. -This API returns the array of propertys for the Object passed in +This API returns the array of properties for the Object passed in -#### *napi_set_property* +#### napi_set_property @@ -2496,7 +2531,7 @@ Returns `napi_ok` if the API succeeded. This API set a property on the Object passed in. -#### *napi_get_property* +#### napi_get_property @@ -2517,7 +2552,7 @@ Returns `napi_ok` if the API succeeded. This API gets the requested property from the Object passed in. -#### *napi_has_property* +#### napi_has_property @@ -2538,7 +2573,7 @@ Returns `napi_ok` if the API succeeded. This API checks if the Object passed in has the named property. -#### *napi_delete_property* +#### napi_delete_property @@ -2560,7 +2595,7 @@ Returns `napi_ok` if the API succeeded. This API attempts to delete the `key` own property from `object`. -#### *napi_has_own_property* +#### napi_has_own_property @@ -2583,7 +2618,7 @@ be a string or a Symbol, or an error will be thrown. N-API will not perform any conversion between data types. -#### *napi_set_named_property* +#### napi_set_named_property @@ -2604,7 +2639,7 @@ Returns `napi_ok` if the API succeeded. This method is equivalent to calling [`napi_set_property`][] with a `napi_value` created from the string passed in as `utf8Name` -#### *napi_get_named_property* +#### napi_get_named_property @@ -2625,7 +2660,7 @@ Returns `napi_ok` if the API succeeded. This method is equivalent to calling [`napi_get_property`][] with a `napi_value` created from the string passed in as `utf8Name` -#### *napi_has_named_property* +#### napi_has_named_property @@ -2646,7 +2681,7 @@ Returns `napi_ok` if the API succeeded. This method is equivalent to calling [`napi_has_property`][] with a `napi_value` created from the string passed in as `utf8Name` -#### *napi_set_element* +#### napi_set_element @@ -2666,7 +2701,7 @@ Returns `napi_ok` if the API succeeded. This API sets and element on the Object passed in. -#### *napi_get_element* +#### napi_get_element @@ -2686,7 +2721,7 @@ Returns `napi_ok` if the API succeeded. This API gets the element at the requested index. -#### *napi_has_element* +#### napi_has_element @@ -2707,7 +2742,7 @@ Returns `napi_ok` if the API succeeded. This API returns if the Object passed in has an element at the requested index. -#### *napi_delete_element* +#### napi_delete_element @@ -2728,7 +2763,7 @@ Returns `napi_ok` if the API succeeded. This API attempts to delete the specified `index` from `object`. 
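To tie the element functions above together, here is a small sketch that fills a JavaScript array with the squares of its indices and reads one element back. The function name `MakeSquares` and the array length are assumptions made for the example.

```C
#include <node_api.h>
#include <stdint.h>

// A sketch using the element APIs: create an array, set elements by index,
// and read one element back.
static napi_value MakeSquares(napi_env env, napi_callback_info info) {
  napi_value arr;
  if (napi_create_array(env, &arr) != napi_ok) return NULL;

  for (uint32_t i = 0; i < 10; i++) {
    napi_value square;
    if (napi_create_uint32(env, i * i, &square) != napi_ok) return NULL;
    // Store the value at index `i`.
    if (napi_set_element(env, arr, i, square) != napi_ok) return NULL;
  }

  napi_value third;
  // Retrieve the element at index 3; it refers to the JavaScript number 9.
  if (napi_get_element(env, arr, 3, &third) != napi_ok) return NULL;

  return arr;
}
```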
-#### *napi_define_properties* +#### napi_define_properties @@ -2771,7 +2806,7 @@ like a regular JavaScript function call, or as a constructor function. -### *napi_call_function* +### napi_call_function @@ -2837,7 +2872,7 @@ status = napi_get_value_int32(env, return_val, &result); if (status != napi_ok) return; ``` -### *napi_create_function* +### napi_create_function @@ -2876,18 +2911,18 @@ object. A sample module might look as follows: ```C napi_value SayHello(napi_env env, napi_callback_info info) { printf("Hello\n"); - return nullptr; + return NULL; } napi_value Init(napi_env env, napi_value exports) { napi_status status; napi_value fn; - status = napi_create_function(env, nullptr, 0, SayHello, nullptr, &fn); - if (status != napi_ok) return nullptr; + status = napi_create_function(env, NULL, 0, SayHello, nullptr, &fn); + if (status != napi_ok) return NULL; status = napi_set_named_property(env, exports, "sayHello", fn); - if (status != napi_ok) return nullptr; + if (status != napi_ok) return NULL; return exports; } @@ -2905,7 +2940,7 @@ myaddon.sayHello(); `NAPI_MODULE` in the earlier snippet but the name of the target in `binding.gyp` responsible for creating the `.node` file. -### *napi_get_cb_info* +### napi_get_cb_info @@ -2935,7 +2970,7 @@ Returns `napi_ok` if the API succeeded. This method is used within a callback function to retrieve details about the call like the arguments and the `this` pointer from a given callback info. -### *napi_get_new_target* +### napi_get_new_target @@ -2952,9 +2987,9 @@ napi_status napi_get_new_target(napi_env env, Returns `napi_ok` if the API succeeded. This API returns the `new.target` of the constructor call. If the current -callback is not a constructor call, the result is `nullptr`. +callback is not a constructor call, the result is `NULL`. -### *napi_new_instance* +### napi_new_instance @@ -3034,7 +3069,7 @@ reference to the class constructor for later `instanceof` checks. As an example: ```C -napi_value MyClass_constructor = nullptr; +napi_value MyClass_constructor = NULL; status = napi_get_reference_value(env, MyClass::es_constructor, &MyClass_constructor); assert(napi_ok == status); bool is_instance = false; @@ -3049,7 +3084,7 @@ if (is_instance) { The reference must be freed once it is no longer needed. -### *napi_define_class* +### napi_define_class @@ -3068,7 +3103,7 @@ napi_status napi_define_class(napi_env env, - `[in] utf8name`: Name of the JavaScript constructor function; this is not required to be the same as the C++ class name, though it is recommended for clarity. - - `[in] length`: The length of the utf8name in bytes, or NAPI_AUTO_LENGTH + - `[in] length`: The length of the utf8name in bytes, or `NAPI_AUTO_LENGTH` if it is null-terminated. - `[in] constructor`: Callback function that handles constructing instances of the class. (This should be a static method on the class, not an actual @@ -3105,7 +3140,7 @@ case, to prevent the function value from being garbage-collected, create a persistent reference to it using [`napi_create_reference`][] and ensure the reference count is kept >= 1. -### *napi_wrap* +### napi_wrap @@ -3161,13 +3196,10 @@ required in order to enable correct proper of the reference. Afterward, additional manipulation of the wrapper's prototype chain may cause `napi_unwrap()` to fail. -*Note*: Calling `napi_wrap()` a second time on an object that already has a -native instance associated with it by virtue of a previous call to -`napi_wrap()` will cause an error to be returned. 
If you wish to associate -another native instance with the given object, call `napi_remove_wrap()` on it -first. +Calling napi_wrap() a second time on an object will return an error. To associate +another native instance with the object, use napi_remove_wrap() first. -### *napi_unwrap* +### napi_unwrap @@ -3192,7 +3224,7 @@ method or accessor, then the `this` argument to the callback is the wrapper object; the wrapped C++ instance that is the target of the call can be obtained then by calling `napi_unwrap()` on the wrapper object. -### *napi_remove_wrap* +### napi_remove_wrap @@ -3254,7 +3286,7 @@ napi_status napi_queue_async_work(napi_env env, napi_async_work work); ``` -[`napi_cancel_async_work`][] can be used if the work needs +[`napi_cancel_async_work`][] can be used if the work needs to be cancelled before the work has started execution. After calling [`napi_cancel_async_work`][], the `complete` callback @@ -3283,12 +3315,14 @@ napi_status napi_create_async_work(napi_env env, - `[in] env`: The environment that the API is invoked under. - `[in] async_resource`: An optional object associated with the async work that will be passed to possible async_hooks [`init` hooks][]. -- `[in] async_resource_name`: An identifier for the kind of resource that is +- `[in] async_resource_name`: Identifier for the kind of resource that is being provided for diagnostic information exposed by the `async_hooks` API. -- `[in] execute`: The native function which should be called to excute -the logic asynchronously. +- `[in] execute`: The native function which should be called to execute +the logic asynchronously. The given function is called from a worker pool +thread and can execute in parallel with the main event loop thread. - `[in] complete`: The native function which will be called when the -asynchronous logic is comple or is cancelled. +asynchronous logic is completed or is cancelled. The given function is called +from the main event loop thread. - `[in] data`: User-provided data context. This will be passed back into the execute and complete functions. - `[out] result`: `napi_async_work*` which is the handle to the newly created @@ -3324,6 +3358,8 @@ Returns `napi_ok` if the API succeeded. This API frees a previously allocated work object. +This API can be called even if there is a pending JavaScript exception. + ### napi_queue_async_work @@ -3382,14 +3420,14 @@ napi_status napi_async_init(napi_env env, - `[in] env`: The environment that the API is invoked under. - `[in] async_resource`: An optional object associated with the async work that will be passed to possible `async_hooks` [`init` hooks][]. -- `[in] async_resource_name`: Required identifier for the kind of resource +- `[in] async_resource_name`: Identifier for the kind of resource that is being provided for diagnostic information exposed by the `async_hooks` API. - `[out] result`: The initialized async context. Returns `napi_ok` if the API succeeded. -### *napi_async_destroy** +### napi_async_destroy @@ -3403,7 +3441,9 @@ napi_status napi_async_destroy(napi_env env, Returns `napi_ok` if the API succeeded. -### *napi_make_callback* +This API can be called even if there is a pending JavaScript exception. + +### napi_make_callback +```C +NAPI_EXTERN napi_status napi_open_callback_scope(napi_env env, + napi_value resource_object, + napi_async_context context, + napi_callback_scope* result) +``` +- `[in] env`: The environment that the API is invoked under. 
+- `[in] resource_object`: An optional object associated with the async work + that will be passed to possible async_hooks [`init` hooks][]. +- `[in] context`: Context for the async operation that is +invoking the callback. This should be a value previously obtained +from [`napi_async_init`][]. +- `[out] result`: The newly created scope. + +There are cases (for example resolving promises) where it is +necessary to have the equivalent of the scope associated with a callback +in place when making certain N-API calls. If there is no other script on +the stack the [`napi_open_callback_scope`][] and +[`napi_close_callback_scope`][] functions can be used to open/close +the required scope. + +### *napi_close_callback_scope* + +```C +NAPI_EXTERN napi_status napi_close_callback_scope(napi_env env, + napi_callback_scope scope) +``` +- `[in] env`: The environment that the API is invoked under. +- `[in] scope`: The scope to be closed. + +This API can be called even if there is a pending JavaScript exception. + ## Version Management ### napi_get_node_version @@ -3494,7 +3572,7 @@ napi_status napi_get_version(napi_env env, Returns `napi_ok` if the API succeeded. This API returns the highest N-API version supported by the -Node.js runtime. N-API is planned to be additive such that +Node.js runtime. N-API is planned to be additive such that newer releases of Node.js may support additional API functions. In order to allow an addon to use a newer function when running with versions of Node.js that support it, while providing @@ -3728,6 +3806,7 @@ NAPI_EXTERN napi_status napi_get_uv_event_loop(napi_env env, [`napi_async_init`]: #n_api_napi_async_init [`napi_cancel_async_work`]: #n_api_napi_cancel_async_work [`napi_close_escapable_handle_scope`]: #n_api_napi_close_escapable_handle_scope +[`napi_close_callback_scope`]: #n_api_napi_close_callback_scope [`napi_close_handle_scope`]: #n_api_napi_close_handle_scope [`napi_create_async_work`]: #n_api_napi_create_async_work [`napi_create_error`]: #n_api_napi_create_error @@ -3753,6 +3832,7 @@ NAPI_EXTERN napi_status napi_get_uv_event_loop(napi_env env, [`napi_get_last_error_info`]: #n_api_napi_get_last_error_info [`napi_get_and_clear_last_exception`]: #n_api_napi_get_and_clear_last_exception [`napi_make_callback`]: #n_api_napi_make_callback +[`napi_open_callback_scope`]: #n_api_napi_open_callback_scope [`napi_open_escapable_handle_scope`]: #n_api_napi_open_escapable_handle_scope [`napi_open_handle_scope`]: #n_api_napi_open_handle_scope [`napi_property_descriptor`]: #n_api_napi_property_descriptor diff --git a/doc/api/net.md b/doc/api/net.md index 31e0ca7a596fa7..f281d480304862 100644 --- a/doc/api/net.md +++ b/doc/api/net.md @@ -185,8 +185,8 @@ Possible signatures: * [`server.listen([port][, host][, backlog][, callback])`][`server.listen(port, host)`] for TCP servers -This function is asynchronous. When the server starts listening, the -[`'listening'`][] event will be emitted. The last parameter `callback` +This function is asynchronous. When the server starts listening, the +[`'listening'`][] event will be emitted. The last parameter `callback` will be added as a listener for the [`'listening'`][] event. All `listen()` methods can take a `backlog` parameter to specify the maximum @@ -420,8 +420,8 @@ added: v0.1.90 * {Buffer} -Emitted when data is received. The argument `data` will be a `Buffer` or -`String`. Encoding of data is set by `socket.setEncoding()`. +Emitted when data is received. The argument `data` will be a `Buffer` or +`String`. 
Encoding of data is set by `socket.setEncoding()`. (See the [Readable Stream][] section for more information.) Note that the **data will be lost** if there is no listener when a `Socket` @@ -459,7 +459,7 @@ added: v0.1.90 * {Error} -Emitted when an error occurs. The `'close'` event will be called directly +Emitted when an error occurs. The `'close'` event will be called directly following this event. ### Event: 'lookup' @@ -474,9 +474,9 @@ changes: Emitted after resolving the hostname but before connecting. Not applicable to UNIX sockets. -* `err` {Error|null} The error object. See [`dns.lookup()`][]. +* `err` {Error|null} The error object. See [`dns.lookup()`][]. * `address` {string} The IP address. -* `family` {string|null} The address type. See [`dns.lookup()`][]. +* `family` {string|null} The address type. See [`dns.lookup()`][]. * `host` {string} The hostname. ### Event: 'timeout' @@ -822,7 +822,7 @@ added: v0.1.90 --> Sends data on the socket. The second parameter specifies the encoding in the -case of a string--it defaults to UTF8 encoding. +case of a string — it defaults to UTF8 encoding. Returns `true` if the entire data was flushed successfully to the kernel buffer. Returns `false` if all or part of the data was queued in user memory. diff --git a/doc/api/os.md b/doc/api/os.md index 306a11a3b8dd82..f46292b81386ec 100644 --- a/doc/api/os.md +++ b/doc/api/os.md @@ -225,7 +225,7 @@ The `os.loadavg()` method returns an array containing the 1, 5, and 15 minute load averages. The load average is a measure of system activity, calculated by the operating -system and expressed as a fractional number. As a rule of thumb, the load +system and expressed as a fractional number. As a rule of thumb, the load average should ideally be less than the number of logical CPUs in the system. The load average is a UNIX-specific concept with no real equivalent on @@ -404,7 +404,7 @@ added: v6.0.0 * Returns: {Object} The `os.userInfo()` method returns information about the currently effective -user -- on POSIX platforms, this is typically a subset of the password file. The +user — on POSIX platforms, this is typically a subset of the password file. The returned object includes the `username`, `uid`, `gid`, `shell`, and `homedir`. On Windows, the `uid` and `gid` fields are `-1`, and `shell` is `null`. diff --git a/doc/api/path.md b/doc/api/path.md index f2015db47048d7..ab6039206fbd91 100644 --- a/doc/api/path.md +++ b/doc/api/path.md @@ -163,7 +163,7 @@ changes: The `path.extname()` method returns the extension of the `path`, from the last occurrence of the `.` (period) character to end of string in the last portion of -the `path`. If there is no `.` in the last portion of the `path`, or if the +the `path`. If there is no `.` in the last portion of the `path`, or if the first character of the basename of `path` (see `path.basename()`) is `.`, then an empty string is returned. 
@@ -396,7 +396,7 @@ path.parse('/home/user/dir/file.txt'); │ root │ │ name │ ext │ " / home/user/dir / file .txt " └──────┴──────────────┴──────┴─────┘ -(all spaces in the "" line should be ignored -- they are purely for formatting) +(all spaces in the "" line should be ignored — they are purely for formatting) ``` On Windows: @@ -418,7 +418,7 @@ path.parse('C:\\path\\dir\\file.txt'); │ root │ │ name │ ext │ " C:\ path\dir \ file .txt " └──────┴──────────────┴──────┴─────┘ -(all spaces in the "" line should be ignored -- they are purely for formatting) +(all spaces in the "" line should be ignored — they are purely for formatting) ``` A [`TypeError`][] is thrown if `path` is not a string. diff --git a/doc/api/perf_hooks.md b/doc/api/perf_hooks.md index 9b4ffb7ccb63c6..ace0c48bacdda3 100644 --- a/doc/api/perf_hooks.md +++ b/doc/api/perf_hooks.md @@ -29,6 +29,14 @@ added: v8.5.0 The `Performance` provides access to performance metric data. A single instance of this class is provided via the `performance` property. +### performance.clearEntries(name) + + +Remove all performance entry objects with `entryType` equal to `name` from the +Performance Timeline. + ### performance.clearFunctions([name]) The `process.setuid(id)` method sets the user identity of the process. (See -setuid(2).) The `id` can be passed as either a numeric ID or a username string. +setuid(2).) The `id` can be passed as either a numeric ID or a username string. If a username is specified, the method blocks while resolving the associated numeric ID. @@ -1747,7 +1747,7 @@ different maximum length restrictions on the title. Usually such restrictions are quite limited. For instance, on Linux and macOS, `process.title` is limited to the size of the binary name plus the length of the command line arguments because setting the `process.title` overwrites the `argv` memory of the -process. Node.js v0.8 allowed for longer process title strings by also +process. Node.js v0.8 allowed for longer process title strings by also overwriting the `environ` memory but that was potentially insecure and confusing in some (rather obscure) cases. @@ -1854,7 +1854,7 @@ Will generate an object similar to: ## Exit Codes Node.js will normally exit with a `0` status code when no more async -operations are pending. The following status codes are used in other +operations are pending. The following status codes are used in other cases: * `1` **Uncaught Fatal Exception** - There was an uncaught exception, @@ -1862,12 +1862,12 @@ cases: handler. * `2` - Unused (reserved by Bash for builtin misuse) * `3` **Internal JavaScript Parse Error** - The JavaScript source code - internal in Node.js's bootstrapping process caused a parse error. This + internal in Node.js's bootstrapping process caused a parse error. This is extremely rare, and generally can only happen during development of Node.js itself. * `4` **Internal JavaScript Evaluation Failure** - The JavaScript source code internal in Node.js's bootstrapping process failed to - return a function value when evaluated. This is extremely rare, and + return a function value when evaluated. This is extremely rare, and generally can only happen during development of Node.js itself. * `5` **Fatal Error** - There was a fatal unrecoverable error in V8. Typically a message will be printed to stderr with the prefix `FATAL @@ -1877,22 +1877,22 @@ cases: function was somehow set to a non-function, and could not be called. 
* `7` **Internal Exception Handler Run-Time Failure** - There was an uncaught exception, and the internal fatal exception handler - function itself threw an error while attempting to handle it. This + function itself threw an error while attempting to handle it. This can happen, for example, if a [`'uncaughtException'`][] or `domain.on('error')` handler throws an error. -* `8` - Unused. In previous versions of Node.js, exit code 8 sometimes +* `8` - Unused. In previous versions of Node.js, exit code 8 sometimes indicated an uncaught exception. * `9` - **Invalid Argument** - Either an unknown option was specified, or an option requiring a value was provided without a value. * `10` **Internal JavaScript Run-Time Failure** - The JavaScript source code internal in Node.js's bootstrapping process threw an error - when the bootstrapping function was called. This is extremely rare, + when the bootstrapping function was called. This is extremely rare, and generally can only happen during development of Node.js itself. * `12` **Invalid Debug Argument** - The `--inspect` and/or `--inspect-brk` options were set, but the port number chosen was invalid or unavailable. * `>128` **Signal Exits** - If Node.js receives a fatal signal such as `SIGKILL` or `SIGHUP`, then its exit code will be `128` plus the - value of the signal code. This is a standard POSIX practice, since + value of the signal code. This is a standard POSIX practice, since exit codes are defined to be 7-bit integers, and signal exits set the high-order bit, and then contain the value of the signal code. diff --git a/doc/api/readline.md b/doc/api/readline.md index 7ba1277dce2d9c..78571d7e993ef8 100644 --- a/doc/api/readline.md +++ b/doc/api/readline.md @@ -295,7 +295,7 @@ added: v0.1.98 * `shift` {boolean} `true` to indicate the `` key. * `name` {string} The name of the a key. -The `rl.write()` method will write either `data` or a key sequence identified +The `rl.write()` method will write either `data` or a key sequence identified by `key` to the `output`. The `key` argument is supported only if `output` is a [TTY][] text terminal. @@ -323,7 +323,7 @@ Interface's `input` *as if it were provided by the user*. added: v0.7.7 --> -* `stream` {Writable} +* `stream` {stream.Writable} * `dir` {number} * `-1` - to the left from cursor * `1` - to the right from cursor @@ -338,7 +338,7 @@ in a specified direction identified by `dir`. added: v0.7.7 --> -* `stream` {Writable} +* `stream` {stream.Writable} The `readline.clearScreenDown()` method clears the given [TTY][] stream from the current position of the cursor down. @@ -362,9 +362,9 @@ changes: --> * `options` {Object} - * `input` {Readable} The [Readable][] stream to listen to. This option is + * `input` {stream.Readable} The [Readable][] stream to listen to. This option is *required*. - * `output` {Writable} The [Writable][] stream to write readline data to. + * `output` {stream.Writable} The [Writable][] stream to write readline data to. * `completer` {Function} An optional function used for Tab autocompletion. * `terminal` {boolean} `true` if the `input` and `output` streams should be treated like a TTY, and have ANSI/VT100 escape codes written to it. @@ -444,7 +444,7 @@ function completer(linePartial, callback) { added: v0.7.7 --> -* `stream` {Writable} +* `stream` {stream.Writable} * `x` {number} * `y` {number} @@ -456,7 +456,7 @@ given [TTY][] `stream`. 
added: v0.7.7 --> -* `stream` {Readable} +* `stream` {stream.Readable} * `interface` {readline.Interface} The `readline.emitKeypressEvents()` method causes the given [Readable][] @@ -482,7 +482,7 @@ if (process.stdin.isTTY) added: v0.7.7 --> -* `stream` {Writable} +* `stream` {stream.Writable} * `dx` {number} * `dy` {number} diff --git a/doc/api/repl.md b/doc/api/repl.md index 0ff4ee099cb428..1b6013cf4ca4ed 100644 --- a/doc/api/repl.md +++ b/doc/api/repl.md @@ -96,7 +96,7 @@ are declared at the global scope. The default evaluator provides access to any variables that exist in the global scope. It is possible to expose a variable to the REPL explicitly by assigning -it to the `context` object associated with each `REPLServer`. For example: +it to the `context` object associated with each `REPLServer`. For example: ```js const repl = require('repl'); @@ -388,9 +388,9 @@ changes: * `options` {Object|string} * `prompt` {string} The input prompt to display. Defaults to `> ` (with a trailing space). - * `input` {Readable} The Readable stream from which REPL input will be read. + * `input` {stream.Readable} The Readable stream from which REPL input will be read. Defaults to `process.stdin`. - * `output` {Writable} The Writable stream to which REPL output will be + * `output` {stream.Writable} The Writable stream to which REPL output will be written. Defaults to `process.stdout`. * `terminal` {boolean} If `true`, specifies that the `output` should be treated as a TTY terminal, and have ANSI/VT100 escape codes written to it. @@ -398,7 +398,7 @@ changes: stream upon instantiation. * `eval` {Function} The function to be used when evaluating each given line of input. Defaults to an async wrapper for the JavaScript `eval()` - function. An `eval` function can error with `repl.Recoverable` to indicate + function. An `eval` function can error with `repl.Recoverable` to indicate the input was incomplete and prompt for additional lines. * `useColors` {boolean} If `true`, specifies that the default `writer` function should include ANSI color styling to REPL output. If a custom @@ -467,7 +467,7 @@ environment variables: - `NODE_REPL_HISTORY` - When a valid path is given, persistent REPL history will be saved to the specified file rather than `.node_repl_history` in the - user's home directory. Setting this value to `""` will disable persistent + user's home directory. Setting this value to `''` will disable persistent REPL history. Whitespace will be trimmed from the value. - `NODE_REPL_HISTORY_SIZE` - Defaults to `1000`. Controls how many lines of history will be persisted if history is available. Must be a positive number. diff --git a/doc/api/stream.md b/doc/api/stream.md index d92d5fde23a38b..f0d393543d6516 100644 --- a/doc/api/stream.md +++ b/doc/api/stream.md @@ -394,7 +394,7 @@ changes: --> * `encoding` {string} The new default encoding -* Returns: `this` +* Returns: {this} The `writable.setDefaultEncoding()` method sets the default `encoding` for a [Writable][] stream. @@ -525,7 +525,7 @@ A Writable stream in object mode will always ignore the `encoding` argument. added: v8.0.0 --> -* Returns: `this` +* Returns: {this} Destroy the stream, and emit the passed error. After this call, the writable stream has ended. Implementors should not override this method, @@ -572,8 +572,8 @@ The Readable can switch back to paused mode using one of the following: * If there are no pipe destinations, by calling the [`stream.pause()`][stream-pause] method. 
-* If there are pipe destinations, by removing any [`'data'`][] event - handlers, and removing all pipe destinations by calling the +* If there are pipe destinations, by removing all pipe destinations. + Multiple pipe destinations may be removed by calling the [`stream.unpipe()`][] method. The important concept to remember is that a Readable will not generate data @@ -810,7 +810,7 @@ readable.isPaused(); // === false added: v0.9.4 --> -* Returns: `this` +* Returns: {this} The `readable.pause()` method will cause a stream in flowing mode to stop emitting [`'data'`][] events, switching out of flowing mode. Any data that @@ -901,7 +901,7 @@ added: v0.9.4 --> * `size` {number} Optional argument to specify how much data to read. -* Return {string|Buffer|null} +* Returns: {string|Buffer|null} The `readable.read()` method pulls some data out of the internal buffer and returns it. If no data available to be read, `null` is returned. By default, @@ -950,7 +950,7 @@ event has been emitted will return `null`. No runtime error will be raised. added: v0.9.4 --> -* Returns: `this` +* Returns: {this} The `readable.resume()` method causes an explicitly paused Readable stream to resume emitting [`'data'`][] events, switching the stream into flowing mode. @@ -973,7 +973,7 @@ added: v0.9.4 --> * `encoding` {string} The encoding to use. -* Returns: `this` +* Returns: {this} The `readable.setEncoding()` method sets the character encoding for data read from the Readable stream. @@ -1411,7 +1411,7 @@ write succeeded. All calls to `writable.write()` that occur between the time `writable._write()` is called and the `callback` is called will cause the written data to be -buffered. Once the `callback` is invoked, the stream will emit a [`'drain'`][] +buffered. When the `callback` is invoked, the stream might emit a [`'drain'`][] event. If a stream implementation is capable of processing multiple chunks of data at once, the `writable._writev()` method should be implemented. @@ -1457,7 +1457,7 @@ added: v8.0.0 argument. The `_destroy()` method is called by [`writable.destroy()`][writable-destroy]. -It can be overriden by child classes but it **must not** be called directly. +It can be overridden by child classes but it **must not** be called directly. #### writable.\_final(callback) Returns the bound address, the address family name, and port of the -server as reported by the operating system. See [`net.Server.address()`][] for +server as reported by the operating system. See [`net.Server.address()`][] for more information. ### server.close([callback]) @@ -462,7 +462,7 @@ changes: * `options` {Object} * `isServer`: The SSL/TLS protocol is asymmetrical, TLSSockets must know if they are to behave as a server or a client. If `true` the TLS socket will be - instantiated as a server. Defaults to `false`. + instantiated as a server. Defaults to `false`. * `server` {net.Server} An optional [`net.Server`][] instance. * `requestCert`: Whether to authenticate the remote peer by requesting a certificate. Clients always request a server certificate. Servers @@ -620,7 +620,7 @@ For example: { ... another certificate, possibly with a .issuerCertificate ... }, raw: < RAW DER buffer >, valid_from: 'Nov 11 09:52:22 2009 GMT', - valid_to: 'Nov 6 09:52:22 2029 GMT', + valid_to: 'Nov 6 09:52:22 2029 GMT', fingerprint: '2A:7A:C2:DD:E5:F9:CC:53:72:35:99:7A:02:5A:71:38:52:EC:8A:DF', serialNumber: 'B9B0D332A1AA5635' } ``` @@ -822,7 +822,7 @@ changes: rather than creating a new socket. 
Typically, this is an instance of [`net.Socket`][], but any `Duplex` stream is allowed. If this option is specified, `path`, `host` and `port` are ignored, - except for certificate validation. Usually, a socket is already connected + except for certificate validation. Usually, a socket is already connected when passed to `tls.connect()`, but it can be connected later. Note that connection/disconnection/destruction of `socket` is the user's responsibility, calling `tls.connect()` will not cause `net.connect()` to be @@ -983,7 +983,7 @@ changes: decrypted with `object.passphrase` if provided, or `options.passphrase` if it is not. * `key` {string|string[]|Buffer|Buffer[]|Object[]} Optional private keys in PEM format. PEM allows the option of private keys being encrypted. Encrypted - keys will be decrypted with `options.passphrase`. Multiple keys using + keys will be decrypted with `options.passphrase`. Multiple keys using different algorithms can be provided either as an array of unencrypted key strings or buffers, or an array of objects in the form `{pem: [, passphrase: ]}`. The object form can only occur in @@ -996,7 +996,7 @@ changes: consist of the PEM formatted certificate for a provided private `key`, followed by the PEM formatted intermediate certificates (if any), in order, and not including the root CA (the root CA must be pre-known to the peer, - see `ca`). When providing multiple cert chains, they do not have to be in + see `ca`). When providing multiple cert chains, they do not have to be in the same order as their private keys in `key`. If the intermediate certificates are not provided, the peer will not be able to validate the certificate, and the handshake will fail. @@ -1006,7 +1006,7 @@ changes: using this option. The value can be a string or Buffer, or an Array of strings and/or Buffers. Any string or Buffer can contain multiple PEM CAs concatenated together. The peer's certificate must be chainable to a CA - trusted by the server for the connection to be authenticated. When using + trusted by the server for the connection to be authenticated. When using certificates that are not chainable to a well-known CA, the certificate's CA must be explicitly specified as a trusted or the connection will fail to authenticate. @@ -1018,7 +1018,7 @@ changes: * `crl` {string|string[]|Buffer|Buffer[]} Optional PEM formatted CRLs (Certificate Revocation Lists). * `ciphers` {string} Optional cipher suite specification, replacing the - default. For more information, see [modifying the default cipher suite][]. + default. For more information, see [modifying the default cipher suite][]. * `honorCipherOrder` {boolean} Attempt to use the server's cipher suite preferences instead of the client's. When `true`, causes `SSL_OP_CIPHER_SERVER_PREFERENCE` to be set in `secureOptions`, see @@ -1037,8 +1037,8 @@ changes: for stronger security. If omitted or invalid, the parameters are silently discarded and DHE ciphers will not be available. * `secureProtocol` {string} Optional SSL method to use, default is - `"SSLv23_method"`. The possible values are listed as [SSL_METHODS][], use - the function names as strings. For example, `"SSLv3_method"` to force SSL + `'SSLv23_method'`. The possible values are listed as [SSL_METHODS][], use + the function names as strings. For example, `'SSLv3_method'` to force SSL version 3. * `secureOptions` {number} Optionally affect the OpenSSL protocol behavior, which is not usually necessary. This should be used carefully if at all! 
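(Editorial aside, illustrative only and not part of the patch: the `socket` option discussed above can be demonstrated with a minimal sketch along these lines; the host name and port are placeholders.)

```js
const net = require('net');
const tls = require('tls');

// Establish a plain TCP connection first, then hand it to tls.connect().
// When `socket` is given, `host`/`port`/`path` are ignored for connecting,
// but `servername` is still used for SNI and certificate validation.
const plain = net.connect(443, 'example.org', () => {
  const secure = tls.connect({
    socket: plain,
    servername: 'example.org'
  }, () => {
    console.log('TLS established, authorized:', secure.authorized);
    secure.end();
  });
});
```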
@@ -1124,7 +1124,7 @@ changes: servers, the identity options (`pfx` or `key`/`cert`) are usually required. * `secureConnectionListener` {Function} -Creates a new [tls.Server][]. The `secureConnectionListener`, if provided, is +Creates a new [tls.Server][]. The `secureConnectionListener`, if provided, is automatically set as a listener for the [`'secureConnection'`][] event. *Note*: The `ticketKeys` options is automatically shared between `cluster` diff --git a/doc/api/url.md b/doc/api/url.md index 00211095627a46..40cc002f048e81 100644 --- a/doc/api/url.md +++ b/doc/api/url.md @@ -51,7 +51,7 @@ properties of a WHATWG `URL` object. ├─────────────┴─────────────────────┴─────────────────────┴──────────┴────────────────┴───────┤ │ href │ └─────────────────────────────────────────────────────────────────────────────────────────────┘ -(all spaces in the "" line should be ignored -- they are purely for formatting) +(all spaces in the "" line should be ignored — they are purely for formatting) ``` Parsing the URL string using the WHATWG API: @@ -556,7 +556,7 @@ Instantiate a new `URLSearchParams` object with an iterable map in a way that is similar to [`Map`][]'s constructor. `iterable` can be an Array or any iterable object. That means `iterable` can be another `URLSearchParams`, in which case the constructor will simply create a clone of the provided -`URLSearchParams`. Elements of `iterable` are key-value pairs, and can +`URLSearchParams`. Elements of `iterable` are key-value pairs, and can themselves be any iterable object. Duplicate keys are allowed. @@ -1030,6 +1030,11 @@ The formatting process operates as follows: ### url.parse(urlString[, parseQueryString[, slashesDenoteHost]]) * `urlString` {string} The URL string to parse. @@ -1141,7 +1146,7 @@ console.log(myURL.origin); ``` [`Error`]: errors.html#errors_class_error -[`JSON.stringify()`]: https://developer.mozilla.org/en/docs/Web/JavaScript/Reference/Global_Objects/JSON/stringify +[`JSON.stringify()`]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/JSON/stringify [`Map`]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Map [`TypeError`]: errors.html#errors_class_typeerror [`URLSearchParams`]: #url_class_urlsearchparams diff --git a/doc/api/util.md b/doc/api/util.md index 52e9e41639e0ca..7932b815e4a5e9 100644 --- a/doc/api/util.md +++ b/doc/api/util.md @@ -82,9 +82,9 @@ added: v0.11.3 The `util.debuglog()` method is used to create a function that conditionally writes debug messages to `stderr` based on the existence of the `NODE_DEBUG` -environment variable. If the `section` name appears within the value of that +environment variable. If the `section` name appears within the value of that environment variable, then the returned function operates similar to -[`console.error()`][]. If not, then the returned function is a no-op. +[`console.error()`][]. If not, then the returned function is a no-op. For example: @@ -102,7 +102,7 @@ it will output something like: FOO 3245: hello from foo [123] ``` -where `3245` is the process id. If it is not run with that +where `3245` is the process id. If it is not run with that environment variable set, then it will not print anything. Multiple comma-separated `section` names may be specified in the `NODE_DEBUG` @@ -172,7 +172,7 @@ corresponding argument. Supported placeholders are: * `%d` - Number (integer or floating point value). * `%i` - Integer. * `%f` - Floating point value. -* `%j` - JSON. 
Replaced with the string `'[Circular]'` if the argument +* `%j` - JSON. Replaced with the string `'[Circular]'` if the argument contains circular references. * `%o` - Object. A string representation of an object with generic JavaScript object formatting. @@ -233,7 +233,7 @@ that the two styles are [semantically incompatible][]. * `constructor` {Function} * `superConstructor` {Function} -Inherit the prototype methods from one [constructor][] into another. The +Inherit the prototype methods from one [constructor][] into another. The prototype of `constructor` will be set to a new object created from `superConstructor`. @@ -1191,7 +1191,7 @@ Deprecated predecessor of `console.log`. [`Array.isArray`]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Array/isArray [`Buffer.isBuffer()`]: buffer.html#buffer_class_method_buffer_isbuffer_obj [`Error`]: errors.html#errors_class_error -[`Object.assign()`]: https://developer.mozilla.org/en/docs/Web/JavaScript/Reference/Global_Objects/Object/assign +[`Object.assign()`]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Object/assign [`console.error()`]: console.html#console_console_error_data_args [`console.log()`]: console.html#console_console_log_data_args [`util.inspect()`]: #util_util_inspect_object_options @@ -1201,5 +1201,5 @@ Deprecated predecessor of `console.log`. [Customizing `util.inspect` colors]: #util_customizing_util_inspect_colors [Internationalization]: intl.html [WHATWG Encoding Standard]: https://encoding.spec.whatwg.org/ -[constructor]: https://developer.mozilla.org/en/JavaScript/Reference/Global_Objects/Object/constructor +[constructor]: https://developer.mozilla.org/en-US/JavaScript/Reference/Global_Objects/Object/constructor [semantically incompatible]: https://github.com/nodejs/node/issues/4179 diff --git a/doc/api/v8.md b/doc/api/v8.md index 3684a61583cb37..63a4ded9fe96ea 100644 --- a/doc/api/v8.md +++ b/doc/api/v8.md @@ -144,7 +144,7 @@ after the VM has started may result in unpredictable behavior, including crashes and data loss; or it may simply do nothing. The V8 options available for a version of Node.js may be determined by running -`node --v8-options`. An unofficial, community-maintained list of options +`node --v8-options`. An unofficial, community-maintained list of options and their effects is available [here][]. Usage: diff --git a/doc/api/vm.md b/doc/api/vm.md index 41101a5e5cf76f..842b0c8ac20cdd 100644 --- a/doc/api/vm.md +++ b/doc/api/vm.md @@ -116,7 +116,7 @@ changes: will be thrown. * `breakOnSigint`: if `true`, the execution will be terminated when `SIGINT` (Ctrl+C) is received. Existing handlers for the - event that have been attached via `process.on("SIGINT")` will be disabled + event that have been attached via `process.on('SIGINT')` will be disabled during script execution, but will continue to work after that. If execution is terminated, an [`Error`][] will be thrown. diff --git a/doc/api/zlib.md b/doc/api/zlib.md index 7c55b94b26706b..00d3a8c821d2eb 100644 --- a/doc/api/zlib.md +++ b/doc/api/zlib.md @@ -64,8 +64,8 @@ header is used to identify the compression encodings actually applied to a message. *Note*: the examples given below are drastically simplified to show -the basic concept. Using `zlib` encoding can be expensive, and the results -ought to be cached. See [Memory Usage Tuning][] for more information +the basic concept. Using `zlib` encoding can be expensive, and the results +ought to be cached. 
See [Memory Usage Tuning][] for more information on the speed/memory/compression tradeoffs involved in `zlib` usage. ```js @@ -165,7 +165,7 @@ The memory requirements for deflate are (in bytes): (1 << (windowBits + 2)) + (1 << (memLevel + 9)) ``` -That is: 128K for windowBits=15 + 128K for memLevel = 8 +That is: 128K for windowBits = 15 + 128K for memLevel = 8 (default values) plus a few kilobytes for small objects. For example, to reduce the default memory requirements from 256K to 128K, the @@ -178,20 +178,20 @@ const options = { windowBits: 14, memLevel: 7 }; This will, however, generally degrade compression. The memory requirements for inflate are (in bytes) `1 << windowBits`. -That is, 32K for windowBits=15 (default value) plus a few kilobytes +That is, 32K for windowBits = 15 (default value) plus a few kilobytes for small objects. This is in addition to a single internal output slab buffer of size `chunkSize`, which defaults to 16K. The speed of `zlib` compression is affected most dramatically by the -`level` setting. A higher level will result in better compression, but -will take longer to complete. A lower level will result in less +`level` setting. A higher level will result in better compression, but +will take longer to complete. A lower level will result in less compression, but will be much faster. In general, greater memory usage options will mean that Node.js has to make fewer calls to `zlib` because it will be able to process more data on -each `write` operation. So, this is another factor that affects the +each `write` operation. So, this is another factor that affects the speed, at the cost of memory usage. ## Flushing @@ -233,9 +233,9 @@ added: v0.5.8 All of the constants defined in `zlib.h` are also defined on `require('zlib').constants`. In the normal course of operations, it will not be -necessary to use these constants. They are documented so that their presence is +necessary to use these constants. They are documented so that their presence is not surprising. This section is taken almost directly from the -[zlib documentation][]. See for more +[zlib documentation][]. See for more details. *Note*: Previously, the constants were available directly from @@ -296,7 +296,7 @@ changes: -Each class takes an `options` object. All options are optional. +Each class takes an `options` object. All options are optional. Note that some options are only relevant when compressing, and are ignored by the decompression classes. diff --git a/doc/changelogs/CHANGELOG_V8.md b/doc/changelogs/CHANGELOG_V8.md index 960f97ebc6e289..11b17bb89afd9b 100644 --- a/doc/changelogs/CHANGELOG_V8.md +++ b/doc/changelogs/CHANGELOG_V8.md @@ -10,6 +10,7 @@ +8.11.2
8.11.1
8.11.0
8.10.0
@@ -53,6 +54,239 @@ [Node.js Long Term Support Plan](https://github.com/nodejs/LTS) and will be supported actively until April 2019 and maintained until December 2019. + +## 2018-05-15, Version 8.11.2 'Carbon' (LTS), @MylesBorins + +### Notable Changes + +* **deps**: + - update node-inspect to 1.11.3 (Jan Krems) [#18354](https://github.com/nodejs/node/pull/18354) + - update nghttp2 to 1.29.0 (James M Snell) [#17908](https://github.com/nodejs/node/pull/17908) +* **http2**: + - Sync with current release stream +* **n-api**: + - Sync with current release stream + +### Commits + +* [[`ce3866bdcc`](https://github.com/nodejs/node/commit/ce3866bdcc)] - **async_hooks**: clean up comments (Ali Ijaz Sheikh) [#18467](https://github.com/nodejs/node/pull/18467) +* [[`86e3c89ea4`](https://github.com/nodejs/node/commit/86e3c89ea4)] - **benchmark**: improve compare output (Ruben Bridgewater) [#18597](https://github.com/nodejs/node/pull/18597) +* [[`18be476116`](https://github.com/nodejs/node/commit/18be476116)] - **benchmark**: fix punycode test for --without-intl (Timothy Gu) [#16251](https://github.com/nodejs/node/pull/16251) +* [[`88d3028e22`](https://github.com/nodejs/node/commit/88d3028e22)] - **build**: no longer have v8-debug.h as dependency. (Yang Guo) [#18677](https://github.com/nodejs/node/pull/18677) +* [[`7b6d93c145`](https://github.com/nodejs/node/commit/7b6d93c145)] - **build**: add doc linting when runnning `make lint` (Camilo Gonzalez) [#18472](https://github.com/nodejs/node/pull/18472) +* [[`9bce14172a`](https://github.com/nodejs/node/commit/9bce14172a)] - **build**: do not suppress output in make doc-only (Joyee Cheung) [#18507](https://github.com/nodejs/node/pull/18507) +* [[`333d7dda84`](https://github.com/nodejs/node/commit/333d7dda84)] - **build**: make lint-js independent of local node (Joyee Cheung) [#18272](https://github.com/nodejs/node/pull/18272) +* [[`d537f45aaa`](https://github.com/nodejs/node/commit/d537f45aaa)] - **build**: make lint-md independent of local node (Joyee Cheung) [#18272](https://github.com/nodejs/node/pull/18272) +* [[`658dd409fd`](https://github.com/nodejs/node/commit/658dd409fd)] - **build**: refine static and shared lib build (Yihong Wang) [#17604](https://github.com/nodejs/node/pull/17604) +* [[`659b2a1821`](https://github.com/nodejs/node/commit/659b2a1821)] - **build**: allow x86\_64 as a dest\_cpu alias for x64 (Rod Vagg) [#18052](https://github.com/nodejs/node/pull/18052) +* [[`424703a556`](https://github.com/nodejs/node/commit/424703a556)] - **build**: add cflags for OpenBSD, remove stray comma. 
(Aaron Bieber) [#18448](https://github.com/nodejs/node/pull/18448) +* [[`ab4809f195`](https://github.com/nodejs/node/commit/ab4809f195)] - **build,win**: restore vcbuild TAG functionality (Rod Vagg) [#18031](https://github.com/nodejs/node/pull/18031) +* [[`bf4d0743be`](https://github.com/nodejs/node/commit/bf4d0743be)] - **cluster**: fix inspector port assignment (Santiago Gimeno) [#18696](https://github.com/nodejs/node/pull/18696) +* [[`16bf5fed69`](https://github.com/nodejs/node/commit/16bf5fed69)] - **crypto**: reuse variable instead of reevaluation (Tobias Nießen) [#17735](https://github.com/nodejs/node/pull/17735) +* [[`9acc7f3fbb`](https://github.com/nodejs/node/commit/9acc7f3fbb)] - **deps**: update nghttp2 to 1.29.0 (James M Snell) [#17908](https://github.com/nodejs/node/pull/17908) +* [[`ab005592be`](https://github.com/nodejs/node/commit/ab005592be)] - **deps**: V8: backport 76c3ac5 from upstream (Ali Ijaz Sheikh) [#18298](https://github.com/nodejs/node/pull/18298) +* [[`f12db24947`](https://github.com/nodejs/node/commit/f12db24947)] - **deps**: cherry-pick a803fad from upstream V8 (Michaël Zasso) [#19824](https://github.com/nodejs/node/pull/19824) +* [[`09f5e252bf`](https://github.com/nodejs/node/commit/09f5e252bf)] - **deps**: cherry-pick 7abdadc from upstream V8 (Michaël Zasso) [#19824](https://github.com/nodejs/node/pull/19824) +* [[`c97237bc10`](https://github.com/nodejs/node/commit/c97237bc10)] - **deps**: cherry-pick a4bddba from upstream V8 (Michaël Zasso) [#19824](https://github.com/nodejs/node/pull/19824) +* [[`d02b72e799`](https://github.com/nodejs/node/commit/d02b72e799)] - **deps**: V8: backport 596d55a from upstream (Myles Borins) [#19477](https://github.com/nodejs/node/pull/19477) +* [[`79a7a17312`](https://github.com/nodejs/node/commit/79a7a17312)] - **deps**: update node-inspect to 1.11.3 (Jan Krems) [#18354](https://github.com/nodejs/node/pull/18354) +* [[`5394bc5c42`](https://github.com/nodejs/node/commit/5394bc5c42)] - **deps,src**: align ssize\_t ABI between Node & nghttp2 (Anna Henningsen) [#18565](https://github.com/nodejs/node/pull/18565) +* [[`165d214a54`](https://github.com/nodejs/node/commit/165d214a54)] - **doc**: add Http2Session.connecting property (Pieter Mees) [#19842](https://github.com/nodejs/node/pull/19842) +* [[`1ff3544a4b`](https://github.com/nodejs/node/commit/1ff3544a4b)] - **doc**: guard against md list parsing edge case (Vse Mozhet Byt) [#19647](https://github.com/nodejs/node/pull/19647) +* [[`f59eab0165`](https://github.com/nodejs/node/commit/f59eab0165)] - **doc**: rename HTTP2 to HTTP/2 (Timothy Gu) [#19603](https://github.com/nodejs/node/pull/19603) +* [[`da185cec8f`](https://github.com/nodejs/node/commit/da185cec8f)] - **doc**: add note about browsers and HTTP/2 (Steven) [#19476](https://github.com/nodejs/node/pull/19476) +* [[`30070c7568`](https://github.com/nodejs/node/commit/30070c7568)] - **doc**: warn against concurrent http2stream.respondWithFD (Anna Henningsen) [#18762](https://github.com/nodejs/node/pull/18762) +* [[`39267e8bb0`](https://github.com/nodejs/node/commit/39267e8bb0)] - **doc**: fix typo in http2.md (Vse Mozhet Byt) [#18602](https://github.com/nodejs/node/pull/18602) +* [[`2da965c5b8`](https://github.com/nodejs/node/commit/2da965c5b8)] - **doc**: remove removed apis from http2 docs (Kelvin Jin) [#18439](https://github.com/nodejs/node/pull/18439) +* [[`9a4a8c127e`](https://github.com/nodejs/node/commit/9a4a8c127e)] - **doc**: unify type linkification (Vse Mozhet Byt) 
[#18407](https://github.com/nodejs/node/pull/18407) +* [[`15023c7d28`](https://github.com/nodejs/node/commit/15023c7d28)] - **doc**: fix documentation of http2Stream.pushstream() (Peter Dalgaard-Jensen) [#18258](https://github.com/nodejs/node/pull/18258) +* [[`fac76f9a6b`](https://github.com/nodejs/node/commit/fac76f9a6b)] - **doc**: fix typo in http2stream.close param default (Moritz Peters) [#18166](https://github.com/nodejs/node/pull/18166) +* [[`88babd5a23`](https://github.com/nodejs/node/commit/88babd5a23)] - **doc**: fix s/rstStream/close in example (James M Snell) [#18088](https://github.com/nodejs/node/pull/18088) +* [[`d9d0d0e98e`](https://github.com/nodejs/node/commit/d9d0d0e98e)] - **doc**: update pushStream docs to use err first (James M Snell) [#18088](https://github.com/nodejs/node/pull/18088) +* [[`940457394a`](https://github.com/nodejs/node/commit/940457394a)] - **doc**: compact eslint directives in common/README (Vse Mozhet Byt) [#17971](https://github.com/nodejs/node/pull/17971) +* [[`e9de5a976b`](https://github.com/nodejs/node/commit/e9de5a976b)] - **doc**: re-alphabetise sections in common/README.md (Vse Mozhet Byt) [#17971](https://github.com/nodejs/node/pull/17971) +* [[`c924adf33d`](https://github.com/nodejs/node/commit/c924adf33d)] - **doc**: fix code nits in common/README (Vse Mozhet Byt) [#17971](https://github.com/nodejs/node/pull/17971) +* [[`0205b3f0c1`](https://github.com/nodejs/node/commit/0205b3f0c1)] - **doc**: correct spelling (sreepurnajasti) [#17911](https://github.com/nodejs/node/pull/17911) +* [[`591f78bb0a`](https://github.com/nodejs/node/commit/591f78bb0a)] - **doc**: grammar fixes in http2.md (Rich Trott) [#17972](https://github.com/nodejs/node/pull/17972) +* [[`35ee8943da`](https://github.com/nodejs/node/commit/35ee8943da)] - **doc**: add docs for common/http2.js utility (James M Snell) [#17942](https://github.com/nodejs/node/pull/17942) +* [[`f0ba2c6ceb`](https://github.com/nodejs/node/commit/f0ba2c6ceb)] - **doc**: Add a missing comma (jiangq) [#19555](https://github.com/nodejs/node/pull/19555) +* [[`7c6fa183cb`](https://github.com/nodejs/node/commit/7c6fa183cb)] - **doc**: fix typos on n-api (Kyle Robinson Young) [#19385](https://github.com/nodejs/node/pull/19385) +* [[`1abb168838`](https://github.com/nodejs/node/commit/1abb168838)] - **doc**: fix n-api asynchronous threading docs (Eric Bickle) [#19073](https://github.com/nodejs/node/pull/19073) +* [[`87d0fd8212`](https://github.com/nodejs/node/commit/87d0fd8212)] - **doc**: mark NAPI\_AUTO\_LENGTH as code (Tobias Nießen) [#18697](https://github.com/nodejs/node/pull/18697) +* [[`58688d97dc`](https://github.com/nodejs/node/commit/58688d97dc)] - **doc**: fix exporting a function example (Aonghus O Nia) [#18661](https://github.com/nodejs/node/pull/18661) +* [[`4d43607474`](https://github.com/nodejs/node/commit/4d43607474)] - **doc**: fix typo in n-api.md (Vse Mozhet Byt) [#18590](https://github.com/nodejs/node/pull/18590) +* [[`9729278007`](https://github.com/nodejs/node/commit/9729278007)] - **doc**: small typo in n-api.md (iskore) [#18555](https://github.com/nodejs/node/pull/18555) +* [[`7ed1dfef28`](https://github.com/nodejs/node/commit/7ed1dfef28)] - **doc**: remove usage of you in n-api doc (Michael Dawson) [#18528](https://github.com/nodejs/node/pull/18528) +* [[`84e0a03727`](https://github.com/nodejs/node/commit/84e0a03727)] - **doc**: remove uannecessary Require (Michael Dawson) [#18184](https://github.com/nodejs/node/pull/18184) +* 
[[`51513fdf9e`](https://github.com/nodejs/node/commit/51513fdf9e)] - **doc**: napi: make header style consistent (Ali Ijaz Sheikh) [#18122](https://github.com/nodejs/node/pull/18122) +* [[`02ae3295d5`](https://github.com/nodejs/node/commit/02ae3295d5)] - **doc**: napi: fix unbalanced emphasis (Ali Ijaz Sheikh) [#18122](https://github.com/nodejs/node/pull/18122) +* [[`79ecc2c586`](https://github.com/nodejs/node/commit/79ecc2c586)] - **doc**: updates examples to use NULL (Michael Dawson) [#18008](https://github.com/nodejs/node/pull/18008) +* [[`b2213798a3`](https://github.com/nodejs/node/commit/b2213798a3)] - **doc**: fix MDN links to avoid redirections (Vse Mozhet Byt) [#18631](https://github.com/nodejs/node/pull/18631) +* [[`f4ddaaec0e`](https://github.com/nodejs/node/commit/f4ddaaec0e)] - **doc**: move Fedor to TSC Emeritus (Myles Borins) [#18752](https://github.com/nodejs/node/pull/18752) +* [[`b8f2acd2e8`](https://github.com/nodejs/node/commit/b8f2acd2e8)] - **doc**: add mmarchini to collaborators (Matheus Marchini) [#18740](https://github.com/nodejs/node/pull/18740) +* [[`16f9631475`](https://github.com/nodejs/node/commit/16f9631475)] - **doc**: add history for url.parse (Steven) [#18685](https://github.com/nodejs/node/pull/18685) +* [[`d30c3533ff`](https://github.com/nodejs/node/commit/d30c3533ff)] - **doc**: fix links to Style Guide and CPP Style Guide (Justin Lee) [#18683](https://github.com/nodejs/node/pull/18683) +* [[`176ed1e9b1`](https://github.com/nodejs/node/commit/176ed1e9b1)] - **doc**: add devsnek to collaborators (Gus Caplan) [#18679](https://github.com/nodejs/node/pull/18679) +* [[`25db460f03`](https://github.com/nodejs/node/commit/25db460f03)] - **doc**: expand on promises and async\_hooks (Ali Ijaz Sheikh) [#18540](https://github.com/nodejs/node/pull/18540) +* [[`73adadd56a`](https://github.com/nodejs/node/commit/73adadd56a)] - **doc**: add section for strategic initiatives (Michael Dawson) [#17104](https://github.com/nodejs/node/pull/17104) +* [[`8ca6d34801`](https://github.com/nodejs/node/commit/8ca6d34801)] - **doc**: add introduce about cli options (Weijia Wang) [#18475](https://github.com/nodejs/node/pull/18475) +* [[`1ea1970c37`](https://github.com/nodejs/node/commit/1ea1970c37)] - **doc**: modify the return value of request.write() (陈刚) [#18526](https://github.com/nodejs/node/pull/18526) +* [[`50bdf0ed78`](https://github.com/nodejs/node/commit/50bdf0ed78)] - **doc**: be more explicit in the sypnosis (Tim O. 
Peters) [#17977](https://github.com/nodejs/node/pull/17977) +* [[`f8ad381e61`](https://github.com/nodejs/node/commit/f8ad381e61)] - **doc**: add missing meta for createCipheriv (Tobias Nießen) [#18651](https://github.com/nodejs/node/pull/18651) +* [[`0071560eb4`](https://github.com/nodejs/node/commit/0071560eb4)] - **doc**: fix description of createDecipheriv (Tobias Nießen) [#18651](https://github.com/nodejs/node/pull/18651) +* [[`c89781583b`](https://github.com/nodejs/node/commit/c89781583b)] - **doc**: fix various nits (Vse Mozhet Byt) [#19743](https://github.com/nodejs/node/pull/19743) +* [[`1091dfc801`](https://github.com/nodejs/node/commit/1091dfc801)] - **doc**: linkify missing types (Vse Mozhet Byt) [#18444](https://github.com/nodejs/node/pull/18444) +* [[`1107a494e4`](https://github.com/nodejs/node/commit/1107a494e4)] - **doc**: shell option for the execFile and execFileSync functions (jvelezpo) [#18237](https://github.com/nodejs/node/pull/18237) +* [[`36ea472393`](https://github.com/nodejs/node/commit/36ea472393)] - **doc**: improve http.request documentation (Guangcong Luo) [#18289](https://github.com/nodejs/node/pull/18289) +* [[`e5d5137963`](https://github.com/nodejs/node/commit/e5d5137963)] - **doc**: streamline README intro (Rich Trott) [#18483](https://github.com/nodejs/node/pull/18483) +* [[`eec9334a2e`](https://github.com/nodejs/node/commit/eec9334a2e)] - **doc**: move Brian White to TSC Emeriti list (Rich Trott) [#18482](https://github.com/nodejs/node/pull/18482) +* [[`ac41aacb05`](https://github.com/nodejs/node/commit/ac41aacb05)] - **doc**: improve stream documentation (陈刚) [#18375](https://github.com/nodejs/node/pull/18375) +* [[`7feeb1574e`](https://github.com/nodejs/node/commit/7feeb1574e)] - **doc**: add Gibson Fahnestock to TSC (Rich Trott) [#18481](https://github.com/nodejs/node/pull/18481) +* [[`142ad8d450`](https://github.com/nodejs/node/commit/142ad8d450)] - **doc**: reorder section on updating PR branch (Ali Ijaz Sheikh) [#18355](https://github.com/nodejs/node/pull/18355) +* [[`39ea4f12c5`](https://github.com/nodejs/node/commit/39ea4f12c5)] - **doc**: fix manpage warnings (Roman Reiss) +* [[`5209f9e1e2`](https://github.com/nodejs/node/commit/5209f9e1e2)] - **doc**: warn about GCM authenticity (Tobias Nießen) [#18376](https://github.com/nodejs/node/pull/18376) +* [[`e84e9db6fe`](https://github.com/nodejs/node/commit/e84e9db6fe)] - **doc**: capitalize non-primitive types (Vse Mozhet Byt) [#18111](https://github.com/nodejs/node/pull/18111) +* [[`84fa6eb173`](https://github.com/nodejs/node/commit/84fa6eb173)] - **doc, http2**: add sections for server.close() (Chris Miller) [#19802](https://github.com/nodejs/node/pull/19802) +* [[`cbc8561949`](https://github.com/nodejs/node/commit/cbc8561949)] - **errors**: remove ERR\_OUTOFMEMORY (Tobias Nießen) [#17877](https://github.com/nodejs/node/pull/17877) +* [[`2995506bbf`](https://github.com/nodejs/node/commit/2995506bbf)] - **fs**: fix stack overflow in fs.readdirSync (Joyee Cheung) [#18647](https://github.com/nodejs/node/pull/18647) +* [[`a653f23dfc`](https://github.com/nodejs/node/commit/a653f23dfc)] - **fs**: fix `createReadStream(…, {end: n})` for non-seekable fds (Anna Henningsen) [#19329](https://github.com/nodejs/node/pull/19329) +* [[`6bfdba125f`](https://github.com/nodejs/node/commit/6bfdba125f)] - **http**: remove default 'drain' listener on upgrade (Luigi Pinca) [#18866](https://github.com/nodejs/node/pull/18866) +* [[`29c395d975`](https://github.com/nodejs/node/commit/29c395d975)] - **http**: allow 
\_httpMessage to be GC'ed (Luigi Pinca) [#18865](https://github.com/nodejs/node/pull/18865) +* [[`d2a884edf9`](https://github.com/nodejs/node/commit/d2a884edf9)] - **http**: fix parsing of binary upgrade response body (Ben Noordhuis) [#17806](https://github.com/nodejs/node/pull/17806) +* [[`1d88266543`](https://github.com/nodejs/node/commit/1d88266543)] - **http**: free the parser before emitting 'upgrade' (Luigi Pinca) [#18209](https://github.com/nodejs/node/pull/18209) +* [[`1455b1dec2`](https://github.com/nodejs/node/commit/1455b1dec2)] - **http2**: emit session connect on next tick (Pieter Mees) [#19842](https://github.com/nodejs/node/pull/19842) +* [[`6d6e2e2454`](https://github.com/nodejs/node/commit/6d6e2e2454)] - **http2**: callback valid check before closing request (Trivikram) [#19061](https://github.com/nodejs/node/pull/19061) +* [[`eddf3a6c70`](https://github.com/nodejs/node/commit/eddf3a6c70)] - **http2**: destroy() stream, upon errnoException (Sarat Addepalli) [#19389](https://github.com/nodejs/node/pull/19389) +* [[`e4c10e1201`](https://github.com/nodejs/node/commit/e4c10e1201)] - **http2**: remove some unnecessary next ticks (James M Snell) [#19451](https://github.com/nodejs/node/pull/19451) +* [[`c976cb5be5`](https://github.com/nodejs/node/commit/c976cb5be5)] - **http2**: no stream destroy while its data is on the wire (Anna Henningsen) [#19002](https://github.com/nodejs/node/pull/19002) +* [[`bfd7d6d0de`](https://github.com/nodejs/node/commit/bfd7d6d0de)] - **http2**: fix flaky test-http2-https-fallback (Matteo Collina) [#19093](https://github.com/nodejs/node/pull/19093) +* [[`b75897f982`](https://github.com/nodejs/node/commit/b75897f982)] - **http2**: fix endless loop when writing empty string (Anna Henningsen) [#18924](https://github.com/nodejs/node/pull/18924) +* [[`7e4a9c9fe2`](https://github.com/nodejs/node/commit/7e4a9c9fe2)] - **http2**: use original error for cancelling pending streams (Anna Henningsen) [#18988](https://github.com/nodejs/node/pull/18988) +* [[`2a04f57444`](https://github.com/nodejs/node/commit/2a04f57444)] - **http2**: send error text in case of ALPN mismatch (Anna Henningsen) [#18986](https://github.com/nodejs/node/pull/18986) +* [[`f366373ad8`](https://github.com/nodejs/node/commit/f366373ad8)] - **http2**: fix condition where data is lost (Matteo Collina) [#18895](https://github.com/nodejs/node/pull/18895) +* [[`20fb59fdc4`](https://github.com/nodejs/node/commit/20fb59fdc4)] - **http2**: use `\_final` instead of `on('finish')` (Anna Henningsen) [#18609](https://github.com/nodejs/node/pull/18609) +* [[`ac64b4f6a7`](https://github.com/nodejs/node/commit/ac64b4f6a7)] - **http2**: add checks for server close callback (James M Snell) [#18182](https://github.com/nodejs/node/pull/18182) +* [[`8b0a1b32de`](https://github.com/nodejs/node/commit/8b0a1b32de)] - **http2**: refactor read mechanism (Anna Henningsen) [#18030](https://github.com/nodejs/node/pull/18030) +* [[`a4d910c644`](https://github.com/nodejs/node/commit/a4d910c644)] - **http2**: remember sent headers (James M Snell) [#18045](https://github.com/nodejs/node/pull/18045) +* [[`3cd205431b`](https://github.com/nodejs/node/commit/3cd205431b)] - **http2**: use aliased buffer for perf stats, add stats (James M Snell) [#18020](https://github.com/nodejs/node/pull/18020) +* [[`46d1b331e0`](https://github.com/nodejs/node/commit/46d1b331e0)] - **http2**: verify flood error and unsolicited frames (James M Snell) [#17969](https://github.com/nodejs/node/pull/17969) +* 
[[`a85518ed22`](https://github.com/nodejs/node/commit/a85518ed22)] - **http2**: verify that a dependency cycle may exist (James M Snell) [#17968](https://github.com/nodejs/node/pull/17968) +* [[`9c85ada4e3`](https://github.com/nodejs/node/commit/9c85ada4e3)] - **http2**: implement maxSessionMemory (James M Snell) [#17967](https://github.com/nodejs/node/pull/17967) +* [[`9a6ea7eb02`](https://github.com/nodejs/node/commit/9a6ea7eb02)] - **http2**: properly handle already closed stream error (James M Snell) [#17942](https://github.com/nodejs/node/pull/17942) +* [[`0078a97793`](https://github.com/nodejs/node/commit/0078a97793)] - **http2**: add aligned padding strategy (James M Snell) [#17938](https://github.com/nodejs/node/pull/17938) +* [[`1c313e09d6`](https://github.com/nodejs/node/commit/1c313e09d6)] - **http2**: add initial support for originSet (James M Snell) [#17935](https://github.com/nodejs/node/pull/17935) +* [[`1a24feccb5`](https://github.com/nodejs/node/commit/1a24feccb5)] - **http2**: add altsvc support (James M Snell) [#17917](https://github.com/nodejs/node/pull/17917) +* [[`c915bc54d4`](https://github.com/nodejs/node/commit/c915bc54d4)] - **http2**: strictly limit number on concurrent streams (James M Snell) [#16766](https://github.com/nodejs/node/pull/16766) +* [[`dcc7f4d84c`](https://github.com/nodejs/node/commit/dcc7f4d84c)] - **http2**: perf\_hooks integration (James M Snell) [#17906](https://github.com/nodejs/node/pull/17906) +* [[`72b42de33a`](https://github.com/nodejs/node/commit/72b42de33a)] - **http2**: implement ref() and unref() on client sessions (Kelvin Jin) [#17620](https://github.com/nodejs/node/pull/17620) +* [[`55f6bdb698`](https://github.com/nodejs/node/commit/55f6bdb698)] - **http2**: keep session objects alive during Http2Scope (Anna Henningsen) [#17863](https://github.com/nodejs/node/pull/17863) +* [[`c61a54ec3d`](https://github.com/nodejs/node/commit/c61a54ec3d)] - **http2**: fix compiling with `--debug-http2` (Anna Henningsen) [#17863](https://github.com/nodejs/node/pull/17863) +* [[`04632214c1`](https://github.com/nodejs/node/commit/04632214c1)] - **http2**: convert Http2Settings to an AsyncWrap (James M Snell) [#17763](https://github.com/nodejs/node/pull/17763) +* [[`ea98fd573e`](https://github.com/nodejs/node/commit/ea98fd573e)] - **http2**: refactor outgoing write mechanism (Anna Henningsen) [#17718](https://github.com/nodejs/node/pull/17718) +* [[`05b823d4ad`](https://github.com/nodejs/node/commit/05b823d4ad)] - **http2**: remove redundant write indirection (Anna Henningsen) [#17718](https://github.com/nodejs/node/pull/17718) +* [[`fc40b7de46`](https://github.com/nodejs/node/commit/fc40b7de46)] - **http2**: cleanup Http2Stream/Http2Session destroy (James M Snell) [#17406](https://github.com/nodejs/node/pull/17406) +* [[`1d65f2b879`](https://github.com/nodejs/node/commit/1d65f2b879)] - **http2**: be sure to destroy the Http2Stream (James M Snell) [#17406](https://github.com/nodejs/node/pull/17406) +* [[`8431b4297c`](https://github.com/nodejs/node/commit/8431b4297c)] - **http2**: only schedule write when necessary (Anna Henningsen) [#17183](https://github.com/nodejs/node/pull/17183) +* [[`38cfb707bd`](https://github.com/nodejs/node/commit/38cfb707bd)] - **http2**: don't call into JS from GC (Anna Henningsen) [#17183](https://github.com/nodejs/node/pull/17183) +* [[`a1539e5731`](https://github.com/nodejs/node/commit/a1539e5731)] - **http2**: simplify onSelectPadding (Anna Henningsen) [#17717](https://github.com/nodejs/node/pull/17717) +* 
[[`9a4bac2081`](https://github.com/nodejs/node/commit/9a4bac2081)] - **http2,perf_hooks**: perf state using AliasedBuffer (Kyle Farnung) [#18300](https://github.com/nodejs/node/pull/18300) +* [[`9129bc4fde`](https://github.com/nodejs/node/commit/9129bc4fde)] - **lib**: set process.execPath on OpenBSD (Aaron Bieber) [#18543](https://github.com/nodejs/node/pull/18543) +* [[`2019b023a7`](https://github.com/nodejs/node/commit/2019b023a7)] - **lib**: refactor ES module loader for readability (Anna Henningsen) [#16579](https://github.com/nodejs/node/pull/16579) +* [[`3df0570c90`](https://github.com/nodejs/node/commit/3df0570c90)] - **lib**: fix spelling in comments (Tobias Nießen) [#18018](https://github.com/nodejs/node/pull/18018) +* [[`20844d1716`](https://github.com/nodejs/node/commit/20844d1716)] - **lib**: remove debugger dead code (Qingyan Li) [#18426](https://github.com/nodejs/node/pull/18426) +* [[`07a6770614`](https://github.com/nodejs/node/commit/07a6770614)] - **n-api**: add more `int64\_t` tests (Kyle Farnung) [#19402](https://github.com/nodejs/node/pull/19402) +* [[`8b3ef4660a`](https://github.com/nodejs/node/commit/8b3ef4660a)] - **n-api**: back up env before finalize (Gabriel Schulhof) [#19718](https://github.com/nodejs/node/pull/19718) +* [[`92f699e021`](https://github.com/nodejs/node/commit/92f699e021)] - **n-api**: ensure in-module exceptions are propagated (Gabriel Schulhof) [#19537](https://github.com/nodejs/node/pull/19537) +* [[`367113f5d7`](https://github.com/nodejs/node/commit/367113f5d7)] - **n-api**: bump version of n-api supported (Michael Dawson) [#19497](https://github.com/nodejs/node/pull/19497) +* [[`24b8bb6708`](https://github.com/nodejs/node/commit/24b8bb6708)] - **n-api**: re-write test\_make\_callback (Gabriel Schulhof) [#19448](https://github.com/nodejs/node/pull/19448) +* [[`3a6b7e610d`](https://github.com/nodejs/node/commit/3a6b7e610d)] - **n-api**: add napi\_fatal\_exception (Mathias Buus) [#19337](https://github.com/nodejs/node/pull/19337) +* [[`9949d55ae9`](https://github.com/nodejs/node/commit/9949d55ae9)] - **n-api**: separate out async\_hooks test (Gabriel Schulhof) [#19392](https://github.com/nodejs/node/pull/19392) +* [[`f29d8e0e8d`](https://github.com/nodejs/node/commit/f29d8e0e8d)] - **n-api**: add missing exception checking (Michael Dawson) [#19362](https://github.com/nodejs/node/pull/19362) +* [[`faf94b1c49`](https://github.com/nodejs/node/commit/faf94b1c49)] - **n-api**: resolve promise in test (Gabriel Schulhof) [#19245](https://github.com/nodejs/node/pull/19245) +* [[`df63adf7aa`](https://github.com/nodejs/node/commit/df63adf7aa)] - **n-api**: update documentation (Gabriel Schulhof) [#19078](https://github.com/nodejs/node/pull/19078) +* [[`b26410e86f`](https://github.com/nodejs/node/commit/b26410e86f)] - **n-api**: update reference test (Gabriel Schulhof) [#19086](https://github.com/nodejs/node/pull/19086) +* [[`cb3f90a1a9`](https://github.com/nodejs/node/commit/cb3f90a1a9)] - **n-api**: fix object test (Gabriel Schulhof) [#19039](https://github.com/nodejs/node/pull/19039) +* [[`9244e1d234`](https://github.com/nodejs/node/commit/9244e1d234)] - **n-api**: remove extra reference from test (Gabriel Schulhof) [#18542](https://github.com/nodejs/node/pull/18542) +* [[`927fc0b19f`](https://github.com/nodejs/node/commit/927fc0b19f)] - **n-api**: add methods to open/close callback scope (Michael Dawson) [#18089](https://github.com/nodejs/node/pull/18089) +* [[`969a520990`](https://github.com/nodejs/node/commit/969a520990)] - **n-api**: wrap control 
flow macro in do/while (Ben Noordhuis) [#18532](https://github.com/nodejs/node/pull/18532) +* [[`d89f5937eb`](https://github.com/nodejs/node/commit/d89f5937eb)] - **n-api**: implement wrapping using private properties (Gabriel Schulhof) [#18311](https://github.com/nodejs/node/pull/18311) +* [[`af655f586c`](https://github.com/nodejs/node/commit/af655f586c)] - **n-api**: change assert ok check to notStrictEqual. (Aaron Kau) [#18414](https://github.com/nodejs/node/pull/18414) +* [[`ca10fda064`](https://github.com/nodejs/node/commit/ca10fda064)] - **n-api**: throw RangeError napi\_create\_typedarray() (Jinho Bang) [#18037](https://github.com/nodejs/node/pull/18037) +* [[`853b4d593c`](https://github.com/nodejs/node/commit/853b4d593c)] - **n-api**: expose n-api version in process.versions (Michael Dawson) [#18067](https://github.com/nodejs/node/pull/18067) +* [[`48be8a4793`](https://github.com/nodejs/node/commit/48be8a4793)] - **n-api**: throw RangeError in napi\_create\_dataview() with invalid range (Jinho Bang) [#17869](https://github.com/nodejs/node/pull/17869) +* [[`a744535f99`](https://github.com/nodejs/node/commit/a744535f99)] - **n-api**: fix memory leak in napi\_async\_destroy() (alnyan) [#17714](https://github.com/nodejs/node/pull/17714) +* [[`584fadc605`](https://github.com/nodejs/node/commit/584fadc605)] - **n-api,test**: add int64 bounds tests (Kyle Farnung) [#19309](https://github.com/nodejs/node/pull/19309) +* [[`4c1181dc02`](https://github.com/nodejs/node/commit/4c1181dc02)] - **n-api,test**: add a new.target test to addons-napi (Taylor Woll) [#19236](https://github.com/nodejs/node/pull/19236) +* [[`3225601ffc`](https://github.com/nodejs/node/commit/3225601ffc)] - **net**: remove Socket.prototoype.read (Anna Henningsen) [#18568](https://github.com/nodejs/node/pull/18568) +* [[`35aaee1059`](https://github.com/nodejs/node/commit/35aaee1059)] - **net**: remove redundant code from \_writeGeneric() (Luigi Pinca) [#18429](https://github.com/nodejs/node/pull/18429) +* [[`54442efcd2`](https://github.com/nodejs/node/commit/54442efcd2)] - **perf_hooks**: refactor internals (James M Snell) [#17822](https://github.com/nodejs/node/pull/17822) +* [[`6bdfb1f8f0`](https://github.com/nodejs/node/commit/6bdfb1f8f0)] - **perf_hooks,http2**: add performance.clear() (James M Snell) [#18046](https://github.com/nodejs/node/pull/18046) +* [[`1faae90b74`](https://github.com/nodejs/node/commit/1faae90b74)] - **readline**: use Date.now() and move test to parallel (Anatoli Papirovski) [#18563](https://github.com/nodejs/node/pull/18563) +* [[`965b56a34e`](https://github.com/nodejs/node/commit/965b56a34e)] - **readline**: update references to archived repository (Tobias Nießen) [#17924](https://github.com/nodejs/node/pull/17924) +* [[`801a49935b`](https://github.com/nodejs/node/commit/801a49935b)] - **src**: add nullptr check for session in DEBUG macro (Daniel Bevenius) [#18815](https://github.com/nodejs/node/pull/18815) +* [[`4e807d648e`](https://github.com/nodejs/node/commit/4e807d648e)] - **src**: introduce internal buffer slice constructor (Anna Henningsen) [#18030](https://github.com/nodejs/node/pull/18030) +* [[`0b828e5125`](https://github.com/nodejs/node/commit/0b828e5125)] - **src**: remove declarations for missing functions (Anna Henningsen) [#18134](https://github.com/nodejs/node/pull/18134) +* [[`3766e04f31`](https://github.com/nodejs/node/commit/3766e04f31)] - **src**: silence http2 -Wunused-result warnings (cjihrig) [#17954](https://github.com/nodejs/node/pull/17954) +* 
[[`2b7732788a`](https://github.com/nodejs/node/commit/2b7732788a)] - **src**: add optional keep-alive object to SetImmediate (Anna Henningsen) [#17183](https://github.com/nodejs/node/pull/17183) +* [[`f3e082c4ea`](https://github.com/nodejs/node/commit/f3e082c4ea)] - **src**: replace SetAccessor w/ SetAccessorProperty (Jure Triglav) [#17665](https://github.com/nodejs/node/pull/17665) +* [[`45e28a8628`](https://github.com/nodejs/node/commit/45e28a8628)] - **src**: minor refactoring to StreamBase writes (Anna Henningsen) [#17564](https://github.com/nodejs/node/pull/17564) +* [[`42b4f3ce0b`](https://github.com/nodejs/node/commit/42b4f3ce0b)] - **src**: fix abort when taking a heap snapshot (Ben Noordhuis) [#18898](https://github.com/nodejs/node/pull/18898) +* [[`b48ca0a140`](https://github.com/nodejs/node/commit/b48ca0a140)] - **src**: fix crypto.pbkdf2 callback error argument (BufoViridis) [#18458](https://github.com/nodejs/node/pull/18458) +* [[`973488b77b`](https://github.com/nodejs/node/commit/973488b77b)] - **src**: replace var for let / const. (alejandro estrada) [#18649](https://github.com/nodejs/node/pull/18649) +* [[`9ac91b14de`](https://github.com/nodejs/node/commit/9ac91b14de)] - **src**: fix util abort (Ruben Bridgewater) [#19224](https://github.com/nodejs/node/pull/19224) +* [[`c0f40be23b`](https://github.com/nodejs/node/commit/c0f40be23b)] - **src**: free memory before re-setting URLHost value (Ivan Filenko) [#18357](https://github.com/nodejs/node/pull/18357) +* [[`5e4f9b37ba`](https://github.com/nodejs/node/commit/5e4f9b37ba)] - **src,doc,test**: Fix common misspellings (Roman Reiss) [#18151](https://github.com/nodejs/node/pull/18151) +* [[`10231a9e44`](https://github.com/nodejs/node/commit/10231a9e44)] - **stream**: cleanup() when unpiping all streams. 
(陈刚) [#18266](https://github.com/nodejs/node/pull/18266) +* [[`bf523822ba`](https://github.com/nodejs/node/commit/bf523822ba)] - **stream**: simplify `src.\_readableState` to `state` (陈刚) [#18264](https://github.com/nodejs/node/pull/18264) +* [[`37e594ed4a`](https://github.com/nodejs/node/commit/37e594ed4a)] - **stream**: remove unreachable code (Luigi Pinca) [#18239](https://github.com/nodejs/node/pull/18239) +* [[`f96b0bf494`](https://github.com/nodejs/node/commit/f96b0bf494)] - **string_decoder**: reset decoder on end (Justin Ridgewell) [#18494](https://github.com/nodejs/node/pull/18494) +* [[`4dbdb8ae4e`](https://github.com/nodejs/node/commit/4dbdb8ae4e)] - **test**: http2 errors on req.close() (Trivikram) [#18854](https://github.com/nodejs/node/pull/18854) +* [[`83d8ad351c`](https://github.com/nodejs/node/commit/83d8ad351c)] - **test**: http2 stream.respond() error checks (Trivikram) [#18861](https://github.com/nodejs/node/pull/18861) +* [[`b0664426f5`](https://github.com/nodejs/node/commit/b0664426f5)] - **test**: check endless loop while writing empty string (XadillaX) [#18924](https://github.com/nodejs/node/pull/18924) +* [[`7eba62e028`](https://github.com/nodejs/node/commit/7eba62e028)] - **test**: make test-tls-external-accessor agnostic (Rich Trott) [#16272](https://github.com/nodejs/node/pull/16272) +* [[`9e68947ff4`](https://github.com/nodejs/node/commit/9e68947ff4)] - **test**: add hasCrypto when using binding('crypto') (Daniel Bevenius) [#17867](https://github.com/nodejs/node/pull/17867) +* [[`6129ff4b99`](https://github.com/nodejs/node/commit/6129ff4b99)] - **test**: remove unnecessary timer (cjihrig) [#18719](https://github.com/nodejs/node/pull/18719) +* [[`2838f9b150`](https://github.com/nodejs/node/commit/2838f9b150)] - **test**: convert new tests to use error types (Jack Horton) [#18581](https://github.com/nodejs/node/pull/18581) +* [[`5c0983e5a2`](https://github.com/nodejs/node/commit/5c0983e5a2)] - **test**: improve error message output (Bhavani Shankar) [#18498](https://github.com/nodejs/node/pull/18498) +* [[`bebcdfe382`](https://github.com/nodejs/node/commit/bebcdfe382)] - **test**: show pending exception error in napi tests (Ben Wilcox) [#18413](https://github.com/nodejs/node/pull/18413) +* [[`5b1b74c5a5`](https://github.com/nodejs/node/commit/5b1b74c5a5)] - **test**: refactor addons-napi/test\_exception/test.js (Rich Trott) [#18340](https://github.com/nodejs/node/pull/18340) +* [[`8cfa87832d`](https://github.com/nodejs/node/commit/8cfa87832d)] - **test**: fixed typos in napi test (furstenheim) [#18148](https://github.com/nodejs/node/pull/18148) +* [[`ad8c079af7`](https://github.com/nodejs/node/commit/ad8c079af7)] - **test**: remove ambiguous error messages from test\_error (Nicholas Drane) [#17812](https://github.com/nodejs/node/pull/17812) +* [[`2e100c82be`](https://github.com/nodejs/node/commit/2e100c82be)] - **test**: remove literals that obscure assert messages (Rich Trott) [#17642](https://github.com/nodejs/node/pull/17642) +* [[`077e1870ae`](https://github.com/nodejs/node/commit/077e1870ae)] - **test**: add unhandled rejection guard (babygoat) [#17275](https://github.com/nodejs/node/pull/17275) +* [[`9236332cc3`](https://github.com/nodejs/node/commit/9236332cc3)] - **test**: update a few tests to work on OpenBSD (Aaron Bieber) [#18543](https://github.com/nodejs/node/pull/18543) +* [[`cbd698a521`](https://github.com/nodejs/node/commit/cbd698a521)] - **test**: refactor test-http-abort-before-end (cjihrig) [#18508](https://github.com/nodejs/node/pull/18508) +* 
[[`ab8edc9d48`](https://github.com/nodejs/node/commit/ab8edc9d48)] - **test**: fix flaky timers-block-eventloop test (Anatoli Papirovski) [#18567](https://github.com/nodejs/node/pull/18567) +* [[`53b702fdba`](https://github.com/nodejs/node/commit/53b702fdba)] - **test**: remove common.PORT from parallel tests (Rich Trott) [#17410](https://github.com/nodejs/node/pull/17410) +* [[`da162278de`](https://github.com/nodejs/node/commit/da162278de)] - **test**: mock the lookup function in parallel tests (Joyee Cheung) [#17296](https://github.com/nodejs/node/pull/17296) +* [[`34af49401b`](https://github.com/nodejs/node/commit/34af49401b)] - **test**: add common.dns.errorLookupMock (Joyee Cheung) [#17296](https://github.com/nodejs/node/pull/17296) +* [[`bff7258535`](https://github.com/nodejs/node/commit/bff7258535)] - **test**: do not check TXT content in test-dns-any (Joyee Cheung) [#18547](https://github.com/nodejs/node/pull/18547) +* [[`daeb6de8ec`](https://github.com/nodejs/node/commit/daeb6de8ec)] - **test**: use internet.addresses in internet tests (Joyee Cheung) [#16390](https://github.com/nodejs/node/pull/16390) +* [[`7813a0de0a`](https://github.com/nodejs/node/commit/7813a0de0a)] - **test**: introduce test/common/internet.addresses (Joyee Cheung) [#16390](https://github.com/nodejs/node/pull/16390) +* [[`745600a0b3`](https://github.com/nodejs/node/commit/745600a0b3)] - **test**: remove orphaned entries from status (Kyle Farnung) [#18092](https://github.com/nodejs/node/pull/18092) +* [[`e42ab10957`](https://github.com/nodejs/node/commit/e42ab10957)] - **test**: add assertions for TextEncoder/Decoder (Sho Miyamoto) [#18132](https://github.com/nodejs/node/pull/18132) +* [[`a1b0d5da07`](https://github.com/nodejs/node/commit/a1b0d5da07)] - **test**: move tmpdir to submodule of common (Rich Trott) [#17856](https://github.com/nodejs/node/pull/17856) +* [[`5155d8e7c3`](https://github.com/nodejs/node/commit/5155d8e7c3)] - **test**: update references to archived repository (Tobias Nießen) [#17924](https://github.com/nodejs/node/pull/17924) +* [[`1043b6fd7c`](https://github.com/nodejs/node/commit/1043b6fd7c)] - **test**: fix spelling in test case comments (Tobias Nießen) [#18018](https://github.com/nodejs/node/pull/18018) +* [[`fdb4dbc04b`](https://github.com/nodejs/node/commit/fdb4dbc04b)] - **test**: remove destructor from node\_test\_fixture (Daniel Bevenius) [#18524](https://github.com/nodejs/node/pull/18524) +* [[`e55e08c80e`](https://github.com/nodejs/node/commit/e55e08c80e)] - **test**: verify the shell option works properly on execFile (jvelezpo) [#18384](https://github.com/nodejs/node/pull/18384) +* [[`0cf9d0483f`](https://github.com/nodejs/node/commit/0cf9d0483f)] - **test**: add test for tls benchmarks (Anatoli Papirovski) [#18489](https://github.com/nodejs/node/pull/18489) +* [[`d630874250`](https://github.com/nodejs/node/commit/d630874250)] - **test**: speed up parallel/test-tls-session-cache (Anna Henningsen) [#18424](https://github.com/nodejs/node/pull/18424) +* [[`a1fb263880`](https://github.com/nodejs/node/commit/a1fb263880)] - **test**: fix flaky test-http-dns-error (Bryan English) [#16534](https://github.com/nodejs/node/pull/16534) +* [[`6f3fb46541`](https://github.com/nodejs/node/commit/6f3fb46541)] - **test**: use correct size in test-stream-buffer-list (Luigi Pinca) [#18239](https://github.com/nodejs/node/pull/18239) +* [[`a52f15efae`](https://github.com/nodejs/node/commit/a52f15efae)] - **timers**: fix a bug in error handling (Anatoli Papirovski) 
[#20497](https://github.com/nodejs/node/pull/20497) +* [[`e15f57745d`](https://github.com/nodejs/node/commit/e15f57745d)] - **timers**: allow Immediates to be unrefed (Anatoli Papirovski) [#18139](https://github.com/nodejs/node/pull/18139) +* [[`95c1e2d606`](https://github.com/nodejs/node/commit/95c1e2d606)] - **tls**: set servername on client side too (James M Snell) [#17935](https://github.com/nodejs/node/pull/17935) +* [[`d4bccccf23`](https://github.com/nodejs/node/commit/d4bccccf23)] - **tools**: add fixer for prefer-assert-iferror.js (Shobhit Chittora) [#16648](https://github.com/nodejs/node/pull/16648) +* [[`016a28ac08`](https://github.com/nodejs/node/commit/016a28ac08)] - **tools**: non-Ascii linter for /lib only (Sarat Addepalli) [#18043](https://github.com/nodejs/node/pull/18043) +* [[`a0a45fc3b6`](https://github.com/nodejs/node/commit/a0a45fc3b6)] - **tools**: add .mjs linting for Windows (Vse Mozhet Byt) [#18569](https://github.com/nodejs/node/pull/18569) +* [[`e0d2842b29`](https://github.com/nodejs/node/commit/e0d2842b29)] - **tools**: add check for using process.binding crypto (Daniel Bevenius) [#17867](https://github.com/nodejs/node/pull/17867) +* [[`a8b5a96d15`](https://github.com/nodejs/node/commit/a8b5a96d15)] - **tools**: auto fix custom eslint rule (Shobhit Chittora) [#16652](https://github.com/nodejs/node/pull/16652) +* [[`5d03c8219a`](https://github.com/nodejs/node/commit/5d03c8219a)] - **url**: simplify loop in parser (Tobias Nießen) [#18468](https://github.com/nodejs/node/pull/18468) +* [[`e0e0ef7bab`](https://github.com/nodejs/node/commit/e0e0ef7bab)] - **util**: escaping object keys in util.inspect() (buji) [#16986](https://github.com/nodejs/node/pull/16986) +* [[`8ac69c457b`](https://github.com/nodejs/node/commit/8ac69c457b)] - **v8**: add missing ',' in OpenBSD's 'sources' section. (Aaron Bieber) [#18448](https://github.com/nodejs/node/pull/18448) +* [[`c61754fad9`](https://github.com/nodejs/node/commit/c61754fad9)] - **win, build**: fix intl-none option (Birunthan Mohanathas) [#18292](https://github.com/nodejs/node/pull/18292) + ## 2018-03-29, Version 8.11.1 'Carbon' (LTS), @MylesBorins diff --git a/doc/guides/contributing/pull-requests.md b/doc/guides/contributing/pull-requests.md index 5812c8c54645e2..3d7c548bbd3022 100644 --- a/doc/guides/contributing/pull-requests.md +++ b/doc/guides/contributing/pull-requests.md @@ -109,12 +109,12 @@ If you are modifying code, please be sure to run `make lint` from time to time to ensure that the changes follow the Node.js code style guide. Any documentation you write (including code comments and API documentation) -should follow the [Style Guide](doc/STYLE_GUIDE.md). Code samples included +should follow the [Style Guide](../../STYLE_GUIDE.md). Code samples included in the API docs will also be checked when running `make lint` (or `vcbuild.bat lint` on Windows). For contributing C++ code, you may want to look at the -[C++ Style Guide](CPP_STYLE_GUIDE.md). +[C++ Style Guide](../../../CPP_STYLE_GUIDE.md). ### Step 4: Commit diff --git a/doc/guides/writing-and-running-benchmarks.md b/doc/guides/writing-and-running-benchmarks.md index 75435daf394898..740829fe1a51af 100644 --- a/doc/guides/writing-and-running-benchmarks.md +++ b/doc/guides/writing-and-running-benchmarks.md @@ -183,6 +183,17 @@ The `compare.js` tool will then produce a csv file with the benchmark results. 
$ node benchmark/compare.js --old ./node-master --new ./node-pr-5134 string_decoder > compare-pr-5134.csv ``` +*Tips: there are some useful options of `benchmark/compare.js`. For example, if you want to compare the benchmark of a single script instead of a whole module, you can use the `--filter` option:* + +```console + --new ./new-node-binary new node binary (required) + --old ./old-node-binary old node binary (required) + --runs 30 number of samples + --filter pattern string to filter benchmark scripts + --set variable=value set benchmark variable (can be repeated) + --no-progress don't show benchmark progress indicator +``` + For analysing the benchmark results use the `compare.R` tool. ```console diff --git a/doc/node.1 b/doc/node.1 index 3dfadc699c8749..50b8668007fdae 100644 --- a/doc/node.1 +++ b/doc/node.1 @@ -37,9 +37,9 @@ node \- Server-side JavaScript runtime .RI [ script.js \ | .B -e .RI \&" script \&" -.R | +.RI | .B - -.R ] +.RI ] .B [--] .RI [ arguments ] .br diff --git a/lib/.eslintrc.yaml b/lib/.eslintrc.yaml index 437aa575645ad6..0b00638e2a638c 100644 --- a/lib/.eslintrc.yaml +++ b/lib/.eslintrc.yaml @@ -6,3 +6,4 @@ rules: buffer-constructor: error no-let-in-for-declaration: error lowercase-name-for-primitive: error + non-ascii-character: error diff --git a/lib/_http_agent.js b/lib/_http_agent.js index 5f1e56caeab981..7586a48680bb6a 100644 --- a/lib/_http_agent.js +++ b/lib/_http_agent.js @@ -277,6 +277,7 @@ function installListeners(agent, s, options) { s.removeListener('close', onClose); s.removeListener('free', onFree); s.removeListener('agentRemove', onRemove); + s._httpMessage = null; } s.on('agentRemove', onRemove); } diff --git a/lib/_http_client.js b/lib/_http_client.js index 0170b2c9b0e6fb..dfab2fce131122 100644 --- a/lib/_http_client.js +++ b/lib/_http_client.js @@ -37,7 +37,7 @@ const { OutgoingMessage } = require('_http_outgoing'); const Agent = require('_http_agent'); const { Buffer } = require('buffer'); const { urlToOptions, searchParamsSymbol } = require('internal/url'); -const { outHeadersKey } = require('internal/http'); +const { outHeadersKey, ondrain } = require('internal/http'); const { nextTick } = require('internal/process/next_tick'); // The actual list of disallowed characters in regexp form is more like: @@ -452,7 +452,9 @@ function socketOnData(d) { socket.removeListener('data', socketOnData); socket.removeListener('end', socketOnEnd); + socket.removeListener('drain', ondrain); parser.finish(); + freeParser(parser, req, socket); var bodyHead = d.slice(bytesParsed, d.length); @@ -475,7 +477,6 @@ function socketOnData(d) { // Got Upgrade header or CONNECT method, but have no handler. socket.destroy(); } - freeParser(parser, req, socket); } else if (parser.incoming && parser.incoming.complete && // When the status code is 100 (Continue), the server will // send a final response after this client sends a request @@ -493,7 +494,6 @@ function parserOnIncomingClient(res, shouldKeepAlive) { var socket = this.socket; var req = socket._httpMessage; - // propagate "domain" setting... if (req.domain && !res.domain) { debug('setting "res.domain"'); @@ -506,29 +506,22 @@ function parserOnIncomingClient(res, shouldKeepAlive) { // We already have a response object, this means the server // sent a double response. socket.destroy(); - return; + return 0; // No special treatment. } req.res = res; // Responses to CONNECT request is handled as Upgrade. 
- if (req.method === 'CONNECT') { + const method = req.method; + if (method === 'CONNECT') { res.upgrade = true; - return 2; // skip body, and the rest + return 2; // Skip body and treat as Upgrade. } - // Responses to HEAD requests are crazy. - // HEAD responses aren't allowed to have an entity-body - // but *can* have a content-length which actually corresponds - // to the content-length of the entity-body had the request - // been a GET. - var isHeadResponse = req.method === 'HEAD'; - debug('AGENT isHeadResponse', isHeadResponse); - if (res.statusCode === 100) { // restart the parser, as this is a continue message. req.res = null; // Clear res so that we don't hit double-responses. req.emit('continue'); - return true; + return 1; // Skip body but don't treat as Upgrade. } if (req.shouldKeepAlive && !shouldKeepAlive && !req.upgradeOrConnect) { @@ -538,7 +531,6 @@ function parserOnIncomingClient(res, shouldKeepAlive) { req.shouldKeepAlive = false; } - DTRACE_HTTP_CLIENT_RESPONSE(socket, req); LTTNG_HTTP_CLIENT_RESPONSE(socket, req); COUNTER_HTTP_CLIENT_RESPONSE(); @@ -556,7 +548,10 @@ function parserOnIncomingClient(res, shouldKeepAlive) { if (!handled) res._dump(); - return isHeadResponse; + if (method === 'HEAD') + return 1; // Skip body but don't treat as Upgrade. + + return 0; // No special treatment. } // client diff --git a/lib/_http_common.js b/lib/_http_common.js index ad0dec520d1210..381ffeb807a84e 100644 --- a/lib/_http_common.js +++ b/lib/_http_common.js @@ -106,19 +106,10 @@ function parserOnHeadersComplete(versionMajor, versionMinor, headers, method, parser.incoming.upgrade = upgrade; - var skipBody = 0; // response to HEAD or CONNECT + if (upgrade) + return 2; // Skip body and treat as Upgrade. - if (!upgrade) { - // For upgraded connections and CONNECT method request, we'll emit this - // after parser.execute so that we can capture the first part of the new - // protocol. - skipBody = parser.onIncoming(parser.incoming, shouldKeepAlive); - } - - if (typeof skipBody !== 'number') - return skipBody ? 1 : 0; - else - return skipBody; + return parser.onIncoming(parser.incoming, shouldKeepAlive); } // XXX This is a mess. diff --git a/lib/_http_server.js b/lib/_http_server.js index 32c39e6160e5a9..be591c437ca083 100644 --- a/lib/_http_server.js +++ b/lib/_http_server.js @@ -618,7 +618,7 @@ function parserOnIncoming(server, socket, state, req, keepAlive) { } else { server.emit('request', req, res); } - return false; // Not a HEAD response. (Not even a response!) + return 0; // No special treatment. 
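// Note (illustrative summary, not part of the original patch): the parser callbacks touched above now return a numeric code instead of a boolean. As the comments in this change describe, 0 means no special treatment (parse the body as usual), 1 means skip the body but do not treat the request as an Upgrade (responses to HEAD requests or a 100 Continue), and 2 means skip the body and treat the connection as an Upgrade (CONNECT requests or an Upgrade header).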
} function resetSocketTimeout(server, socket, state) { diff --git a/lib/_stream_readable.js b/lib/_stream_readable.js index a75f3fa74cba22..cd8669830a5368 100644 --- a/lib/_stream_readable.js +++ b/lib/_stream_readable.js @@ -645,8 +645,8 @@ Readable.prototype.pipe = function(dest, pipeOpts) { if (((state.pipesCount === 1 && state.pipes === dest) || (state.pipesCount > 1 && state.pipes.indexOf(dest) !== -1)) && !cleanedUp) { - debug('false write response, pause', src._readableState.awaitDrain); - src._readableState.awaitDrain++; + debug('false write response, pause', state.awaitDrain); + state.awaitDrain++; increasedAwaitDrain = true; } src.pause(); @@ -747,7 +747,7 @@ Readable.prototype.unpipe = function(dest) { state.flowing = false; for (var i = 0; i < len; i++) - dests[i].emit('unpipe', this, unpipeInfo); + dests[i].emit('unpipe', this, { hasUnpiped: false }); return this; } diff --git a/lib/_tls_wrap.js b/lib/_tls_wrap.js index 598bfaa5ae8c3e..4bd8ecbf48437d 100644 --- a/lib/_tls_wrap.js +++ b/lib/_tls_wrap.js @@ -628,7 +628,7 @@ TLSSocket.prototype._finishInit = function() { this.alpnProtocol = this.ssl.getALPNNegotiatedProtocol(); } - if (process.features.tls_sni && this._tlsOptions.isServer) { + if (process.features.tls_sni) { this.servername = this._handle.getServername(); } diff --git a/lib/console.js b/lib/console.js index d0f7e61fd5a709..4495074231a2eb 100644 --- a/lib/console.js +++ b/lib/console.js @@ -81,7 +81,7 @@ function createWriteErrorHandler(stream) { // If there was an error, it will be emitted on `stream` as // an `error` event. Adding a `once` listener will keep that error // from becoming an uncaught exception, but since the handler is - // removed after the event, non-console.* writes won’t be affected. + // removed after the event, non-console.* writes won't be affected. // we are only adding noop if there is no one else listening for 'error' if (stream.listenerCount('error') === 0) { stream.on('error', noop); @@ -114,7 +114,7 @@ function write(ignoreErrors, stream, string, errorhandler, groupIndent) { // even in edge cases such as low stack space. if (e.message === 'Maximum call stack size exceeded') throw e; - // Sorry, there’s no proper way to pass along the error here. + // Sorry, there's no proper way to pass along the error here. } finally { stream.removeListener('error', noop); } diff --git a/lib/fs.js b/lib/fs.js index bfd085b4213881..b5565201e2abdd 100644 --- a/lib/fs.js +++ b/lib/fs.js @@ -1950,8 +1950,7 @@ function ReadStream(path, options) { this.flags = options.flags === undefined ? 'r' : options.flags; this.mode = options.mode === undefined ? 0o666 : options.mode; - this.start = typeof this.fd !== 'number' && options.start === undefined ? - 0 : options.start; + this.start = options.start; this.end = options.end; this.autoClose = options.autoClose === undefined ? true : options.autoClose; this.pos = undefined; @@ -1974,6 +1973,12 @@ function ReadStream(path, options) { this.pos = this.start; } + // Backwards compatibility: Make sure `end` is a number regardless of `start`. + // TODO(addaleax): Make the above typecheck not depend on `start` instead. + // (That is a semver-major change). + if (typeof this.end !== 'number') + this.end = Infinity; + if (typeof this.fd !== 'number') this.open(); @@ -2028,6 +2033,8 @@ ReadStream.prototype._read = function(n) { if (this.pos !== undefined) toRead = Math.min(this.end - this.pos + 1, toRead); + else + toRead = Math.min(this.end - this.bytesRead + 1, toRead); // already read everything we were supposed to read! 
// treat as EOF. diff --git a/lib/internal/async_hooks.js b/lib/internal/async_hooks.js index 06f6b06f093804..e14a0000640e7d 100644 --- a/lib/internal/async_hooks.js +++ b/lib/internal/async_hooks.js @@ -11,16 +11,17 @@ const async_wrap = process.binding('async_wrap'); * the various asynchronous states of the application. These are: * kExecutionAsyncId: The async_id assigned to the resource responsible for the * current execution stack. - * kTriggerAsyncId: The trigger_async_id of the resource responsible for - * the current execution stack. + * kTriggerAsyncId: The async_id of the resource that caused (or 'triggered') + * the resource corresponding to the current execution stack. * kAsyncIdCounter: Incremental counter tracking the next assigned async_id. * kDefaultTriggerAsyncId: Written immediately before a resource's constructor - * that sets the value of the init()'s triggerAsyncId. The order of - * retrieving the triggerAsyncId value is passing directly to the - * constructor -> value set in kDefaultTriggerAsyncId -> executionAsyncId of - * the current resource. + * that sets the value of the init()'s triggerAsyncId. The precedence order + * of retrieving the triggerAsyncId value is: + * 1. the value passed directly to the constructor + * 2. value set in kDefaultTriggerAsyncId + * 3. executionAsyncId of the current resource. * - * async_ids_fast_stack is a Float64Array that contains part of the async ID + * async_ids_stack is a Float64Array that contains part of the async ID * stack. Each pushAsyncIds() call adds two doubles to it, and each * popAsyncIds() call removes two doubles from it. * It has a fixed size, so if that is exceeded, calls to the native @@ -28,12 +29,12 @@ const async_wrap = process.binding('async_wrap'); */ const { async_id_symbol, async_hook_fields, async_id_fields } = async_wrap; // Store the pair executionAsyncId and triggerAsyncId in a std::stack on -// Environment::AsyncHooks::ids_stack_ tracks the resource responsible for the -// current execution stack. This is unwound as each resource exits. In the case -// of a fatal exception this stack is emptied after calling each hook's after() -// callback. +// Environment::AsyncHooks::async_ids_stack_ tracks the resource responsible for +// the current execution stack. This is unwound as each resource exits. In the +// case of a fatal exception this stack is emptied after calling each hook's +// after() callback. const { pushAsyncIds: pushAsyncIds_, popAsyncIds: popAsyncIds_ } = async_wrap; -// For performance reasons, only track Proimses when a hook is enabled. +// For performance reasons, only track Promises when a hook is enabled. const { enablePromiseHook, disablePromiseHook } = async_wrap; // Properties in active_hooks are used to keep track of the set of hooks being // executed in case another hook is enabled/disabled. The new set of hooks is @@ -264,7 +265,7 @@ function getOrSetAsyncId(object) { // the user to safeguard this call and make sure it's zero'd out when the // constructor is complete. 
function getDefaultTriggerAsyncId() { - var defaultTriggerAsyncId = async_id_fields[kDefaultTriggerAsyncId]; + let defaultTriggerAsyncId = async_id_fields[kDefaultTriggerAsyncId]; // If defaultTriggerAsyncId isn't set, use the executionAsyncId if (defaultTriggerAsyncId < 0) defaultTriggerAsyncId = async_id_fields[kExecutionAsyncId]; @@ -278,7 +279,7 @@ function defaultTriggerAsyncIdScope(triggerAsyncId, block, ...args) { const oldDefaultTriggerAsyncId = async_id_fields[kDefaultTriggerAsyncId]; async_id_fields[kDefaultTriggerAsyncId] = triggerAsyncId; - var ret; + let ret; try { ret = Reflect.apply(block, null, args); } finally { diff --git a/lib/internal/bootstrap_node.js b/lib/internal/bootstrap_node.js index 9af547a923022b..6b7ac044f638af 100644 --- a/lib/internal/bootstrap_node.js +++ b/lib/internal/bootstrap_node.js @@ -58,11 +58,7 @@ NativeModule.require('internal/trace_events_async_hooks').setup(); NativeModule.require('internal/inspector_async_hook').setup(); - // Do not initialize channel in debugger agent, it deletes env variable - // and the main thread won't see it. - if (process.argv[1] !== '--debug-agent') - _process.setupChannel(); - + _process.setupChannel(); _process.setupRawDebug(); const browserGlobals = !process._noBrowserGlobals; @@ -75,6 +71,13 @@ // URL::ToObject() method is used. NativeModule.require('internal/url'); + // On OpenBSD process.execPath will be relative unless we + // get the full path before process.execPath is used. + if (process.platform === 'openbsd') { + const { realpathSync } = NativeModule.require('fs'); + process.execPath = realpathSync.native(process.execPath); + } + Object.defineProperty(process, 'argv0', { enumerable: true, configurable: false, @@ -175,7 +178,7 @@ const fs = NativeModule.require('fs'); // read the source const filename = Module._resolveFilename(process.argv[1]); - var source = fs.readFileSync(filename, 'utf-8'); + const source = fs.readFileSync(filename, 'utf-8'); checkScriptSyntax(source, filename); process.exit(0); } @@ -221,7 +224,7 @@ // Read all of stdin - execute it. 
process.stdin.setEncoding('utf8'); - var code = ''; + let code = ''; process.stdin.on('data', function(d) { code += d; }); @@ -415,7 +418,7 @@ const versionTypes = icu.getVersion().split(','); for (var n = 0; n < versionTypes.length; n++) { - var name = versionTypes[n]; + const name = versionTypes[n]; const version = icu.getVersion(name); Object.defineProperty(process.versions, name, { writable: false, @@ -583,7 +586,7 @@ ]; NativeModule.prototype.compile = function() { - var source = NativeModule.getSource(this.id); + let source = NativeModule.getSource(this.id); source = NativeModule.wrap(source); this.loading = true; diff --git a/lib/internal/cluster/master.js b/lib/internal/cluster/master.js index 27a591a6f8d6d5..4836ede540a9cf 100644 --- a/lib/internal/cluster/master.js +++ b/lib/internal/cluster/master.js @@ -14,6 +14,7 @@ const intercom = new EventEmitter(); const SCHED_NONE = 1; const SCHED_RR = 2; const { isLegalPort } = require('internal/net'); +const [ minPort, maxPort ] = [ 1024, 65535 ]; module.exports = cluster; @@ -119,6 +120,8 @@ function createWorkerProcess(id, env) { } } else { inspectPort = process.debugPort + debugPortOffset; + if (inspectPort > maxPort) + inspectPort = inspectPort - maxPort + minPort - 1; debugPortOffset++; } diff --git a/lib/internal/errors.js b/lib/internal/errors.js index 76ca6098f83868..551817b343bf63 100644 --- a/lib/internal/errors.js +++ b/lib/internal/errors.js @@ -150,6 +150,12 @@ E('ERR_ENCODING_INVALID_ENCODED_DATA', E('ERR_ENCODING_NOT_SUPPORTED', (enc) => `The "${enc}" encoding is not supported`); E('ERR_FALSY_VALUE_REJECTION', 'Promise was rejected with falsy value'); +E('ERR_HTTP2_ALREADY_SHUTDOWN', + 'Http2Session is already shutdown or destroyed'); +E('ERR_HTTP2_ALTSVC_INVALID_ORIGIN', + 'HTTP/2 ALTSVC frames require a valid origin'); +E('ERR_HTTP2_ALTSVC_LENGTH', + 'HTTP/2 ALTSVC frames are limited to 16382 bytes'); E('ERR_HTTP2_CONNECT_AUTHORITY', ':authority header is required for CONNECT requests'); E('ERR_HTTP2_CONNECT_PATH', @@ -164,6 +170,8 @@ E('ERR_HTTP2_FRAME_ERROR', msg += ` with code ${code}`; return msg; }); +E('ERR_HTTP2_GOAWAY_SESSION', + 'New streams cannot be created after receiving a GOAWAY'); E('ERR_HTTP2_HEADERS_AFTER_RESPOND', 'Cannot specify additional headers after response initiated'); E('ERR_HTTP2_HEADERS_OBJECT', 'Headers must be an object'); @@ -202,15 +210,14 @@ E('ERR_HTTP2_PING_LENGTH', 'HTTP2 ping payload must be 8 bytes'); E('ERR_HTTP2_PSEUDOHEADER_NOT_ALLOWED', 'Cannot set HTTP/2 pseudo-headers'); E('ERR_HTTP2_PUSH_DISABLED', 'HTTP/2 client has disabled push streams'); E('ERR_HTTP2_SEND_FILE', 'Only regular files can be sent'); +E('ERR_HTTP2_SESSION_ERROR', 'Session closed with error code %s'); E('ERR_HTTP2_SOCKET_BOUND', 'The socket is already bound to an Http2Session'); E('ERR_HTTP2_STATUS_101', 'HTTP status code 101 (Switching Protocols) is forbidden in HTTP/2'); -E('ERR_HTTP2_STATUS_INVALID', - (code) => `Invalid status code: ${code}`); -E('ERR_HTTP2_STREAM_CLOSED', 'The stream is already closed'); -E('ERR_HTTP2_STREAM_ERROR', - (code) => `Stream closed with error code ${code}`); +E('ERR_HTTP2_STATUS_INVALID', 'Invalid status code: %s'); +E('ERR_HTTP2_STREAM_CANCEL', 'The pending stream has been canceled'); +E('ERR_HTTP2_STREAM_ERROR', 'Stream closed with error code %s'); E('ERR_HTTP2_STREAM_SELF_DEPENDENCY', 'A stream cannot depend on itself'); E('ERR_HTTP2_UNSUPPORTED_PROTOCOL', (protocol) => `protocol "${protocol}" is unsupported.`); @@ -230,6 +237,7 @@ E('ERR_INVALID_ARRAY_LENGTH', }); 
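// Note (illustrative, not part of the original patch): '%s' placeholders in the message templates registered here, such as the 'Stream closed with error code %s' template above, are filled in util.format() style when the error is constructed, e.g. new errors.Error('ERR_HTTP2_STREAM_ERROR', 7).message evaluates to 'Stream closed with error code 7'.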
E('ERR_INVALID_ASYNC_ID', (type, id) => `Invalid ${type} value: ${id}`); E('ERR_INVALID_CALLBACK', 'callback must be a function'); +E('ERR_INVALID_CHAR', 'Invalid character in %s'); E('ERR_INVALID_FD', (fd) => `"fd" must be a positive integer: ${fd}`); E('ERR_INVALID_FILE_URL_HOST', 'File URL host %s'); E('ERR_INVALID_FILE_URL_PATH', 'File URL path %s'); @@ -257,14 +265,23 @@ E('ERR_IPC_DISCONNECTED', 'IPC channel is already disconnected'); E('ERR_IPC_ONE_PIPE', 'Child process can have only one IPC pipe'); E('ERR_IPC_SYNC_FORK', 'IPC cannot be used with synchronous forks'); E('ERR_MISSING_ARGS', missingArgs); +E('ERR_MISSING_DYNAMIC_INSTANTIATE_HOOK', + 'The ES Module loader may not return a format of \'dynamic\' when no ' + + 'dynamicInstantiate function was provided'); E('ERR_MISSING_MODULE', 'Cannot find module %s'); E('ERR_MODULE_RESOLUTION_LEGACY', '%s not found by import in %s.' + ' Legacy behavior in require() would have found it at %s'); E('ERR_NAPI_CONS_FUNCTION', 'Constructor must be a function'); E('ERR_NAPI_CONS_PROTOTYPE_OBJECT', 'Constructor.prototype must be an object'); +E('ERR_NAPI_INVALID_DATAVIEW_ARGS', + 'byte_offset + byte_length should be less than or equal to the size in ' + + 'bytes of the array passed in'); +E('ERR_NAPI_INVALID_TYPEDARRAY_ALIGNMENT', 'start offset of %s should be a ' + + 'multiple of %s'); +E('ERR_NAPI_INVALID_TYPEDARRAY_LENGTH', 'Invalid typed array length'); E('ERR_NO_CRYPTO', 'Node.js is not compiled with OpenSSL crypto support'); E('ERR_NO_ICU', '%s is not supported on Node.js compiled without ICU'); -E('ERR_OUTOFMEMORY', 'Out of memory'); +E('ERR_OUT_OF_RANGE', 'The "%s" argument is out of range'); E('ERR_PARSE_HISTORY_DATA', 'Could not parse history data in %s'); E('ERR_REQUIRE_ESM', 'Must use import to load ES Module: %s'); E('ERR_SOCKET_ALREADY_BOUND', 'Socket is already bound'); diff --git a/lib/internal/http2/compat.js b/lib/internal/http2/compat.js index ec1f0ba64eff0a..b5dd81c80f4038 100644 --- a/lib/internal/http2/compat.js +++ b/lib/internal/http2/compat.js @@ -126,14 +126,11 @@ function onStreamAbortedRequest() { const request = this[kRequest]; if (request !== undefined && request[kState].closed === false) { request.emit('aborted'); - request.emit('close'); } } function onStreamAbortedResponse() { - const response = this[kResponse]; - if (response !== undefined && response[kState].closed === false) - response.emit('close'); + // no-op for now } function resumeStream(stream) { @@ -234,9 +231,7 @@ class Http2ServerRequest extends Readable { stream.on('end', onStreamEnd); stream.on('error', onStreamError); stream.on('aborted', onStreamAbortedRequest); - const onfinish = this[kFinish].bind(this); - stream.on('close', onfinish); - stream.on('finish', onfinish); + stream.on('close', this[kFinish].bind(this)); this.on('pause', onRequestPause); this.on('resume', onRequestResume); } @@ -297,7 +292,7 @@ class Http2ServerRequest extends Readable { state.didRead = true; process.nextTick(resumeStream, this[kStream]); } else { - this.emit('error', new errors.Error('ERR_HTTP2_STREAM_CLOSED')); + this.emit('error', new errors.Error('ERR_HTTP2_INVALID_STREAM')); } } @@ -345,6 +340,7 @@ class Http2ServerRequest extends Readable { // dump it for compatibility with http1 if (!state.didRead && !this._readableState.resumeScheduled) this.resume(); + this.emit('close'); } } @@ -366,9 +362,7 @@ class Http2ServerResponse extends Stream { this.writable = true; stream.on('drain', onStreamDrain); stream.on('aborted', onStreamAbortedResponse); - const onfinish = 
this[kFinish].bind(this); - stream.on('close', onfinish); - stream.on('finish', onfinish); + stream.on('close', this[kFinish].bind(this)); } // User land modules such as finalhandler just check truthiness of this @@ -520,7 +514,7 @@ class Http2ServerResponse extends Stream { const state = this[kState]; if (state.closed) - throw new errors.Error('ERR_HTTP2_STREAM_CLOSED'); + throw new errors.Error('ERR_HTTP2_INVALID_STREAM'); if (this[kStream].headersSent) throw new errors.Error('ERR_HTTP2_HEADERS_SENT'); @@ -550,7 +544,7 @@ class Http2ServerResponse extends Stream { } if (this[kState].closed) { - const err = new errors.Error('ERR_HTTP2_STREAM_CLOSED'); + const err = new errors.Error('ERR_HTTP2_INVALID_STREAM'); if (typeof cb === 'function') process.nextTick(cb, err); else @@ -620,12 +614,15 @@ class Http2ServerResponse extends Stream { if (typeof callback !== 'function') throw new errors.TypeError('ERR_INVALID_CALLBACK'); if (this[kState].closed) { - process.nextTick(callback, new errors.Error('ERR_HTTP2_STREAM_CLOSED')); + process.nextTick(callback, new errors.Error('ERR_HTTP2_INVALID_STREAM')); return; } - this[kStream].pushStream(headers, {}, function(stream, headers, options) { - const response = new Http2ServerResponse(stream); - callback(null, response); + this[kStream].pushStream(headers, {}, (err, stream, headers, options) => { + if (err) { + callback(err); + return; + } + callback(null, new Http2ServerResponse(stream)); }); } @@ -649,6 +646,7 @@ class Http2ServerResponse extends Stream { this[kProxySocket] = null; stream[kResponse] = undefined; this.emit('finish'); + this.emit('close'); } // TODO doesn't support callbacks diff --git a/lib/internal/http2/core.js b/lib/internal/http2/core.js index 2e0910dbee2963..cbaf908246bd6d 100644 --- a/lib/internal/http2/core.js +++ b/lib/internal/http2/core.js @@ -32,6 +32,9 @@ const kMaxFrameSize = (2 ** 24) - 1; const kMaxInt = (2 ** 32) - 1; const kMaxStreams = (2 ** 31) - 1; +// eslint-disable-next-line no-control-regex +const kQuotedString = /^[\x09\x20-\x5b\x5d-\x7e\x80-\xff]*$/; + const { assertIsObject, assertValidPseudoHeaderResponse, @@ -65,11 +68,14 @@ const TLSServer = tls.Server; const kInspect = require('internal/util').customInspectSymbol; +const kAlpnProtocol = Symbol('alpnProtocol'); const kAuthority = Symbol('authority'); -const kDestroySocket = Symbol('destroy-socket'); +const kEncrypted = Symbol('encrypted'); const kHandle = Symbol('handle'); const kID = Symbol('id'); const kInit = Symbol('init'); +const kInfoHeaders = Symbol('sent-info-headers'); +const kMaybeDestroy = Symbol('maybe-destroy'); const kLocalSettings = Symbol('local-settings'); const kOptions = Symbol('options'); const kOwner = Symbol('owner'); @@ -77,6 +83,8 @@ const kProceed = Symbol('proceed'); const kProtocol = Symbol('protocol'); const kProxySocket = Symbol('proxy-socket'); const kRemoteSettings = Symbol('remote-settings'); +const kSentHeaders = Symbol('sent-headers'); +const kSentTrailers = Symbol('sent-trailers'); const kServer = Symbol('server'); const kSession = Symbol('session'); const kState = Symbol('state'); @@ -84,7 +92,6 @@ const kType = Symbol('type'); const kUpdateTimer = Symbol('update-timer'); const kDefaultSocketTimeout = 2 * 60 * 1000; -const kRenegTest = /TLS session renegotiation disabled for this socket/; const { paddingBuffer, @@ -95,14 +102,13 @@ const { const { NGHTTP2_CANCEL, + NGHTTP2_REFUSED_STREAM, NGHTTP2_DEFAULT_WEIGHT, NGHTTP2_FLAG_END_STREAM, NGHTTP2_HCAT_PUSH_RESPONSE, NGHTTP2_HCAT_RESPONSE, NGHTTP2_INTERNAL_ERROR, 
NGHTTP2_NO_ERROR, - NGHTTP2_PROTOCOL_ERROR, - NGHTTP2_REFUSED_STREAM, NGHTTP2_SESSION_CLIENT, NGHTTP2_SESSION_SERVER, NGHTTP2_ERR_STREAM_ID_NOT_AVAILABLE, @@ -139,6 +145,18 @@ const { STREAM_OPTION_GET_TRAILERS } = constants; +const STREAM_FLAGS_PENDING = 0x0; +const STREAM_FLAGS_READY = 0x1; +const STREAM_FLAGS_CLOSED = 0x2; +const STREAM_FLAGS_HEADERS_SENT = 0x4; +const STREAM_FLAGS_HEAD_REQUEST = 0x8; +const STREAM_FLAGS_ABORTED = 0x10; + +const SESSION_FLAGS_PENDING = 0x0; +const SESSION_FLAGS_READY = 0x1; +const SESSION_FLAGS_CLOSED = 0x2; +const SESSION_FLAGS_DESTROYED = 0x4; + // Top level to avoid creating a closure function emit(self, ...args) { self.emit(...args); @@ -150,12 +168,15 @@ function emit(self, ...args) { // event. If the stream is not new, emit the 'headers' event to pass // the block of headers on. function onSessionHeaders(handle, id, cat, flags, headers) { - const owner = this[kOwner]; - const type = owner[kType]; - owner[kUpdateTimer](); + const session = this[kOwner]; + if (session.destroyed) + return; + + const type = session[kType]; + session[kUpdateTimer](); debug(`Http2Stream ${id} [Http2Session ` + `${sessionName(type)}]: headers received`); - const streams = owner[kState].streams; + const streams = session[kState].streams; const endOfStream = !!(flags & NGHTTP2_FLAG_END_STREAM); let stream = streams.get(id); @@ -164,21 +185,28 @@ function onSessionHeaders(handle, id, cat, flags, headers) { const obj = toHeaderObject(headers); if (stream === undefined) { + if (session.closed) { + // we are not accepting any new streams at this point. This callback + // should not be invoked at this point in time, but just in case it is, + // refuse the stream using an RST_STREAM and destroy the handle. + handle.rstStream(NGHTTP2_REFUSED_STREAM); + handle.destroy(); + return; + } const opts = { readable: !endOfStream }; - // owner[kType] can be only one of two possible values + // session[kType] can be only one of two possible values if (type === NGHTTP2_SESSION_SERVER) { - stream = new ServerHttp2Stream(owner, handle, id, opts, obj); + stream = new ServerHttp2Stream(session, handle, id, opts, obj); if (obj[HTTP2_HEADER_METHOD] === HTTP2_METHOD_HEAD) { // For head requests, there must not be a body... // end the writable side immediately. stream.end(); - stream[kState].headRequest = true; + stream[kState].flags |= STREAM_FLAGS_HEAD_REQUEST; } } else { - stream = new ClientHttp2Stream(owner, handle, id, opts); + stream = new ClientHttp2Stream(session, handle, id, opts); } - streams.set(id, stream); - process.nextTick(emit, owner, 'stream', stream, obj, flags, headers); + process.nextTick(emit, session, 'stream', stream, obj, flags, headers); } else { let event; const status = obj[HTTP2_HEADER_STATUS]; @@ -208,6 +236,12 @@ function onSessionHeaders(handle, id, cat, flags, headers) { } } +function tryClose(fd) { + // Try to close the file descriptor. If closing fails, assert because + // an error really should not happen at this point. + fs.close(fd, (err) => assert.ifError(err)); +} + // Called to determine if there are trailers to be sent at the end of a // Stream. The 'getTrailers' callback is invoked and passed a holder object. // The trailers to return are set on that object by the handler. Once the @@ -216,133 +250,192 @@ function onSessionHeaders(handle, id, cat, flags, headers) { // there are trailing headers to send. 
function onStreamTrailers() { const stream = this[kOwner]; + if (stream.destroyed) + return []; const trailers = Object.create(null); stream[kState].getTrailers.call(stream, trailers); const headersList = mapToHeaders(trailers, assertValidPseudoHeaderTrailer); if (!Array.isArray(headersList)) { - process.nextTick(emit, stream, 'error', headersList); - return; + stream.destroy(headersList); + return []; } + stream[kSentTrailers] = trailers; return headersList; } -// Called when the stream is closed. The close event is emitted on the -// Http2Stream instance +// Submit an RST-STREAM frame to be sent to the remote peer. +// This will cause the Http2Stream to be closed. +function submitRstStream(code) { + if (this[kHandle] !== undefined) { + this[kHandle].rstStream(code); + } +} + +// Called when the stream is closed either by sending or receiving an +// RST_STREAM frame, or through a natural end-of-stream. +// If the writable and readable sides of the stream are still open at this +// point, close them. If there is an open fd for file send, close that also. +// At this point the underlying node::http2::Http2Stream handle is no +// longer usable so destroy it also. function onStreamClose(code) { const stream = this[kOwner]; - stream[kUpdateTimer](); - abort(stream); + if (stream.destroyed) + return; + const state = stream[kState]; - state.rst = true; - state.rstCode = code; - if (state.fd !== undefined) - fs.close(state.fd, afterFDClose.bind(stream)); - setImmediate(stream.destroy.bind(stream)); -} + debug(`Http2Stream ${stream[kID]} [Http2Session ` + + `${sessionName(stream[kSession][kType])}]: closed with code ${code}`); -function afterFDClose(err) { - if (err) - process.nextTick(emit, this, 'error', err); -} + if (!stream.closed) { + // Unenroll from timeouts + unenroll(stream); + stream.removeAllListeners('timeout'); -// Called when an error event needs to be triggered -function onSessionError(error) { - const owner = this[kOwner]; - owner[kUpdateTimer](); - process.nextTick(emit, owner, 'error', error); + // Set the state flags + state.flags |= STREAM_FLAGS_CLOSED; + state.rstCode = code; + + // Close the writable side of the stream + abort(stream); + stream.end(); + } + + if (state.fd !== undefined) + tryClose(state.fd); + + // Defer destroy until we actually emit end. + if (stream._readableState.endEmitted || code !== NGHTTP2_NO_ERROR) { + // If errored or ended, we can destroy immediately. + stream[kMaybeDestroy](null, code); + } else { + // Wait for end to destroy. + stream.on('end', stream[kMaybeDestroy]); + // Push a null so the stream can end whenever the client consumes + // it completely. + stream.push(null); + + // Same as net. + if (stream._readableState.length === 0) { + stream.read(0); + } + } } // Receives a chunk of data for a given stream and forwards it on // to the Http2Stream Duplex for processing. -function onStreamRead(nread, buf, handle) { - const stream = handle[kOwner]; - stream[kUpdateTimer](); +function onStreamRead(nread, buf) { + const stream = this[kOwner]; if (nread >= 0 && !stream.destroyed) { + debug(`Http2Stream ${stream[kID]} [Http2Session ` + + `${sessionName(stream[kSession][kType])}]: receiving data chunk ` + + `of size ${nread}`); + stream[kUpdateTimer](); if (!stream.push(buf)) { - handle.readStop(); + if (!stream.destroyed) // we have to check a second time + this.readStop(); } return; } + // Last chunk was received. End the readable side. 
- stream.push(null); + debug(`Http2Stream ${stream[kID]} [Http2Session ` + + `${sessionName(stream[kSession][kType])}]: ending readable.`); + + // defer this until we actually emit end + if (stream._readableState.endEmitted) { + stream[kMaybeDestroy](); + } else { + stream.on('end', stream[kMaybeDestroy]); + stream.push(null); + stream.read(0); + } } // Called when the remote peer settings have been updated. // Resets the cached settings. -function onSettings(ack) { - const owner = this[kOwner]; - debug(`Http2Session ${sessionName(owner[kType])}: new settings received`); - owner[kUpdateTimer](); - let event = 'remoteSettings'; - if (ack) { - if (owner[kState].pendingAck > 0) - owner[kState].pendingAck--; - owner[kLocalSettings] = undefined; - event = 'localSettings'; - } else { - owner[kRemoteSettings] = undefined; - } - // Only emit the event if there are listeners registered - if (owner.listenerCount(event) > 0) - process.nextTick(emit, owner, event, owner[event]); +function onSettings() { + const session = this[kOwner]; + if (session.destroyed) + return; + session[kUpdateTimer](); + debug(`Http2Session ${sessionName(session[kType])}: new settings received`); + session[kRemoteSettings] = undefined; + session.emit('remoteSettings', session.remoteSettings); } // If the stream exists, an attempt will be made to emit an event // on the stream object itself. Otherwise, forward it on to the // session (which may, in turn, forward it on to the server) function onPriority(id, parent, weight, exclusive) { - const owner = this[kOwner]; + const session = this[kOwner]; + if (session.destroyed) + return; debug(`Http2Stream ${id} [Http2Session ` + - `${sessionName(owner[kType])}]: priority [parent: ${parent}, ` + + `${sessionName(session[kType])}]: priority [parent: ${parent}, ` + `weight: ${weight}, exclusive: ${exclusive}]`); - owner[kUpdateTimer](); - const streams = owner[kState].streams; - const stream = streams.get(id); - const emitter = stream === undefined ? owner : stream; - process.nextTick(emit, emitter, 'priority', id, parent, weight, exclusive); -} - -function emitFrameError(self, id, type, code) { - if (!self.emit('frameError', type, code, id)) { - const err = new errors.Error('ERR_HTTP2_FRAME_ERROR', type, code, id); - err.errno = code; - self.emit('error', err); + const emitter = session[kState].streams.get(id) || session; + if (!emitter.destroyed) { + emitter[kUpdateTimer](); + emitter.emit('priority', id, parent, weight, exclusive); } } // Called by the native layer when an error has occurred sending a // frame. This should be exceedingly rare. function onFrameError(id, type, code) { - const owner = this[kOwner]; - debug(`Http2Session ${sessionName(owner[kType])}: error sending frame type ` + - `${type} on stream ${id}, code: ${code}`); - owner[kUpdateTimer](); - const streams = owner[kState].streams; - const stream = streams.get(id); - const emitter = stream !== undefined ? 
stream : owner; - process.nextTick(emitFrameError, emitter, id, type, code); + const session = this[kOwner]; + if (session.destroyed) + return; + debug(`Http2Session ${sessionName(session[kType])}: error sending frame ` + + `type ${type} on stream ${id}, code: ${code}`); + const emitter = session[kState].streams.get(id) || session; + emitter[kUpdateTimer](); + emitter.emit('frameError', type, code, id); } -function emitGoaway(self, code, lastStreamID, buf) { - self.emit('goaway', code, lastStreamID, buf); - // Tear down the session or destroy - const state = self[kState]; - if (state.destroying || state.destroyed) +function onAltSvc(stream, origin, alt) { + const session = this[kOwner]; + if (session.destroyed) return; - if (!state.shuttingDown && !state.shutdown) { - self.shutdown({}, self.destroy.bind(self)); + debug(`Http2Session ${sessionName(session[kType])}: altsvc received: ` + + `stream: ${stream}, origin: ${origin}, alt: ${alt}`); + session[kUpdateTimer](); + session.emit('altsvc', alt, origin, stream); +} + +// Receiving a GOAWAY frame from the connected peer is a signal that no +// new streams should be created. If the code === NGHTTP2_NO_ERROR, we +// are going to send our own close, but allow existing frames to close +// normally. If code !== NGHTTP2_NO_ERROR, we are going to send our own +// close using the same code then destroy the session with an error. +// The goaway event will be emitted on next tick. +function onGoawayData(code, lastStreamID, buf) { + const session = this[kOwner]; + if (session.destroyed) return; + debug(`Http2Session ${sessionName(session[kType])}: goaway ${code} ` + + `received [last stream id: ${lastStreamID}]`); + + const state = session[kState]; + state.goawayCode = code; + state.goawayLastStreamID = lastStreamID; + + session.emit('goaway', code, lastStreamID, buf); + if (code === NGHTTP2_NO_ERROR) { + // If this is a no error goaway, begin shutting down. + // No new streams permitted, but existing streams may + // close naturally on their own. + session.close(); + } else { + // However, if the code is not NGHTTP2_NO_ERROR, destroy the + // session immediately. We destroy with an error but send a + // goaway using NGHTTP2_NO_ERROR because there was no error + // condition on this side of the session that caused the + // shutdown. + session.destroy(new errors.Error('ERR_HTTP2_SESSION_ERROR', code), + { errorCode: NGHTTP2_NO_ERROR }); } - self.destroy(); -} - -// Called by the native layer when a goaway frame has been received -function onGoawayData(code, lastStreamID, buf) { - const owner = this[kOwner]; - debug(`Http2Session ${sessionName(owner[kType])}: goaway ${code} received ` + - `[last stream id: ${lastStreamID}]`); - process.nextTick(emitGoaway, owner, code, lastStreamID, buf); } // Returns the padding to use per frame. The selectPadding callback is set @@ -353,11 +446,7 @@ function onSelectPadding(fn) { return function getPadding() { const frameLen = paddingBuffer[PADDING_BUF_FRAME_LENGTH]; const maxFramePayloadLen = paddingBuffer[PADDING_BUF_MAX_PAYLOAD_LENGTH]; - paddingBuffer[PADDING_BUF_RETURN_VALUE] = - Math.min(maxFramePayloadLen, - Math.max(frameLen, - fn(frameLen, - maxFramePayloadLen) | 0)); + paddingBuffer[PADDING_BUF_RETURN_VALUE] = fn(frameLen, maxFramePayloadLen); }; } @@ -366,18 +455,23 @@ // will be deferred until the socket is ready to go. 
function requestOnConnect(headers, options) { const session = this[kSession]; - debug(`Http2Session ${sessionName(session[kType])}: connected, ` + - 'initializing request'); - const streams = session[kState].streams; - validatePriorityOptions(options); + // At this point, the stream should have already been destroyed during + // the session.destroy() method. Do nothing else. + if (session.destroyed) + return; - const headersList = mapToHeaders(headers); - if (!Array.isArray(headersList)) { - process.nextTick(emit, this, 'error', headersList); + // If the session was closed while waiting for the connect, destroy + // the stream and do not continue with the request. + if (session.closed) { + const err = new errors.Error('ERR_HTTP2_GOAWAY_SESSION'); + this.destroy(err); return; } + debug(`Http2Session ${sessionName(session[kType])}: connected, ` + + 'initializing request'); + let streamOptions = 0; if (options.endStream) streamOptions |= STREAM_OPTION_EMPTY_PAYLOAD; @@ -389,7 +483,7 @@ function requestOnConnect(headers, options) { // ret will be either the reserved stream ID (if positive) // or an error code (if negative) - const ret = session[kHandle].request(headersList, + const ret = session[kHandle].request(headers, streamOptions, options.parent | 0, options.weight | 0, @@ -406,127 +500,111 @@ // session if not handled. if (typeof ret === 'number') { let err; - let target = session; switch (ret) { case NGHTTP2_ERR_STREAM_ID_NOT_AVAILABLE: err = new errors.Error('ERR_HTTP2_OUT_OF_STREAMS'); - target = this; + this.destroy(err); break; case NGHTTP2_ERR_INVALID_ARGUMENT: err = new errors.Error('ERR_HTTP2_STREAM_SELF_DEPENDENCY'); - target = this; + this.destroy(err); break; default: - err = new NghttpError(ret); + session.destroy(new NghttpError(ret)); } - process.nextTick(emit, target, 'error', err); return; } - const id = ret.id(); - streams.set(id, this); - this[kInit](id, ret); + this[kInit](ret.id(), ret); } +// Validates that priority options are correct, specifically: +// 1. options.weight must be a number +// 2. options.parent must be a positive number +// 3. options.exclusive must be a boolean +// 4. if specified, options.silent must be a boolean +// +// Also sets the default priority options if they are not set. 
function validatePriorityOptions(options) { + let err; if (options.weight === undefined) { options.weight = NGHTTP2_DEFAULT_WEIGHT; } else if (typeof options.weight !== 'number') { - const err = new errors.TypeError('ERR_INVALID_OPT_VALUE', - 'weight', - options.weight); - Error.captureStackTrace(err, validatePriorityOptions); - throw err; + err = new errors.TypeError('ERR_INVALID_OPT_VALUE', + 'weight', + options.weight); } if (options.parent === undefined) { options.parent = 0; } else if (typeof options.parent !== 'number' || options.parent < 0) { - const err = new errors.TypeError('ERR_INVALID_OPT_VALUE', - 'parent', - options.parent); - Error.captureStackTrace(err, validatePriorityOptions); - throw err; + err = new errors.TypeError('ERR_INVALID_OPT_VALUE', + 'parent', + options.parent); } if (options.exclusive === undefined) { options.exclusive = false; } else if (typeof options.exclusive !== 'boolean') { - const err = new errors.TypeError('ERR_INVALID_OPT_VALUE', - 'exclusive', - options.exclusive); - Error.captureStackTrace(err, validatePriorityOptions); - throw err; + err = new errors.TypeError('ERR_INVALID_OPT_VALUE', + 'exclusive', + options.exclusive); } if (options.silent === undefined) { options.silent = false; } else if (typeof options.silent !== 'boolean') { - const err = new errors.TypeError('ERR_INVALID_OPT_VALUE', - 'silent', - options.silent); + err = new errors.TypeError('ERR_INVALID_OPT_VALUE', + 'silent', + options.silent); + } + + if (err) { Error.captureStackTrace(err, validatePriorityOptions); throw err; } } +// When an error occurs internally at the binding level, immediately +// destroy the session. function onSessionInternalError(code) { - const owner = this[kOwner]; - const err = new NghttpError(code); - process.nextTick(emit, owner, 'error', err); + if (this[kOwner] !== undefined) + this[kOwner].destroy(new NghttpError(code)); } -// Creates the internal binding.Http2Session handle for an Http2Session -// instance. This occurs only after the socket connection has been -// established. Note: the binding.Http2Session will take over ownership -// of the socket. No other code should read from or write to the socket. -function setupHandle(session, socket, type, options) { - return function() { - debug(`Http2Session ${sessionName(type)}: setting up session handle`); - session[kState].connecting = false; - - updateOptionsBuffer(options); - const handle = new binding.Http2Session(type); - handle[kOwner] = session; - handle.error = onSessionInternalError; - handle.onpriority = onPriority; - handle.onsettings = onSettings; - handle.onheaders = onSessionHeaders; - handle.onerror = onSessionError; - handle.onframeerror = onFrameError; - handle.ongoawaydata = onGoawayData; - - if (typeof options.selectPadding === 'function') - handle.ongetpadding = onSelectPadding(options.selectPadding); - - assert(socket._handle !== undefined, - 'Internal HTTP/2 Failure. The socket is not connected. Please ' + - 'report this as a bug in Node.js'); - handle.consume(socket._handle._externalStream); - - session[kHandle] = handle; - - const settings = typeof options.settings === 'object' ? 
- options.settings : Object.create(null); - - session.settings(settings); - process.nextTick(emit, session, 'connect', session, socket); - }; +function settingsCallback(cb, ack, duration) { + this[kState].pendingAck--; + this[kLocalSettings] = undefined; + if (ack) { + debug(`Http2Session ${sessionName(this[kType])}: settings received`); + const settings = this.localSettings; + if (typeof cb === 'function') + cb(null, settings, duration); + this.emit('localSettings', settings); + } else { + debug(`Http2Session ${sessionName(this[kType])}: settings canceled`); + if (typeof cb === 'function') + cb(new errors.Error('ERR_HTTP2_SETTINGS_CANCEL')); + } } // Submits a SETTINGS frame to be sent to the remote peer. -function submitSettings(settings) { - const type = this[kType]; - debug(`Http2Session ${sessionName(type)}: submitting settings`); +function submitSettings(settings, callback) { + if (this.destroyed) + return; + debug(`Http2Session ${sessionName(this[kType])}: submitting settings`); this[kUpdateTimer](); - this[kLocalSettings] = undefined; updateSettingsBuffer(settings); - this[kHandle].settings(); + if (!this[kHandle].settings(settingsCallback.bind(this, callback))) { + this.destroy(new errors.Error('ERR_HTTP2_MAX_PENDING_SETTINGS_ACK')); + } } // Submits a PRIORITY frame to be sent to the remote peer // Note: If the silent option is true, the change will be made // locally with no PRIORITY frame sent. function submitPriority(options) { + if (this.destroyed) + return; this[kUpdateTimer](); // If the parent is the id, do nothing because a @@ -540,74 +618,16 @@ function submitPriority(options) { !!options.silent); } -// Submit an RST-STREAM frame to be sent to the remote peer. -// This will cause the Http2Stream to be closed. -function submitRstStream(code) { - this[kUpdateTimer](); - - const state = this[kState]; - if (state.rst) return; - state.rst = true; - state.rstCode = code; - - const ret = this[kHandle].rstStream(code); - if (ret < 0) { - const err = new NghttpError(ret); - process.nextTick(emit, this, 'error', err); - return; - } - this.destroy(); -} - -function doShutdown(options) { - const handle = this[kHandle]; - const state = this[kState]; - if (handle === undefined || state.shutdown) - return; // Nothing to do, possibly because the session shutdown already. - const ret = handle.goaway(options.errorCode | 0, - options.lastStreamID | 0, - options.opaqueData); - state.shuttingDown = false; - state.shutdown = true; - if (ret < 0) { - debug(`Http2Session ${sessionName(this[kType])}: shutdown failed`); - const err = new NghttpError(ret); - process.nextTick(emit, this, 'error', err); - return; - } - process.nextTick(emit, this, 'shutdown', options); -} - -// Submit a graceful or immediate shutdown request for the Http2Session. -function submitShutdown(options) { - const type = this[kType]; - debug(`Http2Session ${sessionName(type)}: submitting shutdown request`); - const shutdownFn = doShutdown.bind(this, options); - if (type === NGHTTP2_SESSION_SERVER && options.graceful === true) { - // first send a shutdown notice - this[kHandle].shutdownNotice(); - // then, on flip of the event loop, do the actual shutdown - setImmediate(shutdownFn); +// Submit a GOAWAY frame to be sent to the remote peer. +// If the lastStreamID is set to <= 0, then the lastProcStreamID will +// be used. The opaqueData must either be a typed array or undefined +// (which will be checked elsewhere). 
+function submitGoaway(code, lastStreamID, opaqueData) { + if (this.destroyed) return; - } - shutdownFn(); -} - -function finishSessionDestroy(socket) { - if (!socket.destroyed) - socket.destroy(); - - const state = this[kState]; - state.destroying = false; - state.destroyed = true; - - // Destroy the handle - if (this[kHandle] !== undefined) { - this[kHandle].destroy(state.skipUnconsume); - this[kHandle] = undefined; - } - - process.nextTick(emit, this, 'close'); + debug(`Http2Session ${sessionName(this[kType])}: submitting goaway`); + this[kUpdateTimer](); + this[kHandle].goaway(code, lastStreamID, opaqueData); } const proxySocketHandler = { @@ -652,13 +672,27 @@ } }; +// pingCallback() returns a function that is invoked when an HTTP2 PING +// frame acknowledgement is received. The ack is either true or false to +// indicate if the ping was successful or not. The duration indicates the +// number of milliseconds elapsed since the ping was sent and the ack +// received. The payload is a Buffer containing the 8 bytes of payload +// data received on the PING acknowledgement. function pingCallback(cb) { - return function(ack, duration, payload) { + return function pingCallback(ack, duration, payload) { const err = ack ? null : new errors.Error('ERR_HTTP2_PING_CANCEL'); cb(err, duration, payload); }; } +// Validates the values in a settings object. Specifically: +// 1. headerTableSize must be a number in the range 0 <= n <= kMaxInt +// 2. initialWindowSize must be a number in the range 0 <= n <= kMaxInt +// 3. maxFrameSize must be a number in the range 16384 <= n <= kMaxFrameSize +// 4. maxConcurrentStreams must be a number in the range 0 <= n <= kMaxStreams +// 5. maxHeaderListSize must be a number in the range 0 <= n <= kMaxInt +// 6. enablePush must be a boolean +// All settings are optional and may be left undefined function validateSettings(settings) { settings = Object.assign({}, settings); assertWithinRange('headerTableSize', @@ -681,13 +715,110 @@ const err = new errors.TypeError('ERR_HTTP2_INVALID_SETTING_VALUE', 'enablePush', settings.enablePush); err.actual = settings.enablePush; + Error.captureStackTrace(err, 'validateSettings'); throw err; } return settings; } +// Creates the internal binding.Http2Session handle for an Http2Session +// instance. This occurs only after the socket connection has been +// established. Note: the binding.Http2Session will take over ownership +// of the socket. No other code should read from or write to the socket. +function setupHandle(socket, type, options) { + // If the session has been destroyed, go ahead and emit 'connect', + // but do nothing else. The various on('connect') handlers set by + // core will check for session.destroyed before progressing, this + // ensures that those at least get cleared out. 
+ if (this.destroyed) { + process.nextTick(emit, this, 'connect', this, socket); + return; + } + debug(`Http2Session ${sessionName(type)}: setting up session handle`); + this[kState].flags |= SESSION_FLAGS_READY; + + updateOptionsBuffer(options); + const handle = new binding.Http2Session(type); + handle[kOwner] = this; + handle.error = onSessionInternalError; + handle.onpriority = onPriority; + handle.onsettings = onSettings; + handle.onheaders = onSessionHeaders; + handle.onframeerror = onFrameError; + handle.ongoawaydata = onGoawayData; + handle.onaltsvc = onAltSvc; + + if (typeof options.selectPadding === 'function') + handle.ongetpadding = onSelectPadding(options.selectPadding); + + assert(socket._handle !== undefined, + 'Internal HTTP/2 Failure. The socket is not connected. Please ' + + 'report this as a bug in Node.js'); + handle.consume(socket._handle._externalStream); + + this[kHandle] = handle; + + if (socket.encrypted) { + this[kAlpnProtocol] = socket.alpnProtocol; + this[kEncrypted] = true; + } else { + // 'h2c' is the protocol identifier for HTTP/2 over plain-text. We use + // it here to identify any session that is not explicitly using an + // encrypted socket. + this[kAlpnProtocol] = 'h2c'; + this[kEncrypted] = false; + } + + const settings = typeof options.settings === 'object' ? + options.settings : {}; + + this.settings(settings); + process.nextTick(emit, this, 'connect', this, socket); +} + +// Emits a close event followed by an error event if err is truthy. Used +// by Http2Session.prototype.destroy() +function emitClose(self, error) { + if (error) + self.emit('error', error); + self.emit('close'); +} + // Upon creation, the Http2Session takes ownership of the socket. The session // may not be ready to use immediately if the socket is not yet fully connected. +// In that case, the Http2Session will wait for the socket to connect. Once +// the Http2Session is ready, it will emit its own 'connect' event. +// +// The Http2Session.goaway() method will send a GOAWAY frame, signalling +// to the connected peer that a shutdown is in progress. Sending a goaway +// frame has no other effect, however. +// +// Receiving a GOAWAY frame will cause the Http2Session to first emit a 'goaway' +// event notifying the user that a shutdown is in progress. If the goaway +// error code equals 0 (NGHTTP2_NO_ERROR), session.close() will be called, +// causing the Http2Session to send its own GOAWAY frame and switch itself +// into a graceful closing state. In this state, new inbound or outbound +// Http2Streams will be rejected. Existing *pending* streams (those created +// but without an assigned stream ID or handle) will be destroyed with a +// cancel error. Existing open streams will be permitted to complete on their +// own. Once all existing streams close, session.destroy() will be called +// automatically. +// +// Calling session.destroy() will tear down the Http2Session immediately, +// making it no longer usable. Pending and existing streams will be destroyed. +// The bound socket will be destroyed. Once all resources have been freed up, +// the 'close' event will be emitted. Note that pending streams will be +// destroyed using a specific "ERR_HTTP2_STREAM_CANCEL" error. Existing open +// streams will be destroyed using the same error passed to session.destroy() +// +// If destroy is called with an error, an 'error' event will be emitted +// immediately following the 'close' event. +// +// The socket and Http2Session lifecycles are tightly bound. 
Once one is +// destroyed, the other should also be destroyed. When the socket is destroyed +// with an error, session.destroy() will be called with that same error. +// Likewise, when session.destroy() is called with an error, the same error +// will be sent to the socket. class Http2Session extends EventEmitter { constructor(type, options, socket) { super(); @@ -708,15 +839,16 @@ class Http2Session extends EventEmitter { socket[kSession] = this; this[kState] = { + flags: SESSION_FLAGS_PENDING, streams: new Map(), - destroyed: false, - shutdown: false, - shuttingDown: false, + pendingStreams: new Set(), pendingAck: 0, - maxPendingAck: Math.max(1, (options.maxPendingAck | 0) || 10), - writeQueueSize: 0 + writeQueueSize: 0, + originSet: undefined }; + this[kEncrypted] = undefined; + this[kAlpnProtocol] = undefined; this[kType] = type; this[kProxySocket] = null; this[kSocket] = socket; @@ -729,12 +861,8 @@ class Http2Session extends EventEmitter { if (typeof socket.disableRenegotiation === 'function') socket.disableRenegotiation(); - socket[kDestroySocket] = socket.destroy; - socket.destroy = socketDestroy; - - const setupFn = setupHandle(this, socket, type, options); + const setupFn = setupHandle.bind(this, socket, type, options); if (socket.connecting) { - this[kState].connecting = true; const connectEvent = socket instanceof tls.TLSSocket ? 'secureConnect' : 'connect'; socket.once(connectEvent, setupFn); @@ -742,22 +870,78 @@ class Http2Session extends EventEmitter { setupFn(); } - // Any individual session can have any number of active open - // streams, these may all need to be made aware of changes - // in state that occur -- such as when the associated socket - // is closed. To do so, we need to set the max listener count - // to something more reasonable because we may have any number - // of concurrent streams (2^31-1 is the upper limit on the number - // of streams) - this.setMaxListeners(kMaxStreams); debug(`Http2Session ${sessionName(type)}: created`); } + // Returns undefined if the socket is not yet connected, true if the + // socket is a TLSSocket, and false if it is not. + get encrypted() { + return this[kEncrypted]; + } + + // Returns undefined if the socket is not yet connected, `h2` if the + // socket is a TLSSocket and the alpnProtocol is `h2`, or `h2c` if the + // socket is not a TLSSocket. + get alpnProtocol() { + return this[kAlpnProtocol]; + } + + // TODO(jasnell): originSet is being added in preparation for ORIGIN frame + // support. At the current time, the ORIGIN frame specification is awaiting + // publication as an RFC and is awaiting implementation in nghttp2. Once + // added, an ORIGIN frame will add to the origins included in the origin + // set. 421 responses will remove origins from the set. + get originSet() { + if (!this.encrypted || this.destroyed) + return undefined; + + let originSet = this[kState].originSet; + if (originSet === undefined) { + const socket = this[kSocket]; + this[kState].originSet = originSet = new Set(); + if (socket.servername != null) { + let originString = `https://${socket.servername}`; + if (socket.remotePort != null) + originString += `:${socket.remotePort}`; + // We have to ensure that it is a properly serialized + // ASCII origin string. The socket.servername might not + // be properly ASCII encoded. 
+ originSet.add((new URL(originString)).origin); + } + } + + return Array.from(originSet); + } + + // True if the Http2Session is still waiting for the socket to connect + get connecting() { + return (this[kState].flags & SESSION_FLAGS_READY) === 0; + } + + // True if Http2Session.prototype.close() has been called + get closed() { + return !!(this[kState].flags & SESSION_FLAGS_CLOSED); + } + + // True if Http2Session.prototype.destroy() has been called + get destroyed() { + return !!(this[kState].flags & SESSION_FLAGS_DESTROYED); + } + + // Resets the timeout counter [kUpdateTimer]() { + if (this.destroyed) + return; _unrefActive(this); } + // Sets the id of the next stream to be created by this Http2Session. + // The value must be a number in the range 0 <= n <= kMaxStreams. The + // value also needs to be larger than the current next stream ID. setNextStreamID(id) { + if (this.destroyed) + throw new errors.Error('ERR_HTTP2_INVALID_SESSION'); + if (typeof id !== 'number') throw new errors.TypeError('ERR_INVALID_ARG_TYPE', 'id', 'number'); if (id <= 0 || id > kMaxStreams) @@ -765,10 +949,13 @@ class Http2Session extends EventEmitter { this[kHandle].setNextStreamID(id); } + // If ping is called while we are still connecting, or after close() has + // been called, the ping callback will be invoked immediately will a ping + // cancelled error and a duration of 0.0. ping(payload, callback) { - const state = this[kState]; - if (state.destroyed || state.destroying) + if (this.destroyed) throw new errors.Error('ERR_HTTP2_INVALID_SESSION'); + if (typeof payload === 'function') { callback = payload; payload = undefined; @@ -783,17 +970,21 @@ class Http2Session extends EventEmitter { } if (typeof callback !== 'function') throw new errors.TypeError('ERR_INVALID_CALLBACK'); - return this[kHandle].ping(payload, pingCallback(callback)); + + const cb = pingCallback(callback); + if (this.connecting || this.closed) { + process.nextTick(cb, false, 0.0, payload); + return; + } + + return this[kHandle].ping(payload, cb); } [kInspect](depth, opts) { - const state = this[kState]; const obj = { type: this[kType], - destroyed: state.destroyed, - destroying: state.destroying, - shutdown: state.shutdown, - shuttingDown: state.shuttingDown, + closed: this.closed, + destroyed: this.destroyed, state: this.state, localSettings: this.localSettings, remoteSettings: this.remoteSettings @@ -814,171 +1005,209 @@ class Http2Session extends EventEmitter { return this[kType]; } + // If a GOAWAY frame has been received, gives the error code specified + get goawayCode() { + return this[kState].goawayCode || NGHTTP2_NO_ERROR; + } + + // If a GOAWAY frame has been received, gives the last stream ID reported + get goawayLastStreamID() { + return this[kState].goawayLastStreamID || 0; + } + // true if the Http2Session is waiting for a settings acknowledgement get pendingSettingsAck() { return this[kState].pendingAck > 0; } - // true if the Http2Session has been destroyed - get destroyed() { - return this[kState].destroyed; - } - // Retrieves state information for the Http2Session get state() { - const handle = this[kHandle]; - return handle === undefined ? {} : getSessionState(handle); + return this.connecting || this.destroyed ? + {} : getSessionState(this[kHandle]); } // The settings currently in effect for the local peer. These will // be updated only when a settings acknowledgement has been received. 
   get localSettings() {
-    let settings = this[kLocalSettings];
+    const settings = this[kLocalSettings];
     if (settings !== undefined)
       return settings;
 
-    const handle = this[kHandle];
-    if (handle === undefined)
+    if (this.destroyed || this.connecting)
       return {};
 
-    settings = getSettings(handle, false); // Local
-    this[kLocalSettings] = settings;
-    return settings;
+    return this[kLocalSettings] = getSettings(this[kHandle], false); // Local
   }
 
   // The settings currently in effect for the remote peer.
   get remoteSettings() {
-    let settings = this[kRemoteSettings];
+    const settings = this[kRemoteSettings];
     if (settings !== undefined)
       return settings;
 
-    const handle = this[kHandle];
-    if (handle === undefined)
+    if (this.destroyed || this.connecting)
       return {};
 
-    settings = getSettings(handle, true); // Remote
-    this[kRemoteSettings] = settings;
-    return settings;
+    return this[kRemoteSettings] = getSettings(this[kHandle], true); // Remote
   }
 
   // Submits a SETTINGS frame to be sent to the remote peer.
-  settings(settings) {
-    const state = this[kState];
-    if (state.destroyed || state.destroying)
+  settings(settings, callback) {
+    if (this.destroyed)
       throw new errors.Error('ERR_HTTP2_INVALID_SESSION');
-
-    // Validate the input first
     assertIsObject(settings, 'settings');
     settings = validateSettings(settings);
-    if (state.pendingAck === state.maxPendingAck) {
-      throw new errors.Error('ERR_HTTP2_MAX_PENDING_SETTINGS_ACK',
-                             this[kState].pendingAck);
-    }
+
+    if (callback && typeof callback !== 'function')
+      throw new errors.TypeError('ERR_INVALID_CALLBACK');
     debug(`Http2Session ${sessionName(this[kType])}: sending settings`);
 
-    state.pendingAck++;
-    const settingsFn = submitSettings.bind(this, settings);
-    if (state.connecting) {
+    this[kState].pendingAck++;
+
+    const settingsFn = submitSettings.bind(this, settings, callback);
+    if (this.connecting) {
       this.once('connect', settingsFn);
       return;
     }
     settingsFn();
   }
 
-  // Destroy the Http2Session
-  destroy() {
-    const state = this[kState];
-    if (state.destroyed || state.destroying)
-      return;
-    debug(`Http2Session ${sessionName(this[kType])}: destroying`);
-    state.destroying = true;
-    state.destroyed = false;
-
-    // Unenroll the timer
-    this.setTimeout(0, sessionOnTimeout);
-
-    // Shut down any still open streams
-    const streams = state.streams;
-    streams.forEach((stream) => stream.destroy());
-
-    // Disassociate from the socket and server
-    const socket = this[kSocket];
-    // socket.pause();
-    delete this[kProxySocket];
-    delete this[kSocket];
-    delete this[kServer];
+  // Submits a GOAWAY frame to be sent to the remote peer. Note that this
+  // is only a notification, and does not affect the usable state of the
+  // session with the notable exception that new incoming streams will
+  // be rejected automatically.
+ goaway(code = NGHTTP2_NO_ERROR, lastStreamID = 0, opaqueData) { + if (this.destroyed) + throw new errors.Error('ERR_HTTP2_INVALID_SESSION'); - if (this[kHandle] !== undefined) - this[kHandle].destroying(); + if (opaqueData !== undefined && !isArrayBufferView(opaqueData)) { + throw new errors.TypeError('ERR_INVALID_ARG_TYPE', + 'opaqueData', + ['Buffer', 'TypedArray', 'DataView']); + } + if (typeof code !== 'number') { + throw new errors.TypeError('ERR_INVALID_ARG_TYPE', 'code', 'number'); + } + if (typeof lastStreamID !== 'number') { + throw new errors.TypeError('ERR_INVALID_ARG_TYPE', + 'lastStreamID', 'number'); + } - setImmediate(finishSessionDestroy.bind(this), socket); + const goawayFn = submitGoaway.bind(this, code, lastStreamID, opaqueData); + if (this.connecting) { + this.once('connect', goawayFn); + return; + } + goawayFn(); } - // Graceful or immediate shutdown of the Http2Session. Graceful shutdown - // is only supported on the server-side - shutdown(options, callback) { - const state = this[kState]; - if (state.destroyed || state.destroying) - throw new errors.Error('ERR_HTTP2_INVALID_SESSION'); - - if (state.shutdown || state.shuttingDown) + // Destroy the Http2Session, making it no longer usable and cancelling + // any pending activity. + destroy(error = NGHTTP2_NO_ERROR, code) { + if (this.destroyed) return; + debug(`Http2Session ${sessionName(this[kType])}: destroying`); - const type = this[kType]; - - if (typeof options === 'function') { - callback = options; - options = undefined; + if (typeof error === 'number') { + code = error; + error = + code !== NGHTTP2_NO_ERROR ? + new errors.Error('ERR_HTTP2_SESSION_ERROR', code) : undefined; } + if (code === undefined && error != null) + code = NGHTTP2_INTERNAL_ERROR; - assertIsObject(options, 'options'); - options = Object.assign(Object.create(null), options); - - if (options.opaqueData !== undefined && - !isArrayBufferView(options.opaqueData)) { - throw new errors.TypeError('ERR_INVALID_OPT_VALUE', - 'opaqueData', - options.opaqueData); - } - if (type === NGHTTP2_SESSION_SERVER && - options.graceful !== undefined && - typeof options.graceful !== 'boolean') { - throw new errors.TypeError('ERR_INVALID_OPT_VALUE', - 'graceful', - options.graceful); - } - if (options.errorCode !== undefined && - typeof options.errorCode !== 'number') { - throw new errors.TypeError('ERR_INVALID_OPT_VALUE', - 'errorCode', - options.errorCode); - } - if (options.lastStreamID !== undefined && - (typeof options.lastStreamID !== 'number' || - options.lastStreamID < 0)) { - throw new errors.TypeError('ERR_INVALID_OPT_VALUE', - 'lastStreamID', - options.lastStreamID); + const state = this[kState]; + state.flags |= SESSION_FLAGS_DESTROYED; + + // Clear timeout and remove timeout listeners + unenroll(this); + this.removeAllListeners('timeout'); + + // Destroy any pending and open streams + const cancel = new errors.Error('ERR_HTTP2_STREAM_CANCEL'); + if (error) { + cancel.cause = error; + if (typeof error.message === 'string') + cancel.message += ` (caused by: ${error.message})`; } + state.pendingStreams.forEach((stream) => stream.destroy(cancel)); + state.streams.forEach((stream) => stream.destroy(error)); + + // Disassociate from the socket and server + const socket = this[kSocket]; + const handle = this[kHandle]; - debug(`Http2Session ${sessionName(type)}: initiating shutdown`); - state.shuttingDown = true; + // Destroy the handle if it exists at this point + if (handle !== undefined) + handle.destroy(code, socket.destroyed); - if (callback) { - 
this.on('shutdown', callback); + // If there is no error, use setImmediate to destroy the socket on the + // next iteration of the event loop in order to give data time to transmit. + // Otherwise, destroy immediately. + if (!socket.destroyed) { + if (!error) { + setImmediate(socket.end.bind(socket)); + } else { + socket.destroy(error); + } } - const shutdownFn = submitShutdown.bind(this, options); - if (state.connecting) { - this.once('connect', shutdownFn); + this[kProxySocket] = undefined; + this[kSocket] = undefined; + this[kHandle] = undefined; + socket[kSession] = undefined; + socket[kServer] = undefined; + + // Finally, emit the close and error events (if necessary) on next tick. + process.nextTick(emitClose, this, error); + } + + // Closing the session will: + // 1. Send a goaway frame + // 2. Mark the session as closed + // 3. Prevent new inbound or outbound streams from being opened + // 4. Optionally register a 'close' event handler + // 5. Will cause the session to automatically destroy after the + // last currently open Http2Stream closes. + // + // Close always assumes a good, non-error shutdown (NGHTTP_NO_ERROR) + // + // If the session has not connected yet, the closed flag will still be + // set but the goaway will not be sent until after the connect event + // is emitted. + close(callback) { + if (this.closed || this.destroyed) return; + debug(`Http2Session ${sessionName(this[kType])}: marking session closed`); + this[kState].flags |= SESSION_FLAGS_CLOSED; + if (typeof callback === 'function') + this.once('close', callback); + this.goaway(); + this[kMaybeDestroy](); + } + + // Destroy the session if: + // * error is not undefined/null + // * session is closed and there are no more pending or open streams + [kMaybeDestroy](error) { + if (error == null) { + const state = this[kState]; + // Do not destroy if we're not closed and there are pending/open streams + if (!this.closed || + state.streams.size > 0 || + state.pendingStreams.size > 0) { + return; + } } - - debug(`Http2Session ${sessionName(type)}: sending shutdown`); - shutdownFn(); + this.destroy(error); } _onTimeout() { + // If the session is destroyed, this should never actually be invoked, + // but just in case... + if (this.destroyed) + return; // This checks whether a write is currently in progress and also whether // that write is actually sending data across the write. The kHandle // stored `chunksSentSinceLastWrite` is only updated when a timeout event @@ -995,10 +1224,25 @@ class Http2Session extends EventEmitter { } } - process.nextTick(emit, this, 'timeout'); + this.emit('timeout'); + } + + ref() { + if (this[kSocket]) { + this[kSocket].ref(); + } + } + + unref() { + if (this[kSocket]) { + this[kSocket].unref(); + } } } +// ServerHttp2Session instances should never have to wait for the socket +// to connect as they are always created after the socket has already been +// established. class ServerHttp2Session extends Http2Session { constructor(options, socket, server) { super(NGHTTP2_SESSION_SERVER, options, socket); @@ -1008,8 +1252,60 @@ class ServerHttp2Session extends Http2Session { get server() { return this[kServer]; } + + // Submits an altsvc frame to be sent to the client. `stream` is a + // numeric Stream ID. origin is a URL string that will be used to get + // the origin. alt is a string containing the altsvc details. No fancy + // API is provided for that. 
+ altsvc(alt, originOrStream) { + if (this.destroyed) + throw new errors.Error('ERR_HTTP2_INVALID_SESSION'); + + let stream = 0; + let origin; + + if (typeof originOrStream === 'string') { + origin = (new URL(originOrStream)).origin; + if (origin === 'null') + throw new errors.TypeError('ERR_HTTP2_ALTSVC_INVALID_ORIGIN'); + } else if (typeof originOrStream === 'number') { + if (originOrStream >>> 0 !== originOrStream || originOrStream === 0) + throw new errors.RangeError('ERR_OUT_OF_RANGE', 'originOrStream'); + stream = originOrStream; + } else if (originOrStream !== undefined) { + // Allow origin to be passed a URL or object with origin property + if (originOrStream !== null && typeof originOrStream === 'object') + origin = originOrStream.origin; + // Note: if originOrStream is an object with an origin property other + // than a URL, then it is possible that origin will be malformed. + // We do not verify that here. Users who go that route need to + // ensure they are doing the right thing or the payload data will + // be invalid. + if (typeof origin !== 'string') { + throw new errors.TypeError('ERR_INVALID_ARG_TYPE', 'originOrStream', + ['string', 'number', 'URL', 'object']); + } else if (origin === 'null' || origin.length === 0) { + throw new errors.TypeError('ERR_HTTP2_ALTSVC_INVALID_ORIGIN'); + } + } + + if (typeof alt !== 'string') + throw new errors.TypeError('ERR_INVALID_ARG_TYPE', 'alt', 'string'); + if (!kQuotedString.test(alt)) + throw new errors.TypeError('ERR_INVALID_CHAR', 'alt'); + + // Max length permitted for ALTSVC + if ((alt.length + (origin !== undefined ? origin.length : 0)) > 16382) + throw new errors.TypeError('ERR_HTTP2_ALTSVC_LENGTH'); + + this[kHandle].altsvc(stream, origin || '', alt); + } } +// ClientHttp2Session instances have to wait for the socket to connect after +// they have been created. Various operations such as request() may be used, +// but the actual protocol communication will only occur after the socket +// has been connected. class ClientHttp2Session extends Http2Session { constructor(options, socket) { super(NGHTTP2_SESSION_CLIENT, options, socket); @@ -1018,11 +1314,14 @@ class ClientHttp2Session extends Http2Session { // Submits a new HTTP2 request to the connected peer. Returns the // associated Http2Stream instance. request(headers, options) { - const state = this[kState]; - if (state.destroyed || state.destroying) - throw new errors.Error('ERR_HTTP2_INVALID_SESSION'); debug(`Http2Session ${sessionName(this[kType])}: initiating request`); + if (this.destroyed) + throw new errors.Error('ERR_HTTP2_INVALID_SESSION'); + + if (this.closed) + throw new errors.Error('ERR_HTTP2_GOAWAY_SESSION'); + this[kUpdateTimer](); assertIsObject(headers, 'headers'); @@ -1072,15 +1371,20 @@ class ClientHttp2Session extends Http2Session { options.getTrailers); } + const headersList = mapToHeaders(headers); + if (!Array.isArray(headersList)) + throw headersList; + const stream = new ClientHttp2Stream(this, undefined, undefined, {}); + stream[kSentHeaders] = headers; // Close the writable side of the stream if options.endStream is set. 
if (options.endStream) stream.end(); - const onConnect = requestOnConnect.bind(stream, headers, options); - if (state.connecting) { - stream.on('connect', onConnect); + const onConnect = requestOnConnect.bind(stream, headersList, options); + if (this.connecting) { + this.on('connect', onConnect); } else { onConnect(); } @@ -1128,63 +1432,38 @@ function afterDoStreamWrite(status, handle, req) { if (session !== undefined) session[kState].writeQueueSize -= bytes; - if (typeof req.callback === 'function') - req.callback(); + req.callback(null); req.handle = undefined; } -function onHandleFinish() { - if (this[kID] === undefined) { - this.once('ready', onHandleFinish); - } else { - const handle = this[kHandle]; - if (handle !== undefined) { - const req = new ShutdownWrap(); - req.oncomplete = () => {}; - req.handle = handle; - handle.shutdown(req); - } - } -} - -function onSessionClose(hadError, code) { - abort(this); - this.push(null); // Close the readable side - this.end(); // Close the writable side -} - function streamOnResume() { - if (this[kID] === undefined) { - this.once('ready', streamOnResume); - return; - } - this[kHandle].readStart(); + if (!this.destroyed && !this.pending) + this[kHandle].readStart(); } function streamOnPause() { - this[kHandle].readStop(); -} - -function handleFlushData(handle) { - handle.flushData(); -} - -function streamOnSessionConnect() { - const session = this[kSession]; - debug(`Http2Session ${sessionName(session[kType])}: session connected`); - this[kState].connecting = false; - process.nextTick(emit, this, 'connect'); + if (!this.destroyed && !this.pending) + this[kHandle].readStop(); } +// If the writable side of the Http2Stream is still open, emit the +// 'aborted' event and set the aborted flag. function abort(stream) { - if (!stream[kState].aborted && + if (!stream.aborted && !(stream._writableState.ended || stream._writableState.ending)) { + stream[kState].flags |= STREAM_FLAGS_ABORTED; stream.emit('aborted'); - stream[kState].aborted = true; } } +function afterShutdown() { + this.callback(); + const stream = this.handle[kOwner]; + if (stream) + stream[kMaybeDestroy](); +} + // An Http2Stream is a Duplex stream that is backed by a // node::http2::Http2Stream handle implementing StreamBase. class Http2Stream extends Duplex { @@ -1193,37 +1472,40 @@ class Http2Stream extends Duplex { options.decodeStrings = false; super(options); this[async_id_symbol] = -1; + + // Corking the stream automatically allows writes to happen + // but ensures that those are buffered until the handle has + // been assigned. 
this.cork(); this[kSession] = session; + session[kState].pendingStreams.add(this); - const state = this[kState] = { - rst: false, + this[kState] = { + flags: STREAM_FLAGS_PENDING, rstCode: NGHTTP2_NO_ERROR, - headersSent: false, - headRequest: false, - aborted: false, - closeHandler: onSessionClose.bind(this), writeQueueSize: 0 }; - this.once('finish', onHandleFinish); this.on('resume', streamOnResume); this.on('pause', streamOnPause); - session.once('close', state.closeHandler); - - if (session[kState].connecting) { - state.connecting = true; - session.once('connect', streamOnSessionConnect.bind(this)); - } } [kUpdateTimer]() { + if (this.destroyed) + return; _unrefActive(this); if (this[kSession]) _unrefActive(this[kSession]); } [kInit](id, handle) { + const state = this[kState]; + state.flags |= STREAM_FLAGS_READY; + + const session = this[kSession]; + session[kState].pendingStreams.delete(this); + session[kState].streams.set(id, this); + this[kID] = id; this[async_id_symbol] = handle.getAsyncId(); handle[kOwner] = this; @@ -1237,7 +1519,9 @@ class Http2Stream extends Duplex { [kInspect](depth, opts) { const obj = { - id: this[kID], + id: this[kID] || '', + closed: this.closed, + destroyed: this.destroyed, state: this.state, readableState: this._readableState, writableState: this._writableState @@ -1245,6 +1529,22 @@ class Http2Stream extends Duplex { return `Http2Stream ${util.format(obj)}`; } + get sentHeaders() { + return this[kSentHeaders]; + } + + get sentTrailers() { + return this[kSentTrailers]; + } + + get sentInfoHeaders() { + return this[kInfoHeaders]; + } + + get pending() { + return this[kID] === undefined; + } + // The id of the Http2Stream, will be undefined if the socket is not // yet connected. get id() { @@ -1257,6 +1557,8 @@ class Http2Stream extends Duplex { } _onTimeout() { + if (this.destroyed) + return; // This checks whether a write is currently in progress and also whether // that write is actually sending data across the write. The kHandle // stored `chunksSentSinceLastWrite` is only updated when a timeout event @@ -1273,22 +1575,27 @@ class Http2Stream extends Duplex { } } - process.nextTick(emit, this, 'timeout'); + this.emit('timeout'); + } + + // true if the HEADERS frame has been sent + get headersSent() { + return !!(this[kState].flags & STREAM_FLAGS_HEADERS_SENT); } - // true if the Http2Stream was aborted abornomally. + // true if the Http2Stream was aborted abnormally. get aborted() { - return this[kState].aborted; + return !!(this[kState].flags & STREAM_FLAGS_ABORTED); } // true if dealing with a HEAD request get headRequest() { - return this[kState].headRequest; + return !!(this[kState].flags & STREAM_FLAGS_HEAD_REQUEST); } // The error code reported when this Http2Stream was closed. get rstCode() { - return this[kState].rst ? this[kState].rstCode : undefined; + return this[kState].rstCode; } // State information for the Http2Stream @@ -1306,14 +1613,26 @@ class Http2Stream extends Duplex { } _write(data, encoding, cb) { - if (this[kID] === undefined) { + // When the Http2Stream is first created, it is corked until the + // handle and the stream ID is assigned. However, if the user calls + // uncork() before that happens, the Duplex will attempt to pass + // writes through. Those need to be queued up here. + if (this.pending) { this.once('ready', this._write.bind(this, data, encoding, cb)); return; } - this[kUpdateTimer](); + // If the stream has been destroyed, there's nothing else we can do + // because the handle has been destroyed. 
This should only be an + // issue if a write occurs before the 'ready' event in the case where + // the duplex is uncorked before the stream is ready to go. In that + // case, drop the data on the floor. An error should have already been + // emitted. + if (this.destroyed) + return; - if (!this[kState].headersSent) + this[kUpdateTimer](); + if (!this.headersSent) this[kProceed](); const handle = this[kHandle]; @@ -1325,19 +1644,32 @@ class Http2Stream extends Duplex { req.async = false; const err = createWriteReq(req, handle, data, encoding); if (err) - throw util._errnoException(err, 'write', req.error); + return this.destroy(util._errnoException(err, 'write', req.error), cb); trackWriteState(this, req.bytes); } _writev(data, cb) { - if (this[kID] === undefined) { + // When the Http2Stream is first created, it is corked until the + // handle and the stream ID is assigned. However, if the user calls + // uncork() before that happens, the Duplex will attempt to pass + // writes through. Those need to be queued up here. + if (this.pending) { this.once('ready', this._writev.bind(this, data, cb)); return; } + // If the stream has been destroyed, there's nothing else we can do + // because the handle has been destroyed. This should only be an + // issue if a write occurs before the 'ready' event in the case where + // the duplex is uncorked before the stream is ready to go. In that + // case, drop the data on the floor. An error should have already been + // emitted. + if (this.destroyed) + return; + this[kUpdateTimer](); - if (!this[kState].headersSent) + if (!this.headersSent) this[kProceed](); const handle = this[kHandle]; @@ -1355,56 +1687,37 @@ class Http2Stream extends Duplex { } const err = handle.writev(req, chunks); if (err) - throw util._errnoException(err, 'write', req.error); + return this.destroy(util._errnoException(err, 'write', req.error), cb); trackWriteState(this, req.bytes); } + _final(cb) { + const handle = this[kHandle]; + if (this[kID] === undefined) { + this.once('ready', () => this._final(cb)); + } else if (handle !== undefined) { + debug(`Http2Stream ${this[kID]} [Http2Session ` + + `${sessionName(this[kSession][kType])}]: _final shutting down`); + const req = new ShutdownWrap(); + req.oncomplete = afterShutdown; + req.callback = cb; + req.handle = handle; + handle.shutdown(req); + } else { + cb(); + } + } + _read(nread) { if (this.destroyed) { this.push(null); return; } - if (this[kHandle] !== undefined) - process.nextTick(handleFlushData, this[kHandle]); - } - - // Submits an RST-STREAM frame to shutdown this stream. - // If the stream ID has not yet been allocated, the action will - // defer until the ready event is emitted. - // After sending the rstStream, this.destroy() will be called making - // the stream object no longer usable. 
- rstStream(code = NGHTTP2_NO_ERROR) { - if (typeof code !== 'number') - throw new errors.TypeError('ERR_INVALID_ARG_TYPE', 'code', 'number'); - if (code < 0 || code > kMaxInt) - throw new errors.RangeError('ERR_OUT_OF_RANGE', 'code'); - - const rstStreamFn = submitRstStream.bind(this, code); - if (this[kID] === undefined) { - this.once('ready', rstStreamFn); - return; + if (!this.pending) { + streamOnResume.call(this); + } else { + this.once('ready', streamOnResume); } - rstStreamFn(); - } - - rstWithNoError() { - this.rstStream(NGHTTP2_NO_ERROR); - } - - rstWithProtocolError() { - this.rstStream(NGHTTP2_PROTOCOL_ERROR); - } - - rstWithCancel() { - this.rstStream(NGHTTP2_CANCEL); - } - - rstWithRefuse() { - this.rstStream(NGHTTP2_REFUSED_STREAM); - } - - rstWithInternalError() { - this.rstStream(NGHTTP2_INTERNAL_ERROR); } priority(options) { @@ -1416,86 +1729,148 @@ class Http2Stream extends Duplex { validatePriorityOptions(options); const priorityFn = submitPriority.bind(this, options); - if (this[kID] === undefined) { + + // If the handle has not yet been assigned, queue up the priority + // frame to be sent as soon as the ready event is emitted. + if (this.pending) { this.once('ready', priorityFn); return; } priorityFn(); } + get closed() { + return !!(this[kState].flags & STREAM_FLAGS_CLOSED); + } + + // Close initiates closing the Http2Stream instance by sending an RST_STREAM + // frame to the connected peer. The readable and writable sides of the + // Http2Stream duplex are closed and the timeout timer is unenrolled. If + // a callback is passed, it is registered to listen for the 'close' event. + // + // If the handle and stream ID have not been assigned yet, the close + // will be queued up to wait for the ready event. As soon as the stream ID + // is determined, the close will proceed. + // + // Submitting the RST_STREAM frame to the underlying handle will cause + // the Http2Stream to be closed and ultimately destroyed. After calling + // close, it is still possible to queue up PRIORITY and RST_STREAM frames, + // but no DATA and HEADERS frames may be sent. + close(code = NGHTTP2_NO_ERROR, callback) { + if (typeof code !== 'number') + throw new errors.TypeError('ERR_INVALID_ARG_TYPE', 'code', 'number'); + if (code < 0 || code > kMaxInt) + throw new errors.RangeError('ERR_OUT_OF_RANGE', 'code'); + if (callback !== undefined && typeof callback !== 'function') + throw new errors.TypeError('ERR_INVALID_CALLBACK'); + + // Unenroll the timeout. + unenroll(this); + this.removeAllListeners('timeout'); + + // Close the writable + abort(this); + this.end(); + + if (this.closed) + return; + + const state = this[kState]; + state.flags |= STREAM_FLAGS_CLOSED; + state.rstCode = code; + + if (callback !== undefined) { + this.once('close', callback); + } + + if (this[kHandle] === undefined) + return; + + const rstStreamFn = submitRstStream.bind(this, code); + // If the handle has not yet been assigned, queue up the request to + // ensure that the RST_STREAM frame is sent after the stream ID has + // been determined. + if (this.pending) { + this.push(null); + this.once('ready', rstStreamFn); + return; + } + rstStreamFn(); + } + // Called by this.destroy(). - // * If called before the stream is allocated, will defer until the - // ready event is emitted. // * Will submit an RST stream to shutdown the stream if necessary. // This will cause the internal resources to be released. 
// * Then cleans up the resources on the js side _destroy(err, callback) { const session = this[kSession]; - if (this[kID] === undefined) { - this.once('ready', this._destroy.bind(this, err, callback)); - return; - } + const handle = this[kHandle]; + const id = this[kID]; - debug(`Http2Stream ${this[kID]} [Http2Session ` + + debug(`Http2Stream ${this[kID] || ''} [Http2Session ` + `${sessionName(session[kType])}]: destroying stream`); - const state = this[kState]; - session[kState].writeQueueSize -= state.writeQueueSize; - state.writeQueueSize = 0; - - const server = session[kServer]; - if (server !== undefined && err) { - server.emit('streamError', err, this); + const code = state.rstCode = + err != null ? + NGHTTP2_INTERNAL_ERROR : + state.rstCode || NGHTTP2_NO_ERROR; + if (handle !== undefined) { + // If the handle exists, we need to close, then destroy the handle + this.close(code); + if (!this._readableState.ended && !this._readableState.ending) + this.push(null); + handle.destroy(); + session[kState].streams.delete(id); + } else { + unenroll(this); + this.removeAllListeners('timeout'); + state.flags |= STREAM_FLAGS_CLOSED; + abort(this); + this.end(); + this.push(null); + session[kState].pendingStreams.delete(this); } - process.nextTick(continueStreamDestroy.bind(this), err, callback); - } -} - -function continueStreamDestroy(err, callback) { - const session = this[kSession]; - const state = this[kState]; - - // Submit RST-STREAM frame if one hasn't been sent already and the - // stream hasn't closed normally... - const rst = state.rst; - let code = state.rstCode; - if (!rst && !session.destroyed) { - code = err instanceof Error ? NGHTTP2_INTERNAL_ERROR : NGHTTP2_NO_ERROR; - this.rstStream(code); - } + // Adjust the write queue size for accounting + session[kState].writeQueueSize -= state.writeQueueSize; + state.writeQueueSize = 0; - // Remove the close handler on the session - session.removeListener('close', state.closeHandler); + // RST code 8 not emitted as an error as its used by clients to signify + // abort and is already covered by aborted event, also allows more + // seamless compatibility with http1 + if (err == null && code !== NGHTTP2_NO_ERROR && code !== NGHTTP2_CANCEL) + err = new errors.Error('ERR_HTTP2_STREAM_ERROR', code); - // Unenroll the timer - this.setTimeout(0); + this[kSession] = undefined; + this[kHandle] = undefined; - setImmediate(finishStreamDestroy.bind(this)); + // This notifies the session that this stream has been destroyed and + // gives the session the opportunity to clean itself up. The session + // will destroy if it has been closed and there are no other open or + // pending streams. + session[kMaybeDestroy](); + process.nextTick(emit, this, 'close', code); + callback(err); - // RST code 8 not emitted as an error as its used by clients to signify - // abort and is already covered by aborted event, also allows more - // seamless compatibility with http1 - if (code !== NGHTTP2_NO_ERROR && code !== NGHTTP2_CANCEL && !err) { - err = new errors.Error('ERR_HTTP2_STREAM_ERROR', code); } - callback(err); - abort(this); - this.push(null); // Close the readable side - this.end(); // Close the writable side - process.nextTick(emit, this, 'close', code); -} + // The Http2Stream can be destroyed if it has closed and if the readable + // side has received the final chunk. 
+  [kMaybeDestroy](error, code = NGHTTP2_NO_ERROR) {
+    if (error || code !== NGHTTP2_NO_ERROR) {
+      this.destroy(error);
+      return;
+    }
-function finishStreamDestroy() {
-  const id = this[kID];
-  this[kSession][kState].streams.delete(id);
-  this[kSession] = undefined;
-  const handle = this[kHandle];
-  if (handle !== undefined) {
-    this[kHandle] = undefined;
-    handle.destroy();
+    // TODO(mcollina): remove usage of _*State properties
+    if (this._readableState.ended &&
+        this._writableState.ended &&
+        this._writableState.pendingcb === 0 &&
+        this.closed) {
+      this.destroy();
+      // This should return, but eslint complains.
+      // return
+    }
   }
-  this.emit('destroy');
 }
 
 function processHeaders(headers) {
@@ -1509,7 +1884,7 @@ function processHeaders(headers) {
   // This is intentionally stricter than the HTTP/1 implementation, which
   // allows values between 100 and 999 (inclusive) in order to allow for
   // backwards compatibility with non-spec compliant code. With HTTP/2,
-  // we have the opportunity to start fresh with stricter spec copmliance.
+  // we have the opportunity to start fresh with stricter spec compliance.
   // This will have an impact on the compatibility layer for anyone using
   // non-standard, non-compliant status codes.
   if (statusCode < 200 || statusCode > 599)
@@ -1519,36 +1894,48 @@ function processHeaders(headers) {
   return headers;
 }
 
-function processRespondWithFD(fd, headers, offset = 0, length = -1,
+function processRespondWithFD(self, fd, headers, offset = 0, length = -1,
                               streamOptions = 0) {
-  const state = this[kState];
-  state.headersSent = true;
+  const state = self[kState];
+  state.flags |= STREAM_FLAGS_HEADERS_SENT;
+
+  const headersList = mapToHeaders(headers, assertValidPseudoHeaderResponse);
+  self[kSentHeaders] = headers;
+  if (!Array.isArray(headersList)) {
+    self.destroy(headersList);
+    return;
+  }
+
   // Close the writable side of the stream
-  this.end();
+  self.end();
 
-  const ret = this[kHandle].respondFD(fd, headers,
+  const ret = self[kHandle].respondFD(fd, headersList,
                                       offset, length, streamOptions);
 
   if (ret < 0) {
-    const err = new NghttpError(ret);
-    process.nextTick(emit, this, 'error', err);
+    self.destroy(new NghttpError(ret));
     return;
   }
   // exact length of the file doesn't matter here, since the
-  // stream is closing anyway — just use 1 to signify that
+  // stream is closing anyway - just use 1 to signify that
   // a write does exist
-  trackWriteState(this, 1);
+  trackWriteState(self, 1);
 }
 
 function doSendFD(session, options, fd, headers, streamOptions, err, stat) {
-  if (this.destroyed || session.destroyed) {
-    abort(this);
+  if (err) {
+    this.destroy(err);
    return;
  }
-  if (err) {
-    process.nextTick(emit, this, 'error', err);
+
+  // This can happen if the stream is destroyed or closed while we are waiting
+  // for the file descriptor to be opened or the stat call to be completed.
+  // In either case, we do not want to continue because we are shutting
+  // down and should not attempt to send any data.
+  if (this.destroyed || this.closed) {
+    this.destroy(new errors.Error('ERR_HTTP2_INVALID_STREAM'));
     return;
   }
 
@@ -1557,47 +1944,47 @@ function doSendFD(session, options, fd, headers, streamOptions, err, stat) {
     length: options.length !== undefined ?
options.length : -1 }; - if (typeof options.statCheck === 'function' && - options.statCheck.call(this, stat, headers, statOptions) === false) { - return; - } - - const headersList = mapToHeaders(headers, - assertValidPseudoHeaderResponse); - if (!Array.isArray(headersList)) { - process.nextTick(emit, this, 'error', headersList); + // options.statCheck is a user-provided function that can be used to + // verify stat values, override or set headers, or even cancel the + // response operation. If statCheck explicitly returns false, the + // response is canceled. The user code may also send a separate type + // of response so check again for the HEADERS_SENT flag + if ((typeof options.statCheck === 'function' && + options.statCheck.call(this, stat, headers, statOptions) === false) || + (this[kState].flags & STREAM_FLAGS_HEADERS_SENT)) { return; } - processRespondWithFD.call(this, fd, headersList, - statOptions.offset, - statOptions.length, - streamOptions); + processRespondWithFD(this, fd, headers, + statOptions.offset | 0, + statOptions.length | 0, + streamOptions); } function doSendFileFD(session, options, fd, headers, streamOptions, err, stat) { - if (this.destroyed || session.destroyed) { - abort(this); - return; - } const onError = options.onError; if (err) { - if (onError) { + tryClose(fd); + if (onError) onError(err); - } else { + else this.destroy(err); - } return; } if (!stat.isFile()) { - err = new errors.Error('ERR_HTTP2_SEND_FILE'); - if (onError) { + const err = new errors.Error('ERR_HTTP2_SEND_FILE'); + if (onError) onError(err); - } else { + else this.destroy(err); - } + return; + } + + if (this.destroyed || this.closed) { + tryClose(fd); + this.destroy(new errors.Error('ERR_HTTP2_INVALID_STREAM')); return; } @@ -1606,9 +1993,14 @@ function doSendFileFD(session, options, fd, headers, streamOptions, err, stat) { length: options.length !== undefined ? options.length : -1 }; - // Set the content-length by default - if (typeof options.statCheck === 'function' && - options.statCheck.call(this, stat, headers) === false) { + // options.statCheck is a user-provided function that can be used to + // verify stat values, override or set headers, or even cancel the + // response operation. If statCheck explicitly returns false, the + // response is canceled. 
The user code may also send a separate type + // of response so check again for the HEADERS_SENT flag + if ((typeof options.statCheck === 'function' && + options.statCheck.call(this, stat, headers) === false) || + (this[kState].flags & STREAM_FLAGS_HEADERS_SENT)) { return; } @@ -1617,35 +2009,27 @@ function doSendFileFD(session, options, fd, headers, streamOptions, err, stat) { Math.min(stat.size - (+statOptions.offset), statOptions.length); - if (headers[HTTP2_HEADER_CONTENT_LENGTH] === undefined) - headers[HTTP2_HEADER_CONTENT_LENGTH] = statOptions.length; - - const headersList = mapToHeaders(headers, - assertValidPseudoHeaderResponse); - if (!Array.isArray(headersList)) { - process.nextTick(emit, this, 'error', headersList); - return; - } + headers[HTTP2_HEADER_CONTENT_LENGTH] = statOptions.length; - processRespondWithFD.call(this, fd, headersList, - options.offset, - options.length, - streamOptions); + processRespondWithFD(this, fd, headers, + options.offset | 0, + statOptions.length | 0, + streamOptions); } function afterOpen(session, options, headers, streamOptions, err, fd) { const state = this[kState]; const onError = options.onError; - if (this.destroyed || session.destroyed) { - abort(this); - return; - } if (err) { - if (onError) { + if (onError) onError(err); - } else { + else this.destroy(err); - } + return; + } + if (this.destroyed || this.closed) { + tryClose(fd); + abort(this); return; } state.fd = fd; @@ -1658,8 +2042,6 @@ function afterOpen(session, options, headers, streamOptions, err, fd) { function streamOnError(err) { // we swallow the error for parity with HTTP1 // all the errors that ends here are not critical for the project - debug(`Http2Stream ${this[kID]} [Http2Session ` + - `${this[kSession][kType]}: error`, err); } @@ -1672,25 +2054,22 @@ class ServerHttp2Stream extends Http2Stream { this.on('error', streamOnError); } - // true if the HEADERS frame has been sent - get headersSent() { - return this[kState].headersSent; - } - // true if the remote peer accepts push streams get pushAllowed() { - return this[kSession].remoteSettings.enablePush; + return !this.destroyed && + !this.closed && + !this.session.closed && + !this.session.destroyed && + this[kSession].remoteSettings.enablePush; } // create a push stream, call the given callback with the created // Http2Stream for the push stream. 
pushStream(headers, options, callback) { - if (this.destroyed) - throw new errors.Error('ERR_HTTP2_INVALID_STREAM'); + if (!this.pushAllowed) + throw new errors.Error('ERR_HTTP2_PUSH_DISABLED'); const session = this[kSession]; - if (!session.remoteSettings.enablePush) - throw new errors.Error('ERR_HTTP2_PUSH_DISABLED'); debug(`Http2Stream ${this[kID]} [Http2Session ` + `${sessionName(session[kType])}]: initiating push stream`); @@ -1740,44 +2119,46 @@ class ServerHttp2Stream extends Http2Stream { err = new errors.Error('ERR_HTTP2_OUT_OF_STREAMS'); break; case NGHTTP2_ERR_STREAM_CLOSED: - err = new errors.Error('ERR_HTTP2_STREAM_CLOSED'); + err = new errors.Error('ERR_HTTP2_INVALID_STREAM'); break; default: err = new NghttpError(ret); break; } - process.nextTick(emit, this, 'error', err); + process.nextTick(callback, err); return; } const id = ret.id(); const stream = new ServerHttp2Stream(session, ret, id, options, headers); - session[kState].streams.set(id, stream); + stream[kSentHeaders] = headers; if (options.endStream) stream.end(); if (headRequest) - stream[kState].headRequest = true; + stream[kState].flags |= STREAM_FLAGS_HEAD_REQUEST; - process.nextTick(callback, stream, headers, 0); + process.nextTick(callback, null, stream, headers, 0); } // Initiate a response on this Http2Stream respond(headers, options) { - const session = this[kSession]; - if (this.destroyed) + if (this.destroyed || this.closed) throw new errors.Error('ERR_HTTP2_INVALID_STREAM'); - debug(`Http2Stream ${this[kID]} [Http2Session ` + - `${sessionName(session[kType])}]: initiating response`); - this[kUpdateTimer](); - const state = this[kState]; - - if (state.headersSent) + if (this.headersSent) throw new errors.Error('ERR_HTTP2_HEADERS_SENT'); + const state = this[kState]; + assertIsObject(options, 'options'); options = Object.assign({}, options); + + const session = this[kSession]; + debug(`Http2Stream ${this[kID]} [Http2Session ` + + `${sessionName(session[kType])}]: initiating response`); + this[kUpdateTimer](); + options.endStream = !!options.endStream; let streamOptions = 0; @@ -1803,25 +2184,24 @@ class ServerHttp2Stream extends Http2Stream { if (statusCode === HTTP_STATUS_NO_CONTENT || statusCode === HTTP_STATUS_RESET_CONTENT || statusCode === HTTP_STATUS_NOT_MODIFIED || - state.headRequest === true) { + this.headRequest === true) { options.endStream = true; } const headersList = mapToHeaders(headers, assertValidPseudoHeaderResponse); if (!Array.isArray(headersList)) throw headersList; + this[kSentHeaders] = headers; - state.headersSent = true; + state.flags |= STREAM_FLAGS_HEADERS_SENT; // Close the writable side if the endStream option is set if (options.endStream) this.end(); const ret = this[kHandle].respond(headersList, streamOptions); - if (ret < 0) { - const err = new NghttpError(ret); - process.nextTick(emit, this, 'error', err); - } + if (ret < 0) + this.destroy(new NghttpError(ret)); } // Initiate a response using an open FD. Note that there are fewer @@ -1831,19 +2211,15 @@ class ServerHttp2Stream extends Http2Stream { // mechanism is not able to read from the fd, then the stream will be // reset with an error code. 
respondWithFD(fd, headers, options) { - const session = this[kSession]; - if (this.destroyed) + if (this.destroyed || this.closed) throw new errors.Error('ERR_HTTP2_INVALID_STREAM'); - debug(`Http2Stream ${this[kID]} [Http2Session ` + - `${sessionName(session[kType])}]: initiating response`); - this[kUpdateTimer](); - const state = this[kState]; - - if (state.headersSent) + if (this.headersSent) throw new errors.Error('ERR_HTTP2_HEADERS_SENT'); + const session = this[kSession]; + assertIsObject(options, 'options'); - options = Object.assign(Object.create(null), options); + options = Object.assign({}, options); if (options.offset !== undefined && typeof options.offset !== 'number') throw new errors.TypeError('ERR_INVALID_OPT_VALUE', @@ -1870,13 +2246,17 @@ class ServerHttp2Stream extends Http2Stream { options.getTrailers); } streamOptions |= STREAM_OPTION_GET_TRAILERS; - state.getTrailers = options.getTrailers; + this[kState].getTrailers = options.getTrailers; } if (typeof fd !== 'number') throw new errors.TypeError('ERR_INVALID_ARG_TYPE', 'fd', 'number'); + debug(`Http2Stream ${this[kID]} [Http2Session ` + + `${sessionName(session[kType])}]: initiating response`); + this[kUpdateTimer](); + headers = processHeaders(headers); const statusCode = headers[HTTP2_HEADER_STATUS] |= 0; // Payload/DATA frames are not permitted in these cases @@ -1893,17 +2273,10 @@ class ServerHttp2Stream extends Http2Stream { return; } - const headersList = mapToHeaders(headers, - assertValidPseudoHeaderResponse); - if (!Array.isArray(headersList)) { - process.nextTick(emit, this, 'error', headersList); - return; - } - - processRespondWithFD.call(this, fd, headersList, - options.offset, - options.length, - streamOptions); + processRespondWithFD(this, fd, headers, + options.offset, + options.length, + streamOptions); } // Initiate a file response on this Http2Stream. The path is passed to @@ -1914,19 +2287,13 @@ class ServerHttp2Stream extends Http2Stream { // headers. If statCheck returns false, the operation is aborted and no // file details are sent. respondWithFile(path, headers, options) { - const session = this[kSession]; - if (this.destroyed) + if (this.destroyed || this.closed) throw new errors.Error('ERR_HTTP2_INVALID_STREAM'); - debug(`Http2Stream ${this[kID]} [Http2Session ` + - `${sessionName(session[kType])}]: initiating response`); - this[kUpdateTimer](); - const state = this[kState]; - - if (state.headersSent) + if (this.headersSent) throw new errors.Error('ERR_HTTP2_HEADERS_SENT'); assertIsObject(options, 'options'); - options = Object.assign(Object.create(null), options); + options = Object.assign({}, options); if (options.offset !== undefined && typeof options.offset !== 'number') throw new errors.TypeError('ERR_INVALID_OPT_VALUE', @@ -1953,9 +2320,15 @@ class ServerHttp2Stream extends Http2Stream { options.getTrailers); } streamOptions |= STREAM_OPTION_GET_TRAILERS; - state.getTrailers = options.getTrailers; + this[kState].getTrailers = options.getTrailers; } + const session = this[kSession]; + debug(`Http2Stream ${this[kID]} [Http2Session ` + + `${sessionName(session[kType])}]: initiating response`); + this[kUpdateTimer](); + + headers = processHeaders(headers); const statusCode = headers[HTTP2_HEADER_STATUS] |= 0; // Payload/DATA frames are not permitted in these cases @@ -1977,18 +2350,18 @@ class ServerHttp2Stream extends Http2Stream { // a 1xx informational code and it MUST be sent before the request/response // headers are sent, or an error will be thrown. 
additionalHeaders(headers) { - if (this.destroyed) + if (this.destroyed || this.closed) throw new errors.Error('ERR_HTTP2_INVALID_STREAM'); - - if (this[kState].headersSent) + if (this.headersSent) throw new errors.Error('ERR_HTTP2_HEADERS_AFTER_RESPOND'); + assertIsObject(headers, 'headers'); + headers = Object.assign(Object.create(null), headers); + const session = this[kSession]; debug(`Http2Stream ${this[kID]} [Http2Session ` + - `${sessionName(session[kType])}]: sending additional headers`); + `${sessionName(session[kType])}]: sending additional headers`); - assertIsObject(headers, 'headers'); - headers = Object.assign(Object.create(null), headers); if (headers[HTTP2_HEADER_STATUS] != null) { const statusCode = headers[HTTP2_HEADER_STATUS] |= 0; if (statusCode === HTTP_STATUS_SWITCHING_PROTOCOLS) @@ -2001,17 +2374,17 @@ class ServerHttp2Stream extends Http2Stream { this[kUpdateTimer](); - const headersList = mapToHeaders(headers, - assertValidPseudoHeaderResponse); - if (!Array.isArray(headersList)) { + const headersList = mapToHeaders(headers, assertValidPseudoHeaderResponse); + if (!Array.isArray(headersList)) throw headersList; - } + if (!this[kInfoHeaders]) + this[kInfoHeaders] = [headers]; + else + this[kInfoHeaders].push(headers); const ret = this[kHandle].info(headersList); - if (ret < 0) { - const err = new NghttpError(ret); - process.nextTick(emit, this, 'error', err); - } + if (ret < 0) + this.destroy(new NghttpError(ret)); } } @@ -2020,7 +2393,7 @@ ServerHttp2Stream.prototype[kProceed] = ServerHttp2Stream.prototype.respond; class ClientHttp2Stream extends Http2Stream { constructor(session, handle, id, options) { super(session, options); - this[kState].headersSent = true; + this[kState].flags |= STREAM_FLAGS_HEADERS_SENT; if (id !== undefined) this[kInit](id, handle); this.on('headers', handleHeaderContinue); @@ -2028,9 +2401,8 @@ class ClientHttp2Stream extends Http2Stream { } function handleHeaderContinue(headers) { - if (headers[HTTP2_HEADER_STATUS] === HTTP_STATUS_CONTINUE) { + if (headers[HTTP2_HEADER_STATUS] === HTTP_STATUS_CONTINUE) this.emit('continue'); - } } const setTimeout = { @@ -2038,6 +2410,8 @@ const setTimeout = { enumerable: true, writable: true, value: function(msecs, callback) { + if (this.destroyed) + return; if (typeof msecs !== 'number') { throw new errors.TypeError('ERR_INVALID_ARG_TYPE', 'msecs', @@ -2065,91 +2439,44 @@ const setTimeout = { Object.defineProperty(Http2Stream.prototype, 'setTimeout', setTimeout); Object.defineProperty(Http2Session.prototype, 'setTimeout', setTimeout); -// -------------------------------------------------------------------- - -// Set as a replacement for socket.prototype.destroy upon the -// establishment of a new connection. -function socketDestroy(error) { - const session = this[kSession]; - const type = session[kType]; - debug(`Http2Session ${sessionName(type)}: socket destroy called`); - delete this[kServer]; - // destroy the session first so that it will stop trying to - // send data while we close the socket. 
-  session.destroy();
-  this.destroy = this[kDestroySocket];
-  this.destroy(error);
-}
-
-// When an Http2Session emits an error, first try to forward it to the
-// server as a sessionError; failing that, forward it to the socket as
-// a sessionError; failing that, destroy, remove the error listener, and
-// re-emit the error event
-function sessionOnError(error) {
-  debug(`Http2Session ${sessionName(this[kType])}: session error: ` +
-        `${error.message}`);
-  if (this[kServer] !== undefined && this[kServer].emit('sessionError', error))
-    return;
-  if (this[kSocket] !== undefined && this[kSocket].emit('sessionError', error))
-    return;
-  this.destroy();
-  this.removeListener('error', sessionOnError);
-  this.emit('error', error);
-}
 
-// When a Socket emits an error, forward it to the session as a
-// socketError; failing that, remove the listener and call destroy
+// When the socket emits an error, destroy the associated Http2Session and
+// forward it the same error.
 function socketOnError(error) {
   const session = this[kSession];
-  const type = session && session[kType];
-  debug(`Http2Session ${sessionName(type)}: socket error: ${error.message}`);
-  if (kRenegTest.test(error.message))
-    return this.destroy();
-  if (session !== undefined &&
-      session.emit('socketError', error, this))
-    return;
-  this.removeListener('error', socketOnError);
-  this.destroy(error);
+  if (session !== undefined) {
+    debug(`Http2Session ${sessionName(session[kType])}: socket error [` +
+          `${error.message}]`);
+    session.destroy(error);
+  }
 }
 
 // Handles the on('stream') event for a session and forwards
 // it on to the server object.
 function sessionOnStream(stream, headers, flags, rawHeaders) {
-  this[kServer].emit('stream', stream, headers, flags, rawHeaders);
+  if (this[kServer] !== undefined)
+    this[kServer].emit('stream', stream, headers, flags, rawHeaders);
 }
 
 function sessionOnPriority(stream, parent, weight, exclusive) {
-  debug(`Http2Session ${sessionName(this[kType])}: priority change received`);
-  this[kServer].emit('priority', stream, parent, weight, exclusive);
+  if (this[kServer] !== undefined)
+    this[kServer].emit('priority', stream, parent, weight, exclusive);
 }
 
-function sessionOnSocketError(error, socket) {
-  if (this.listenerCount('socketError') <= 1 && this[kServer] !== undefined)
-    this[kServer].emit('socketError', error, socket, this);
+function sessionOnError(error) {
+  if (this[kServer])
+    this[kServer].emit('sessionError', error, this);
 }
 
-// When the session times out on the server, attempt a graceful shutdown
+// When the session times out on the server, try emitting a timeout event.
+// If no handler is registered, destroy the session.
 function sessionOnTimeout() {
-  process.nextTick(() => {
-    const state = this[kState];
-    // if destroyed or destryoing, do nothing
-    if (state.destroyed || state.destroying)
-      return;
-    const server = this[kServer];
-    const socket = this[kSocket];
-    // If server or socket are undefined, then we're already in the process of
-    // shutting down, do nothing.
-    if (server === undefined || socket === undefined)
-      return;
-    if (!server.emit('timeout', this)) {
-      this.shutdown(
-        {
-          graceful: true,
-          errorCode: NGHTTP2_NO_ERROR
-        },
-        socket.destroy.bind(socket));
-    }
-  });
+  // if destroyed or closed already, do nothing
+  if (this.destroyed || this.closed)
+    return;
+  const server = this[kServer];
+  if (!server.emit('timeout', this))
+    this.destroy();  // No error code, just shut things down.
} function connectionListener(socket) { @@ -2161,10 +2488,18 @@ function connectionListener(socket) { if (options.allowHTTP1 === true) return httpConnectionListener.call(this, socket); // Let event handler deal with the socket - if (this.emit('unknownProtocol', socket)) - return; - // Reject non-HTTP/2 client - return socket.destroy(); + debug(`Unknown protocol from ${socket.remoteAddress}:${socket.remotePort}`); + if (!this.emit('unknownProtocol', socket)) { + // We don't know what to do, so let's just tell the other side what's + // going on in a format that they *might* understand. + socket.end('HTTP/1.0 403 Forbidden\r\n' + + 'Content-Type: text/plain\r\n\r\n' + + 'Unknown ALPN Protocol, expected `h2` to be available.\n' + + 'If this is a HTTP request: The server was not ' + + 'configured with the `allowHTTP1` option or a ' + + 'listener for the `unknownProtocol` event.\n'); + } + return; } socket.on('error', socketOnError); @@ -2173,27 +2508,24 @@ function connectionListener(socket) { // Set up the Session const session = new ServerHttp2Session(options, socket, this); - session.on('error', sessionOnError); session.on('stream', sessionOnStream); session.on('priority', sessionOnPriority); - session.on('socketError', sessionOnSocketError); + session.on('error', sessionOnError); - if (this.timeout) { - session.setTimeout(this.timeout); - session.on('timeout', sessionOnTimeout); - } + if (this.timeout) + session.setTimeout(this.timeout, sessionOnTimeout); socket[kServer] = this; - process.nextTick(emit, this, 'session', session); + this.emit('session', session); } function initializeOptions(options) { assertIsObject(options, 'options'); - options = Object.assign(Object.create(null), options); + options = Object.assign({}, options); options.allowHalfOpen = true; assertIsObject(options.settings, 'options.settings'); - options.settings = Object.assign(Object.create(null), options.settings); + options.settings = Object.assign({}, options.settings); return options; } @@ -2207,9 +2539,9 @@ function initializeTLSOptions(options, servername) { return options; } -function onErrorSecureServerSession(err, conn) { - if (!this.emit('clientError', err, conn)) - conn.destroy(err); +function onErrorSecureServerSession(err, socket) { + if (!this.emit('clientError', err, socket)) + socket.destroy(err); } class Http2SecureServer extends TLSServer { @@ -2265,25 +2597,18 @@ function setupCompat(ev) { function socketOnClose() { const session = this[kSession]; - if (session !== undefined && !session.destroyed) { - // Skip unconsume because the socket is destroyed. - session[kState].skipUnconsume = true; - session.destroy(); + if (session !== undefined) { + debug(`Http2Session ${sessionName(session[kType])}: socket closed`); + const err = session.connecting ? 
+ new errors.Error('ERR_SOCKET_CLOSED') : null; + const state = session[kState]; + state.streams.forEach((stream) => stream.close(NGHTTP2_CANCEL)); + state.pendingStreams.forEach((stream) => stream.close(NGHTTP2_CANCEL)); + session.close(); + session[kMaybeDestroy](err); } } -// If the session emits an error, forward it to the socket as a sessionError; -// failing that, destroy the session, remove the listener and re-emit the error -function clientSessionOnError(error) { - debug(`Http2Session ${sessionName(this[kType])}]: session error: ` + - `${error.message}`); - if (this[kSocket] !== undefined && this[kSocket].emit('sessionError', error)) - return; - this.destroy(); - this.removeListener('error', socketOnError); - this.removeListener('error', clientSessionOnError); -} - function connect(authority, options, listener) { if (typeof options === 'function') { listener = options; @@ -2291,7 +2616,7 @@ function connect(authority, options, listener) { } assertIsObject(options, 'options'); - options = Object.assign(Object.create(null), options); + options = Object.assign({}, options); if (typeof authority === 'string') authority = new URL(authority); @@ -2324,8 +2649,6 @@ function connect(authority, options, listener) { const session = new ClientHttp2Session(options, socket); - session.on('error', clientSessionOnError); - session[kAuthority] = `${options.servername || host}:${port}`; session[kProtocol] = protocol; @@ -2346,19 +2669,16 @@ Object.defineProperty(connect, promisify.custom, { }); function createSecureServer(options, handler) { - if (options == null || typeof options !== 'object') { - throw new errors.TypeError('ERR_INVALID_ARG_TYPE', - 'options', - 'object'); - } + assertIsObject(options, 'options'); return new Http2SecureServer(options, handler); } function createServer(options, handler) { if (typeof options === 'function') { handler = options; - options = Object.create(null); + options = {}; } + assertIsObject(options, 'options'); return new Http2Server(options, handler); } diff --git a/lib/internal/http2/util.js b/lib/internal/http2/util.js index 1800dc5cff33ff..ef48b83d783af0 100644 --- a/lib/internal/http2/util.js +++ b/lib/internal/http2/util.js @@ -174,7 +174,9 @@ const IDX_OPTIONS_PEER_MAX_CONCURRENT_STREAMS = 3; const IDX_OPTIONS_PADDING_STRATEGY = 4; const IDX_OPTIONS_MAX_HEADER_LIST_PAIRS = 5; const IDX_OPTIONS_MAX_OUTSTANDING_PINGS = 6; -const IDX_OPTIONS_FLAGS = 7; +const IDX_OPTIONS_MAX_OUTSTANDING_SETTINGS = 7; +const IDX_OPTIONS_MAX_SESSION_MEMORY = 8; +const IDX_OPTIONS_FLAGS = 9; function updateOptionsBuffer(options) { var flags = 0; @@ -213,6 +215,16 @@ function updateOptionsBuffer(options) { optionsBuffer[IDX_OPTIONS_MAX_OUTSTANDING_PINGS] = options.maxOutstandingPings; } + if (typeof options.maxOutstandingSettings === 'number') { + flags |= (1 << IDX_OPTIONS_MAX_OUTSTANDING_SETTINGS); + optionsBuffer[IDX_OPTIONS_MAX_OUTSTANDING_SETTINGS] = + Math.max(1, options.maxOutstandingSettings); + } + if (typeof options.maxSessionMemory === 'number') { + flags |= (1 << IDX_OPTIONS_MAX_SESSION_MEMORY); + optionsBuffer[IDX_OPTIONS_MAX_SESSION_MEMORY] = + Math.max(1, options.maxSessionMemory); + } optionsBuffer[IDX_OPTIONS_FLAGS] = flags; } diff --git a/lib/internal/loader/Loader.js b/lib/internal/loader/Loader.js index f2c7fa0cfffc47..49c8699771e819 100644 --- a/lib/internal/loader/Loader.js +++ b/lib/internal/loader/Loader.js @@ -10,7 +10,8 @@ const ModuleRequest = require('internal/loader/ModuleRequest'); const errors = require('internal/errors'); const debug = 
require('util').debuglog('esm'); -function getBase() { +// Returns a file URL for the current working directory. +function getURLStringForCwd() { try { return getURLFromFilePath(`${process.cwd()}/`).href; } catch (e) { @@ -23,22 +24,44 @@ function getBase() { } } +/* A Loader instance is used as the main entry point for loading ES modules. + * Currently, this is a singleton -- there is only one used for loading + * the main module and everything in its dependency graph. */ class Loader { - constructor(base = getBase()) { - this.moduleMap = new ModuleMap(); + constructor(base = getURLStringForCwd()) { if (typeof base !== 'string') { throw new errors.TypeError('ERR_INVALID_ARG_TYPE', 'base', 'string'); } + + this.moduleMap = new ModuleMap(); this.base = base; - this.resolver = ModuleRequest.resolve.bind(null); + // The resolver has the signature + // (specifier : string, parentURL : string, defaultResolve) + // -> Promise<{ url : string, + // format: anything in Loader.validFormats }> + // where defaultResolve is ModuleRequest.resolve (having the same + // signature itself). + // If `.format` on the returned value is 'dynamic', .dynamicInstantiate + // will be used as described below. + this.resolver = ModuleRequest.resolve; + // This hook is only called when resolve(...).format is 'dynamic' and has + // the signature + // (url : string) -> Promise<{ exports: { ... }, execute: function }> + // Where `exports` is an object whose property names define the exported + // names of the generated module. `execute` is a function that receives + // an object with the same keys as `exports`, whose values are get/set + // functions for the actual exported values. this.dynamicInstantiate = undefined; } hook({ resolve = ModuleRequest.resolve, dynamicInstantiate }) { + // Use .bind() to avoid giving access to the Loader instance when it is + // called as this.resolver(...); this.resolver = resolve.bind(null); this.dynamicInstantiate = dynamicInstantiate; } + // Typechecking wrapper around .resolver(). async resolve(specifier, parentURL = this.base) { if (typeof parentURL !== 'string') { throw new errors.TypeError('ERR_INVALID_ARG_TYPE', @@ -48,10 +71,11 @@ class Loader { const { url, format } = await this.resolver(specifier, parentURL, ModuleRequest.resolve); - if (typeof format !== 'string') { + if (!Loader.validFormats.includes(format)) { throw new errors.TypeError('ERR_INVALID_ARG_TYPE', 'format', - ['esm', 'cjs', 'builtin', 'addon', 'json']); + Loader.validFormats); } + if (typeof url !== 'string') { throw new errors.TypeError('ERR_INVALID_ARG_TYPE', 'url', 'string'); } @@ -72,14 +96,20 @@ class Loader { return { url, format }; } + // May create a new ModuleJob instance if one did not already exist. 
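The comments added to the Loader constructor spell out the resolver hook contract: `(specifier, parentURL, defaultResolve)` resolving to `{ url, format }`, with `format` now validated against `Loader.validFormats`. A hedged sketch of a user-supplied loader satisfying that contract (the file name and the `virtual:` prefix are hypothetical), usable with the experimental `--experimental-modules --loader ./my-loader.mjs` flags:

```js
// my-loader.mjs (hypothetical)
export async function resolve(specifier, parentURL, defaultResolve) {
  // Anything we do not recognize goes through the default resolver untouched.
  if (!specifier.startsWith('virtual:')) {
    return defaultResolve(specifier, parentURL);
  }
  // Returning format 'dynamic' defers instantiation to the
  // dynamicInstantiate hook shown further below.
  return { url: specifier, format: 'dynamic' };
}
```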
async getModuleJob(specifier, parentURL = this.base) { const { url, format } = await this.resolve(specifier, parentURL); let job = this.moduleMap.get(url); if (job === undefined) { let loaderInstance; if (format === 'dynamic') { + const { dynamicInstantiate } = this; + if (typeof dynamicInstantiate !== 'function') { + throw new errors.Error('ERR_MISSING_DYNAMIC_INSTANTIATE_HOOK'); + } + loaderInstance = async (url) => { - const { exports, execute } = await this.dynamicInstantiate(url); + const { exports, execute } = await dynamicInstantiate(url); return createDynamicModule(exports, url, (reflect) => { debug(`Loading custom loader ${url}`); execute(reflect.exports); @@ -100,5 +130,6 @@ class Loader { return module.namespace(); } } +Loader.validFormats = ['esm', 'cjs', 'builtin', 'addon', 'json', 'dynamic']; Object.setPrototypeOf(Loader.prototype, null); module.exports = Loader; diff --git a/lib/internal/loader/ModuleJob.js b/lib/internal/loader/ModuleJob.js index 04d6111b87f1f6..77c89f6230e5de 100644 --- a/lib/internal/loader/ModuleJob.js +++ b/lib/internal/loader/ModuleJob.js @@ -1,27 +1,36 @@ 'use strict'; +const { ModuleWrap } = + require('internal/process').internalBinding('module_wrap'); const { SafeSet, SafePromise } = require('internal/safe_globals'); +const assert = require('assert'); const resolvedPromise = SafePromise.resolve(); +const enableDebug = (process.env.NODE_DEBUG || '').match(/\besm\b/) || + process.features.debug; + +/* A ModuleJob tracks the loading of a single Module, and the ModuleJobs of + * its dependencies, over time. */ class ModuleJob { - /** - * @param {module: ModuleWrap?, compiled: Promise} moduleProvider - */ + // `loader` is the Loader instance used for loading dependencies. + // `moduleProvider` is a function constructor(loader, url, moduleProvider) { this.loader = loader; this.error = null; this.hadError = false; - // linked == promise for dependency jobs, with module populated, - // module wrapper linked - this.moduleProvider = moduleProvider; - this.modulePromise = this.moduleProvider(url); + // This is a Promise<{ module, reflect }>, whose fields will be copied + // onto `this` by `link()` below once it has been resolved. + this.modulePromise = moduleProvider(url); this.module = undefined; this.reflect = undefined; - const linked = async () => { + + // Wait for the ModuleWrap instance being linked with all dependencies. + const link = async () => { const dependencyJobs = []; ({ module: this.module, reflect: this.reflect } = await this.modulePromise); + assert(this.module instanceof ModuleWrap); this.module.link(async (dependencySpecifier) => { const dependencyJobPromise = this.loader.getModuleJob(dependencySpecifier, url); @@ -29,63 +38,57 @@ class ModuleJob { const dependencyJob = await dependencyJobPromise; return (await dependencyJob.modulePromise).module; }); + if (enableDebug) { + // Make sure all dependencies are entered into the list synchronously. + Object.freeze(dependencyJobs); + } return SafePromise.all(dependencyJobs); }; - this.linked = linked(); + // Promise for the list of all dependencyJobs. 
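`getModuleJob()` now fails with `ERR_MISSING_DYNAMIC_INSTANTIATE_HOOK` when a resolver returns `format: 'dynamic'` but no hook was registered. Continuing the hypothetical loader sketch above, a `dynamicInstantiate` implementation matching the shape documented in the Loader constructor (`exports` names the exported bindings, `execute` receives the per-name get/set pairs built by `createDynamicModule()`):

```js
// my-loader.mjs (continued, hypothetical)
export async function dynamicInstantiate(url) {
  return {
    exports: ['answer'],
    execute(exports) {
      // `exports.answer` is a { get, set } pair for the generated binding.
      exports.answer.set(42);
    }
  };
}
```

A consumer module could then write `import { answer } from 'virtual:answer';` and read the value assigned through the reflective setter.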
+ this.linked = link(); // instantiated == deep dependency jobs wrappers instantiated, // module wrapper instantiated this.instantiated = undefined; } - instantiate() { + async instantiate() { if (this.instantiated) { return this.instantiated; } - return this.instantiated = new Promise(async (resolve, reject) => { - const jobsInGraph = new SafeSet(); - let jobsReadyToInstantiate = 0; - // (this must be sync for counter to work) - const queueJob = (moduleJob) => { - if (jobsInGraph.has(moduleJob)) { - return; - } - jobsInGraph.add(moduleJob); - moduleJob.linked.then((dependencyJobs) => { - for (const dependencyJob of dependencyJobs) { - queueJob(dependencyJob); - } - checkComplete(); - }, (e) => { - if (!this.hadError) { - this.error = e; - this.hadError = true; - } - checkComplete(); - }); - }; - const checkComplete = () => { - if (++jobsReadyToInstantiate === jobsInGraph.size) { - // I believe we only throw once the whole tree is finished loading? - // or should the error bail early, leaving entire tree to still load? - if (this.hadError) { - reject(this.error); - } else { - try { - this.module.instantiate(); - for (const dependencyJob of jobsInGraph) { - dependencyJob.instantiated = resolvedPromise; - } - resolve(this.module); - } catch (e) { - e.stack; - reject(e); - } - } - } - }; - queueJob(this); - }); + return this.instantiated = this._instantiate(); + } + + // This method instantiates the module associated with this job and its + // entire dependency graph, i.e. creates all the module namespaces and the + // exported/imported variables. + async _instantiate() { + const jobsInGraph = new SafeSet(); + + const addJobsToDependencyGraph = async (moduleJob) => { + if (jobsInGraph.has(moduleJob)) { + return; + } + jobsInGraph.add(moduleJob); + const dependencyJobs = await moduleJob.linked; + return Promise.all(dependencyJobs.map(addJobsToDependencyGraph)); + }; + try { + await addJobsToDependencyGraph(this); + } catch (e) { + if (!this.hadError) { + this.error = e; + this.hadError = true; + } + throw e; + } + this.module.instantiate(); + for (const dependencyJob of jobsInGraph) { + // Calling `this.module.instantiate()` instantiates not only the + // ModuleWrap in this module, but all modules in the graph. + dependencyJob.instantiated = resolvedPromise; + } + return this.module; } async run() { diff --git a/lib/internal/loader/ModuleWrap.js b/lib/internal/loader/ModuleWrap.js index c97b4888ea22ce..0ee05ca81ffbb9 100644 --- a/lib/internal/loader/ModuleWrap.js +++ b/lib/internal/loader/ModuleWrap.js @@ -11,39 +11,49 @@ const createDynamicModule = (exports, url = '', evaluate) => { `creating ESM facade for ${url} with exports: ${ArrayJoin(exports, ', ')}` ); const names = ArrayMap(exports, (name) => `${name}`); - // sanitized ESM for reflection purposes - const src = `export let executor; - ${ArrayJoin(ArrayMap(names, (name) => `export let $${name}`), ';\n')} - ;(() => [ - fn => executor = fn, - { exports: { ${ - ArrayJoin(ArrayMap(names, (name) => `${name}: { - get: () => $${name}, - set: v => $${name} = v - }`), ',\n') -} } } - ]); - `; + // Create two modules: One whose exports are get- and set-able ('reflective'), + // and one which re-exports all of these but additionally may + // run an executor function once everything is set up. 
+ const src = ` + export let executor; + ${ArrayJoin(ArrayMap(names, (name) => `export let $${name};`), '\n')} + /* This function is implicitly returned as the module's completion value */ + (() => ({ + setExecutor: fn => executor = fn, + reflect: { + exports: { ${ + ArrayJoin(ArrayMap(names, (name) => ` + ${name}: { + get: () => $${name}, + set: v => $${name} = v + }`), ', \n')} + } + } + }));`; const reflectiveModule = new ModuleWrap(src, `cjs-facade:${url}`); reflectiveModule.instantiate(); - const [setExecutor, reflect] = reflectiveModule.evaluate()(); + const { setExecutor, reflect } = reflectiveModule.evaluate()(); // public exposed ESM - const reexports = `import { executor, + const reexports = ` + import { + executor, ${ArrayMap(names, (name) => `$${name}`)} } from ""; export { ${ArrayJoin(ArrayMap(names, (name) => `$${name} as ${name}`), ', ')} } - // add await to this later if top level await comes along - typeof executor === "function" ? executor() : void 0;`; + if (typeof executor === "function") { + // add await to this later if top level await comes along + executor() + }`; if (typeof evaluate === 'function') { setExecutor(() => evaluate(reflect)); } - const runner = new ModuleWrap(reexports, `${url}`); - runner.link(async () => reflectiveModule); - runner.instantiate(); + const module = new ModuleWrap(reexports, `${url}`); + module.link(async () => reflectiveModule); + module.instantiate(); return { - module: runner, + module, reflect }; }; diff --git a/lib/internal/readline.js b/lib/internal/readline.js index b15ed4972ef7f2..e3d3007a75c645 100644 --- a/lib/internal/readline.js +++ b/lib/internal/readline.js @@ -9,8 +9,8 @@ const ansi = const kEscape = '\x1b'; -var getStringWidth; -var isFullWidthCodePoint; +let getStringWidth; +let isFullWidthCodePoint; function CSI(strings, ...args) { let ret = `${kEscape}[`; diff --git a/lib/internal/streams/BufferList.js b/lib/internal/streams/BufferList.js index 23d5a8a2db0eb7..b2daf82e74190b 100644 --- a/lib/internal/streams/BufferList.js +++ b/lib/internal/streams/BufferList.js @@ -61,8 +61,6 @@ module.exports = class BufferList { concat(n) { if (this.length === 0) return Buffer.alloc(0); - if (this.length === 1) - return this.head.data; const ret = Buffer.allocUnsafe(n >>> 0); var p = this.head; var i = 0; diff --git a/lib/internal/test/unicode.js b/lib/internal/test/unicode.js index 1445276d9ae891..7172a43ec20a8a 100644 --- a/lib/internal/test/unicode.js +++ b/lib/internal/test/unicode.js @@ -3,4 +3,6 @@ // This module exists entirely for regression testing purposes. // See `test/parallel/test-internal-unicode.js`. 
+/* eslint-disable non-ascii-character */ module.exports = '✓'; +/* eslint-enable non-ascii-character */ diff --git a/lib/net.js b/lib/net.js index 1781350f540dc8..53c91a640f5c1d 100644 --- a/lib/net.js +++ b/lib/net.js @@ -370,16 +370,6 @@ function writeAfterFIN(chunk, encoding, cb) { } } -Socket.prototype.read = function(n) { - if (n === 0) - return stream.Readable.prototype.read.call(this, n); - - this.read = stream.Readable.prototype.read; - this._consuming = true; - return this.read(n); -}; - - // FIXME(joyeecheung): this method is neither documented nor tested Socket.prototype.listen = function() { debug('socket.listen'); @@ -767,13 +757,7 @@ Socket.prototype._writeGeneric = function(writev, data, encoding, cb) { // Retain chunks if (err === 0) req._chunks = chunks; } else { - var enc; - if (data instanceof Buffer) { - enc = 'buffer'; - } else { - enc = encoding; - } - err = createWriteReq(req, this._handle, data, enc); + err = createWriteReq(req, this._handle, data, encoding); } if (err) @@ -1141,7 +1125,9 @@ Socket.prototype.ref = function() { return this; } - this._handle.ref(); + if (typeof this._handle.ref === 'function') { + this._handle.ref(); + } return this; }; @@ -1153,7 +1139,9 @@ Socket.prototype.unref = function() { return this; } - this._handle.unref(); + if (typeof this._handle.unref === 'function') { + this._handle.unref(); + } return this; }; diff --git a/lib/perf_hooks.js b/lib/perf_hooks.js index 4e7a0de7eb37be..15256a63c0b97c 100644 --- a/lib/perf_hooks.js +++ b/lib/perf_hooks.js @@ -18,6 +18,7 @@ const { NODE_PERFORMANCE_ENTRY_TYPE_MEASURE, NODE_PERFORMANCE_ENTRY_TYPE_GC, NODE_PERFORMANCE_ENTRY_TYPE_FUNCTION, + NODE_PERFORMANCE_ENTRY_TYPE_HTTP2, NODE_PERFORMANCE_MILESTONE_NODE_START, NODE_PERFORMANCE_MILESTONE_V8_START, @@ -61,9 +62,74 @@ const observerableTypes = [ 'mark', 'measure', 'gc', - 'function' + 'function', + 'http2' ]; +const IDX_STREAM_STATS_ID = 0; +const IDX_STREAM_STATS_TIMETOFIRSTBYTE = 1; +const IDX_STREAM_STATS_TIMETOFIRSTHEADER = 2; +const IDX_STREAM_STATS_TIMETOFIRSTBYTESENT = 3; +const IDX_STREAM_STATS_SENTBYTES = 4; +const IDX_STREAM_STATS_RECEIVEDBYTES = 5; + +const IDX_SESSION_STATS_TYPE = 0; +const IDX_SESSION_STATS_PINGRTT = 1; +const IDX_SESSION_STATS_FRAMESRECEIVED = 2; +const IDX_SESSION_STATS_FRAMESSENT = 3; +const IDX_SESSION_STATS_STREAMCOUNT = 4; +const IDX_SESSION_STATS_STREAMAVERAGEDURATION = 5; +const IDX_SESSION_STATS_DATA_SENT = 6; +const IDX_SESSION_STATS_DATA_RECEIVED = 7; +const IDX_SESSION_STATS_MAX_CONCURRENT_STREAMS = 8; + +let sessionStats; +let streamStats; + +function collectHttp2Stats(entry) { + switch (entry.name) { + case 'Http2Stream': + if (streamStats === undefined) + streamStats = process.binding('http2').streamStats; + entry.id = + streamStats[IDX_STREAM_STATS_ID] >>> 0; + entry.timeToFirstByte = + streamStats[IDX_STREAM_STATS_TIMETOFIRSTBYTE]; + entry.timeToFirstHeader = + streamStats[IDX_STREAM_STATS_TIMETOFIRSTHEADER]; + entry.timeToFirstByteSent = + streamStats[IDX_STREAM_STATS_TIMETOFIRSTBYTESENT]; + entry.bytesWritten = + streamStats[IDX_STREAM_STATS_SENTBYTES]; + entry.bytesRead = + streamStats[IDX_STREAM_STATS_RECEIVEDBYTES]; + break; + case 'Http2Session': + if (sessionStats === undefined) + sessionStats = process.binding('http2').sessionStats; + entry.type = + sessionStats[IDX_SESSION_STATS_TYPE] >>> 0 === 0 ? 
'server' : 'client'; + entry.pingRTT = + sessionStats[IDX_SESSION_STATS_PINGRTT]; + entry.framesReceived = + sessionStats[IDX_SESSION_STATS_FRAMESRECEIVED]; + entry.framesSent = + sessionStats[IDX_SESSION_STATS_FRAMESSENT]; + entry.streamCount = + sessionStats[IDX_SESSION_STATS_STREAMCOUNT]; + entry.streamAverageDuration = + sessionStats[IDX_SESSION_STATS_STREAMAVERAGEDURATION]; + entry.bytesWritten = + sessionStats[IDX_SESSION_STATS_DATA_SENT]; + entry.bytesRead = + sessionStats[IDX_SESSION_STATS_DATA_RECEIVED]; + entry.maxConcurrentStreams = + sessionStats[IDX_SESSION_STATS_MAX_CONCURRENT_STREAMS]; + break; + } +} + + let errors; function lazyErrors() { if (errors === undefined) @@ -405,6 +471,10 @@ class Performance extends PerformanceObserverEntryList { this[kClearEntry]('function', name); } + clearEntries(name) { + this[kClearEntry](name); + } + timerify(fn) { if (typeof fn !== 'function') { const errors = lazyErrors(); @@ -465,6 +535,10 @@ function doNotify() { // Set up the callback used to receive PerformanceObserver notifications function observersCallback(entry) { const type = mapTypes(entry.entryType); + + if (type === NODE_PERFORMANCE_ENTRY_TYPE_HTTP2) + collectHttp2Stats(entry); + performance[kInsertEntry](entry); const list = getObserversList(type); @@ -504,6 +578,7 @@ function mapTypes(i) { case 'measure': return NODE_PERFORMANCE_ENTRY_TYPE_MEASURE; case 'gc': return NODE_PERFORMANCE_ENTRY_TYPE_GC; case 'function': return NODE_PERFORMANCE_ENTRY_TYPE_FUNCTION; + case 'http2': return NODE_PERFORMANCE_ENTRY_TYPE_HTTP2; } } diff --git a/lib/readline.js b/lib/readline.js index fa9cc188e1483c..d749e2c8f23f0f 100644 --- a/lib/readline.js +++ b/lib/readline.js @@ -47,8 +47,6 @@ const { kClearScreenDown } = CSI; -const { now } = process.binding('timer_wrap').Timer; - const kHistorySize = 30; const kMincrlfDelay = 100; // \r\n, \n, or \r followed by something other than \n @@ -400,7 +398,7 @@ Interface.prototype._normalWrite = function(b) { } var string = this._decoder.write(b); if (this._sawReturnAt && - now() - this._sawReturnAt <= this.crlfDelay) { + Date.now() - this._sawReturnAt <= this.crlfDelay) { string = string.replace(/^\n/, ''); this._sawReturnAt = 0; } @@ -413,7 +411,7 @@ Interface.prototype._normalWrite = function(b) { this._line_buffer = null; } if (newPartContainsEnding) { - this._sawReturnAt = string.endsWith('\r') ? now() : 0; + this._sawReturnAt = string.endsWith('\r') ? Date.now() : 0; // got one or more newlines; process into "line" events var lines = string.split(lineEnding); @@ -751,7 +749,8 @@ Interface.prototype._ttyWrite = function(s, key) { key = key || {}; this._previousKey = key; - // Ignore escape key - Fixes #2876 + // Ignore escape key, fixes + // https://github.com/nodejs/node-v0.x-archive/issues/2876. if (key.name === 'escape') return; if (key.ctrl && key.shift) { @@ -907,14 +906,14 @@ Interface.prototype._ttyWrite = function(s, key) { switch (key.name) { case 'return': // carriage return, i.e. 
\r - this._sawReturnAt = now(); + this._sawReturnAt = Date.now(); this._line(); break; case 'enter': // When key interval > crlfDelay if (this._sawReturnAt === 0 || - now() - this._sawReturnAt > this.crlfDelay) { + Date.now() - this._sawReturnAt > this.crlfDelay) { this._line(); } this._sawReturnAt = 0; diff --git a/lib/stream.js b/lib/stream.js index edc5f231b83411..9a816600a05e5a 100644 --- a/lib/stream.js +++ b/lib/stream.js @@ -45,7 +45,7 @@ try { try { Stream._isUint8Array = process.binding('util').isUint8Array; } catch (e) { - // This throws for Node < 4.2.0 because there’s no util binding and + // This throws for Node < 4.2.0 because there's no util binding and // returns undefined for Node < 7.4.0. } } diff --git a/lib/string_decoder.js b/lib/string_decoder.js index 5e60bf1ad1c3d7..d22a85b050d8ab 100644 --- a/lib/string_decoder.js +++ b/lib/string_decoder.js @@ -209,8 +209,11 @@ function utf8Text(buf, i) { // character. function utf8End(buf) { const r = (buf && buf.length ? this.write(buf) : ''); - if (this.lastNeed) + if (this.lastNeed) { + this.lastNeed = 0; + this.lastTotal = 0; return r + '\ufffd'; + } return r; } @@ -245,6 +248,8 @@ function utf16End(buf) { const r = (buf && buf.length ? this.write(buf) : ''); if (this.lastNeed) { const end = this.lastTotal - this.lastNeed; + this.lastNeed = 0; + this.lastTotal = 0; return r + this.lastChar.toString('utf16le', 0, end); } return r; @@ -268,8 +273,12 @@ function base64Text(buf, i) { function base64End(buf) { const r = (buf && buf.length ? this.write(buf) : ''); - if (this.lastNeed) - return r + this.lastChar.toString('base64', 0, 3 - this.lastNeed); + if (this.lastNeed) { + const end = 3 - this.lastNeed; + this.lastNeed = 0; + this.lastTotal = 0; + return r + this.lastChar.toString('base64', 0, end); + } return r; } diff --git a/lib/timers.js b/lib/timers.js index 0e6ae45950c5c1..e6655c2f527349 100644 --- a/lib/timers.js +++ b/lib/timers.js @@ -89,6 +89,7 @@ const TIMEOUT_MAX = 2147483647; // 2^31-1 // TimerWrap C++ handle, which makes the call after the duration to process the // list it is attached to. // +/* eslint-disable non-ascii-character */ // // ╔════ > Object Map // ║ @@ -110,6 +111,7 @@ const TIMEOUT_MAX = 2147483647; // 2^31-1 // ║ // ╚════ > Linked List // +/* eslint-enable non-ascii-character */ // // With this, virtually constant-time insertion (append), removal, and timeout // is possible in the JavaScript layer. Any one list of timers is able to be @@ -215,6 +217,17 @@ function TimersList(msecs, unrefed) { this.nextTick = false; } +function deleteTimersList(list, msecs) { + // Either refedLists[msecs] or unrefedLists[msecs] may have been removed and + // recreated since the reference to `list` was created. Make sure they're + // the same instance of the list before destroying. + if (list._unrefed === true && list === unrefedLists[msecs]) { + delete unrefedLists[msecs]; + } else if (list === refedLists[msecs]) { + delete refedLists[msecs]; + } +} + function listOnTimeout() { var list = this._list; var msecs = list.msecs; @@ -286,14 +299,7 @@ function listOnTimeout() { debug('%d list empty', msecs); assert(L.isEmpty(list)); - // Either refedLists[msecs] or unrefedLists[msecs] may have been removed and - // recreated since the reference to `list` was created. Make sure they're - // the same instance of the list before destroying. 
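The perf_hooks changes a few hunks above add an `'http2'` entry type whose entries are decorated by `collectHttp2Stats()`. A short sketch of consuming them; the property names mirror the assignments above, and entries are expected to show up as sessions and streams wind down:

```js
const { PerformanceObserver } = require('perf_hooks');

const obs = new PerformanceObserver((list) => {
  for (const entry of list.getEntries()) {
    if (entry.name === 'Http2Session') {
      // Copied from sessionStats in collectHttp2Stats().
      console.log(entry.type, entry.streamCount, entry.pingRTT,
                  entry.bytesWritten, entry.bytesRead);
    } else if (entry.name === 'Http2Stream') {
      // Copied from streamStats in collectHttp2Stats().
      console.log(entry.id, entry.timeToFirstByte, entry.timeToFirstHeader);
    }
  }
});

obs.observe({ entryTypes: ['http2'] });
```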
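The string_decoder hunk above resets `lastNeed`/`lastTotal` once `end()` has flushed an incomplete character, so a decoder instance starts clean afterwards. A quick illustration of the observable effect:

```js
const { StringDecoder } = require('string_decoder');

const decoder = new StringDecoder('utf8');
// Feed only the first two bytes of the three-byte '€' (0xe2 0x82 0xac).
decoder.write(Buffer.from([0xe2, 0x82]));
console.log(decoder.end());  // prints the U+FFFD replacement character
// With the reset above, the stale partial character is not replayed:
console.log(decoder.end());  // prints an empty string
```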
- if (list._unrefed === true && list === unrefedLists[msecs]) { - delete unrefedLists[msecs]; - } else if (list === refedLists[msecs]) { - delete refedLists[msecs]; - } + deleteTimersList(list, msecs); // Do not close the underlying handle if its ownership has changed // (e.g it was unrefed in its callback). @@ -327,24 +333,34 @@ function tryOnTimeout(timer, list) { } } - if (!threw) return; + if (threw) { + const { msecs } = list; + + if (L.isEmpty(list)) { + deleteTimersList(list, msecs); - // Postpone all later list events to next tick. We need to do this - // so that the events are called in the order they were created. - const lists = list._unrefed === true ? unrefedLists : refedLists; - for (var key in lists) { - if (key > list.msecs) { - lists[key].nextTick = true; + if (!list._timer.owner) + list._timer.close(); + } else { + // Postpone all later list events to next tick. We need to do this + // so that the events are called in the order they were created. + const lists = list._unrefed === true ? unrefedLists : refedLists; + for (var key in lists) { + if (key > msecs) { + lists[key].nextTick = true; + } + } + + // We need to continue processing after domain error handling + // is complete, but not by using whatever domain was left over + // when the timeout threw its exception. + const domain = process.domain; + process.domain = null; + // If we threw, we need to process the rest of the list in nextTick. + process.nextTick(listOnTimeoutNT, list); + process.domain = domain; } } - // We need to continue processing after domain error handling - // is complete, but not by using whatever domain was left over - // when the timeout threw its exception. - const domain = process.domain; - process.domain = null; - // If we threw, we need to process the rest of the list in nextTick. - process.nextTick(listOnTimeoutNT, list); - process.domain = domain; } } diff --git a/lib/url.js b/lib/url.js index 2cc4488a3edfd9..6562be5d8302e6 100644 --- a/lib/url.js +++ b/lib/url.js @@ -880,7 +880,7 @@ Url.prototype.resolveObject = function resolveObject(relative) { // if the path is allowed to go above the root, restore leading ..s if (!mustEndAbs && !removeAllDots) { - for (; up--; up) { + while (up--) { srcPath.unshift('..'); } } diff --git a/lib/util.js b/lib/util.js index 0b1d25ba22a520..4c6db1a479e7b3 100644 --- a/lib/util.js +++ b/lib/util.js @@ -77,9 +77,7 @@ var Debug; /* eslint-disable */ const strEscapeSequencesRegExp = /[\x00-\x1f\x27\x5c]/; -const keyEscapeSequencesRegExp = /[\x00-\x1f\x27]/; const strEscapeSequencesReplacer = /[\x00-\x1f\x27\x5c]/g; -const keyEscapeSequencesReplacer = /[\x00-\x1f\x27]/g; /* eslint-enable */ const keyStrRegExp = /^[a-zA-Z_][a-zA-Z_0-9]*$/; const colorRegExp = /\u001b\[\d\d?m/g; @@ -133,34 +131,6 @@ function strEscape(str) { return `'${result}'`; } -// Escape control characters and single quotes. 
-// Note: for performance reasons this is not combined with strEscape -function keyEscape(str) { - if (str.length < 5000 && !keyEscapeSequencesRegExp.test(str)) - return `'${str}'`; - if (str.length > 100) - return `'${str.replace(keyEscapeSequencesReplacer, escapeFn)}'`; - var result = ''; - var last = 0; - for (var i = 0; i < str.length; i++) { - const point = str.charCodeAt(i); - if (point === 39 || point < 32) { - if (last === i) { - result += meta[point]; - } else { - result += `${str.slice(last, i)}${meta[point]}`; - } - last = i + 1; - } - } - if (last === 0) { - result = str; - } else if (last !== i) { - result += str.slice(last); - } - return `'${result}'`; -} - function tryStringify(arg) { try { return JSON.stringify(arg); @@ -851,7 +821,7 @@ function formatProperty(ctx, value, recurseTimes, key, array) { } else if (keyStrRegExp.test(key)) { name = ctx.stylize(key, 'name'); } else { - name = ctx.stylize(keyEscape(key), 'string'); + name = ctx.stylize(strEscape(key), 'string'); } return `${name}: ${str}`; diff --git a/lib/zlib.js b/lib/zlib.js index 7f41200f86be19..bbe89043248459 100644 --- a/lib/zlib.js +++ b/lib/zlib.js @@ -339,7 +339,7 @@ Zlib.prototype.flush = function flush(kind, callback) { this._scheduledFlushFlag = maxFlush(kind, this._scheduledFlushFlag); // If a callback was passed, always register a new `drain` + flush handler, - // mostly because that’s simpler and flush callbacks piling up is a rare + // mostly because that's simpler and flush callbacks piling up is a rare // thing anyway. if (!alreadyHadFlushScheduled || callback) { const drainHandler = () => this.flush(this._scheduledFlushFlag, callback); diff --git a/node.gyp b/node.gyp index 38dcdac089ffff..ee4157567f5a31 100644 --- a/node.gyp +++ b/node.gyp @@ -22,6 +22,8 @@ 'node_v8_options%': '', 'node_enable_v8_vtunejit%': 'false', 'node_core_target_name%': 'node', + 'node_lib_target_name%': 'node_lib', + 'node_intermediate_lib_type%': 'static_library', 'library_files': [ 'lib/internal/bootstrap_node.js', 'lib/async_hooks.js', @@ -145,6 +147,17 @@ 'conditions': [ [ 'node_shared=="true"', { 'node_target_type%': 'shared_library', + 'conditions': [ + ['OS=="aix"', { + # For AIX, always generate static library first, + # It needs an extra step to generate exp and + # then use both static lib and exp to create + # shared lib. + 'node_intermediate_lib_type': 'static_library', + }, { + 'node_intermediate_lib_type': 'shared_library', + }], + ], }, { 'node_target_type%': 'executable', }], @@ -161,7 +174,81 @@ 'targets': [ { 'target_name': '<(node_core_target_name)', - 'type': '<(node_target_type)', + 'type': 'executable', + 'sources': [ + 'src/node_main.cc' + ], + 'include_dirs': [ + 'src', + 'deps/v8/include', + ], + 'conditions': [ + [ 'node_intermediate_lib_type=="static_library" and ' + 'node_shared=="true" and OS=="aix"', { + # For AIX, shared lib is linked by static lib and .exp. In the + # case here, the executable needs to link to shared lib. + # Therefore, use 'node_aix_shared' target to generate the + # shared lib and then executable. 
+ 'dependencies': [ 'node_aix_shared' ], + }, { + 'dependencies': [ '<(node_lib_target_name)' ], + }], + [ 'node_intermediate_lib_type=="static_library" and ' + 'node_shared=="false"', { + 'includes': [ + 'node.gypi' + ], + 'xcode_settings': { + 'OTHER_LDFLAGS': [ + '-Wl,-force_load,<(PRODUCT_DIR)/<(STATIC_LIB_PREFIX)' + '<(node_core_target_name)<(STATIC_LIB_SUFFIX)', + ], + }, + 'msvs_settings': { + 'VCLinkerTool': { + 'AdditionalOptions': [ + '/WHOLEARCHIVE:<(PRODUCT_DIR)\\lib\\' + '<(node_core_target_name)<(STATIC_LIB_SUFFIX)', + ], + }, + }, + 'conditions': [ + ['OS in "linux freebsd openbsd solaris android"', { + 'ldflags': [ + '-Wl,--whole-archive,<(OBJ_DIR)/<(STATIC_LIB_PREFIX)' + '<(node_core_target_name)<(STATIC_LIB_SUFFIX)', + '-Wl,--no-whole-archive', + ], + }], + [ 'OS=="win"', { + 'sources': [ 'src/res/node.rc' ], + 'conditions': [ + [ 'node_use_etw=="true"', { + 'sources': [ + 'tools/msvs/genfiles/node_etw_provider.rc' + ], + }], + [ 'node_use_perfctr=="true"', { + 'sources': [ + 'tools/msvs/genfiles/node_perfctr_provider.rc', + ], + }] + ], + }], + ], + }], + [ 'node_intermediate_lib_type=="shared_library" and OS=="win"', { + # On Windows, having the same name for both executable and shared + # lib causes filename collision. Need a different PRODUCT_NAME for + # the executable and rename it back to node.exe later + 'product_name': '<(node_core_target_name)-win', + }], + ], + }, + { + 'target_name': '<(node_lib_target_name)', + 'type': '<(node_intermediate_lib_type)', + 'product_name': '<(node_core_target_name)', 'dependencies': [ 'node_js2c#host', @@ -173,7 +260,6 @@ 'include_dirs': [ 'src', - 'tools/msvs/genfiles', '<(SHARED_INTERMEDIATE_DIR)' # for node_natives.h ], @@ -199,7 +285,6 @@ 'src/node_file.cc', 'src/node_http2.cc', 'src/node_http_parser.cc', - 'src/node_main.cc', 'src/node_os.cc', 'src/node_platform.cc', 'src/node_perf.cc', @@ -279,7 +364,6 @@ 'src/util-inl.h', 'deps/http_parser/http_parser.h', 'deps/v8/include/v8.h', - 'deps/v8/include/v8-debug.h', # javascript files to make for an even more pleasant IDE experience '<@(library_files)', # node.gyp is added to the project by default. 
@@ -303,6 +387,9 @@ [ 'node_shared=="true" and node_module_version!="" and OS!="win"', { 'product_extension': '<(shlib_suffix)', }], + ['node_shared=="true" and OS=="aix"', { + 'product_name': 'node_base', + }], [ 'v8_enable_inspector==1', { 'defines': [ 'HAVE_INSPECTOR=1', @@ -333,7 +420,7 @@ 'src/backtrace_win32.cc', ], 'conditions': [ - [ 'node_target_type!="static_library"', { + [ 'node_intermediate_lib_type!="static_library"', { 'sources': [ 'src/res/node.rc', ], @@ -353,6 +440,64 @@ 'defines': [ '__POSIX__' ], 'sources': [ 'src/backtrace_posix.cc' ], }], + [ 'node_use_etw=="true"', { + 'defines': [ 'HAVE_ETW=1' ], + 'dependencies': [ 'node_etw' ], + 'include_dirs': [ + 'src', + 'tools/msvs/genfiles', + '<(SHARED_INTERMEDIATE_DIR)' # for node_natives.h + ], + 'sources': [ + 'src/node_win32_etw_provider.h', + 'src/node_win32_etw_provider-inl.h', + 'src/node_win32_etw_provider.cc', + 'src/node_dtrace.cc', + 'tools/msvs/genfiles/node_etw_provider.h', + ], + 'conditions': [ + ['node_intermediate_lib_type != "static_library"', { + 'sources': [ + 'tools/msvs/genfiles/node_etw_provider.rc', + ], + }], + ], + }], + [ 'node_use_perfctr=="true"', { + 'defines': [ 'HAVE_PERFCTR=1' ], + 'dependencies': [ 'node_perfctr' ], + 'include_dirs': [ + 'src', + 'tools/msvs/genfiles', + '<(SHARED_INTERMEDIATE_DIR)' # for node_natives.h + ], + 'sources': [ + 'src/node_win32_perfctr_provider.h', + 'src/node_win32_perfctr_provider.cc', + 'src/node_counters.cc', + 'src/node_counters.h', + ], + 'conditions': [ + ['node_intermediate_lib_type != "static_library"', { + 'sources': [ + 'tools/msvs/genfiles/node_perfctr_provider.rc', + ], + }], + ], + }], + [ 'node_use_lttng=="true"', { + 'defines': [ 'HAVE_LTTNG=1' ], + 'include_dirs': [ '<(SHARED_INTERMEDIATE_DIR)' ], + 'libraries': [ '-llttng-ust' ], + 'include_dirs': [ + 'src', + 'tools/msvs/genfiles', + '<(SHARED_INTERMEDIATE_DIR)' # for node_natives.h + ], + 'sources': [ + 'src/node_lttng.cc' + ], + }], [ 'node_use_dtrace=="true"', { 'defines': [ 'HAVE_DTRACE=1' ], 'dependencies': [ @@ -393,7 +538,6 @@ ] ] } ], [ 'node_use_openssl=="true"', { - 'defines': [ 'HAVE_OPENSSL=1' ], 'sources': [ 'src/node_crypto.cc', 'src/node_crypto_bio.cc', @@ -404,49 +548,6 @@ 'src/tls_wrap.cc', 'src/tls_wrap.h' ], - 'conditions': [ - ['openssl_fips != ""', { - 'defines': [ 'NODE_FIPS_MODE' ], - }], - [ 'node_shared_openssl=="false"', { - 'dependencies': [ - './deps/openssl/openssl.gyp:openssl', - - # For tests - './deps/openssl/openssl.gyp:openssl-cli', - ], - 'conditions': [ - # -force_load or --whole-archive are not applicable for - # the static library - [ 'node_target_type!="static_library"', { - 'xcode_settings': { - 'OTHER_LDFLAGS': [ - '-Wl,-force_load,<(PRODUCT_DIR)/<(OPENSSL_PRODUCT)', - ], - }, - 'conditions': [ - ['OS in "linux freebsd" and node_shared=="false"', { - 'ldflags': [ - '-Wl,--whole-archive,' - '<(OBJ_DIR)/deps/openssl/' - '<(OPENSSL_PRODUCT)', - '-Wl,--no-whole-archive', - ], - }], - # openssl.def is based on zlib.def, zlib symbols - # are always exported. 
- ['use_openssl_def==1', { - 'sources': ['<(SHARED_INTERMEDIATE_DIR)/openssl.def'], - }], - ['OS=="win" and use_openssl_def==0', { - 'sources': ['deps/zlib/win32/zlib.def'], - }], - ], - }], - ], - }]] - }, { - 'defines': [ 'HAVE_OPENSSL=0' ] }], ], 'direct_dependent_settings': { @@ -509,7 +610,7 @@ 'target_name': 'node_etw', 'type': 'none', 'conditions': [ - [ 'node_use_etw=="true" and node_target_type!="static_library"', { + [ 'node_use_etw=="true"', { 'actions': [ { 'action_name': 'node_etw', @@ -530,7 +631,7 @@ 'target_name': 'node_perfctr', 'type': 'none', 'conditions': [ - [ 'node_use_perfctr=="true" and node_target_type!="static_library"', { + [ 'node_use_perfctr=="true"', { 'actions': [ { 'action_name': 'node_perfctr_man', @@ -592,15 +693,13 @@ '<(SHARED_INTERMEDIATE_DIR)/node_javascript.cc', ], 'conditions': [ - [ 'node_use_dtrace=="false" and node_use_etw=="false" or ' - 'node_target_type=="static_library"', { + [ 'node_use_dtrace=="false" and node_use_etw=="false"', { 'inputs': [ 'src/notrace_macros.py' ] }], - ['node_use_lttng=="false" or node_target_type=="static_library"', { + [ 'node_use_lttng=="false"', { 'inputs': [ 'src/nolttng_macros.py' ] }], - [ 'node_use_perfctr=="false" or ' - 'node_target_type=="static_library"', { + [ 'node_use_perfctr=="false"', { 'inputs': [ 'src/noperfctr_macros.py' ] }] ], @@ -650,10 +749,10 @@ { 'action_name': 'node_dtrace_provider_o', 'inputs': [ - '<(OBJ_DIR)/node/src/node_dtrace.o', + '<(OBJ_DIR)/<(node_lib_target_name)/src/node_dtrace.o', ], 'outputs': [ - '<(OBJ_DIR)/node/src/node_dtrace_provider.o' + '<(OBJ_DIR)/<(node_lib_target_name)/src/node_dtrace_provider.o' ], 'action': [ 'dtrace', '-G', '-xnolibs', '-s', 'src/node_provider.d', '<@(_inputs)', '-o', '<@(_outputs)' ] @@ -703,7 +802,7 @@ '<(SHARED_INTERMEDIATE_DIR)/v8constants.h' ], 'outputs': [ - '<(OBJ_DIR)/node/src/node_dtrace_ustack.o' + '<(OBJ_DIR)/<(node_lib_target_name)/src/node_dtrace_ustack.o' ], 'conditions': [ [ 'target_arch=="ia32" or target_arch=="arm"', { @@ -750,12 +849,41 @@ } ], ] }, + { + # When using shared lib to build executable in Windows, in order to avoid + # filename collision, the executable name is node-win.exe. 
Need to rename + # it back to node.exe + 'target_name': 'rename_node_bin_win', + 'type': 'none', + 'dependencies': [ + '<(node_core_target_name)', + ], + 'conditions': [ + [ 'OS=="win" and node_intermediate_lib_type=="shared_library"', { + 'actions': [ + { + 'action_name': 'rename_node_bin_win', + 'inputs': [ + '<(PRODUCT_DIR)/<(node_core_target_name)-win.exe' + ], + 'outputs': [ + '<(PRODUCT_DIR)/<(node_core_target_name).exe', + ], + 'action': [ + 'mv', '<@(_inputs)', '<@(_outputs)', + ], + }, + ], + } ], + ] + }, { 'target_name': 'cctest', 'type': 'executable', 'dependencies': [ '<(node_core_target_name)', + 'rename_node_bin_win', 'deps/gtest/gtest.gyp:gtest', 'node_js2c#host', 'node_dtrace_header', @@ -764,9 +892,9 @@ ], 'variables': { - 'OBJ_PATH': '<(OBJ_DIR)/node/src', - 'OBJ_GEN_PATH': '<(OBJ_DIR)/node/gen', - 'OBJ_TRACING_PATH': '<(OBJ_DIR)/node/src/tracing', + 'OBJ_PATH': '<(OBJ_DIR)/<(node_lib_target_name)/src', + 'OBJ_GEN_PATH': '<(OBJ_DIR)/<(node_lib_target_name)/gen', + 'OBJ_TRACING_PATH': '<(OBJ_DIR)/<(node_lib_target_name)/src/tracing', 'OBJ_SUFFIX': 'o', 'OBJ_SEPARATOR': '/', 'conditions': [ @@ -777,18 +905,19 @@ 'OBJ_PATH': '<(OBJ_DIR)/src', 'OBJ_GEN_PATH': '<(OBJ_DIR)/gen', 'OBJ_TRACING_PATH': '<(OBJ_DIR)/src/tracing', - 'OBJ_SEPARATOR': '/node.', + 'OBJ_SEPARATOR': '/<(node_lib_target_name).', }, { 'conditions': [ ['OS=="win"', { - 'OBJ_PATH': '<(OBJ_DIR)/node', - 'OBJ_GEN_PATH': '<(OBJ_DIR)/node', - 'OBJ_TRACING_PATH': '<(OBJ_DIR)/node', + 'OBJ_PATH': '<(OBJ_DIR)/<(node_lib_target_name)', + 'OBJ_GEN_PATH': '<(OBJ_DIR)/<(node_lib_target_name)', + 'OBJ_TRACING_PATH': '<(OBJ_DIR)/<(node_lib_target_name)', }], ['OS=="aix"', { - 'OBJ_PATH': '<(OBJ_DIR)/node_base/src', - 'OBJ_GEN_PATH': '<(OBJ_DIR)/node_base/gen', - 'OBJ_TRACING_PATH': '<(OBJ_DIR)/node_base/src/tracing', + 'OBJ_PATH': '<(OBJ_DIR)/<(node_lib_target_name)/src', + 'OBJ_GEN_PATH': '<(OBJ_DIR)/<(node_lib_target_name)/gen', + 'OBJ_TRACING_PATH': + '<(OBJ_DIR)/<(node_lib_target_name)/src/tracing', }], ]} ] @@ -820,34 +949,29 @@ 'test/cctest/test_url.cc' ], - 'sources!': [ - 'src/node_main.cc' + 'libraries': [ + '<(OBJ_PATH)<(OBJ_SEPARATOR)async_wrap.<(OBJ_SUFFIX)', + '<(OBJ_PATH)<(OBJ_SEPARATOR)env.<(OBJ_SUFFIX)', + '<(OBJ_PATH)<(OBJ_SEPARATOR)node.<(OBJ_SUFFIX)', + '<(OBJ_PATH)<(OBJ_SEPARATOR)node_buffer.<(OBJ_SUFFIX)', + '<(OBJ_PATH)<(OBJ_SEPARATOR)node_debug_options.<(OBJ_SUFFIX)', + '<(OBJ_PATH)<(OBJ_SEPARATOR)node_i18n.<(OBJ_SUFFIX)', + '<(OBJ_PATH)<(OBJ_SEPARATOR)node_perf.<(OBJ_SUFFIX)', + '<(OBJ_PATH)<(OBJ_SEPARATOR)node_platform.<(OBJ_SUFFIX)', + '<(OBJ_PATH)<(OBJ_SEPARATOR)node_url.<(OBJ_SUFFIX)', + '<(OBJ_PATH)<(OBJ_SEPARATOR)util.<(OBJ_SUFFIX)', + '<(OBJ_PATH)<(OBJ_SEPARATOR)string_bytes.<(OBJ_SUFFIX)', + '<(OBJ_PATH)<(OBJ_SEPARATOR)string_search.<(OBJ_SUFFIX)', + '<(OBJ_PATH)<(OBJ_SEPARATOR)stream_base.<(OBJ_SUFFIX)', + '<(OBJ_PATH)<(OBJ_SEPARATOR)node_constants.<(OBJ_SUFFIX)', + '<(OBJ_TRACING_PATH)<(OBJ_SEPARATOR)agent.<(OBJ_SUFFIX)', + '<(OBJ_TRACING_PATH)<(OBJ_SEPARATOR)node_trace_buffer.<(OBJ_SUFFIX)', + '<(OBJ_TRACING_PATH)<(OBJ_SEPARATOR)node_trace_writer.<(OBJ_SUFFIX)', + '<(OBJ_TRACING_PATH)<(OBJ_SEPARATOR)trace_event.<(OBJ_SUFFIX)', + '<(OBJ_GEN_PATH)<(OBJ_SEPARATOR)node_javascript.<(OBJ_SUFFIX)', ], 'conditions': [ - ['node_target_type!="static_library"', { - 'libraries': [ - '<(OBJ_PATH)<(OBJ_SEPARATOR)async_wrap.<(OBJ_SUFFIX)', - '<(OBJ_PATH)<(OBJ_SEPARATOR)env.<(OBJ_SUFFIX)', - '<(OBJ_PATH)<(OBJ_SEPARATOR)node.<(OBJ_SUFFIX)', - '<(OBJ_PATH)<(OBJ_SEPARATOR)node_buffer.<(OBJ_SUFFIX)', - 
'<(OBJ_PATH)<(OBJ_SEPARATOR)node_debug_options.<(OBJ_SUFFIX)', - '<(OBJ_PATH)<(OBJ_SEPARATOR)node_i18n.<(OBJ_SUFFIX)', - '<(OBJ_PATH)<(OBJ_SEPARATOR)node_perf.<(OBJ_SUFFIX)', - '<(OBJ_PATH)<(OBJ_SEPARATOR)node_platform.<(OBJ_SUFFIX)', - '<(OBJ_PATH)<(OBJ_SEPARATOR)node_url.<(OBJ_SUFFIX)', - '<(OBJ_PATH)<(OBJ_SEPARATOR)util.<(OBJ_SUFFIX)', - '<(OBJ_PATH)<(OBJ_SEPARATOR)string_bytes.<(OBJ_SUFFIX)', - '<(OBJ_PATH)<(OBJ_SEPARATOR)string_search.<(OBJ_SUFFIX)', - '<(OBJ_PATH)<(OBJ_SEPARATOR)stream_base.<(OBJ_SUFFIX)', - '<(OBJ_PATH)<(OBJ_SEPARATOR)node_constants.<(OBJ_SUFFIX)', - '<(OBJ_TRACING_PATH)<(OBJ_SEPARATOR)agent.<(OBJ_SUFFIX)', - '<(OBJ_TRACING_PATH)<(OBJ_SEPARATOR)node_trace_buffer.<(OBJ_SUFFIX)', - '<(OBJ_TRACING_PATH)<(OBJ_SEPARATOR)node_trace_writer.<(OBJ_SUFFIX)', - '<(OBJ_TRACING_PATH)<(OBJ_SEPARATOR)trace_event.<(OBJ_SUFFIX)', - '<(OBJ_GEN_PATH)<(OBJ_SEPARATOR)node_javascript.<(OBJ_SUFFIX)', - ], - }], [ 'node_use_openssl=="true"', { 'conditions': [ ['node_target_type!="static_library"', { @@ -863,6 +987,14 @@ 'HAVE_OPENSSL=1', ], }], + [ 'node_use_perfctr=="true"', { + 'defines': [ 'HAVE_PERFCTR=1' ], + 'libraries': [ + '<(OBJ_PATH)<(OBJ_SEPARATOR)node_counters.<(OBJ_SUFFIX)', + '<(OBJ_PATH)<(OBJ_SEPARATOR)' + 'node_win32_perfctr_provider.<(OBJ_SUFFIX)', + ], + }], ['v8_enable_inspector==1', { 'sources': [ 'test/cctest/test_inspector_socket.cc', @@ -896,10 +1028,21 @@ }], ['OS=="linux"', { 'libraries': [ - '<(SHARED_INTERMEDIATE_DIR)/node_dtrace_provider.o', + '<(SHARED_INTERMEDIATE_DIR)<(OBJ_SEPARATOR)' + 'node_dtrace_provider.<(OBJ_SUFFIX)', ] }], ], + }, { + 'conditions': [ + [ 'node_use_etw=="true" and OS=="win"', { + 'libraries': [ + '<(OBJ_PATH)<(OBJ_SEPARATOR)node_dtrace.<(OBJ_SUFFIX)', + '<(OBJ_PATH)<(OBJ_SEPARATOR)' + 'node_win32_etw_provider.<(OBJ_SUFFIX)', + ], + }] + ] }], [ 'OS=="win" and node_target_type!="static_library"', { 'libraries': [ @@ -914,129 +1057,27 @@ }], ], }], - [ 'node_shared_zlib=="false"', { - 'dependencies': [ - 'deps/zlib/zlib.gyp:zlib', - ] - }], - [ 'node_shared_openssl=="false" and node_shared=="false"', { - 'dependencies': [ - 'deps/openssl/openssl.gyp:openssl' - ] - }], - [ 'node_shared_http_parser=="false"', { - 'dependencies': [ - 'deps/http_parser/http_parser.gyp:http_parser' - ] - }], - [ 'node_shared_libuv=="false"', { - 'dependencies': [ - 'deps/uv/uv.gyp:libuv' - ] - }], - [ 'node_shared_nghttp2=="false"', { - 'dependencies': [ - 'deps/nghttp2/nghttp2.gyp:nghttp2' - ], - 'include_dirs': [ - 'deps/nghttp2/lib/includes' - ] - }], - [ 'node_use_v8_platform=="true"', { - 'dependencies': [ - 'deps/v8/src/v8.gyp:v8_libplatform', - ], - }], ['OS=="solaris"', { 'ldflags': [ '-I<(SHARED_INTERMEDIATE_DIR)' ] }], - [ 'node_use_openssl=="true"', { - 'conditions': [ - [ 'node_shared_openssl=="false"', { - 'conditions': [ - # -force_load or --whole-archive are not applicable for - # the static library - [ 'node_target_type!="static_library"', { - 'xcode_settings': { - 'OTHER_LDFLAGS': [ - '-Wl,-force_load,<(PRODUCT_DIR)/<(OPENSSL_PRODUCT)', - ], - }, - 'conditions': [ - ['OS in "linux freebsd" and node_shared=="false"', { - 'ldflags': [ - '-Wl,--whole-archive,' - '<(OBJ_DIR)/deps/openssl/' - '<(OPENSSL_PRODUCT)', - '-Wl,--no-whole-archive', - ], - }], - ], - }], - ], - }]] - }], ] } ], # end targets 'conditions': [ - [ 'node_target_type=="static_library"', { + [ 'OS=="aix" and node_shared=="true"', { 'targets': [ { - 'target_name': 'static_node', - 'type': 'executable', + 'target_name': 'node_aix_shared', + 'type': 'shared_library', 
'product_name': '<(node_core_target_name)', - 'dependencies': [ - '<(node_core_target_name)', - ], - 'sources+': [ - 'src/node_main.cc', - ], - 'include_dirs': [ - 'deps/v8/include', - ], - 'xcode_settings': { - 'OTHER_LDFLAGS': [ - '-Wl,-force_load,<(PRODUCT_DIR)/<(STATIC_LIB_PREFIX)' - '<(node_core_target_name)<(STATIC_LIB_SUFFIX)', - ], - }, - 'msvs_settings': { - 'VCLinkerTool': { - 'AdditionalOptions': [ - '/WHOLEARCHIVE:<(PRODUCT_DIR)/lib/' - '<(node_core_target_name)<(STATIC_LIB_SUFFIX)', - ], - }, - }, - 'conditions': [ - ['OS in "linux freebsd openbsd solaris android"', { - 'ldflags': [ - '-Wl,--whole-archive,<(OBJ_DIR)/<(STATIC_LIB_PREFIX)' - '<(node_core_target_name)<(STATIC_LIB_SUFFIX)', - '-Wl,--no-whole-archive', - ], - }], - ], - }, - ], - }], - ['OS=="aix"', { - 'targets': [ - { - 'target_name': 'node', + 'ldflags': [ '--shared' ], + 'product_extension': '<(shlib_suffix)', 'conditions': [ - ['node_shared=="true"', { - 'type': 'shared_library', - 'ldflags': ['--shared'], - 'product_extension': '<(shlib_suffix)', - }, { - 'type': 'executable', - }], ['target_arch=="ppc64"', { 'ldflags': [ - '-Wl,-blibpath:/usr/lib:/lib:/opt/freeware/lib/pthread/ppc64' + '-Wl,-blibpath:/usr/lib:/lib:' + '/opt/freeware/lib/pthread/ppc64' ], }], ['target_arch=="ppc"', { @@ -1045,45 +1086,20 @@ ], }] ], - 'dependencies': ['<(node_core_target_name)', 'node_exp'], - + 'includes': [ + 'node.gypi' + ], + 'dependencies': [ '<(node_lib_target_name)' ], 'include_dirs': [ 'src', 'deps/v8/include', ], - 'sources': [ - 'src/node_main.cc', '<@(library_files)', - # node.gyp is added to the project by default. 'common.gypi', ], - - 'ldflags': ['-Wl,-bE:<(PRODUCT_DIR)/node.exp'], }, - { - 'target_name': 'node_exp', - 'type': 'none', - 'dependencies': [ - '<(node_core_target_name)', - ], - 'actions': [ - { - 'action_name': 'expfile', - 'inputs': [ - '<(OBJ_DIR)' - ], - 'outputs': [ - '<(PRODUCT_DIR)/node.exp' - ], - 'action': [ - 'sh', 'tools/create_expfile.sh', - '<@(_inputs)', '<@(_outputs)' - ], - } - ] - } - ], # end targets + ] }], # end aix section ], # end conditions block } diff --git a/node.gypi b/node.gypi index 3990c59ef98851..386601906fbe4a 100644 --- a/node.gypi +++ b/node.gypi @@ -1,4 +1,29 @@ { + # 'force_load' means to include the static libs into the shared lib or + # executable. Therefore, it is enabled when building: + # 1. The executable and it uses static lib (cctest and node) + # 2. The shared lib + # Linker optimizes out functions that are not used. When force_load=true, + # --whole-archive,force_load and /WHOLEARCHIVE are used to include + # all obj files in static libs into the executable or shared lib. 
+ 'variables': { + 'variables': { + 'variables': { + 'force_load%': 'true', + 'current_type%': '<(_type)', + }, + 'force_load%': '<(force_load)', + 'conditions': [ + ['current_type=="static_library"', { + 'force_load': 'false', + }], + [ 'current_type=="executable" and node_target_type=="shared_library"', { + 'force_load': 'false', + }] + ], + }, + 'force_load%': '<(force_load)', + }, 'conditions': [ [ 'node_shared=="false"', { 'msvs_settings': { @@ -36,12 +61,6 @@ [ 'node_v8_options!=""', { 'defines': [ 'NODE_V8_OPTIONS="<(node_v8_options)"'], }], - # No node_main.cc for anything except executable - [ 'node_target_type!="executable"', { - 'sources!': [ - 'src/node_main.cc', - ], - }], [ 'node_release_urlbase!=""', { 'defines': [ 'NODE_RELEASE_URLBASE="<(node_release_urlbase)"', @@ -70,37 +89,6 @@ 'deps/v8/src/third_party/vtune/v8vtune.gyp:v8_vtune' ], }], - [ 'node_use_lttng=="true"', { - 'defines': [ 'HAVE_LTTNG=1' ], - 'include_dirs': [ '<(SHARED_INTERMEDIATE_DIR)' ], - 'libraries': [ '-llttng-ust' ], - 'sources': [ - 'src/node_lttng.cc' - ], - } ], - [ 'node_use_etw=="true" and node_target_type!="static_library"', { - 'defines': [ 'HAVE_ETW=1' ], - 'dependencies': [ 'node_etw' ], - 'sources': [ - 'src/node_win32_etw_provider.h', - 'src/node_win32_etw_provider-inl.h', - 'src/node_win32_etw_provider.cc', - 'src/node_dtrace.cc', - 'tools/msvs/genfiles/node_etw_provider.h', - 'tools/msvs/genfiles/node_etw_provider.rc', - ] - } ], - [ 'node_use_perfctr=="true" and node_target_type!="static_library"', { - 'defines': [ 'HAVE_PERFCTR=1' ], - 'dependencies': [ 'node_perfctr' ], - 'sources': [ - 'src/node_win32_perfctr_provider.h', - 'src/node_win32_perfctr_provider.cc', - 'src/node_counters.cc', - 'src/node_counters.h', - 'tools/msvs/genfiles/node_perfctr_provider.rc', - ] - } ], [ 'node_no_browser_globals=="true"', { 'defines': [ 'NODE_NO_BROWSER_GLOBALS' ], } ], @@ -108,7 +96,7 @@ 'dependencies': [ 'deps/v8/src/v8.gyp:postmortem-metadata' ], 'conditions': [ # -force_load is not applicable for the static library - [ 'node_target_type!="static_library"', { + [ 'force_load=="true"', { 'xcode_settings': { 'OTHER_LDFLAGS': [ '-Wl,-force_load,<(V8_BASE)', @@ -159,6 +147,27 @@ 'defines': [ '_LINUX_SOURCE_COMPAT', ], + 'conditions': [ + [ 'force_load=="true"', { + + 'actions': [ + { + 'action_name': 'expfile', + 'inputs': [ + '<(OBJ_DIR)' + ], + 'outputs': [ + '<(PRODUCT_DIR)/node.exp' + ], + 'action': [ + 'sh', 'tools/create_expfile.sh', + '<@(_inputs)', '<@(_outputs)' + ], + } + ], + 'ldflags': ['-Wl,-bE:<(PRODUCT_DIR)/node.exp', '-Wl,-brtl'], + }], + ], }], [ 'OS=="solaris"', { 'libraries': [ @@ -174,12 +183,14 @@ 'NODE_PLATFORM="sunos"', ], }], - [ '(OS=="freebsd" or OS=="linux") and node_shared=="false" and coverage=="false"', { + [ '(OS=="freebsd" or OS=="linux") and node_shared=="false"' + ' and coverage=="false" and force_load=="true"', { 'ldflags': [ '-Wl,-z,noexecstack', '-Wl,--whole-archive <(V8_BASE)', '-Wl,--no-whole-archive' ] }], - [ '(OS=="freebsd" or OS=="linux") and node_shared=="false" and coverage=="true"', { + [ '(OS=="freebsd" or OS=="linux") and node_shared=="false"' + ' and coverage=="true" and force_load=="true"', { 'ldflags': [ '-Wl,-z,noexecstack', '-Wl,--whole-archive <(V8_BASE)', '-Wl,--no-whole-archive', @@ -206,5 +217,54 @@ [ 'OS=="sunos"', { 'ldflags': [ '-Wl,-M,/usr/lib/ld/map.noexstk' ], }], + + [ 'node_use_openssl=="true"', { + 'defines': [ 'HAVE_OPENSSL=1' ], + 'conditions': [ + ['openssl_fips != ""', { + 'defines': [ 'NODE_FIPS_MODE' ], + }], + [ 
'node_shared_openssl=="false"', { + 'dependencies': [ + './deps/openssl/openssl.gyp:openssl', + + # For tests + './deps/openssl/openssl.gyp:openssl-cli', + ], + 'conditions': [ + # -force_load or --whole-archive are not applicable for + # the static library + [ 'force_load=="true"', { + 'xcode_settings': { + 'OTHER_LDFLAGS': [ + '-Wl,-force_load,<(PRODUCT_DIR)/<(OPENSSL_PRODUCT)', + ], + }, + 'conditions': [ + ['OS in "linux freebsd" and node_shared=="false"', { + 'ldflags': [ + '-Wl,--whole-archive,' + '<(OBJ_DIR)/deps/openssl/' + '<(OPENSSL_PRODUCT)', + '-Wl,--no-whole-archive', + ], + }], + # openssl.def is based on zlib.def, zlib symbols + # are always exported. + ['use_openssl_def==1', { + 'sources': ['<(SHARED_INTERMEDIATE_DIR)/openssl.def'], + }], + ['OS=="win" and use_openssl_def==0', { + 'sources': ['deps/zlib/win32/zlib.def'], + }], + ], + }], + ], + }]] + + }, { + 'defines': [ 'HAVE_OPENSSL=0' ] + }], + ], } diff --git a/src/async_wrap.cc b/src/async_wrap.cc index 7e3c0c257ab22d..b1212e97704ef0 100644 --- a/src/async_wrap.cc +++ b/src/async_wrap.cc @@ -129,7 +129,7 @@ RetainedObjectInfo* WrapperInfo(uint16_t class_id, Local wrapper) { CHECK_GT(object->InternalFieldCount(), 0); AsyncWrap* wrap = Unwrap(object); - CHECK_NE(nullptr, wrap); + if (wrap == nullptr) return nullptr; // ClearWrap() already called. return new RetainedAsyncInfo(class_id, wrap); } @@ -554,12 +554,12 @@ void AsyncWrap::Initialize(Local target, // this way to allow JS and C++ to read/write each value as quickly as // possible. The fields are represented as follows: // - // kAsyncUid: Maintains the state of the next unique id to be assigned. + // kAsyncIdCounter: Maintains the state of the next unique id to be assigned. // // kDefaultTriggerAsyncId: Write the id of the resource responsible for a // handle's creation just before calling the new handle's constructor. // After the new handle is constructed kDefaultTriggerAsyncId is set back - // to 0. + // to -1. FORCE_SET_TARGET_FIELD(target, "async_id_fields", env->async_hooks()->async_id_fields().GetJSArray()); diff --git a/src/async_wrap.h b/src/async_wrap.h index bc826768d92736..8325d152ab09c4 100644 --- a/src/async_wrap.h +++ b/src/async_wrap.h @@ -44,6 +44,7 @@ namespace node { V(HTTP2SESSION) \ V(HTTP2STREAM) \ V(HTTP2PING) \ + V(HTTP2SETTINGS) \ V(HTTPPARSER) \ V(JSSTREAM) \ V(PIPECONNECTWRAP) \ diff --git a/src/env-inl.h b/src/env-inl.h index 956153bb965f44..4b6f147f64778c 100644 --- a/src/env-inl.h +++ b/src/env-inl.h @@ -541,8 +541,15 @@ Environment::scheduled_immediate_count() { return scheduled_immediate_count_; } -void Environment::SetImmediate(native_immediate_callback cb, void* data) { - native_immediate_callbacks_.push_back({ cb, data }); +void Environment::SetImmediate(native_immediate_callback cb, + void* data, + v8::Local obj) { + native_immediate_callbacks_.push_back({ + cb, + data, + std::unique_ptr>( + obj.IsEmpty() ? 
nullptr : new v8::Persistent(isolate_, obj)) + }); if (scheduled_immediate_count_[0] == 0) ActivateImmediateCheck(); scheduled_immediate_count_[0] = scheduled_immediate_count_[0] + 1; diff --git a/src/env.cc b/src/env.cc index 8c3b43d2102cb1..e105fcd7c57ef1 100644 --- a/src/env.cc +++ b/src/env.cc @@ -224,6 +224,8 @@ void Environment::RunAndClearNativeImmediates() { native_immediate_callbacks_.swap(list); for (const auto& cb : list) { cb.cb_(this, cb.data_); + if (cb.keep_alive_) + cb.keep_alive_->Reset(); } #ifdef DEBUG diff --git a/src/env.h b/src/env.h index 6113e6d2de26ea..fd208038157b8b 100644 --- a/src/env.h +++ b/src/env.h @@ -93,6 +93,8 @@ class ModuleWrap; V(processed_private_symbol, "node:processed") \ V(selected_npn_buffer_private_symbol, "node:selectedNpnBuffer") \ V(domain_private_symbol, "node:domain") \ + V(napi_env, "node:napi:env") \ + V(napi_wrapper, "node:napi:wrapper") \ // Strings are per-isolate primitives but Environment proxies them // for the sake of convenience. Strings should be ASCII-only. @@ -199,6 +201,7 @@ class ModuleWrap; V(nsname_string, "nsname") \ V(nexttick_string, "nextTick") \ V(ocsp_request_string, "OCSPRequest") \ + V(onaltsvc_string, "onaltsvc") \ V(onchange_string, "onchange") \ V(onclienthello_string, "onclienthello") \ V(oncomplete_string, "oncomplete") \ @@ -315,6 +318,7 @@ class ModuleWrap; V(domains_stack_array, v8::Array) \ V(http2ping_constructor_template, v8::ObjectTemplate) \ V(http2stream_constructor_template, v8::ObjectTemplate) \ + V(http2settings_constructor_template, v8::ObjectTemplate) \ V(inspector_console_api_object, v8::Object) \ V(module_load_list_array, v8::Array) \ V(pbkdf2_constructor_template, v8::ObjectTemplate) \ @@ -686,7 +690,11 @@ class Environment { bool EmitNapiWarning(); typedef void (*native_immediate_callback)(Environment* env, void* data); - inline void SetImmediate(native_immediate_callback cb, void* data); + // cb will be called as cb(env, data) on the next event loop iteration. + // obj will be kept alive between now and after the callback has run. + inline void SetImmediate(native_immediate_callback cb, + void* data, + v8::Local obj = v8::Local()); // This needs to be available for the JS-land setImmediate(). 
void ActivateImmediateCheck(); @@ -754,6 +762,7 @@ class Environment { struct NativeImmediateCallback { native_immediate_callback cb_; void* data_; + std::unique_ptr> keep_alive_; }; std::vector native_immediate_callbacks_; void RunAndClearNativeImmediates(); diff --git a/src/js_stream.cc b/src/js_stream.cc index 7d4ad7a4e978a6..e8e31f41cb64ad 100644 --- a/src/js_stream.cc +++ b/src/js_stream.cc @@ -181,7 +181,7 @@ void JSStream::DoAfterWrite(const FunctionCallbackInfo& args) { ASSIGN_OR_RETURN_UNWRAP(&wrap, args.Holder()); ASSIGN_OR_RETURN_UNWRAP(&w, args[0].As()); - wrap->OnAfterWrite(w); + w->Done(0); } diff --git a/src/node.cc b/src/node.cc index d17752bbad38f5..67c8a808ffb00e 100644 --- a/src/node.cc +++ b/src/node.cc @@ -3374,6 +3374,12 @@ void SetupProcessObject(Environment* env, "nghttp2", FIXED_ONE_BYTE_STRING(env->isolate(), NGHTTP2_VERSION)); + const char node_napi_version[] = NODE_STRINGIFY(NAPI_VERSION); + READONLY_PROPERTY( + versions, + "napi", + FIXED_ONE_BYTE_STRING(env->isolate(), node_napi_version)); + // process._promiseRejectEvent Local promiseRejectEvent = Object::New(env->isolate()); READONLY_DONT_ENUM_PROPERTY(process, diff --git a/src/node.h b/src/node.h index a48c9bc86f6904..78b2b2b64a6feb 100644 --- a/src/node.h +++ b/src/node.h @@ -178,7 +178,6 @@ NODE_EXTERN v8::Local MakeCallback( #endif #ifdef _WIN32 -// TODO(tjfontaine) consider changing the usage of ssize_t to ptrdiff_t #if !defined(_SSIZE_T_) && !defined(_SSIZE_T_DEFINED) typedef intptr_t ssize_t; # define _SSIZE_T_ diff --git a/src/node_api.cc b/src/node_api.cc index f16ceefd5eb5b9..b0456811bfb1dd 100644 --- a/src/node_api.cc +++ b/src/node_api.cc @@ -17,8 +17,7 @@ #include #include "node_api.h" #include "node_internals.h" - -#define NAPI_VERSION 2 +#include "env.h" static napi_status napi_set_last_error(napi_env env, napi_status error_code, @@ -45,9 +44,13 @@ struct napi_env__ { v8::Persistent accessor_data_template; napi_extended_error_info last_error; int open_handle_scopes = 0; + int open_callback_scopes = 0; uv_loop_t* loop = nullptr; }; +#define NAPI_PRIVATE_KEY(context, suffix) \ + (node::Environment::GetCurrent((context))->napi_ ## suffix()) + #define ENV_OBJECT_TEMPLATE(env, prefix, destination, field_count) \ do { \ if ((env)->prefix ## _template.IsEmpty()) { \ @@ -68,10 +71,12 @@ struct napi_env__ { } \ } while (0) -#define CHECK_ENV(env) \ - if ((env) == nullptr) { \ - return napi_invalid_arg; \ - } +#define CHECK_ENV(env) \ + do { \ + if ((env) == nullptr) { \ + return napi_invalid_arg; \ + } \ + } while (0) #define CHECK_ARG(env, arg) \ RETURN_STATUS_IF_FALSE((env), ((arg) != nullptr), napi_invalid_arg) @@ -144,6 +149,48 @@ struct napi_env__ { (!try_catch.HasCaught() ? 
napi_ok \ : napi_set_last_error((env), napi_pending_exception)) +#define THROW_RANGE_ERROR_IF_FALSE(env, condition, error, message) \ + do { \ + if (!(condition)) { \ + napi_throw_range_error((env), (error), (message)); \ + return napi_set_last_error((env), napi_generic_failure); \ + } \ + } while (0) + +#define CREATE_TYPED_ARRAY( \ + env, type, size_of_element, buffer, byte_offset, length, out) \ + do { \ + if ((size_of_element) > 1) { \ + THROW_RANGE_ERROR_IF_FALSE( \ + (env), (byte_offset) % (size_of_element) == 0, \ + "ERR_NAPI_INVALID_TYPEDARRAY_ALIGNMENT", \ + "start offset of "#type" should be a multiple of "#size_of_element); \ + } \ + THROW_RANGE_ERROR_IF_FALSE((env), (length) * (size_of_element) + \ + (byte_offset) <= buffer->ByteLength(), \ + "ERR_NAPI_INVALID_TYPEDARRAY_LENGTH", \ + "Invalid typed array length"); \ + (out) = v8::type::New((buffer), (byte_offset), (length)); \ + } while (0) + +#define NAPI_CALL_INTO_MODULE(env, call, handle_exception) \ + do { \ + int open_handle_scopes = (env)->open_handle_scopes; \ + int open_callback_scopes = (env)->open_callback_scopes; \ + napi_clear_last_error((env)); \ + call; \ + CHECK_EQ((env)->open_handle_scopes, open_handle_scopes); \ + CHECK_EQ((env)->open_callback_scopes, open_callback_scopes); \ + if (!(env)->last_exception.IsEmpty()) { \ + handle_exception( \ + v8::Local::New((env)->isolate, (env)->last_exception)); \ + (env)->last_exception.Reset(); \ + } \ + } while (0) + +#define NAPI_CALL_INTO_MODULE_THROW(env, call) \ + NAPI_CALL_INTO_MODULE((env), call, (env)->isolate->ThrowException) + namespace { namespace v8impl { @@ -225,6 +272,18 @@ V8EscapableHandleScopeFromJsEscapableHandleScope( return reinterpret_cast(s); } +static +napi_callback_scope JsCallbackScopeFromV8CallbackScope( + node::CallbackScope* s) { + return reinterpret_cast(s); +} + +static +node::CallbackScope* V8CallbackScopeFromJsCallbackScope( + napi_callback_scope s) { + return reinterpret_cast(s); +} + //=== Conversion between V8 Handles and napi_value ======================== // This asserts v8::Local<> will always be implemented with a single @@ -254,6 +313,13 @@ v8::Local V8LocalValueFromJsValue(napi_value v) { return local; } +static inline void trigger_fatal_exception( + napi_env env, v8::Local local_err) { + v8::Local local_msg = + v8::Exception::CreateMessage(env->isolate, local_err); + node::FatalException(env->isolate, local_err, local_msg); +} + static inline napi_status V8NameFromPropertyDescriptor(napi_env env, const napi_property_descriptor* p, v8::Local* result) { @@ -303,10 +369,11 @@ class Finalizer { static void FinalizeBufferCallback(char* data, void* hint) { Finalizer* finalizer = static_cast(hint); if (finalizer->_finalize_callback != nullptr) { - finalizer->_finalize_callback( - finalizer->_env, - data, - finalizer->_finalize_hint); + NAPI_CALL_INTO_MODULE_THROW(finalizer->_env, + finalizer->_finalize_callback( + finalizer->_env, + data, + finalizer->_finalize_hint)); } Delete(finalizer); @@ -351,6 +418,10 @@ class Reference : private Finalizer { } public: + void* Data() { + return _finalize_data; + } + static Reference* New(napi_env env, v8::Local value, uint32_t initial_refcount, @@ -412,12 +483,14 @@ class Reference : private Finalizer { // Check before calling the finalize callback, because the callback might // delete it. 
bool delete_self = reference->_delete_self; + napi_env env = reference->_env; if (reference->_finalize_callback != nullptr) { - reference->_finalize_callback( - reference->_env, - reference->_finalize_data, - reference->_finalize_hint); + NAPI_CALL_INTO_MODULE_THROW(env, + reference->_finalize_callback( + reference->_env, + reference->_finalize_data, + reference->_finalize_hint)); } if (delete_self) { @@ -502,30 +575,17 @@ class CallbackWrapperBase : public CallbackWrapper { napi_callback cb = reinterpret_cast( v8::Local::Cast( _cbdata->GetInternalField(kInternalFieldIndex))->Value()); - v8::Isolate* isolate = _cbinfo.GetIsolate(); napi_env env = static_cast( v8::Local::Cast( _cbdata->GetInternalField(kEnvIndex))->Value()); - // Make sure any errors encountered last time we were in N-API are gone. - napi_clear_last_error(env); - - int open_handle_scopes = env->open_handle_scopes; - - napi_value result = cb(env, cbinfo_wrapper); + napi_value result; + NAPI_CALL_INTO_MODULE_THROW(env, result = cb(env, cbinfo_wrapper)); if (result != nullptr) { this->SetReturnValue(result); } - - CHECK_EQ(env->open_handle_scopes, open_handle_scopes); - - if (!env->last_exception.IsEmpty()) { - isolate->ThrowException( - v8::Local::New(isolate, env->last_exception)); - env->last_exception.Reset(); - } } const Info& _cbinfo; @@ -710,45 +770,6 @@ v8::Local CreateAccessorCallbackData(napi_env env, return cbdata; } -int kWrapperFields = 3; - -// Pointer used to identify items wrapped by N-API. Used by FindWrapper and -// napi_wrap(). -const char napi_wrap_name[] = "N-API Wrapper"; - -// Search the object's prototype chain for the wrapper object. Usually the -// wrapper would be the first in the chain, but it is OK for other objects to -// be inserted in the prototype chain. -static -bool FindWrapper(v8::Local obj, - v8::Local* result = nullptr, - v8::Local* parent = nullptr) { - v8::Local wrapper = obj; - - do { - v8::Local proto = wrapper->GetPrototype(); - if (proto.IsEmpty() || !proto->IsObject()) { - return false; - } - if (parent != nullptr) { - *parent = wrapper; - } - wrapper = proto.As(); - if (wrapper->InternalFieldCount() == kWrapperFields) { - v8::Local external = wrapper->GetInternalField(1); - if (external->IsExternal() && - external.As()->Value() == v8impl::napi_wrap_name) { - break; - } - } - } while (true); - - if (result != nullptr) { - *result = wrapper; - } - return true; -} - static void DeleteEnv(napi_env env, void* data, void* hint) { delete env; } @@ -765,11 +786,8 @@ napi_env GetEnv(v8::Local context) { // because we need to stop hard if either of them is empty. // // Re https://github.com/nodejs/node/pull/14217#discussion_r128775149 - auto key = v8::Private::ForApi(isolate, - v8::String::NewFromOneByte(isolate, - reinterpret_cast("N-API Environment"), - v8::NewStringType::kInternalized).ToLocalChecked()); - auto value = global->GetPrivate(context, key).ToLocalChecked(); + auto value = global->GetPrivate(context, NAPI_PRIVATE_KEY(context, env)) + .ToLocalChecked(); if (value->IsExternal()) { result = static_cast(value.As()->Value()); @@ -779,7 +797,8 @@ napi_env GetEnv(v8::Local context) { // We must also stop hard if the result of assigning the env to the global // is either nothing or false. - CHECK(global->SetPrivate(context, key, external).FromJust()); + CHECK(global->SetPrivate(context, NAPI_PRIVATE_KEY(context, env), external) + .FromJust()); // Create a self-destructing reference to external that will get rid of the // napi_env when external goes out of scope. 
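// --- Illustrative aside (not part of the patch) -----------------------------
// Addon-side sketch of the napi_wrap()/napi_unwrap() semantics that the
// private-property-based wrapper storage in this patch must preserve.
// `Counter` and `AttachCounter` are hypothetical names; error checking is
// omitted for brevity.
#include <node_api.h>

struct Counter { int value = 0; };

static void FinalizeCounter(napi_env env, void* data, void* hint) {
  delete static_cast<Counter*>(data);  // runs when the JS object is collected
}

static napi_value AttachCounter(napi_env env, napi_callback_info info) {
  size_t argc = 1;
  napi_value js_obj;
  napi_get_cb_info(env, info, &argc, &js_obj, nullptr, nullptr);

  // The first wrap succeeds; a second napi_wrap() on the same object is
  // expected to fail with napi_invalid_arg because the private key is set.
  napi_wrap(env, js_obj, new Counter(), FinalizeCounter, nullptr, nullptr);

  Counter* counter = nullptr;
  napi_unwrap(env, js_obj, reinterpret_cast<void**>(&counter));  // keeps wrap
  counter->value++;
  return js_obj;
}
// --- end aside ---------------------------------------------------------------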
@@ -789,28 +808,46 @@ napi_env GetEnv(v8::Local context) { return result; } +enum UnwrapAction { + KeepWrap, + RemoveWrap +}; + static napi_status Unwrap(napi_env env, napi_value js_object, void** result, - v8::Local* wrapper, - v8::Local* parent = nullptr) { + UnwrapAction action) { + NAPI_PREAMBLE(env); CHECK_ARG(env, js_object); - CHECK_ARG(env, result); + if (action == KeepWrap) { + CHECK_ARG(env, result); + } + + v8::Isolate* isolate = env->isolate; + v8::Local context = isolate->GetCurrentContext(); v8::Local value = v8impl::V8LocalValueFromJsValue(js_object); RETURN_STATUS_IF_FALSE(env, value->IsObject(), napi_invalid_arg); v8::Local obj = value.As(); - RETURN_STATUS_IF_FALSE( - env, v8impl::FindWrapper(obj, wrapper, parent), napi_invalid_arg); + auto val = obj->GetPrivate(context, NAPI_PRIVATE_KEY(context, wrapper)) + .ToLocalChecked(); + RETURN_STATUS_IF_FALSE(env, val->IsExternal(), napi_invalid_arg); + Reference* reference = + static_cast(val.As()->Value()); - v8::Local unwrappedValue = (*wrapper)->GetInternalField(0); - RETURN_STATUS_IF_FALSE(env, unwrappedValue->IsExternal(), napi_invalid_arg); + if (result) { + *result = reference->Data(); + } - *result = unwrappedValue.As()->Value(); + if (action == RemoveWrap) { + CHECK(obj->DeletePrivate(context, NAPI_PRIVATE_KEY(context, wrapper)) + .FromJust()); + Reference::Delete(reference); + } - return napi_ok; + return GET_RETURN_STATUS(env); } static @@ -856,8 +893,10 @@ void napi_module_register_cb(v8::Local exports, // one is found. napi_env env = v8impl::GetEnv(context); - napi_value _exports = - mod->nm_register_func(env, v8impl::JsValueFromV8LocalValue(exports)); + napi_value _exports; + NAPI_CALL_INTO_MODULE_THROW(env, + _exports = mod->nm_register_func(env, + v8impl::JsValueFromV8LocalValue(exports))); // If register function returned a non-null exports object different from // the exports object we passed it, set that as the "exports" property of @@ -902,7 +941,8 @@ const char* error_messages[] = {nullptr, "An exception is pending", "The async work item was cancelled", "napi_escape_handle already called on scope", - "Invalid handle scope usage"}; + "Invalid handle scope usage", + "Invalid callback scope usage"}; static inline napi_status napi_clear_last_error(napi_env env) { env->last_error.error_code = napi_ok; @@ -933,9 +973,9 @@ napi_status napi_get_last_error_info(napi_env env, // We don't have a napi_status_last as this would result in an ABI // change each time a message was added. 
static_assert( - node::arraysize(error_messages) == napi_handle_scope_mismatch + 1, + node::arraysize(error_messages) == napi_callback_scope_mismatch + 1, "Count of error messages must match count of error values"); - CHECK_LE(env->last_error.error_code, napi_handle_scope_mismatch); + CHECK_LE(env->last_error.error_code, napi_callback_scope_mismatch); // Wait until someone requests the last error information to fetch the error // message string @@ -946,6 +986,16 @@ napi_status napi_get_last_error_info(napi_env env, return napi_ok; } +napi_status napi_fatal_exception(napi_env env, napi_value err) { + NAPI_PREAMBLE(env); + CHECK_ARG(env, err); + + v8::Local local_err = v8impl::V8LocalValueFromJsValue(err); + v8impl::trigger_fatal_exception(env, local_err); + + return napi_clear_last_error(env); +} + NAPI_NO_RETURN void napi_fatal_error(const char* location, size_t location_len, const char* message, @@ -2147,15 +2197,16 @@ napi_status napi_get_value_int64(napi_env env, RETURN_STATUS_IF_FALSE(env, val->IsNumber(), napi_number_expected); - // v8::Value::IntegerValue() converts NaN to INT64_MIN, inconsistent with - // v8::Value::Int32Value() that converts NaN to 0. So special-case NaN here. + // v8::Value::IntegerValue() converts NaN, +Inf, and -Inf to INT64_MIN, + // inconsistent with v8::Value::Int32Value() which converts those values to 0. + // Special-case all non-finite values to match that behavior. double doubleValue = val.As()->Value(); - if (std::isnan(doubleValue)) { - *result = 0; - } else { + if (std::isfinite(doubleValue)) { // Empty context: https://github.com/nodejs/node/issues/14379 v8::Local context; *result = val->IntegerValue(context).FromJust(); + } else { + *result = 0; } return napi_clear_last_error(env); @@ -2369,26 +2420,9 @@ napi_status napi_wrap(napi_env env, v8::Local obj = value.As(); // If we've already wrapped this object, we error out. - RETURN_STATUS_IF_FALSE(env, !v8impl::FindWrapper(obj), napi_invalid_arg); - - // Create a wrapper object with an internal field to hold the wrapped pointer - // and a second internal field to identify the owner as N-API. - v8::Local wrapper_template; - ENV_OBJECT_TEMPLATE(env, wrap, wrapper_template, v8impl::kWrapperFields); - - auto maybe_object = wrapper_template->NewInstance(context); - CHECK_MAYBE_EMPTY(env, maybe_object, napi_generic_failure); - v8::Local wrapper = maybe_object.ToLocalChecked(); - - // Store the pointer as an external in the wrapper. - wrapper->SetInternalField(0, v8::External::New(isolate, native_object)); - wrapper->SetInternalField(1, v8::External::New(isolate, - reinterpret_cast(const_cast(v8impl::napi_wrap_name)))); - - // Insert the wrapper into the object's prototype chain. - v8::Local proto = obj->GetPrototype(); - CHECK(wrapper->SetPrototype(context, proto).FromJust()); - CHECK(obj->SetPrototype(context, wrapper).FromJust()); + RETURN_STATUS_IF_FALSE(env, + !obj->HasPrivate(context, NAPI_PRIVATE_KEY(context, wrapper)).FromJust(), + napi_invalid_arg); v8impl::Reference* reference = nullptr; if (result != nullptr) { @@ -2400,52 +2434,24 @@ napi_status napi_wrap(napi_env env, reference = v8impl::Reference::New( env, obj, 0, false, finalize_cb, native_object, finalize_hint); *result = reinterpret_cast(reference); - } else if (finalize_cb != nullptr) { - // Create a self-deleting reference just for the finalize callback. - reference = v8impl::Reference::New( - env, obj, 0, true, finalize_cb, native_object, finalize_hint); + } else { + // Create a self-deleting reference. 
+ reference = v8impl::Reference::New(env, obj, 0, true, finalize_cb, + native_object, finalize_cb == nullptr ? nullptr : finalize_hint); } - if (reference != nullptr) { - wrapper->SetInternalField(2, v8::External::New(isolate, reference)); - } + CHECK(obj->SetPrivate(context, NAPI_PRIVATE_KEY(context, wrapper), + v8::External::New(isolate, reference)).FromJust()); return GET_RETURN_STATUS(env); } napi_status napi_unwrap(napi_env env, napi_value obj, void** result) { - // Omit NAPI_PREAMBLE and GET_RETURN_STATUS because V8 calls here cannot throw - // JS exceptions. - CHECK_ENV(env); - v8::Local wrapper; - return napi_set_last_error(env, v8impl::Unwrap(env, obj, result, &wrapper)); + return v8impl::Unwrap(env, obj, result, v8impl::KeepWrap); } napi_status napi_remove_wrap(napi_env env, napi_value obj, void** result) { - NAPI_PREAMBLE(env); - v8::Local wrapper; - v8::Local parent; - napi_status status = v8impl::Unwrap(env, obj, result, &wrapper, &parent); - if (status != napi_ok) { - return napi_set_last_error(env, status); - } - - v8::Local external = wrapper->GetInternalField(2); - if (external->IsExternal()) { - v8impl::Reference::Delete( - static_cast(external.As()->Value())); - } - - if (!parent.IsEmpty()) { - v8::Maybe maybe = parent->SetPrototype( - env->isolate->GetCurrentContext(), wrapper->GetPrototype()); - CHECK_MAYBE_NOTHING(env, maybe, napi_generic_failure); - if (!maybe.FromMaybe(false)) { - return napi_set_last_error(env, napi_generic_failure); - } - } - - return GET_RETURN_STATUS(env); + return v8impl::Unwrap(env, obj, result, v8impl::RemoveWrap); } napi_status napi_create_external(napi_env env, @@ -2669,6 +2675,46 @@ napi_status napi_escape_handle(napi_env env, return napi_set_last_error(env, napi_escape_called_twice); } +napi_status napi_open_callback_scope(napi_env env, + napi_value resource_object, + napi_async_context async_context_handle, + napi_callback_scope* result) { + // Omit NAPI_PREAMBLE and GET_RETURN_STATUS because V8 calls here cannot throw + // JS exceptions. + CHECK_ENV(env); + CHECK_ARG(env, result); + + v8::Local context = env->isolate->GetCurrentContext(); + + node::async_context* node_async_context = + reinterpret_cast(async_context_handle); + + v8::Local resource; + CHECK_TO_OBJECT(env, context, resource, resource_object); + + *result = v8impl::JsCallbackScopeFromV8CallbackScope( + new node::CallbackScope(env->isolate, + resource, + *node_async_context)); + + env->open_callback_scopes++; + return napi_clear_last_error(env); +} + +napi_status napi_close_callback_scope(napi_env env, napi_callback_scope scope) { + // Omit NAPI_PREAMBLE and GET_RETURN_STATUS because V8 calls here cannot throw + // JS exceptions. 
+ CHECK_ENV(env); + CHECK_ARG(env, scope); + if (env->open_callback_scopes == 0) { + return napi_callback_scope_mismatch; + } + + env->open_callback_scopes--; + delete v8impl::V8CallbackScopeFromJsCallbackScope(scope); + return napi_clear_last_error(env); +} + napi_status napi_new_instance(napi_env env, napi_value constructor, size_t argc, @@ -2770,6 +2816,8 @@ napi_status napi_async_destroy(napi_env env, reinterpret_cast(async_context); node::EmitAsyncDestroy(isolate, *node_async_context); + delete node_async_context; + return napi_clear_last_error(env); } @@ -2806,11 +2854,15 @@ napi_status napi_make_callback(napi_env env, isolate, v8recv, v8func, argc, reinterpret_cast*>(const_cast(argv)), *node_async_context); - CHECK_MAYBE_EMPTY(env, callback_result, napi_generic_failure); - if (result != nullptr) { - *result = v8impl::JsValueFromV8LocalValue( - callback_result.ToLocalChecked()); + if (try_catch.HasCaught()) { + return napi_set_last_error(env, napi_pending_exception); + } else { + CHECK_MAYBE_EMPTY(env, callback_result, napi_generic_failure); + if (result != nullptr) { + *result = v8impl::JsValueFromV8LocalValue( + callback_result.ToLocalChecked()); + } } return GET_RETURN_STATUS(env); @@ -3063,31 +3115,40 @@ napi_status napi_create_typedarray(napi_env env, switch (type) { case napi_int8_array: - typedArray = v8::Int8Array::New(buffer, byte_offset, length); + CREATE_TYPED_ARRAY( + env, Int8Array, 1, buffer, byte_offset, length, typedArray); break; case napi_uint8_array: - typedArray = v8::Uint8Array::New(buffer, byte_offset, length); + CREATE_TYPED_ARRAY( + env, Uint8Array, 1, buffer, byte_offset, length, typedArray); break; case napi_uint8_clamped_array: - typedArray = v8::Uint8ClampedArray::New(buffer, byte_offset, length); + CREATE_TYPED_ARRAY( + env, Uint8ClampedArray, 1, buffer, byte_offset, length, typedArray); break; case napi_int16_array: - typedArray = v8::Int16Array::New(buffer, byte_offset, length); + CREATE_TYPED_ARRAY( + env, Int16Array, 2, buffer, byte_offset, length, typedArray); break; case napi_uint16_array: - typedArray = v8::Uint16Array::New(buffer, byte_offset, length); + CREATE_TYPED_ARRAY( + env, Uint16Array, 2, buffer, byte_offset, length, typedArray); break; case napi_int32_array: - typedArray = v8::Int32Array::New(buffer, byte_offset, length); + CREATE_TYPED_ARRAY( + env, Int32Array, 4, buffer, byte_offset, length, typedArray); break; case napi_uint32_array: - typedArray = v8::Uint32Array::New(buffer, byte_offset, length); + CREATE_TYPED_ARRAY( + env, Uint32Array, 4, buffer, byte_offset, length, typedArray); break; case napi_float32_array: - typedArray = v8::Float32Array::New(buffer, byte_offset, length); + CREATE_TYPED_ARRAY( + env, Float32Array, 4, buffer, byte_offset, length, typedArray); break; case napi_float64_array: - typedArray = v8::Float64Array::New(buffer, byte_offset, length); + CREATE_TYPED_ARRAY( + env, Float64Array, 8, buffer, byte_offset, length, typedArray); break; default: return napi_set_last_error(env, napi_invalid_arg); @@ -3168,6 +3229,14 @@ napi_status napi_create_dataview(napi_env env, RETURN_STATUS_IF_FALSE(env, value->IsArrayBuffer(), napi_invalid_arg); v8::Local buffer = value.As(); + if (byte_length + byte_offset > buffer->ByteLength()) { + napi_throw_range_error( + env, + "ERR_NAPI_INVALID_DATAVIEW_ARGS", + "byte_offset + byte_length should be less than or " + "equal to the size in bytes of the array passed in"); + return napi_set_last_error(env, napi_pending_exception); + } v8::Local DataView = v8::DataView::New(buffer, 
byte_offset, byte_length); @@ -3323,20 +3392,17 @@ class Work : public node::AsyncResource { v8::HandleScope scope(env->isolate); CallbackScope callback_scope(work); - work->_complete(env, ConvertUVErrorCode(status), work->_data); + NAPI_CALL_INTO_MODULE(env, + work->_complete(env, ConvertUVErrorCode(status), work->_data), + [env] (v8::Local local_err) { + // If there was an unhandled exception in the complete callback, + // report it as a fatal exception. (There is no JavaScript on the + // callstack that can possibly handle it.) + v8impl::trigger_fatal_exception(env, local_err); + }); // Note: Don't access `work` after this point because it was // likely deleted by the complete callback. - - // If there was an unhandled exception in the complete callback, - // report it as a fatal exception. (There is no JavaScript on the - // callstack that can possibly handle it.) - if (!env->last_exception.IsEmpty()) { - v8::TryCatch try_catch(env->isolate); - env->isolate->ThrowException( - v8::Local::New(env->isolate, env->last_exception)); - node::FatalException(env->isolate, try_catch); - } } } diff --git a/src/node_api.h b/src/node_api.h index ee0ad3518e13aa..627e56118011e6 100644 --- a/src/node_api.h +++ b/src/node_api.h @@ -112,6 +112,8 @@ NAPI_EXTERN napi_status napi_get_last_error_info(napi_env env, const napi_extended_error_info** result); +NAPI_EXTERN napi_status napi_fatal_exception(napi_env env, napi_value err); + NAPI_EXTERN NAPI_NO_RETURN void napi_fatal_error(const char* location, size_t location_len, const char* message, @@ -424,6 +426,14 @@ NAPI_EXTERN napi_status napi_escape_handle(napi_env env, napi_value escapee, napi_value* result); +NAPI_EXTERN napi_status napi_open_callback_scope(napi_env env, + napi_value resource_object, + napi_async_context context, + napi_callback_scope* result); + +NAPI_EXTERN napi_status napi_close_callback_scope(napi_env env, + napi_callback_scope scope); + // Methods to support error handling NAPI_EXTERN napi_status napi_throw(napi_env env, napi_value error); NAPI_EXTERN napi_status napi_throw_error(napi_env env, diff --git a/src/node_api_types.h b/src/node_api_types.h index 230c1f4ae3446f..76f38802e83e2e 100644 --- a/src/node_api_types.h +++ b/src/node_api_types.h @@ -15,6 +15,7 @@ typedef struct napi_value__ *napi_value; typedef struct napi_ref__ *napi_ref; typedef struct napi_handle_scope__ *napi_handle_scope; typedef struct napi_escapable_handle_scope__ *napi_escapable_handle_scope; +typedef struct napi_callback_scope__ *napi_callback_scope; typedef struct napi_callback_info__ *napi_callback_info; typedef struct napi_async_context__ *napi_async_context; typedef struct napi_async_work__ *napi_async_work; @@ -70,7 +71,8 @@ typedef enum { napi_pending_exception, napi_cancelled, napi_escape_called_twice, - napi_handle_scope_mismatch + napi_handle_scope_mismatch, + napi_callback_scope_mismatch } napi_status; typedef napi_value (*napi_callback)(napi_env env, diff --git a/src/node_buffer.cc b/src/node_buffer.cc index 66ae9feb697a32..0c68e04e5a7b18 100644 --- a/src/node_buffer.cc +++ b/src/node_buffer.cc @@ -303,15 +303,14 @@ MaybeLocal New(Environment* env, size_t length) { data, length, ArrayBufferCreationMode::kInternalized); - Local ui = Uint8Array::New(ab, 0, length); - Maybe mb = - ui->SetPrototype(env->context(), env->buffer_prototype_object()); - if (mb.FromMaybe(false)) - return scope.Escape(ui); + MaybeLocal ui = Buffer::New(env, ab, 0, length); - // Object failed to be created. Clean up resources. 
- free(data); - return Local(); + if (ui.IsEmpty()) { + // Object failed to be created. Clean up resources. + free(data); + } + + return scope.Escape(ui.FromMaybe(Local())); } @@ -349,15 +348,14 @@ MaybeLocal Copy(Environment* env, const char* data, size_t length) { new_data, length, ArrayBufferCreationMode::kInternalized); - Local ui = Uint8Array::New(ab, 0, length); - Maybe mb = - ui->SetPrototype(env->context(), env->buffer_prototype_object()); - if (mb.FromMaybe(false)) - return scope.Escape(ui); + MaybeLocal ui = Buffer::New(env, ab, 0, length); - // Object failed to be created. Clean up resources. - free(new_data); - return Local(); + if (ui.IsEmpty()) { + // Object failed to be created. Clean up resources. + free(new_data); + } + + return scope.Escape(ui.FromMaybe(Local())); } @@ -392,15 +390,14 @@ MaybeLocal New(Environment* env, // correct. if (data == nullptr) ab->Neuter(); - Local ui = Uint8Array::New(ab, 0, length); - Maybe mb = - ui->SetPrototype(env->context(), env->buffer_prototype_object()); + MaybeLocal ui = Buffer::New(env, ab, 0, length); - if (!mb.FromMaybe(false)) + if (ui.IsEmpty()) { return Local(); + } CallbackInfo::New(env->isolate(), ab, callback, data, hint); - return scope.Escape(ui); + return scope.Escape(ui.ToLocalChecked()); } @@ -415,8 +412,6 @@ MaybeLocal New(Isolate* isolate, char* data, size_t length) { MaybeLocal New(Environment* env, char* data, size_t length) { - EscapableHandleScope scope(env->isolate()); - if (length > 0) { CHECK_NE(data, nullptr); CHECK(length <= kMaxLength); @@ -427,12 +422,7 @@ MaybeLocal New(Environment* env, char* data, size_t length) { data, length, ArrayBufferCreationMode::kInternalized); - Local ui = Uint8Array::New(ab, 0, length); - Maybe mb = - ui->SetPrototype(env->context(), env->buffer_prototype_object()); - if (mb.FromMaybe(false)) - return scope.Escape(ui); - return Local(); + return Buffer::New(env, ab, 0, length).FromMaybe(Local()); } namespace { diff --git a/src/node_crypto.cc b/src/node_crypto.cc index 685e7bb73b5a5c..615e281a313812 100644 --- a/src/node_crypto.cc +++ b/src/node_crypto.cc @@ -79,7 +79,6 @@ static const int X509_NAME_FLAGS = ASN1_STRFLGS_ESC_CTRL namespace node { namespace crypto { -using v8::AccessorSignature; using v8::Array; using v8::Boolean; using v8::Context; @@ -102,8 +101,8 @@ using v8::Object; using v8::ObjectTemplate; using v8::Persistent; using v8::PropertyAttribute; -using v8::PropertyCallbackInfo; using v8::ReadOnly; +using v8::Signature; using v8::String; using v8::Value; @@ -481,14 +480,18 @@ void SecureContext::Initialize(Environment* env, Local target) { t->Set(FIXED_ONE_BYTE_STRING(env->isolate(), "kTicketKeyIVIndex"), Integer::NewFromUnsigned(env->isolate(), kTicketKeyIVIndex)); - t->PrototypeTemplate()->SetAccessor( + Local ctx_getter_templ = + FunctionTemplate::New(env->isolate(), + CtxGetter, + env->as_external(), + Signature::New(env->isolate(), t)); + + + t->PrototypeTemplate()->SetAccessorProperty( FIXED_ONE_BYTE_STRING(env->isolate(), "_external"), - CtxGetter, - nullptr, - env->as_external(), - DEFAULT, - static_cast(ReadOnly | DontDelete), - AccessorSignature::New(env->isolate(), t)); + ctx_getter_templ, + Local(), + static_cast(ReadOnly | DontDelete)); target->Set(secureContextString, t->GetFunction()); env->set_secure_context_constructor_template(t); @@ -1457,8 +1460,7 @@ int SecureContext::TicketCompatibilityCallback(SSL* ssl, #endif -void SecureContext::CtxGetter(Local property, - const PropertyCallbackInfo& info) { +void SecureContext::CtxGetter(const 
FunctionCallbackInfo& info) { SecureContext* sc; ASSIGN_OR_RETURN_UNWRAP(&sc, info.This()); Local ext = External::New(info.GetIsolate(), sc->ctx_); @@ -1528,14 +1530,17 @@ void SSLWrap::AddMethods(Environment* env, Local t) { env->SetProtoMethod(t, "getALPNNegotiatedProtocol", GetALPNNegotiatedProto); env->SetProtoMethod(t, "setALPNProtocols", SetALPNProtocols); - t->PrototypeTemplate()->SetAccessor( + Local ssl_getter_templ = + FunctionTemplate::New(env->isolate(), + SSLGetter, + env->as_external(), + Signature::New(env->isolate(), t)); + + t->PrototypeTemplate()->SetAccessorProperty( FIXED_ONE_BYTE_STRING(env->isolate(), "_external"), - SSLGetter, - nullptr, - env->as_external(), - DEFAULT, - static_cast(ReadOnly | DontDelete), - AccessorSignature::New(env->isolate(), t)); + ssl_getter_templ, + Local(), + static_cast(ReadOnly | DontDelete)); } @@ -2696,8 +2701,7 @@ void SSLWrap::CertCbDone(const FunctionCallbackInfo& args) { template -void SSLWrap::SSLGetter(Local property, - const PropertyCallbackInfo& info) { +void SSLWrap::SSLGetter(const FunctionCallbackInfo& info) { Base* base; ASSIGN_OR_RETURN_UNWRAP(&base, info.This()); SSL* ssl = base->ssl_; @@ -3449,7 +3453,7 @@ void CipherBase::Init(const char* cipher_type, nullptr, reinterpret_cast(key), reinterpret_cast(iv), - kind_ == kCipher); + encrypt); } @@ -3518,7 +3522,7 @@ void CipherBase::InitIv(const char* cipher_type, nullptr, reinterpret_cast(key), reinterpret_cast(iv), - kind_ == kCipher); + encrypt); } @@ -4694,14 +4698,17 @@ void DiffieHellman::Initialize(Environment* env, Local target) { env->SetProtoMethod(t, "setPublicKey", SetPublicKey); env->SetProtoMethod(t, "setPrivateKey", SetPrivateKey); - t->InstanceTemplate()->SetAccessor( + Local verify_error_getter_templ = + FunctionTemplate::New(env->isolate(), + DiffieHellman::VerifyErrorGetter, + env->as_external(), + Signature::New(env->isolate(), t)); + + t->InstanceTemplate()->SetAccessorProperty( env->verify_error_string(), - DiffieHellman::VerifyErrorGetter, - nullptr, - env->as_external(), - DEFAULT, - attributes, - AccessorSignature::New(env->isolate(), t)); + verify_error_getter_templ, + Local(), + attributes); target->Set(FIXED_ONE_BYTE_STRING(env->isolate(), "DiffieHellman"), t->GetFunction()); @@ -4716,14 +4723,17 @@ void DiffieHellman::Initialize(Environment* env, Local target) { env->SetProtoMethod(t2, "getPublicKey", GetPublicKey); env->SetProtoMethod(t2, "getPrivateKey", GetPrivateKey); - t2->InstanceTemplate()->SetAccessor( + Local verify_error_getter_templ2 = + FunctionTemplate::New(env->isolate(), + DiffieHellman::VerifyErrorGetter, + env->as_external(), + Signature::New(env->isolate(), t2)); + + t2->InstanceTemplate()->SetAccessorProperty( env->verify_error_string(), - DiffieHellman::VerifyErrorGetter, - nullptr, - env->as_external(), - DEFAULT, - attributes, - AccessorSignature::New(env->isolate(), t2)); + verify_error_getter_templ2, + Local(), + attributes); target->Set(FIXED_ONE_BYTE_STRING(env->isolate(), "DiffieHellmanGroup"), t2->GetFunction()); @@ -5037,8 +5047,7 @@ void DiffieHellman::SetPrivateKey(const FunctionCallbackInfo& args) { } -void DiffieHellman::VerifyErrorGetter(Local property, - const PropertyCallbackInfo& args) { +void DiffieHellman::VerifyErrorGetter(const FunctionCallbackInfo& args) { HandleScope scope(args.GetIsolate()); DiffieHellman* diffieHellman; @@ -5421,7 +5430,7 @@ void PBKDF2Request::Work(uv_work_t* work_req) { void PBKDF2Request::After(Local (*argv)[2]) { if (success_) { - (*argv)[0] = Undefined(env()->isolate()); + (*argv)[0] 
= Null(env()->isolate()); (*argv)[1] = Buffer::New(env(), key_, keylen_).ToLocalChecked(); key_ = nullptr; keylen_ = 0; diff --git a/src/node_crypto.h b/src/node_crypto.h index 41261910b94018..79e358aebe2ac3 100644 --- a/src/node_crypto.h +++ b/src/node_crypto.h @@ -141,8 +141,7 @@ class SecureContext : public BaseObject { const v8::FunctionCallbackInfo& args); static void EnableTicketKeyCallback( const v8::FunctionCallbackInfo& args); - static void CtxGetter(v8::Local property, - const v8::PropertyCallbackInfo& info); + static void CtxGetter(const v8::FunctionCallbackInfo& info); template static void GetCertificate(const v8::FunctionCallbackInfo& args); @@ -322,8 +321,7 @@ class SSLWrap { void* arg); static int TLSExtStatusCallback(SSL* s, void* arg); static int SSLCertCallback(SSL* s, void* arg); - static void SSLGetter(v8::Local property, - const v8::PropertyCallbackInfo& info); + static void SSLGetter(const v8::FunctionCallbackInfo& info); void DestroySSL(); void WaitForCertCb(CertCb cb, void* arg); @@ -689,8 +687,7 @@ class DiffieHellman : public BaseObject { static void SetPublicKey(const v8::FunctionCallbackInfo& args); static void SetPrivateKey(const v8::FunctionCallbackInfo& args); static void VerifyErrorGetter( - v8::Local property, - const v8::PropertyCallbackInfo& args); + const v8::FunctionCallbackInfo& args); DiffieHellman(Environment* env, v8::Local wrap) : BaseObject(env, wrap), diff --git a/src/node_file.cc b/src/node_file.cc index 9de7fe0e378642..39cce2ea6bdd64 100644 --- a/src/node_file.cc +++ b/src/node_file.cc @@ -951,14 +951,20 @@ static void ReadDir(const FunctionCallbackInfo& args) { name_v[name_idx++] = filename.ToLocalChecked(); if (name_idx >= arraysize(name_v)) { - fn->Call(env->context(), names, name_idx, name_v) - .ToLocalChecked(); + MaybeLocal ret = fn->Call(env->context(), names, name_idx, + name_v); + if (ret.IsEmpty()) { + return; + } name_idx = 0; } } if (name_idx > 0) { - fn->Call(env->context(), names, name_idx, name_v).ToLocalChecked(); + MaybeLocal ret = fn->Call(env->context(), names, name_idx, name_v); + if (ret.IsEmpty()) { + return; + } } args.GetReturnValue().Set(names); diff --git a/src/node_http2.cc b/src/node_http2.cc index 89d68de88f8cfe..b31878582301ed 100644 --- a/src/node_http2.cc +++ b/src/node_http2.cc @@ -3,12 +3,13 @@ #include "node_buffer.h" #include "node_http2.h" #include "node_http2_state.h" +#include "node_perf.h" -#include #include namespace node { +using v8::ArrayBuffer; using v8::Boolean; using v8::Context; using v8::Float64Array; @@ -21,18 +22,93 @@ using v8::Uint32; using v8::Uint32Array; using v8::Undefined; +using node::performance::PerformanceEntry; namespace http2 { +namespace { + +const char zero_bytes_256[256] = {}; + +inline Http2Stream* GetStream(Http2Session* session, + int32_t id, + nghttp2_data_source* source) { + Http2Stream* stream = static_cast(source->ptr); + if (stream == nullptr) + stream = session->FindStream(id); + CHECK_NE(stream, nullptr); + CHECK_EQ(id, stream->id()); + return stream; +} + +} // anonymous namespace + +// These configure the callbacks required by nghttp2 itself. There are +// two sets of callback functions, one that is used if a padding callback +// is set, and other that does not include the padding callback. const Http2Session::Callbacks Http2Session::callback_struct_saved[2] = { Callbacks(false), Callbacks(true)}; +// The Http2Scope object is used to queue a write to the i/o stream. 
It is +// used whenever any action is taken on the underlying nghttp2 API that may +// push data into nghttp2 outbound data queue. +// +// For example: +// +// Http2Scope h2scope(session); +// nghttp2_submit_ping(**session, ... ); +// +// When the Http2Scope passes out of scope and is deconstructed, it will +// call Http2Session::MaybeScheduleWrite(). +Http2Scope::Http2Scope(Http2Stream* stream) : Http2Scope(stream->session()) {} + +Http2Scope::Http2Scope(Http2Session* session) { + if (session == nullptr) + return; + if (session->flags_ & (SESSION_STATE_HAS_SCOPE | + SESSION_STATE_WRITE_SCHEDULED)) { + // There is another scope further below on the stack, or it is already + // known that a write is scheduled. In either case, there is nothing to do. + return; + } + session->flags_ |= SESSION_STATE_HAS_SCOPE; + session_ = session; + + // Always keep the session object alive for at least as long as + // this scope is active. + session_handle_ = session->object(); + CHECK(!session_handle_.IsEmpty()); +} + +Http2Scope::~Http2Scope() { + if (session_ == nullptr) + return; + + session_->flags_ &= ~SESSION_STATE_HAS_SCOPE; + session_->MaybeScheduleWrite(); +} + +// The Http2Options object is used during the construction of Http2Session +// instances to configure an appropriate nghttp2_options struct. The class +// uses a single TypedArray instance that is shared with the JavaScript side +// to more efficiently pass values back and forth. Http2Options::Http2Options(Environment* env) { nghttp2_option_new(&options_); + // We manually handle flow control within a session in order to + // implement backpressure -- that is, we only send WINDOW_UPDATE + // frames to the remote peer as data is actually consumed by user + // code. This ensures that the flow of data over the connection + // does not move too quickly and limits the amount of data we + // are required to buffer. nghttp2_option_set_no_auto_window_update(options_, 1); + // Enable built in support for ALTSVC frames. Once we add support for + // other non-built in extension frames, this will need to be handled + // a bit differently. For now, let's let nghttp2 take care of it. + nghttp2_option_set_builtin_recv_extension_type(options_, NGHTTP2_ALTSVC); + AliasedBuffer& buffer = env->http2_state()->options_buffer; uint32_t flags = buffer[IDX_OPTIONS_FLAGS]; @@ -63,6 +139,10 @@ Http2Options::Http2Options(Environment* env) { buffer[IDX_OPTIONS_PEER_MAX_CONCURRENT_STREAMS]); } + // The padding strategy sets the mechanism by which we determine how much + // additional frame padding to apply to DATA and HEADERS frames. Currently + // this is set on a per-session basis, but eventually we may switch to + // a per-stream setting, giving users greater control. if (flags & (1 << IDX_OPTIONS_PADDING_STRATEGY)) { padding_strategy_type strategy = static_cast( @@ -70,27 +150,56 @@ Http2Options::Http2Options(Environment* env) { SetPaddingStrategy(strategy); } + // The max header list pairs option controls the maximum number of + // header pairs the session may accept. This is a hard limit; that is, + // if the remote peer sends more than this amount, the stream will be + // automatically closed with an RST_STREAM. if (flags & (1 << IDX_OPTIONS_MAX_HEADER_LIST_PAIRS)) { SetMaxHeaderPairs(buffer[IDX_OPTIONS_MAX_HEADER_LIST_PAIRS]); } + // The HTTP2 specification places no limits on the number of HTTP2 + // PING frames that can be sent.
In order to prevent PINGS from being + abused as an attack vector, however, we place a strict upper limit + on the number of unacknowledged PINGS that can be sent at any given + time. if (flags & (1 << IDX_OPTIONS_MAX_OUTSTANDING_PINGS)) { SetMaxOutstandingPings(buffer[IDX_OPTIONS_MAX_OUTSTANDING_PINGS]); } -} + // The HTTP2 specification places no limits on the number of HTTP2 + // SETTINGS frames that can be sent. In order to prevent SETTINGS frames from being + // abused as an attack vector, however, we place a strict upper limit + // on the number of unacknowledged SETTINGS that can be sent at any given + // time. + if (flags & (1 << IDX_OPTIONS_MAX_OUTSTANDING_SETTINGS)) { + SetMaxOutstandingSettings(buffer[IDX_OPTIONS_MAX_OUTSTANDING_SETTINGS]); + } -Http2Settings::Http2Settings(Environment* env) : env_(env) { + // The HTTP2 specification places no limits on the amount of memory + // that a session can consume. In order to prevent abuse, we place a + // cap on the amount of memory a session can consume at any given time. + // This is a credit-based system. Existing streams may cause the limit + // to be temporarily exceeded but once over the limit, new streams cannot be + // created. + // Important: The maxSessionMemory option in JavaScript is expressed in + // terms of MB increments (i.e. the value 1 == 1 MB) + if (flags & (1 << IDX_OPTIONS_MAX_SESSION_MEMORY)) { + SetMaxSessionMemory(buffer[IDX_OPTIONS_MAX_SESSION_MEMORY] * 1e6); + } +} + +void Http2Session::Http2Settings::Init() { entries_.AllocateSufficientStorage(IDX_SETTINGS_COUNT); AliasedBuffer& buffer = - env->http2_state()->settings_buffer; + env()->http2_state()->settings_buffer; uint32_t flags = buffer[IDX_SETTINGS_COUNT]; size_t n = 0; if (flags & (1 << IDX_SETTINGS_HEADER_TABLE_SIZE)) { uint32_t val = buffer[IDX_SETTINGS_HEADER_TABLE_SIZE]; - DEBUG_HTTP2("Http2Settings: setting header table size: %d\n", val); + DEBUG_HTTP2SESSION2(session_, "setting header table size: %d\n", val); entries_[n].settings_id = NGHTTP2_SETTINGS_HEADER_TABLE_SIZE; entries_[n].value = val; n++; @@ -98,7 +207,7 @@ Http2Settings::Http2Settings(Environment* env) : env_(env) { if (flags & (1 << IDX_SETTINGS_MAX_CONCURRENT_STREAMS)) { uint32_t val = buffer[IDX_SETTINGS_MAX_CONCURRENT_STREAMS]; - DEBUG_HTTP2("Http2Settings: setting max concurrent streams: %d\n", val); + DEBUG_HTTP2SESSION2(session_, "setting max concurrent streams: %d\n", val); entries_[n].settings_id = NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS; entries_[n].value = val; n++; @@ -106,7 +215,7 @@ Http2Settings::Http2Settings(Environment* env) : env_(env) { if (flags & (1 << IDX_SETTINGS_MAX_FRAME_SIZE)) { uint32_t val = buffer[IDX_SETTINGS_MAX_FRAME_SIZE]; - DEBUG_HTTP2("Http2Settings: setting max frame size: %d\n", val); + DEBUG_HTTP2SESSION2(session_, "setting max frame size: %d\n", val); entries_[n].settings_id = NGHTTP2_SETTINGS_MAX_FRAME_SIZE; entries_[n].value = val; n++; @@ -114,7 +223,7 @@ Http2Settings::Http2Settings(Environment* env) : env_(env) { if (flags & (1 << IDX_SETTINGS_INITIAL_WINDOW_SIZE)) { uint32_t val = buffer[IDX_SETTINGS_INITIAL_WINDOW_SIZE]; - DEBUG_HTTP2("Http2Settings: setting initial window size: %d\n", val); + DEBUG_HTTP2SESSION2(session_, "setting initial window size: %d\n", val); entries_[n].settings_id = NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE; entries_[n].value = val; n++; @@ -122,7 +231,7 @@ Http2Settings::Http2Settings(Environment* env) : env_(env) { if (flags & (1 << IDX_SETTINGS_MAX_HEADER_LIST_SIZE)) { uint32_t val =
buffer[IDX_SETTINGS_MAX_HEADER_LIST_SIZE]; - DEBUG_HTTP2("Http2Settings: setting max header list size: %d\n", val); + DEBUG_HTTP2SESSION2(session_, "setting max header list size: %d\n", val); entries_[n].settings_id = NGHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE; entries_[n].value = val; n++; @@ -130,7 +239,7 @@ Http2Settings::Http2Settings(Environment* env) : env_(env) { if (flags & (1 << IDX_SETTINGS_ENABLE_PUSH)) { uint32_t val = buffer[IDX_SETTINGS_ENABLE_PUSH]; - DEBUG_HTTP2("Http2Settings: setting enable push: %d\n", val); + DEBUG_HTTP2SESSION2(session_, "setting enable push: %d\n", val); entries_[n].settings_id = NGHTTP2_SETTINGS_ENABLE_PUSH; entries_[n].value = val; n++; @@ -139,10 +248,46 @@ Http2Settings::Http2Settings(Environment* env) : env_(env) { count_ = n; } +Http2Session::Http2Settings::Http2Settings( + Environment* env) + : AsyncWrap(env, + env->http2settings_constructor_template() + ->NewInstance(env->context()) + .ToLocalChecked(), + AsyncWrap::PROVIDER_HTTP2SETTINGS), + session_(nullptr), + startTime_(0) { + Init(); +} + +// The Http2Settings class is used to configure a SETTINGS frame that is +// to be sent to the connected peer. The settings are set using a TypedArray +// that is shared with the JavaScript side. +Http2Session::Http2Settings::Http2Settings( + Http2Session* session) + : AsyncWrap(session->env(), + session->env()->http2settings_constructor_template() + ->NewInstance(session->env()->context()) + .ToLocalChecked(), + AsyncWrap::PROVIDER_HTTP2SETTINGS), + session_(session), + startTime_(uv_hrtime()) { + Init(); +} + +Http2Session::Http2Settings::~Http2Settings() { + if (!object().IsEmpty()) + ClearWrap(object()); + persistent().Reset(); + CHECK(persistent().IsEmpty()); +} -inline Local Http2Settings::Pack() { +// Generates a Buffer that contains the serialized payload of a SETTINGS +// frame. This can be used, for instance, to create the Base64-encoded +// content of an Http2-Settings header field. +inline Local Http2Session::Http2Settings::Pack() { const size_t len = count_ * 6; - Local buf = Buffer::New(env_, len).ToLocalChecked(); + Local buf = Buffer::New(env(), len).ToLocalChecked(); ssize_t ret = nghttp2_pack_settings_payload( reinterpret_cast(Buffer::Data(buf)), len, @@ -150,31 +295,32 @@ inline Local Http2Settings::Pack() { if (ret >= 0) return buf; else - return Undefined(env_->isolate()); + return Undefined(env()->isolate()); } - -inline void Http2Settings::Update(Environment* env, - Http2Session* session, - get_setting fn) { +// Updates the shared TypedArray with the current remote or local settings for +// the session. 
+inline void Http2Session::Http2Settings::Update(Environment* env, + Http2Session* session, + get_setting fn) { AliasedBuffer& buffer = env->http2_state()->settings_buffer; buffer[IDX_SETTINGS_HEADER_TABLE_SIZE] = - fn(session->session(), NGHTTP2_SETTINGS_HEADER_TABLE_SIZE); + fn(**session, NGHTTP2_SETTINGS_HEADER_TABLE_SIZE); buffer[IDX_SETTINGS_MAX_CONCURRENT_STREAMS] = - fn(session->session(), NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS); + fn(**session, NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS); buffer[IDX_SETTINGS_INITIAL_WINDOW_SIZE] = - fn(session->session(), NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE); + fn(**session, NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE); buffer[IDX_SETTINGS_MAX_FRAME_SIZE] = - fn(session->session(), NGHTTP2_SETTINGS_MAX_FRAME_SIZE); + fn(**session, NGHTTP2_SETTINGS_MAX_FRAME_SIZE); buffer[IDX_SETTINGS_MAX_HEADER_LIST_SIZE] = - fn(session->session(), NGHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE); + fn(**session, NGHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE); buffer[IDX_SETTINGS_ENABLE_PUSH] = - fn(session->session(), NGHTTP2_SETTINGS_ENABLE_PUSH); + fn(**session, NGHTTP2_SETTINGS_ENABLE_PUSH); } - -inline void Http2Settings::RefreshDefaults(Environment* env) { +// Initializes the shared TypedArray with the default settings values. +inline void Http2Session::Http2Settings::RefreshDefaults(Environment* env) { AliasedBuffer& buffer = env->http2_state()->settings_buffer; @@ -197,6 +343,27 @@ inline void Http2Settings::RefreshDefaults(Environment* env) { } +void Http2Session::Http2Settings::Send() { + Http2Scope h2scope(session_); + CHECK_EQ(nghttp2_submit_settings(**session_, NGHTTP2_FLAG_NONE, + *entries_, length()), 0); +} + +void Http2Session::Http2Settings::Done(bool ack) { + uint64_t end = uv_hrtime(); + double duration = (end - startTime_) / 1e6; + + Local argv[2] = { + Boolean::New(env()->isolate(), ack), + Number::New(env()->isolate(), duration) + }; + MakeCallback(env()->ondone_string(), arraysize(argv), argv); + delete this; +} + +// The Http2Priority class initializes an appropriate nghttp2_priority_spec +// struct used when either creating a stream or updating its priority +// settings. Http2Priority::Http2Priority(Environment* env, Local parent, Local weight, @@ -221,7 +388,8 @@ inline const char* Http2Session::TypeName() { } } - +// The Headers class initializes a proper array of nghttp2_nv structs +// containing the header name value pairs. Headers::Headers(Isolate* isolate, Local context, Local headers) { @@ -279,8 +447,11 @@ Headers::Headers(Isolate* isolate, } +// Sets the various callback functions that nghttp2 will use to notify us +// about significant events while processing http2 stuff. 
Http2Session::Callbacks::Callbacks(bool kHasGetPaddingCallback) { CHECK_EQ(nghttp2_session_callbacks_new(&callbacks), 0); + nghttp2_session_callbacks_set_on_begin_headers_callback( callbacks, OnBeginHeadersCallback); nghttp2_session_callbacks_set_on_header_callback2( @@ -297,6 +468,12 @@ Http2Session::Callbacks::Callbacks(bool kHasGetPaddingCallback) { callbacks, OnInvalidHeader); nghttp2_session_callbacks_set_error_callback( callbacks, OnNghttpError); + nghttp2_session_callbacks_set_send_data_callback( + callbacks, OnSendData); + nghttp2_session_callbacks_set_on_invalid_frame_recv_callback( + callbacks, OnInvalidFrame); + nghttp2_session_callbacks_set_on_frame_send_callback( + callbacks, OnFrameSent); if (kHasGetPaddingCallback) { nghttp2_session_callbacks_set_select_padding_callback( @@ -309,29 +486,32 @@ Http2Session::Callbacks::~Callbacks() { nghttp2_session_callbacks_del(callbacks); } - Http2Session::Http2Session(Environment* env, Local wrap, nghttp2_session_type type) : AsyncWrap(env, wrap, AsyncWrap::PROVIDER_HTTP2SESSION), session_type_(type) { MakeWeak(this); + statistics_.start_time = uv_hrtime(); + // Capture the configuration options for this session Http2Options opts(env); - int32_t maxHeaderPairs = opts.GetMaxHeaderPairs(); + max_session_memory_ = opts.GetMaxSessionMemory(); + + uint32_t maxHeaderPairs = opts.GetMaxHeaderPairs(); max_header_pairs_ = type == NGHTTP2_SESSION_SERVER - ? std::max(maxHeaderPairs, 4) // minimum # of request headers - : std::max(maxHeaderPairs, 1); // minimum # of response headers + ? std::max(maxHeaderPairs, 4U) // minimum # of request headers + : std::max(maxHeaderPairs, 1U); // minimum # of response headers max_outstanding_pings_ = opts.GetMaxOutstandingPings(); + max_outstanding_settings_ = opts.GetMaxOutstandingSettings(); padding_strategy_ = opts.GetPaddingStrategy(); bool hasGetPaddingCallback = - padding_strategy_ == PADDING_STRATEGY_MAX || - padding_strategy_ == PADDING_STRATEGY_CALLBACK; + padding_strategy_ != PADDING_STRATEGY_NONE; nghttp2_session_callbacks* callbacks = callback_struct_saved[hasGetPaddingCallback ? 1 : 0].callbacks; @@ -347,97 +527,210 @@ Http2Session::Http2Session(Environment* env, // fails. CHECK_EQ(fn(&session_, callbacks, this, *opts), 0); - Start(); + outgoing_storage_.reserve(4096); + outgoing_buffers_.reserve(32); } +void Http2Session::Unconsume() { + if (stream_ != nullptr) { + DEBUG_HTTP2SESSION(this, "unconsuming the i/o stream"); + stream_->set_destruct_cb({ nullptr, nullptr }); + stream_->set_alloc_cb({ nullptr, nullptr }); + stream_->set_read_cb({ nullptr, nullptr }); + stream_->Unconsume(); + stream_ = nullptr; + } +} Http2Session::~Http2Session() { + CHECK_EQ(flags_ & SESSION_STATE_HAS_SCOPE, 0); + if (!object().IsEmpty()) + ClearWrap(object()); + persistent().Reset(); CHECK(persistent().IsEmpty()); - Close(); -} - -// For every node::Http2Session instance, there is a uv_prepare_t handle -// whose callback is triggered on every tick of the event loop. When -// run, nghttp2 is prompted to send any queued data it may have stored. -// TODO(jasnell): Currently, this creates one uv_prepare_t per Http2Session, -// we should investigate to see if it's faster to create a -// single uv_prepare_t for all Http2Sessions, then iterate -// over each. 
-void Http2Session::Start() { - prep_ = new uv_prepare_t(); - uv_prepare_init(env()->event_loop(), prep_); - prep_->data = static_cast(this); - uv_prepare_start(prep_, [](uv_prepare_t* t) { - Http2Session* session = static_cast(t->data); - HandleScope scope(session->env()->isolate()); - Context::Scope context_scope(session->env()->context()); - - // Sending data may call arbitrary JS code, so keep track of - // async context. - InternalCallbackScope callback_scope(session); - session->SendPendingData(); - }); + Unconsume(); + DEBUG_HTTP2SESSION(this, "freeing nghttp2 session"); + nghttp2_session_del(session_); } -// Stop the uv_prep_t from further activity, destroy the handle -void Http2Session::Stop() { - DEBUG_HTTP2SESSION(this, "stopping uv_prep_t handle"); - CHECK_EQ(uv_prepare_stop(prep_), 0); - auto prep_close = [](uv_handle_t* handle) { - delete reinterpret_cast(handle); - }; - uv_close(reinterpret_cast(prep_), prep_close); - prep_ = nullptr; +inline bool HasHttp2Observer(Environment* env) { + AliasedBuffer& observers = + env->performance_state()->observers; + return observers[performance::NODE_PERFORMANCE_ENTRY_TYPE_HTTP2] != 0; } +inline void Http2Stream::EmitStatistics() { + if (!HasHttp2Observer(env())) + return; + Http2StreamPerformanceEntry* entry = + new Http2StreamPerformanceEntry(env(), id_, statistics_); + env()->SetImmediate([](Environment* env, void* data) { + Http2StreamPerformanceEntry* entry = + static_cast(data); + if (HasHttp2Observer(env)) { + AliasedBuffer& buffer = + env->http2_state()->stream_stats_buffer; + buffer[IDX_STREAM_STATS_ID] = entry->id(); + if (entry->first_byte() != 0) { + buffer[IDX_STREAM_STATS_TIMETOFIRSTBYTE] = + (entry->first_byte() - entry->startTimeNano()) / 1e6; + } else { + buffer[IDX_STREAM_STATS_TIMETOFIRSTBYTE] = 0; + } + if (entry->first_header() != 0) { + buffer[IDX_STREAM_STATS_TIMETOFIRSTHEADER] = + (entry->first_header() - entry->startTimeNano()) / 1e6; + } else { + buffer[IDX_STREAM_STATS_TIMETOFIRSTHEADER] = 0; + } + if (entry->first_byte_sent() != 0) { + buffer[IDX_STREAM_STATS_TIMETOFIRSTBYTESENT] = + (entry->first_byte_sent() - entry->startTimeNano()) / 1e6; + } else { + buffer[IDX_STREAM_STATS_TIMETOFIRSTBYTESENT] = 0; + } + buffer[IDX_STREAM_STATS_SENTBYTES] = entry->sent_bytes(); + buffer[IDX_STREAM_STATS_RECEIVEDBYTES] = entry->received_bytes(); + entry->Notify(entry->ToObject()); + } + delete entry; + }, static_cast(entry)); +} + +inline void Http2Session::EmitStatistics() { + if (!HasHttp2Observer(env())) + return; + Http2SessionPerformanceEntry* entry = + new Http2SessionPerformanceEntry(env(), statistics_, session_type_); + env()->SetImmediate([](Environment* env, void* data) { + Http2SessionPerformanceEntry* entry = + static_cast(data); + if (HasHttp2Observer(env)) { + AliasedBuffer& buffer = + env->http2_state()->session_stats_buffer; + buffer[IDX_SESSION_STATS_TYPE] = entry->type(); + buffer[IDX_SESSION_STATS_PINGRTT] = entry->ping_rtt() / 1e6; + buffer[IDX_SESSION_STATS_FRAMESRECEIVED] = entry->frame_count(); + buffer[IDX_SESSION_STATS_FRAMESSENT] = entry->frame_sent(); + buffer[IDX_SESSION_STATS_STREAMCOUNT] = entry->stream_count(); + buffer[IDX_SESSION_STATS_STREAMAVERAGEDURATION] = + entry->stream_average_duration(); + buffer[IDX_SESSION_STATS_DATA_SENT] = entry->data_sent(); + buffer[IDX_SESSION_STATS_DATA_RECEIVED] = entry->data_received(); + buffer[IDX_SESSION_STATS_MAX_CONCURRENT_STREAMS] = + entry->max_concurrent_streams(); + entry->Notify(entry->ToObject()); + } + delete entry; + }, static_cast(entry)); +} 
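// --- Illustrative aside (not part of the patch) -----------------------------
// The Close()/Ping/Settings code below relies on the Http2Scope idiom
// described earlier in this file. A minimal sketch of that idiom, assuming an
// existing Http2Session* inside node_http2.cc; `SubmitPingSketch` is a
// hypothetical name.
void SubmitPingSketch(Http2Session* session) {
  Http2Scope h2scope(session);                       // keeps the session alive
  nghttp2_submit_ping(**session, NGHTTP2_FLAG_NONE,  // queue a PING frame
                      nullptr);                      // default opaque payload
}  // ~Http2Scope() calls MaybeScheduleWrite(), flushing the outbound queue
// --- end aside ---------------------------------------------------------------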
-void Http2Session::Close() { +// Closes the session and frees the associated resources +void Http2Session::Close(uint32_t code, bool socket_closed) { DEBUG_HTTP2SESSION(this, "closing session"); - if (!object().IsEmpty()) - ClearWrap(object()); - persistent().Reset(); - if (session_ == nullptr) + if (flags_ & SESSION_STATE_CLOSED) return; + flags_ |= SESSION_STATE_CLOSED; + + // Stop reading on the i/o stream + if (stream_ != nullptr) + stream_->ReadStop(); + + // If the socket is not closed, then attempt to send a closing GOAWAY + // frame. There is no guarantee that this GOAWAY will be received by + // the peer but the HTTP/2 spec recommends sending it anyway. We'll + // make a best effort. + if (!socket_closed) { + Http2Scope h2scope(this); + DEBUG_HTTP2SESSION2(this, "terminating session with code %d", code); + CHECK_EQ(nghttp2_session_terminate_session(session_, code), 0); + } else { + Unconsume(); + } - CHECK_EQ(nghttp2_session_terminate_session(session_, NGHTTP2_NO_ERROR), 0); - nghttp2_session_del(session_); - session_ = nullptr; - + // If there are outstanding pings, those will need to be canceled; do + // so on the next iteration of the event loop to avoid calling out into + // JavaScript since this may be called during garbage collection. while (!outstanding_pings_.empty()) { Http2Session::Http2Ping* ping = PopPing(); - ping->Done(false); + env()->SetImmediate([](Environment* env, void* data) { + static_cast(data)->Done(false); + }, static_cast(ping)); } - Stop(); + statistics_.end_time = uv_hrtime(); + EmitStatistics(); } - +// Locates an existing known stream by ID. nghttp2 has a similar method +// but this is faster and does not fail if the stream is not found. inline Http2Stream* Http2Session::FindStream(int32_t id) { auto s = streams_.find(id); return s != streams_.end() ? s->second : nullptr; } +inline bool Http2Session::CanAddStream() { + uint32_t maxConcurrentStreams = + nghttp2_session_get_local_settings( + session_, NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS); + size_t maxSize = + std::min(streams_.max_size(), static_cast(maxConcurrentStreams)); + // We can add a new stream so long as we are less than the current + // maximum on concurrent streams and there's enough available memory + return streams_.size() < maxSize && + IsAvailableSessionMemory(sizeof(Http2Stream)); +} inline void Http2Session::AddStream(Http2Stream* stream) { + CHECK_GE(++statistics_.stream_count, 0); streams_[stream->id()] = stream; + size_t size = streams_.size(); + if (size > statistics_.max_concurrent_streams) + statistics_.max_concurrent_streams = size; + IncrementCurrentSessionMemory(stream->self_size()); } -inline void Http2Session::RemoveStream(int32_t id) { - streams_.erase(id); +inline void Http2Session::RemoveStream(Http2Stream* stream) { + streams_.erase(stream->id()); + DecrementCurrentSessionMemory(stream->self_size()); } +// Used as one of the Padding Strategy functions. Will attempt to ensure +// that the total frame size, including header bytes, is 8-byte aligned. +// If maxPayloadLen is smaller than the number of bytes necessary to align, +// will return maxPayloadLen instead. +inline ssize_t Http2Session::OnDWordAlignedPadding(size_t frameLen, + size_t maxPayloadLen) { + size_t r = (frameLen + 9) % 8; + if (r == 0) return frameLen; // If already a multiple of 8, return. + + size_t pad = frameLen + (8 - r); + // If maxPayloadLen happens to be less than the calculated pad length, + // use the max instead, even though this means the frame will not be + // aligned.
+ pad = std::min(maxPayloadLen, pad); + DEBUG_HTTP2SESSION2(this, "using frame size padding: %d", pad); + return pad; +} + +// Used as one of the Padding Strategy functions. Uses the maximum amount +// of padding allowed for the current frame. inline ssize_t Http2Session::OnMaxFrameSizePadding(size_t frameLen, size_t maxPayloadLen) { DEBUG_HTTP2SESSION2(this, "using max frame size padding: %d", maxPayloadLen); return maxPayloadLen; } - +// Used as one of the Padding Strategy functions. Uses a callback to JS land +// to determine the amount of padding for the current frame. This option is +// rather more expensive because of the JS boundary cross. It generally should +// not be the preferred option. inline ssize_t Http2Session::OnCallbackPadding(size_t frameLen, size_t maxPayloadLen) { + if (frameLen == 0) return 0; DEBUG_HTTP2SESSION(this, "using callback to determine padding"); Isolate* isolate = env()->isolate(); HandleScope handle_scope(isolate); @@ -462,38 +755,20 @@ inline ssize_t Http2Session::OnCallbackPadding(size_t frameLen, } -// Submits a graceful shutdown notice to nghttp -// See: https://nghttp2.org/documentation/nghttp2_submit_shutdown_notice.html -inline void Http2Session::SubmitShutdownNotice() { - // Only an HTTP2 Server is permitted to send a shutdown notice - if (session_type_ == NGHTTP2_SESSION_CLIENT) - return; - DEBUG_HTTP2SESSION(this, "sending shutdown notice"); - // The only situation where this should fail is if the system is - // out of memory, which will cause other problems. Go ahead and crash - // in that case. - CHECK_EQ(nghttp2_submit_shutdown_notice(session_), 0); -} - - -// Note: This *must* send a SETTINGS frame even if niv == 0 -inline void Http2Session::Settings(const nghttp2_settings_entry iv[], - size_t niv) { - DEBUG_HTTP2SESSION2(this, "submitting %d settings", niv); - // This will fail either if the system is out of memory, or if the settings - // values are not within the appropriate range. We should be catching the - // latter before it gets this far so crash in either case. - CHECK_EQ(nghttp2_submit_settings(session_, NGHTTP2_FLAG_NONE, iv, niv), 0); -} - - // Write data received from the i/o stream to the underlying nghttp2_session. +// On each call to nghttp2_session_mem_recv, nghttp2 will begin calling the +// various callback functions. Each of these will typically result in a call +// out to JavaScript so this particular function is rather hot and can be +// quite expensive. This is a potential performance optimization target later. inline ssize_t Http2Session::Write(const uv_buf_t* bufs, size_t nbufs) { size_t total = 0; // Note that nghttp2_session_mem_recv is a synchronous operation that // will trigger a number of other callbacks. Those will, in turn have // multiple side effects. for (size_t n = 0; n < nbufs; n++) { + DEBUG_HTTP2SESSION2(this, "receiving %d bytes [wants data? %d]", + bufs[n].len, + nghttp2_session_want_read(session_)); ssize_t ret = nghttp2_session_mem_recv(session_, reinterpret_cast(bufs[n].base), @@ -506,7 +781,9 @@ inline ssize_t Http2Session::Write(const uv_buf_t* bufs, size_t nbufs) { total += ret; } // Send any data that was queued up while processing the received data. - SendPendingData(); + if (!IsDestroyed()) { + SendPendingData(); + } return total; } @@ -519,6 +796,10 @@ inline int32_t GetFrameID(const nghttp2_frame* frame) { } +// Called by nghttp2 at the start of receiving a HEADERS frame. 
We use this +// callback to determine if a new stream is being created or if we are simply +// adding a new block of headers to an existing stream. The header pairs +// themselves are set in the OnHeaderCallback inline int Http2Session::OnBeginHeadersCallback(nghttp2_session* handle, const nghttp2_frame* frame, void* user_data) { @@ -528,14 +809,26 @@ inline int Http2Session::OnBeginHeadersCallback(nghttp2_session* handle, Http2Stream* stream = session->FindStream(id); if (stream == nullptr) { - new Http2Stream(session, id, frame->headers.cat); + if (session->CanAddStream()) { + new Http2Stream(session, id, frame->headers.cat); + } else { + // Too many concurrent streams being opened + nghttp2_submit_rst_stream(**session, NGHTTP2_FLAG_NONE, id, + NGHTTP2_ENHANCE_YOUR_CALM); + return NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE; + } } else { + // If the stream has already been destroyed, ignore. + if (stream->IsDestroyed()) + return 0; stream->StartHeaders(frame->headers.cat); } return 0; } - +// Called by nghttp2 for each header name/value pair in a HEADERS block. +// This had to have been preceded by a call to OnBeginHeadersCallback so +// the Http2Stream is guaranteed to already exist. inline int Http2Session::OnHeaderCallback(nghttp2_session* handle, const nghttp2_frame* frame, nghttp2_rcbuf* name, @@ -545,6 +838,10 @@ inline int Http2Session::OnHeaderCallback(nghttp2_session* handle, Http2Session* session = static_cast(user_data); int32_t id = GetFrameID(frame); Http2Stream* stream = session->FindStream(id); + CHECK_NE(stream, nullptr); + // If the stream has already been destroyed, ignore. + if (stream->IsDestroyed()) + return 0; if (!stream->AddHeader(name, value, flags)) { // This will only happen if the connected peer sends us more // than the allowed number of header items at any given time @@ -555,10 +852,13 @@ inline int Http2Session::OnHeaderCallback(nghttp2_session* handle, } +// Called by nghttp2 when a complete HTTP2 frame has been received. There are +// only a handful of frame types tha we care about handling here. inline int Http2Session::OnFrameReceive(nghttp2_session* handle, const nghttp2_frame* frame, void* user_data) { Http2Session* session = static_cast(user_data); + session->statistics_.frame_count++; DEBUG_HTTP2SESSION2(session, "complete frame received: type: %d", frame->hd.type); switch (frame->hd.type) { @@ -581,13 +881,48 @@ inline int Http2Session::OnFrameReceive(nghttp2_session* handle, break; case NGHTTP2_PING: session->HandlePingFrame(frame); + break; + case NGHTTP2_ALTSVC: + session->HandleAltSvcFrame(frame); + break; default: break; } return 0; } +inline int Http2Session::OnInvalidFrame(nghttp2_session* handle, + const nghttp2_frame *frame, + int lib_error_code, + void* user_data) { + Http2Session* session = static_cast(user_data); + + DEBUG_HTTP2SESSION2(session, "invalid frame received, code: %d", + lib_error_code); + + // If the error is fatal or if error code is ERR_STREAM_CLOSED... emit error + if (nghttp2_is_fatal(lib_error_code) || + lib_error_code == NGHTTP2_ERR_STREAM_CLOSED) { + Environment* env = session->env(); + Isolate* isolate = env->isolate(); + HandleScope scope(isolate); + Local context = env->context(); + Context::Scope context_scope(context); + Local argv[1] = { + Integer::New(isolate, lib_error_code), + }; + session->MakeCallback(env->error_string(), arraysize(argv), argv); + } + return 0; +} + +// If nghttp2 is unable to send a queued up frame, it will call this callback +// to let us know. 
If the failure occurred because we are in the process of +// closing down the session or stream, we go ahead and ignore it. We don't +// really care about those and there's nothing we can reasonably do about it +// anyway. Other types of failures are reported up to JavaScript. This should +// be exceedingly rare. inline int Http2Session::OnFrameNotSent(nghttp2_session* handle, const nghttp2_frame* frame, int error_code, @@ -615,7 +950,15 @@ inline int Http2Session::OnFrameNotSent(nghttp2_session* handle, return 0; } +inline int Http2Session::OnFrameSent(nghttp2_session* handle, + const nghttp2_frame* frame, + void* user_data) { + Http2Session* session = static_cast(user_data); + session->statistics_.frame_sent += 1; + return 0; +} +// Called by nghttp2 when a stream closes. inline int Http2Session::OnStreamClose(nghttp2_session* handle, int32_t id, uint32_t code, @@ -628,23 +971,34 @@ inline int Http2Session::OnStreamClose(nghttp2_session* handle, Context::Scope context_scope(context); DEBUG_HTTP2SESSION2(session, "stream %d closed with code: %d", id, code); Http2Stream* stream = session->FindStream(id); - // Intentionally ignore the callback if the stream does not exist - if (stream != nullptr) { + // Intentionally ignore the callback if the stream does not exist or has + // already been destroyed + if (stream != nullptr && !stream->IsDestroyed()) { stream->Close(code); // It is possible for the stream close to occur before the stream is - // ever passed on to the javascript side. If that happens, ignore this. + // ever passed on to the javascript side. If that happens, skip straight + // to destroying the stream. We can check this by looking for the + // onstreamclose function. If it exists, then the stream has already + // been passed on to javascript. Local fn = stream->object()->Get(context, env->onstreamclose_string()) .ToLocalChecked(); if (fn->IsFunction()) { - Local argv[1] = { Integer::NewFromUnsigned(isolate, code) }; + Local argv[] = { + Integer::NewFromUnsigned(isolate, code) + }; stream->MakeCallback(fn.As(), arraysize(argv), argv); + } else { + stream->Destroy(); } } return 0; } - +// Called by nghttp2 when an invalid header has been received. For now, we +// ignore these. If this callback was not provided, nghttp2 would handle +// invalid headers strictly and would shut down the stream. We are intentionally +// being more lenient here although we may want to revisit this choice later. inline int Http2Session::OnInvalidHeader(nghttp2_session* session, const nghttp2_frame* frame, nghttp2_rcbuf* name, @@ -655,7 +1009,10 @@ inline int Http2Session::OnInvalidHeader(nghttp2_session* session, return 0; } - +// When nghttp2 receives a DATA frame, it will deliver the data payload to +// us in discrete chunks. We push these into a linked list stored in the +// Http2Sttream which is flushed out to JavaScript as quickly as possible. +// This can be a particularly hot path. inline int Http2Session::OnDataChunkReceived(nghttp2_session* handle, uint8_t flags, int32_t id, @@ -665,33 +1022,74 @@ inline int Http2Session::OnDataChunkReceived(nghttp2_session* handle, Http2Session* session = static_cast(user_data); DEBUG_HTTP2SESSION2(session, "buffering data chunk for stream %d, size: " "%d, flags: %d", id, len, flags); + Environment* env = session->env(); + HandleScope scope(env->isolate()); // We should never actually get a 0-length chunk so this check is // only a precaution at this point. 
if (len > 0) { + // Notify nghttp2 that we've consumed a chunk of data on the connection + // so that it can send a WINDOW_UPDATE frame. This is a critical part of + // the flow control process in http2 CHECK_EQ(nghttp2_session_consume_connection(handle, len), 0); Http2Stream* stream = session->FindStream(id); - stream->AddChunk(data, len); + // If the stream has been destroyed, ignore this chunk + if (stream->IsDestroyed()) + return 0; + + stream->statistics_.received_bytes += len; + + // There is a single large array buffer for the entire data read from the + // network; create a slice of that array buffer and emit it as the + // received data buffer. + CHECK(!session->stream_buf_ab_.IsEmpty()); + size_t offset = reinterpret_cast(data) - session->stream_buf_; + // Verify that the data offset is inside the current read buffer. + CHECK_LE(offset, session->stream_buf_size_); + + Local buf = + Buffer::New(env, session->stream_buf_ab_, offset, len).ToLocalChecked(); + + stream->EmitData(len, buf, Local()); + if (!stream->IsReading()) + stream->inbound_consumed_data_while_paused_ += len; + else + nghttp2_session_consume_stream(handle, id, len); } return 0; } - -inline ssize_t Http2Session::OnSelectPadding(nghttp2_session* session, +// Called by nghttp2 when it needs to determine how much padding to use in +// a DATA or HEADERS frame. +inline ssize_t Http2Session::OnSelectPadding(nghttp2_session* handle, const nghttp2_frame* frame, size_t maxPayloadLen, void* user_data) { - Http2Session* handle = static_cast(user_data); + Http2Session* session = static_cast(user_data); ssize_t padding = frame->hd.length; - return handle->padding_strategy_ == PADDING_STRATEGY_MAX - ? handle->OnMaxFrameSizePadding(padding, maxPayloadLen) - : handle->OnCallbackPadding(padding, maxPayloadLen); + switch (session->padding_strategy_) { + case PADDING_STRATEGY_NONE: + // Fall-through + break; + case PADDING_STRATEGY_MAX: + padding = session->OnMaxFrameSizePadding(padding, maxPayloadLen); + break; + case PADDING_STRATEGY_ALIGNED: + padding = session->OnDWordAlignedPadding(padding, maxPayloadLen); + break; + case PADDING_STRATEGY_CALLBACK: + padding = session->OnCallbackPadding(padding, maxPayloadLen); + break; + } + return padding; } #define BAD_PEER_MESSAGE "Remote peer returned unexpected data while we " \ "expected SETTINGS frame. Perhaps, peer does not " \ "support HTTP/2 properly." +// We use this currently to determine when an attempt is made to use the http2 +// protocol with a non-http2 peer. inline int Http2Session::OnNghttpError(nghttp2_session* handle, const char* message, size_t len, @@ -715,9 +1113,11 @@ inline int Http2Session::OnNghttpError(nghttp2_session* handle, return 0; } - +// Once all of the DATA frames for a Stream have been sent, the GetTrailers +// method calls out to JavaScript to fetch the trailing headers that need +// to be sent. 
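
A quick aside before the trailer handling that follows: the PADDING_STRATEGY_ALIGNED branch selected in OnSelectPadding above is plain modular arithmetic. The sketch below is not the Node.js implementation, only a standalone model of the same rounding rule: the payload plus the 9-byte frame header is rounded up to the next 8-byte boundary, capped at the maximum payload length nghttp2 allows.

```cpp
// Standalone model of the "aligned" padding rule; assumes only the C++
// standard library and is not tied to nghttp2 itself.
#include <algorithm>
#include <cstddef>
#include <cstdio>

// Returns the padded payload length so that (payload + 9-byte frame header)
// is a multiple of 8, never exceeding max_payload_len.
size_t AlignedPadding(size_t frame_len, size_t max_payload_len) {
  size_t r = (frame_len + 9) % 8;
  if (r == 0) return frame_len;           // already aligned
  size_t pad = frame_len + (8 - r);       // round up to the next boundary
  return std::min(max_payload_len, pad);  // cap at what nghttp2 allows
}

int main() {
  // 100-byte payload + 9-byte header = 109 -> padded to 103 (103 + 9 = 112).
  std::printf("%zu\n", AlignedPadding(100, 16384));
  // 7 + 9 = 16 is already a multiple of 8, so no padding is added.
  std::printf("%zu\n", AlignedPadding(7, 16384));
  return 0;
}
```
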
inline void Http2Session::GetTrailers(Http2Stream* stream, uint32_t* flags) { - if (stream->HasTrailers()) { + if (!stream->IsDestroyed() && stream->HasTrailers()) { Http2Stream::SubmitTrailers submit_trailers{this, stream, flags}; stream->OnTrailers(submit_trailers); } @@ -732,7 +1132,8 @@ Http2Stream::SubmitTrailers::SubmitTrailers( inline void Http2Stream::SubmitTrailers::Submit(nghttp2_nv* trailers, - size_t length) const { + size_t length) const { + Http2Scope h2scope(session_); if (length == 0) return; DEBUG_HTTP2SESSION2(session_, "sending trailers for stream %d, count: %d", @@ -743,6 +1144,9 @@ inline void Http2Stream::SubmitTrailers::Submit(nghttp2_nv* trailers, } +// Called by OnFrameReceived to notify JavaScript land that a complete +// HEADERS frame has been received and processed. This method converts the +// received headers into a JavaScript array and pushes those out to JS. inline void Http2Session::HandleHeadersFrame(const nghttp2_frame* frame) { Isolate* isolate = env()->isolate(); HandleScope scope(isolate); @@ -753,6 +1157,10 @@ inline void Http2Session::HandleHeadersFrame(const nghttp2_frame* frame) { DEBUG_HTTP2SESSION2(this, "handle headers frame for stream %d", id); Http2Stream* stream = FindStream(id); + // If the stream has already been destroyed, ignore. + if (stream->IsDestroyed()) + return; + nghttp2_header* headers = stream->headers(); size_t count = stream->headers_count(); @@ -804,6 +1212,10 @@ inline void Http2Session::HandleHeadersFrame(const nghttp2_frame* frame) { } +// Called by OnFrameReceived when a complete PRIORITY frame has been +// received. Notifies JS land about the priority change. Note that priorities +// are considered advisory only, so this has no real effect other than to +// simply let user code know that the priority has changed. inline void Http2Session::HandlePriorityFrame(const nghttp2_frame* frame) { Isolate* isolate = env()->isolate(); HandleScope scope(isolate); @@ -826,20 +1238,25 @@ inline void Http2Session::HandlePriorityFrame(const nghttp2_frame* frame) { } +// Called by OnFrameReceived when a complete DATA frame has been received. +// If we know that this was the last DATA frame (because the END_STREAM flag +// is set), then we'll terminate the readable side of the StreamBase. inline void Http2Session::HandleDataFrame(const nghttp2_frame* frame) { int32_t id = GetFrameID(frame); DEBUG_HTTP2SESSION2(this, "handling data frame for stream %d", id); Http2Stream* stream = FindStream(id); + // If the stream has already been destroyed, do nothing + if (stream->IsDestroyed()) + return; + if (frame->hd.flags & NGHTTP2_FLAG_END_STREAM) { - stream->AddChunk(nullptr, 0); + stream->EmitData(UV_EOF, Local(), Local()); } - - if (stream->IsReading()) - stream->FlushDataChunks(); } +// Called by OnFrameReceived when a complete GOAWAY frame has been received. inline void Http2Session::HandleGoawayFrame(const nghttp2_frame* frame) { Isolate* isolate = env()->isolate(); HandleScope scope(isolate); @@ -865,93 +1282,316 @@ inline void Http2Session::HandleGoawayFrame(const nghttp2_frame* frame) { MakeCallback(env()->ongoawaydata_string(), arraysize(argv), argv); } +// Called by OnFrameReceived when a complete ALTSVC frame has been received. 
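
Before the ALTSVC handler that follows, one note on HandleDataFrame above: the END_STREAM handling is nothing more than a bit test on the frame header flags. Below is a minimal model of that decision; EmitEof() is an invented stand-in for terminating the readable side, not the real StreamBase call.

```cpp
#include <cstdint>
#include <cstdio>

// Stand-in for NGHTTP2_FLAG_END_STREAM, which nghttp2 defines as 0x01.
constexpr uint8_t kFlagEndStream = 0x01;

// Invented placeholder for terminating the readable side with EOF.
void EmitEof(int32_t stream_id) {
  std::printf("stream %d: readable side finished\n", stream_id);
}

// Model of the decision in HandleDataFrame: the payload itself was already
// delivered chunk by chunk, so the only job left here is the END_STREAM check.
void HandleDataFrameFlags(int32_t stream_id, uint8_t frame_flags) {
  if (frame_flags & kFlagEndStream)
    EmitEof(stream_id);
}

int main() {
  HandleDataFrameFlags(1, 0x00);            // not the last frame: no output
  HandleDataFrameFlags(3, kFlagEndStream);  // last frame: EOF is signalled
  return 0;
}
```
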
+inline void Http2Session::HandleAltSvcFrame(const nghttp2_frame* frame) { + Isolate* isolate = env()->isolate(); + HandleScope scope(isolate); + Local context = env()->context(); + Context::Scope context_scope(context); + + int32_t id = GetFrameID(frame); + + nghttp2_extension ext = frame->ext; + nghttp2_ext_altsvc* altsvc = static_cast(ext.payload); + DEBUG_HTTP2SESSION(this, "handling altsvc frame"); + + Local argv[3] = { + Integer::New(isolate, id), + String::NewFromOneByte(isolate, + altsvc->origin, + v8::NewStringType::kNormal, + altsvc->origin_len).ToLocalChecked(), + String::NewFromOneByte(isolate, + altsvc->field_value, + v8::NewStringType::kNormal, + altsvc->field_value_len).ToLocalChecked(), + }; + + MakeCallback(env()->onaltsvc_string(), arraysize(argv), argv); +} + +// Called by OnFrameReceived when a complete PING frame has been received. inline void Http2Session::HandlePingFrame(const nghttp2_frame* frame) { bool ack = frame->hd.flags & NGHTTP2_FLAG_ACK; if (ack) { Http2Ping* ping = PopPing(); - if (ping != nullptr) + if (ping != nullptr) { ping->Done(true, frame->ping.opaque_data); + } else { + // PING Ack is unsolicited. Treat as a connection error. The HTTP/2 + // spec does not require this, but there is no legitimate reason to + // receive an unsolicited PING ack on a connection. Either the peer + // is buggy or malicious, and we're not going to tolerate such + // nonsense. + Isolate* isolate = env()->isolate(); + HandleScope scope(isolate); + Local context = env()->context(); + Context::Scope context_scope(context); + + Local argv[1] = { + Integer::New(isolate, NGHTTP2_ERR_PROTO), + }; + MakeCallback(env()->error_string(), arraysize(argv), argv); + } } } - +// Called by OnFrameReceived when a complete SETTINGS frame has been received. inline void Http2Session::HandleSettingsFrame(const nghttp2_frame* frame) { - Isolate* isolate = env()->isolate(); - HandleScope scope(isolate); - Local context = env()->context(); - Context::Scope context_scope(context); - bool ack = frame->hd.flags & NGHTTP2_FLAG_ACK; + if (ack) { + // If this is an acknowledgement, we should have an Http2Settings + // object for it. + Http2Settings* settings = PopSettings(); + if (settings != nullptr) { + settings->Done(true); + } else { + // SETTINGS Ack is unsolicited. Treat as a connection error. The HTTP/2 + // spec does not require this, but there is no legitimate reason to + // receive an unsolicited SETTINGS ack on a connection. Either the peer + // is buggy or malicious, and we're not going to tolerate such + // nonsense. + // Note that nghttp2 currently prevents this from happening for SETTINGS + // frames, so this block is purely defensive just in case that behavior + // changes. Specifically, unlike unsolicited PING acks, unsolicited + // SETTINGS acks should *never* make it this far. + Isolate* isolate = env()->isolate(); + HandleScope scope(isolate); + Local context = env()->context(); + Context::Scope context_scope(context); + + Local argv[1] = { + Integer::New(isolate, NGHTTP2_ERR_PROTO), + }; + MakeCallback(env()->error_string(), arraysize(argv), argv); + } + } else { + // Otherwise, notify the session about a new settings + MakeCallback(env()->onsettings_string(), 0, nullptr); + } +} + +// Callback used when data has been written to the stream. 
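
Before the write-completion callback that follows, it may help to see the unsolicited-ACK logic from HandlePingFrame and HandleSettingsFrame above in isolation: keep a FIFO of outstanding requests and treat an ACK that arrives while the FIFO is empty as a protocol error. The tracker below is an invented illustration, not the Http2Ping/Http2Settings machinery itself.

```cpp
#include <cstdio>
#include <queue>

// Invented stand-in for an outstanding PING awaiting acknowledgement.
struct PendingPing {
  int id;
};

class PingTracker {
 public:
  void Sent(int id) { outstanding_.push(PendingPing{id}); }

  // Returns false for an ACK nobody asked for, which the session code above
  // reports to JS land as a protocol error.
  bool Acked() {
    if (outstanding_.empty())
      return false;  // unsolicited ACK
    std::printf("ping %d acknowledged\n", outstanding_.front().id);
    outstanding_.pop();
    return true;
  }

 private:
  std::queue<PendingPing> outstanding_;
};

int main() {
  PingTracker tracker;
  tracker.Sent(1);
  tracker.Acked();       // matches the outstanding ping
  if (!tracker.Acked())  // nothing outstanding: flagged as an error
    std::printf("unsolicited PING ack treated as a connection error\n");
  return 0;
}
```
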
+void Http2Session::OnStreamAfterWriteImpl(WriteWrap* w, int status, void* ctx) {
+  Http2Session* session = static_cast<Http2Session*>(ctx);
+  DEBUG_HTTP2SESSION2(session, "write finished with status %d", status);
 
-  Local<Value> argv[1] = { Boolean::New(isolate, ack) };
-  MakeCallback(env()->onsettings_string(), arraysize(argv), argv);
+  // Inform all pending writes about their completion.
+  session->ClearOutgoing(status);
+
+  if (!(session->flags_ & SESSION_STATE_WRITE_SCHEDULED)) {
+    // Schedule a new write if nghttp2 wants to send data.
+    session->MaybeScheduleWrite();
+  }
 }
 
+// If the underlying nghttp2_session struct has data pending in its outbound
+// queue, MaybeScheduleWrite will schedule a SendPendingData() call to occur
+// on the next iteration of the Node.js event loop (using the SetImmediate
+// queue), but only if a write has not already been scheduled.
+void Http2Session::MaybeScheduleWrite() {
+  CHECK_EQ(flags_ & SESSION_STATE_WRITE_SCHEDULED, 0);
+  if (session_ != nullptr && nghttp2_session_want_write(session_)) {
+    DEBUG_HTTP2SESSION(this, "scheduling write");
+    flags_ |= SESSION_STATE_WRITE_SCHEDULED;
+    env()->SetImmediate([](Environment* env, void* data) {
+      Http2Session* session = static_cast<Http2Session*>(data);
+      if (session->session_ == nullptr ||
+          !(session->flags_ & SESSION_STATE_WRITE_SCHEDULED)) {
+        // This can happen e.g. when a stream was reset before this turn
+        // of the event loop, in which case SendPendingData() is called early,
+        // or the session was destroyed in the meantime.
+        return;
+      }
+
+      // Sending data may call arbitrary JS code, so keep track of
+      // async context.
+      InternalCallbackScope callback_scope(session);
+      session->SendPendingData();
+    }, static_cast<void*>(this), object());
+  }
+}
+
+// Unset the sending state, finish up all current writes, and reset
+// storage for data and metadata that was associated with these writes.
+void Http2Session::ClearOutgoing(int status) {
+  CHECK_NE(flags_ & SESSION_STATE_SENDING, 0);
+  flags_ &= ~SESSION_STATE_SENDING;
+
+  for (const nghttp2_stream_write& wr : outgoing_buffers_) {
+    WriteWrap* wrap = wr.req_wrap;
+    if (wrap != nullptr)
+      wrap->Done(status);
+  }
 
-inline void Http2Session::SendPendingData() {
+  outgoing_buffers_.clear();
+  outgoing_storage_.clear();
+}
+
+// Queue a given block of data for sending. This always creates a copy,
+// so it is used for the cases in which nghttp2 requests sending of a
+// small chunk of data.
+void Http2Session::CopyDataIntoOutgoing(const uint8_t* src, size_t src_length) {
+  size_t offset = outgoing_storage_.size();
+  outgoing_storage_.resize(offset + src_length);
+  memcpy(&outgoing_storage_[offset], src, src_length);
+
+  // Store with a base of `nullptr` initially, since future resizes
+  // of the outgoing_buffers_ vector may invalidate the pointer.
+  // The correct base pointers will be set later, before writing to the
+  // underlying socket.
+  outgoing_buffers_.emplace_back(nghttp2_stream_write {
+    uv_buf_init(nullptr, src_length)
+  });
+}
+
+// Prompts nghttp2 to begin serializing its pending data and pushes each
+// chunk out to the i/o socket to be sent. This is a particularly hot method
+// that will generally be called at least twice per event loop iteration.
+// This is a potential performance optimization target later.
+void Http2Session::SendPendingData() {
   DEBUG_HTTP2SESSION(this, "sending pending data");
   // Do not attempt to send data on the socket if the destroying flag has
   // been set. That means everything is shutting down and the socket
   // will not be usable.
- if (IsDestroying()) + if (IsDestroyed()) return; + flags_ &= ~SESSION_STATE_WRITE_SCHEDULED; - WriteWrap* req = nullptr; - char* dest = nullptr; - size_t destRemaining = 0; - size_t destLength = 0; // amount of data stored in dest - size_t destOffset = 0; // current write offset of dest - - const uint8_t* src; // pointer to the serialized data - ssize_t srcLength = 0; // length of serialized data chunk - - // While srcLength is greater than zero - while ((srcLength = nghttp2_session_mem_send(session_, &src)) > 0) { - if (req == nullptr) { - req = AllocateSend(); - destRemaining = req->ExtraSize(); - dest = req->Extra(); - } - DEBUG_HTTP2SESSION2(this, "nghttp2 has %d bytes to send", srcLength); - size_t srcRemaining = srcLength; - size_t srcOffset = 0; - - // The amount of data we have to copy is greater than the space - // remaining. Copy what we can into the remaining space, send it, - // the proceed with the rest. - while (srcRemaining > destRemaining) { - DEBUG_HTTP2SESSION2(this, "pushing %d bytes to the socket", - destLength + destRemaining); - memcpy(dest + destOffset, src + srcOffset, destRemaining); - destLength += destRemaining; - Send(req, dest, destLength); - destOffset = 0; - destLength = 0; - srcRemaining -= destRemaining; - srcOffset += destRemaining; - req = AllocateSend(); - destRemaining = req->ExtraSize(); - dest = req->Extra(); - } + // SendPendingData should not be called recursively. + if (flags_ & SESSION_STATE_SENDING) + return; + // This is cleared by ClearOutgoing(). + flags_ |= SESSION_STATE_SENDING; + + ssize_t src_length; + const uint8_t* src; + + CHECK_EQ(outgoing_buffers_.size(), 0); + CHECK_EQ(outgoing_storage_.size(), 0); + + // Part One: Gather data from nghttp2 - if (srcRemaining > 0) { - memcpy(dest + destOffset, src + srcOffset, srcRemaining); - destLength += srcRemaining; - destOffset += srcRemaining; - destRemaining -= srcRemaining; - srcRemaining = 0; - srcOffset = 0; + while ((src_length = nghttp2_session_mem_send(session_, &src)) > 0) { + DEBUG_HTTP2SESSION2(this, "nghttp2 has %d bytes to send", src_length); + CopyDataIntoOutgoing(src, src_length); + } + + CHECK_NE(src_length, NGHTTP2_ERR_NOMEM); + + if (stream_ == nullptr) { + // It would seem nice to bail out earlier, but `nghttp2_session_mem_send()` + // does take care of things like closing the individual streams after + // a socket has been torn down, so we still need to call it. + ClearOutgoing(UV_ECANCELED); + return; + } + + // Part Two: Pass Data to the underlying stream + + size_t count = outgoing_buffers_.size(); + if (count == 0) { + flags_ &= ~SESSION_STATE_SENDING; + return; + } + MaybeStackBuffer bufs; + bufs.AllocateSufficientStorage(count); + + // Set the buffer base pointers for copied data that ended up in the + // sessions's own storage since it might have shifted around during gathering. + // (Those are marked by having .base == nullptr.) 
+ size_t offset = 0; + size_t i = 0; + for (const nghttp2_stream_write& write : outgoing_buffers_) { + statistics_.data_sent += write.buf.len; + if (write.buf.base == nullptr) { + bufs[i++] = uv_buf_init( + reinterpret_cast(outgoing_storage_.data() + offset), + write.buf.len); + offset += write.buf.len; + } else { + bufs[i++] = write.buf; } } - CHECK_NE(srcLength, NGHTTP2_ERR_NOMEM); - if (destLength > 0) { - DEBUG_HTTP2SESSION2(this, "pushing %d bytes to the socket", destLength); - Send(req, dest, destLength); + chunks_sent_since_last_write_++; + + // DoTryWrite may modify both the buffer list start itself and the + // base pointers/length of the individual buffers. + uv_buf_t* writebufs = *bufs; + if (stream_->DoTryWrite(&writebufs, &count) != 0 || count == 0) { + // All writes finished synchronously, nothing more to do here. + ClearOutgoing(0); + return; + } + + WriteWrap* req = AllocateSend(); + if (stream_->DoWrite(req, writebufs, count, nullptr) != 0) { + req->Dispose(); } + + DEBUG_HTTP2SESSION2(this, "wants data in return? %d", + nghttp2_session_want_read(session_)); } +// This callback is called from nghttp2 when it wants to send DATA frames for a +// given Http2Stream, when we set the `NGHTTP2_DATA_FLAG_NO_COPY` flag earlier +// in the Http2Stream::Provider::Stream::OnRead callback. +// We take the write information directly out of the stream's data queue. +int Http2Session::OnSendData( + nghttp2_session* session_, + nghttp2_frame* frame, + const uint8_t* framehd, + size_t length, + nghttp2_data_source* source, + void* user_data) { + Http2Session* session = static_cast(user_data); + Http2Stream* stream = GetStream(session, frame->hd.stream_id, source); + + // Send the frame header + a byte that indicates padding length. + session->CopyDataIntoOutgoing(framehd, 9); + if (frame->data.padlen > 0) { + uint8_t padding_byte = frame->data.padlen - 1; + CHECK_EQ(padding_byte, frame->data.padlen - 1); + session->CopyDataIntoOutgoing(&padding_byte, 1); + } + + DEBUG_HTTP2SESSION2(session, "nghttp2 has %d bytes to send directly", length); + while (length > 0) { + // nghttp2 thinks that there is data available (length > 0), which means + // we told it so, which means that we *should* have data available. + CHECK(!stream->queue_.empty()); + + nghttp2_stream_write& write = stream->queue_.front(); + if (write.buf.len <= length) { + // This write does not suffice by itself, so we can consume it completely. + length -= write.buf.len; + session->outgoing_buffers_.emplace_back(std::move(write)); + stream->queue_.pop(); + continue; + } + + // Slice off `length` bytes of the first write in the queue. + session->outgoing_buffers_.emplace_back(nghttp2_stream_write { + uv_buf_init(write.buf.base, length) + }); + write.buf.base += length; + write.buf.len -= length; + break; + } + + if (frame->data.padlen > 0) { + // Send padding if that was requested. + session->outgoing_buffers_.emplace_back(nghttp2_stream_write { + uv_buf_init(const_cast(zero_bytes_256), frame->data.padlen - 1) + }); + } + + return 0; +} + +// Creates a new Http2Stream and submits a new http2 request. 
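
Before SubmitRequest below, a note on the outgoing-buffer bookkeeping used by CopyDataIntoOutgoing, SendPendingData and OnSendData above: chunks copied into the session's own storage are recorded with a null base pointer (the storage vector may reallocate), and the real pointers are resolved only when the write buffers are assembled. The following is a simplified standalone model of that pattern, with std::string standing in for the actual socket write.

```cpp
#include <cstdio>
#include <cstring>
#include <string>
#include <vector>

struct PendingWrite {
  const char* base;  // nullptr means "lives in storage_, resolve later"
  size_t len;
};

class OutgoingQueue {
 public:
  // Copy a small chunk (e.g. a frame header) into shared storage and defer
  // the base pointer, since storage_ may reallocate as more data arrives.
  void CopyIn(const char* src, size_t len) {
    size_t offset = storage_.size();
    storage_.resize(offset + len);
    std::memcpy(&storage_[offset], src, len);
    writes_.push_back({nullptr, len});
  }

  // Reference an externally owned chunk (e.g. a queued DATA buffer) as-is.
  void Borrow(const char* src, size_t len) { writes_.push_back({src, len}); }

  // Assemble the final output, resolving null bases against storage_.
  std::string Flush() {
    std::string out;
    size_t offset = 0;
    for (const PendingWrite& w : writes_) {
      const char* base = w.base != nullptr ? w.base : storage_.data() + offset;
      if (w.base == nullptr) offset += w.len;
      out.append(base, w.len);
    }
    writes_.clear();
    storage_.clear();
    return out;
  }

 private:
  std::vector<char> storage_;
  std::vector<PendingWrite> writes_;
};

int main() {
  OutgoingQueue q;
  q.CopyIn("HDR", 3);                      // copied, like a frame header
  const char body[] = "payload";
  q.Borrow(body, 7);                       // referenced, not copied
  std::printf("%s\n", q.Flush().c_str());  // prints "HDRpayload"
  return 0;
}
```
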
inline Http2Stream* Http2Session::SubmitRequest( nghttp2_priority_spec* prispec, nghttp2_nv* nva, @@ -959,6 +1599,7 @@ inline Http2Stream* Http2Session::SubmitRequest( int32_t* ret, int options) { DEBUG_HTTP2SESSION(this, "submitting request"); + Http2Scope h2scope(this); Http2Stream* stream = nullptr; Http2Stream::Provider::Stream prov(options); *ret = nghttp2_submit_request(session_, prispec, nva, len, *prov, nullptr); @@ -972,75 +1613,117 @@ inline void Http2Session::SetChunksSinceLastWrite(size_t n) { chunks_sent_since_last_write_ = n; } - +// Allocates the data buffer used to pass outbound data to the i/o stream. WriteWrap* Http2Session::AllocateSend() { HandleScope scope(env()->isolate()); - auto AfterWrite = [](WriteWrap* req, int status) { - req->Dispose(); - }; Local obj = env()->write_wrap_constructor_function() ->NewInstance(env()->context()).ToLocalChecked(); - // Base the amount allocated on the remote peers max frame size - uint32_t size = - nghttp2_session_get_remote_settings( - session(), - NGHTTP2_SETTINGS_MAX_FRAME_SIZE); - // Max frame size + 9 bytes for the header - return WriteWrap::New(env(), obj, stream_, AfterWrite, size + 9); -} - -void Http2Session::Send(WriteWrap* req, char* buf, size_t length) { - DEBUG_HTTP2SESSION(this, "attempting to send data"); - if (stream_ == nullptr || !stream_->IsAlive() || stream_->IsClosing()) { - return; - } - - chunks_sent_since_last_write_++; - uv_buf_t actual = uv_buf_init(buf, length); - if (stream_->DoWrite(req, &actual, 1, nullptr)) { - req->Dispose(); - } + return WriteWrap::New(env(), obj, stream_); } - +// Allocates the data buffer used to receive inbound data from the i/o stream void Http2Session::OnStreamAllocImpl(size_t suggested_size, uv_buf_t* buf, void* ctx) { Http2Session* session = static_cast(ctx); - buf->base = session->stream_alloc(); - buf->len = kAllocBufferSize; + CHECK_EQ(session->stream_buf_, nullptr); + CHECK_EQ(session->stream_buf_size_, 0); + buf->base = session->stream_buf_ = Malloc(suggested_size); + buf->len = session->stream_buf_size_ = suggested_size; + session->IncrementCurrentSessionMemory(suggested_size); } - +// Callback used to receive inbound data from the i/o stream void Http2Session::OnStreamReadImpl(ssize_t nread, - const uv_buf_t* bufs, + const uv_buf_t* buf, uv_handle_type pending, void* ctx) { Http2Session* session = static_cast(ctx); - if (nread < 0) { - uv_buf_t tmp_buf; - tmp_buf.base = nullptr; - tmp_buf.len = 0; - session->prev_read_cb_.fn(nread, - &tmp_buf, - pending, - session->prev_read_cb_.ctx); - return; - } - if (nread > 0) { + Http2Scope h2scope(session); + CHECK_NE(session->stream_, nullptr); + DEBUG_HTTP2SESSION2(session, "receiving %d bytes", nread); + if (nread <= 0) { + free(session->stream_buf_); + if (nread < 0) { + uv_buf_t tmp_buf = uv_buf_init(nullptr, 0); + session->prev_read_cb_.fn(nread, + &tmp_buf, + pending, + session->prev_read_cb_.ctx); + } + } else { // Only pass data on if nread > 0 - uv_buf_t buf[] { uv_buf_init((*bufs).base, nread) }; - ssize_t ret = session->Write(buf, 1); - if (ret < 0) { - DEBUG_HTTP2SESSION2(session, "fatal error receiving data: %d", ret); - CHECK_EQ(nghttp2_session_terminate_session(session->session(), - NGHTTP2_PROTOCOL_ERROR), 0); + + // Verify that currently: There is memory allocated into which + // the data has been read, and that memory buffer is at least as large + // as the amount of data we have read, but we have not yet made an + // ArrayBuffer out of it. 
+ CHECK_NE(session->stream_buf_, nullptr); + CHECK_EQ(session->stream_buf_, buf->base); + CHECK_EQ(session->stream_buf_size_, buf->len); + CHECK_GE(session->stream_buf_size_, static_cast(nread)); + CHECK(session->stream_buf_ab_.IsEmpty()); + + Environment* env = session->env(); + Isolate* isolate = env->isolate(); + HandleScope scope(isolate); + Local context = env->context(); + Context::Scope context_scope(context); + + // Create an array buffer for the read data. DATA frames will be emitted + // as slices of this array buffer to avoid having to copy memory. + session->stream_buf_ab_ = + ArrayBuffer::New(isolate, + session->stream_buf_, + session->stream_buf_size_, + v8::ArrayBufferCreationMode::kInternalized); + + uv_buf_t buf_ = uv_buf_init(buf->base, nread); + session->statistics_.data_received += nread; + ssize_t ret = session->Write(&buf_, 1); + + // Note: if ssize_t is not defined (e.g. on Win32), nghttp2 will typedef + // ssize_t to int. Cast here so that the < 0 check actually works on + // Windows. + if (static_cast(ret) < 0) { + DEBUG_HTTP2SESSION2(this, "fatal error receiving data: %d", ret); + + Local argv[1] = { + Integer::New(isolate, ret), + }; + session->MakeCallback(env->error_string(), arraysize(argv), argv); + } else { + DEBUG_HTTP2SESSION2(session, "processed %d bytes. wants more? %d", ret, + nghttp2_session_want_read(**session)); } } + + // Since we are finished handling this write, reset the stream buffer. + // The memory has either been free()d or was handed over to V8. + session->DecrementCurrentSessionMemory(session->stream_buf_size_); + session->stream_buf_ = nullptr; + session->stream_buf_size_ = 0; + session->stream_buf_ab_ = Local(); } +void Http2Session::OnStreamDestructImpl(void* ctx) { + Http2Session* session = static_cast(ctx); + session->stream_ = nullptr; +} + +bool Http2Session::HasWritesOnSocketForStream(Http2Stream* stream) { + for (const nghttp2_stream_write& wr : outgoing_buffers_) { + if (wr.req_wrap != nullptr && wr.req_wrap->stream() == stream) + return true; + } + return false; +} +// Every Http2Session session is tightly bound to a single i/o StreamBase +// (typically a net.Socket or tls.TLSSocket). The lifecycle of the two is +// tightly coupled with all data transfer between the two happening at the +// C++ layer via the StreamBase API. 
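
Before Consume() below, the zero-copy read path above (OnStreamReadImpl wrapping the read buffer once, OnDataChunkReceived emitting offset/length slices of it) reduces to a bit of pointer arithmetic plus bounds checks. A self-contained sketch, with a plain view struct standing in for the ArrayBuffer slice:

```cpp
#include <cassert>
#include <cstdio>
#include <cstring>
#include <vector>

// Stand-in for the ArrayBuffer slice handed to JS: an offset and a length
// into the single buffer allocated for the read.
struct ChunkView {
  size_t offset;
  size_t len;
};

// Given the read buffer and a chunk pointer handed back by the parser,
// compute the slice; mirrors the offset/CHECK_LE bookkeeping above.
ChunkView SliceChunk(const char* buf_base, size_t buf_size,
                     const char* chunk, size_t chunk_len) {
  size_t offset = static_cast<size_t>(chunk - buf_base);
  assert(offset <= buf_size);              // chunk must lie inside the buffer
  assert(chunk_len <= buf_size - offset);  // and must not run past its end
  return ChunkView{offset, chunk_len};
}

int main() {
  std::vector<char> read_buf(64, '\0');
  std::memcpy(read_buf.data() + 10, "hello", 5);
  ChunkView v = SliceChunk(read_buf.data(), read_buf.size(),
                           read_buf.data() + 10, 5);
  std::printf("offset=%zu len=%zu data=%.5s\n",
              v.offset, v.len, read_buf.data() + v.offset);
  return 0;
}
```
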
void Http2Session::Consume(Local external) { StreamBase* stream = static_cast(external->Value()); stream->Consume(); @@ -1049,24 +1732,12 @@ void Http2Session::Consume(Local external) { prev_read_cb_ = stream->read_cb(); stream->set_alloc_cb({ Http2Session::OnStreamAllocImpl, this }); stream->set_read_cb({ Http2Session::OnStreamReadImpl, this }); + stream->set_after_write_cb({ Http2Session::OnStreamAfterWriteImpl, this }); + stream->set_destruct_cb({ Http2Session::OnStreamDestructImpl, this }); DEBUG_HTTP2SESSION(this, "i/o stream consumed"); } -void Http2Session::Unconsume() { - if (prev_alloc_cb_.is_empty()) - return; - stream_->set_alloc_cb(prev_alloc_cb_); - stream_->set_read_cb(prev_read_cb_); - prev_alloc_cb_.clear(); - prev_read_cb_.clear(); - stream_ = nullptr; - DEBUG_HTTP2SESSION(this, "i/o stream unconsumed"); -} - - - - Http2Stream::Http2Stream( Http2Session* session, int32_t id, @@ -1081,6 +1752,7 @@ Http2Stream::Http2Stream( id_(id), current_headers_category_(category) { MakeWeak(this); + statistics_.start_time = uv_hrtime(); // Limit the number of header pairs max_header_pairs_ = session->GetMaxHeaderPairs(); @@ -1106,26 +1778,36 @@ Http2Stream::Http2Stream( Http2Stream::~Http2Stream() { - CHECK(persistent().IsEmpty()); - if (!object().IsEmpty()) - ClearWrap(object()); + DEBUG_HTTP2STREAM(this, "tearing down stream"); + if (session_ != nullptr) { + session_->RemoveStream(this); + session_ = nullptr; + } + persistent().Reset(); + CHECK(persistent().IsEmpty()); } +// Notify the Http2Stream that a new block of HEADERS is being processed. void Http2Stream::StartHeaders(nghttp2_headers_category category) { DEBUG_HTTP2STREAM2(this, "starting headers, category: %d", id_, category); + CHECK(!this->IsDestroyed()); current_headers_length_ = 0; current_headers_.clear(); current_headers_category_ = category; } + nghttp2_stream* Http2Stream::operator*() { return nghttp2_session_find_stream(**session_, id_); } +// Calls out to JavaScript land to fetch the actual trailer headers to send +// for this stream. 
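
Before OnTrailers below, the essence of Consume() above is a callback swap: remember the stream's previous alloc/read handlers and install the session's own, so raw socket bytes feed nghttp2 instead of taking the usual socket path. The model below uses invented std::function callbacks rather than the real StreamBase API.

```cpp
#include <cstdio>
#include <functional>
#include <string>

// Invented, minimal stand-in for a stream that exposes a read callback.
using ReadCb = std::function<void(const std::string&)>;

struct FakeStream {
  ReadCb read_cb;
};

class Consumer {
 public:
  explicit Consumer(FakeStream* stream) : stream_(stream) {
    prev_read_cb_ = stream_->read_cb;                 // remember the old handler
    stream_->read_cb = [](const std::string& data) {  // install our own
      std::printf("session consumed %zu bytes\n", data.size());
    };
  }

  // Put the original handler back, the inverse of the swap above.
  void Restore() { stream_->read_cb = prev_read_cb_; }

 private:
  FakeStream* stream_;
  ReadCb prev_read_cb_;
};

int main() {
  FakeStream s;
  s.read_cb = [](const std::string& data) {
    std::printf("default handler: %s\n", data.c_str());
  };
  Consumer consumer(&s);
  s.read_cb("abc");    // routed to the consuming session
  consumer.Restore();
  s.read_cb("abc");    // back to the default handler
  return 0;
}
```
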
void Http2Stream::OnTrailers(const SubmitTrailers& submit_trailers) { DEBUG_HTTP2STREAM(this, "prompting for trailers"); + CHECK(!this->IsDestroyed()); Isolate* isolate = env()->isolate(); HandleScope scope(isolate); Local context = env()->context(); @@ -1133,7 +1815,7 @@ void Http2Stream::OnTrailers(const SubmitTrailers& submit_trailers) { Local ret = MakeCallback(env()->ontrailers_string(), 0, nullptr).ToLocalChecked(); - if (!ret.IsEmpty()) { + if (!ret.IsEmpty() && !IsDestroyed()) { if (ret->IsArray()) { Local headers = ret.As(); if (headers->Length() > 0) { @@ -1144,38 +1826,8 @@ void Http2Stream::OnTrailers(const SubmitTrailers& submit_trailers) { } } - -inline void Http2Stream::AddChunk(const uint8_t* data, size_t len) { - char* buf = nullptr; - if (len > 0) { - buf = Malloc(len); - memcpy(buf, data, len); - } - data_chunks_.emplace(uv_buf_init(buf, len)); -} - - -int Http2Stream::DoWrite(WriteWrap* req_wrap, - uv_buf_t* bufs, - size_t count, - uv_stream_t* send_handle) { - session_->SetChunksSinceLastWrite(); - - nghttp2_stream_write_t* req = new nghttp2_stream_write_t; - req->data = req_wrap; - - auto AfterWrite = [](nghttp2_stream_write_t* req, int status) { - WriteWrap* wrap = static_cast(req->data); - wrap->Done(status); - delete req; - }; - req_wrap->Dispatched(); - Write(req, bufs, count, AfterWrite); - return 0; -} - - inline void Http2Stream::Close(int32_t code) { + CHECK(!this->IsDestroyed()); flags_ |= NGHTTP2_STREAM_FLAG_CLOSED; code_ = code; DEBUG_HTTP2STREAM2(this, "closed with code %d", code); @@ -1183,6 +1835,8 @@ inline void Http2Stream::Close(int32_t code) { inline void Http2Stream::Shutdown() { + CHECK(!this->IsDestroyed()); + Http2Scope h2scope(this); flags_ |= NGHTTP2_STREAM_FLAG_SHUT; CHECK_NE(nghttp2_session_resume_data(session_->session(), id_), NGHTTP2_ERR_NOMEM); @@ -1190,81 +1844,60 @@ inline void Http2Stream::Shutdown() { } int Http2Stream::DoShutdown(ShutdownWrap* req_wrap) { + CHECK(!this->IsDestroyed()); req_wrap->Dispatched(); Shutdown(); req_wrap->Done(0); return 0; } +// Destroy the Http2Stream and render it unusable. Actual resources for the +// Stream will not be freed until the next tick of the Node.js event loop +// using the SetImmediate queue. inline void Http2Stream::Destroy() { - DEBUG_HTTP2STREAM(this, "destroying stream"); // Do nothing if this stream instance is already destroyed if (IsDestroyed()) return; - flags_ |= NGHTTP2_STREAM_FLAG_DESTROYED; - Http2Session* session = this->session_; - - if (session != nullptr) { - session_->RemoveStream(id_); - session_ = nullptr; - } - - // Free any remaining incoming data chunks. - while (!data_chunks_.empty()) { - uv_buf_t buf = data_chunks_.front(); - free(buf.base); - data_chunks_.pop(); - } - - // Free any remaining outgoing data chunks. - while (!queue_.empty()) { - nghttp2_stream_write* head = queue_.front(); - head->cb(head->req, UV_ECANCELED); - delete head; - queue_.pop(); - } - - if (!object().IsEmpty()) - ClearWrap(object()); - persistent().Reset(); - - delete this; -} + DEBUG_HTTP2STREAM(this, "destroying stream"); -void Http2Stream::OnDataChunk( - uv_buf_t* chunk) { - Isolate* isolate = env()->isolate(); - HandleScope scope(isolate); - ssize_t len = -1; - Local buf; - if (chunk != nullptr) { - len = chunk->len; - buf = Buffer::New(isolate, chunk->base, len).ToLocalChecked(); - } - EmitData(len, buf, this->object()); -} + // Wait until the start of the next loop to delete because there + // may still be some pending operations queued for this stream. 
+ env()->SetImmediate([](Environment* env, void* data) { + Http2Stream* stream = static_cast(data); + // Free any remaining outgoing data chunks here. This should be done + // here because it's possible for destroy to have been called while + // we still have queued outbound writes. + while (!stream->queue_.empty()) { + nghttp2_stream_write& head = stream->queue_.front(); + if (head.req_wrap != nullptr) + head.req_wrap->Done(UV_ECANCELED); + stream->queue_.pop(); + } + // We can destroy the stream now if there are no writes for it + // already on the socket. Otherwise, we'll wait for the garbage collector + // to take care of cleaning up. + if (!stream->session()->HasWritesOnSocketForStream(stream)) + delete stream; + }, this, this->object()); -inline void Http2Stream::FlushDataChunks() { - if (!data_chunks_.empty()) { - uv_buf_t buf = data_chunks_.front(); - data_chunks_.pop(); - if (buf.len > 0) { - CHECK_EQ(nghttp2_session_consume_stream(session_->session(), - id_, buf.len), 0); - OnDataChunk(&buf); - } else { - OnDataChunk(nullptr); - } - } + statistics_.end_time = uv_hrtime(); + session_->statistics_.stream_average_duration = + ((statistics_.end_time - statistics_.start_time) / + session_->statistics_.stream_count) / 1e6; + EmitStatistics(); } +// Initiates a response on the Http2Stream using data provided via the +// StreamBase Streams API. inline int Http2Stream::SubmitResponse(nghttp2_nv* nva, size_t len, int options) { + CHECK(!this->IsDestroyed()); + Http2Scope h2scope(this); DEBUG_HTTP2STREAM(this, "submitting response"); if (options & STREAM_OPTION_GET_TRAILERS) flags_ |= NGHTTP2_STREAM_FLAG_TRAILERS; @@ -1285,6 +1918,8 @@ inline int Http2Stream::SubmitFile(int fd, int64_t offset, int64_t length, int options) { + CHECK(!this->IsDestroyed()); + Http2Scope h2scope(this); DEBUG_HTTP2STREAM(this, "submitting file"); if (options & STREAM_OPTION_GET_TRAILERS) flags_ |= NGHTTP2_STREAM_FLAG_TRAILERS; @@ -1301,6 +1936,8 @@ inline int Http2Stream::SubmitFile(int fd, // Submit informational headers for a stream. inline int Http2Stream::SubmitInfo(nghttp2_nv* nva, size_t len) { + CHECK(!this->IsDestroyed()); + Http2Scope h2scope(this); DEBUG_HTTP2STREAM2(this, "sending %d informational headers", len); int ret = nghttp2_submit_headers(session_->session(), NGHTTP2_FLAG_NONE, @@ -1310,9 +1947,11 @@ inline int Http2Stream::SubmitInfo(nghttp2_nv* nva, size_t len) { return ret; } - +// Submit a PRIORITY frame to the connected peer. inline int Http2Stream::SubmitPriority(nghttp2_priority_spec* prispec, bool silent) { + CHECK(!this->IsDestroyed()); + Http2Scope h2scope(this); DEBUG_HTTP2STREAM(this, "sending priority spec"); int ret = silent ? nghttp2_session_change_stream_priority(session_->session(), @@ -1324,25 +1963,28 @@ inline int Http2Stream::SubmitPriority(nghttp2_priority_spec* prispec, return ret; } - -inline int Http2Stream::SubmitRstStream(const uint32_t code) { - DEBUG_HTTP2STREAM2(this, "sending rst-stream with code %d", code); +// Closes the Http2Stream by submitting an RST_STREAM frame to the connected +// peer. +inline void Http2Stream::SubmitRstStream(const uint32_t code) { + CHECK(!this->IsDestroyed()); + Http2Scope h2scope(this); + // Force a purge of any currently pending data here to make sure + // it is sent before closing the stream. 
session_->SendPendingData(); - CHECK_EQ(nghttp2_submit_rst_stream(session_->session(), - NGHTTP2_FLAG_NONE, - id_, - code), 0); - return 0; + CHECK_EQ(nghttp2_submit_rst_stream(**session_, NGHTTP2_FLAG_NONE, + id_, code), 0); } -// Submit a push promise. +// Submit a push promise and create the associated Http2Stream if successful. inline Http2Stream* Http2Stream::SubmitPushPromise(nghttp2_nv* nva, size_t len, int32_t* ret, int options) { + CHECK(!this->IsDestroyed()); + Http2Scope h2scope(this); DEBUG_HTTP2STREAM(this, "sending push promise"); - *ret = nghttp2_submit_push_promise(session_->session(), NGHTTP2_FLAG_NONE, + *ret = nghttp2_submit_push_promise(**session_, NGHTTP2_FLAG_NONE, id_, nva, len, nullptr); CHECK_NE(*ret, NGHTTP2_ERR_NOMEM); Http2Stream* stream = nullptr; @@ -1352,18 +1994,29 @@ inline Http2Stream* Http2Stream::SubmitPushPromise(nghttp2_nv* nva, return stream; } +// Switch the StreamBase into flowing mode to begin pushing chunks of data +// out to JS land. inline int Http2Stream::ReadStart() { + Http2Scope h2scope(this); + CHECK(!this->IsDestroyed()); flags_ |= NGHTTP2_STREAM_FLAG_READ_START; flags_ &= ~NGHTTP2_STREAM_FLAG_READ_PAUSED; - // Flush any queued data chunks immediately out to the JS layer - FlushDataChunks(); DEBUG_HTTP2STREAM(this, "reading starting"); + + // Tell nghttp2 about our consumption of the data that was handed + // off to JS land. + nghttp2_session_consume_stream(session_->session(), + id_, + inbound_consumed_data_while_paused_); + inbound_consumed_data_while_paused_ = 0; + return 0; } - +// Switch the StreamBase into paused mode. inline int Http2Stream::ReadStop() { + CHECK(!this->IsDestroyed()); if (!IsReading()) return 0; flags_ |= NGHTTP2_STREAM_FLAG_READ_PAUSED; @@ -1371,28 +2024,39 @@ inline int Http2Stream::ReadStop() { return 0; } +// The Http2Stream class is a subclass of StreamBase. The DoWrite method +// receives outbound chunks of data to send as outbound DATA frames. These +// are queued in an internal linked list of uv_buf_t structs that are sent +// when nghttp2 is ready to serialize the data frame. +// // Queue the given set of uv_but_t handles for writing to an -// nghttp2_stream. The callback will be invoked once the chunks -// of data have been flushed to the underlying nghttp2_session. +// nghttp2_stream. The WriteWrap's Done callback will be invoked once the +// chunks of data have been flushed to the underlying nghttp2_session. // Note that this does *not* mean that the data has been flushed // to the socket yet. -inline int Http2Stream::Write(nghttp2_stream_write_t* req, - const uv_buf_t bufs[], - unsigned int nbufs, - nghttp2_stream_write_cb cb) { +inline int Http2Stream::DoWrite(WriteWrap* req_wrap, + uv_buf_t* bufs, + size_t nbufs, + uv_stream_t* send_handle) { + CHECK(!this->IsDestroyed()); + CHECK_EQ(send_handle, nullptr); + Http2Scope h2scope(this); + session_->SetChunksSinceLastWrite(); + req_wrap->Dispatched(); if (!IsWritable()) { - if (cb != nullptr) - cb(req, UV_EOF); + req_wrap->Done(UV_EOF); return 0; } DEBUG_HTTP2STREAM2(this, "queuing %d buffers to send", id_, nbufs); - nghttp2_stream_write* item = new nghttp2_stream_write; - item->cb = cb; - item->req = req; - item->nbufs = nbufs; - item->bufs.AllocateSufficientStorage(nbufs); - memcpy(*(item->bufs), bufs, nbufs * sizeof(*bufs)); - queue_.push(item); + for (size_t i = 0; i < nbufs; ++i) { + // Store the req_wrap on the last write info in the queue, so that it is + // only marked as finished once all buffers associated with it are finished. 
+ queue_.emplace(nghttp2_stream_write { + i == nbufs - 1 ? req_wrap : nullptr, + bufs[i] + }); + IncrementAvailableOutboundLength(bufs[i].len); + } CHECK_NE(nghttp2_session_resume_data(**session_, id_), NGHTTP2_ERR_NOMEM); return 0; } @@ -1401,11 +2065,22 @@ inline size_t GetBufferLength(nghttp2_rcbuf* buf) { return nghttp2_rcbuf_get_buf(buf).len; } +// Ads a header to the Http2Stream. Note that the header name and value are +// provided using a buffer structure provided by nghttp2 that allows us to +// avoid unnecessary memcpy's. Those buffers are ref counted. The ref count +// is incremented here and are decremented when the header name and values +// are garbage collected later. inline bool Http2Stream::AddHeader(nghttp2_rcbuf* name, nghttp2_rcbuf* value, uint8_t flags) { + CHECK(!this->IsDestroyed()); + if (this->statistics_.first_header == 0) + this->statistics_.first_header = uv_hrtime(); size_t length = GetBufferLength(name) + GetBufferLength(value) + 32; - if (current_headers_.size() == max_header_pairs_ || + // A header can only be added if we have not exceeded the maximum number + // of headers and the session has memory available for it. + if (!session_->IsAvailableSessionMemory(length) || + current_headers_.size() == max_header_pairs_ || current_headers_length_ + length > max_header_length_) { return false; } @@ -1420,19 +2095,9 @@ inline bool Http2Stream::AddHeader(nghttp2_rcbuf* name, return true; } - -Http2Stream* GetStream(Http2Session* session, - int32_t id, - nghttp2_data_source* source) { - Http2Stream* stream = static_cast(source->ptr); - if (stream == nullptr) - stream = session->FindStream(id); - CHECK_NE(stream, nullptr); - CHECK_EQ(id, stream->id()); - return stream; -} - +// A Provider is the thing that provides outbound DATA frame data. Http2Stream::Provider::Provider(Http2Stream* stream, int options) { + CHECK(!stream->IsDestroyed()); provider_.source.ptr = stream; empty_ = options & STREAM_OPTION_EMPTY_PAYLOAD; } @@ -1446,8 +2111,12 @@ Http2Stream::Provider::~Provider() { provider_.source.ptr = nullptr; } +// The FD Provider pulls data from a file descriptor using libuv. All of the +// data transfer occurs in C++, without any chunks being passed through JS +// land. Http2Stream::Provider::FD::FD(Http2Stream* stream, int options, int fd) : Http2Stream::Provider(stream, options) { + CHECK(!stream->IsDestroyed()); provider_.source.fd = fd; provider_.read_callback = Http2Stream::Provider::FD::OnRead; } @@ -1467,6 +2136,9 @@ ssize_t Http2Stream::Provider::FD::OnRead(nghttp2_session* handle, void* user_data) { Http2Session* session = static_cast(user_data); Http2Stream* stream = session->FindStream(id); + if (stream->statistics_.first_byte_sent == 0) + stream->statistics_.first_byte_sent = uv_hrtime(); + DEBUG_HTTP2SESSION2(session, "reading outbound file data for stream %d", id); CHECK_EQ(id, stream->id()); @@ -1501,16 +2173,27 @@ ssize_t Http2Stream::Provider::FD::OnRead(nghttp2_session* handle, stream->fd_offset_ += numchars; stream->fd_length_ -= numchars; + DEBUG_HTTP2SESSION2(session, "sending %d bytes", numchars); + // if numchars < length, assume that we are done. 
if (static_cast(numchars) < length || length <= 0) { DEBUG_HTTP2SESSION2(session, "no more data for stream %d", id); *flags |= NGHTTP2_DATA_FLAG_EOF; session->GetTrailers(stream, flags); + // If the stream or session gets destroyed during the GetTrailers + // callback, check that here and close down the stream + if (stream->IsDestroyed()) + return NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE; + if (session->IsDestroyed()) + return NGHTTP2_ERR_CALLBACK_FAILURE; } + stream->statistics_.sent_bytes += numchars; return numchars; } +// The Stream Provider pulls data from a linked list of uv_buf_t structs +// built via the StreamBase API and the Streams js API. Http2Stream::Provider::Stream::Stream(int options) : Http2Stream::Provider(options) { provider_.read_callback = Http2Stream::Provider::Stream::OnRead; @@ -1531,38 +2214,38 @@ ssize_t Http2Stream::Provider::Stream::OnRead(nghttp2_session* handle, Http2Session* session = static_cast(user_data); DEBUG_HTTP2SESSION2(session, "reading outbound data for stream %d", id); Http2Stream* stream = GetStream(session, id, source); + if (stream->statistics_.first_byte_sent == 0) + stream->statistics_.first_byte_sent = uv_hrtime(); CHECK_EQ(id, stream->id()); size_t amount = 0; // amount of data being sent in this data frame. - uv_buf_t current; + // Remove all empty chunks from the head of the queue. + // This is done here so that .write('', cb) is still a meaningful way to + // find out when the HTTP2 stream wants to consume data, and because the + // StreamBase API allows empty input chunks. + while (!stream->queue_.empty() && stream->queue_.front().buf.len == 0) { + WriteWrap* finished = stream->queue_.front().req_wrap; + stream->queue_.pop(); + if (finished != nullptr) + finished->Done(0); + } if (!stream->queue_.empty()) { DEBUG_HTTP2SESSION2(session, "stream %d has pending outbound data", id); - nghttp2_stream_write* head = stream->queue_.front(); - current = head->bufs[stream->queue_index_]; - size_t clen = current.len - stream->queue_offset_; - amount = std::min(clen, length); + amount = std::min(stream->available_outbound_length_, length); DEBUG_HTTP2SESSION2(session, "sending %d bytes for data frame on stream %d", amount, id); if (amount > 0) { - memcpy(buf, current.base + stream->queue_offset_, amount); - stream->queue_offset_ += amount; - } - if (stream->queue_offset_ == current.len) { - stream->queue_index_++; - stream->queue_offset_ = 0; - } - if (stream->queue_index_ == head->nbufs) { - head->cb(head->req, 0); - delete head; - stream->queue_.pop(); - stream->queue_offset_ = 0; - stream->queue_index_ = 0; + // Just return the length, let Http2Session::OnSendData take care of + // actually taking the buffers out of the queue. 
+ *flags |= NGHTTP2_DATA_FLAG_NO_COPY; + stream->DecrementAvailableOutboundLength(amount); } } - if (amount == 0 && stream->IsWritable() && stream->queue_.empty()) { + if (amount == 0 && stream->IsWritable()) { + CHECK(stream->queue_.empty()); DEBUG_HTTP2SESSION2(session, "deferring stream %d", id); return NGHTTP2_ERR_DEFERRED; } @@ -1570,17 +2253,34 @@ ssize_t Http2Stream::Provider::Stream::OnRead(nghttp2_session* handle, if (stream->queue_.empty() && !stream->IsWritable()) { DEBUG_HTTP2SESSION2(session, "no more data for stream %d", id); *flags |= NGHTTP2_DATA_FLAG_EOF; - session->GetTrailers(stream, flags); + // If the stream or session gets destroyed during the GetTrailers + // callback, check that here and close down the stream + if (stream->IsDestroyed()) + return NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE; + if (session->IsDestroyed()) + return NGHTTP2_ERR_CALLBACK_FAILURE; } + stream->statistics_.sent_bytes += amount; return amount; } +inline void Http2Stream::IncrementAvailableOutboundLength(size_t amount) { + available_outbound_length_ += amount; + session_->IncrementCurrentSessionMemory(amount); +} + +inline void Http2Stream::DecrementAvailableOutboundLength(size_t amount) { + available_outbound_length_ -= amount; + session_->DecrementCurrentSessionMemory(amount); +} // Implementation of the JavaScript API +// Fetches the string description of a nghttp2 error code and passes that +// back to JS land void HttpErrorString(const FunctionCallbackInfo& args) { Environment* env = Environment::GetCurrent(args); uint32_t val = args[0]->Uint32Value(env->context()).ToChecked(); @@ -1597,17 +2297,19 @@ void HttpErrorString(const FunctionCallbackInfo& args) { // output for an HTTP2-Settings header field. void PackSettings(const FunctionCallbackInfo& args) { Environment* env = Environment::GetCurrent(args); - Http2Settings settings(env); + Http2Session::Http2Settings settings(env); args.GetReturnValue().Set(settings.Pack()); } - +// A TypedArray instance is shared between C++ and JS land to contain the +// default SETTINGS. RefreshDefaultSettings updates that TypedArray with the +// default values. void RefreshDefaultSettings(const FunctionCallbackInfo& args) { Environment* env = Environment::GetCurrent(args); - Http2Settings::RefreshDefaults(env); + Http2Session::Http2Settings::RefreshDefaults(env); } - +// Sets the next stream ID the Http2Session. If successful, returns true. void Http2Session::SetNextStreamID(const FunctionCallbackInfo& args) { Environment* env = Environment::GetCurrent(args); Http2Session* session; @@ -1621,7 +2323,9 @@ void Http2Session::SetNextStreamID(const FunctionCallbackInfo& args) { DEBUG_HTTP2SESSION2(session, "set next stream id to %d", id); } - +// A TypedArray instance is shared between C++ and JS land to contain the +// SETTINGS (either remote or local). RefreshSettings updates the current +// values established for each of the settings so those can be read in JS land. template void Http2Session::RefreshSettings(const FunctionCallbackInfo& args) { Environment* env = Environment::GetCurrent(args); @@ -1631,7 +2335,9 @@ void Http2Session::RefreshSettings(const FunctionCallbackInfo& args) { DEBUG_HTTP2SESSION(session, "settings refreshed for session"); } - +// A TypedArray instance is shared between C++ and JS land to contain state +// information of the current Http2Session. This updates the values in the +// TypedRray so those can be read in JS land. 
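
Before the state-refresh binding that follows, note that the Stream provider's OnRead above makes a three-way decision without copying any data: report how many bytes are available (and set NO_COPY so OnSendData pulls the buffers later), return DEFERRED when nothing is queued but the stream is still writable, or signal EOF once the writable side has ended. A compact model of that decision, with invented return codes:

```cpp
#include <algorithm>
#include <cstdio>
#include <queue>
#include <string>

// Invented return convention for the model below.
constexpr long kDeferred = -1;

struct ReadResult {
  long amount;  // bytes reported as available (pulled out of the queue later)
  bool eof;
};

// Decide what to tell the framer: data available, deferred, or end of stream.
ReadResult ProviderRead(std::queue<std::string>* queue, bool writable,
                        size_t max_len) {
  if (!queue->empty()) {
    size_t amount = std::min(queue->front().size(), max_len);
    return {static_cast<long>(amount), false};
  }
  if (writable)
    return {kDeferred, false};  // nothing queued yet, resume later
  return {0, true};             // drained and the writable side has ended
}

int main() {
  std::queue<std::string> q;
  q.push("hello world");
  ReadResult r = ProviderRead(&q, true, 8);
  std::printf("amount=%ld eof=%d\n", r.amount, r.eof);  // amount=8 eof=0
  q.pop();
  r = ProviderRead(&q, true, 8);
  std::printf("deferred=%d\n", r.amount == kDeferred);  // deferred=1
  r = ProviderRead(&q, false, 8);
  std::printf("eof=%d\n", r.eof);                       // eof=1
  return 0;
}
```
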
void Http2Session::RefreshState(const FunctionCallbackInfo& args) { Environment* env = Environment::GetCurrent(args); Http2Session* session; @@ -1664,6 +2370,7 @@ void Http2Session::RefreshState(const FunctionCallbackInfo& args) { } +// Constructor for new Http2Session instances. void Http2Session::New(const FunctionCallbackInfo& args) { Environment* env = Environment::GetCurrent(args); CHECK(args.IsConstructCall()); @@ -1675,6 +2382,7 @@ void Http2Session::New(const FunctionCallbackInfo& args) { } +// Binds the Http2Session with a StreamBase used for i/o void Http2Session::Consume(const FunctionCallbackInfo& args) { Http2Session* session; ASSIGN_OR_RETURN_UNWRAP(&session, args.Holder()); @@ -1682,42 +2390,22 @@ void Http2Session::Consume(const FunctionCallbackInfo& args) { session->Consume(args[0].As()); } - +// Destroys the Http2Session instance and renders it unusable void Http2Session::Destroy(const FunctionCallbackInfo& args) { Http2Session* session; ASSIGN_OR_RETURN_UNWRAP(&session, args.Holder()); DEBUG_HTTP2SESSION(session, "destroying session"); - Environment* env = Environment::GetCurrent(args); Local context = env->context(); - bool skipUnconsume = args[0]->BooleanValue(context).ToChecked(); - - if (!skipUnconsume) - session->Unconsume(); - session->Close(); -} - - -void Http2Session::Destroying(const FunctionCallbackInfo& args) { - Http2Session* session; - ASSIGN_OR_RETURN_UNWRAP(&session, args.Holder()); - session->MarkDestroying(); - DEBUG_HTTP2SESSION(session, "preparing to destroy session"); -} - - -void Http2Session::Settings(const FunctionCallbackInfo& args) { - Http2Session* session; - ASSIGN_OR_RETURN_UNWRAP(&session, args.Holder()); - Environment* env = session->env(); + uint32_t code = args[0]->Uint32Value(context).ToChecked(); + bool socketDestroyed = args[1]->BooleanValue(context).ToChecked(); - Http2Settings settings(env); - session->Http2Session::Settings(*settings, settings.length()); - DEBUG_HTTP2SESSION(session, "settings submitted"); + session->Close(code, socketDestroyed); } - +// Submits a new request on the Http2Session and returns either an error code +// or the Http2Stream object. void Http2Session::Request(const FunctionCallbackInfo& args) { Http2Session* session; ASSIGN_OR_RETURN_UNWRAP(&session, args.Holder()); @@ -1749,48 +2437,50 @@ void Http2Session::Request(const FunctionCallbackInfo& args) { args.GetReturnValue().Set(stream->object()); } +// Submits a GOAWAY frame to signal that the Http2Session is in the process +// of shutting down. Note that this function does not actually alter the +// state of the Http2Session, it's simply a notification. +void Http2Session::Goaway(uint32_t code, + int32_t lastStreamID, + uint8_t* data, + size_t len) { + if (IsDestroyed()) + return; -void Http2Session::ShutdownNotice(const FunctionCallbackInfo& args) { - Http2Session* session; - ASSIGN_OR_RETURN_UNWRAP(&session, args.Holder()); - session->SubmitShutdownNotice(); - DEBUG_HTTP2SESSION(session, "shutdown notice sent"); + Http2Scope h2scope(this); + // the last proc stream id is the most recently created Http2Stream. + if (lastStreamID <= 0) + lastStreamID = nghttp2_session_get_last_proc_stream_id(session_); + DEBUG_HTTP2SESSION(this, "submitting goaway"); + nghttp2_submit_goaway(session_, NGHTTP2_FLAG_NONE, + lastStreamID, code, data, len); } - +// Submits a GOAWAY frame to signal that the Http2Session is in the process +// of shutting down. The opaque data argument is an optional TypedArray that +// can be used to send debugging data to the connected peer. 
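
Before the JS binding that follows, note that the C++ Goaway() overload above substitutes the most recently processed stream ID whenever the caller passes a non-positive lastStreamID. The defaulting rule on its own, with a plain parameter standing in for nghttp2_session_get_last_proc_stream_id():

```cpp
#include <cstdint>
#include <cstdio>

// Model of the lastStreamID defaulting applied before the GOAWAY is submitted;
// last_proc_stream_id stands in for nghttp2_session_get_last_proc_stream_id().
int32_t ResolveLastStreamId(int32_t requested, int32_t last_proc_stream_id) {
  return requested <= 0 ? last_proc_stream_id : requested;
}

int main() {
  std::printf("%d\n", ResolveLastStreamId(0, 7));   // 7: defaulted
  std::printf("%d\n", ResolveLastStreamId(-1, 7));  // 7: defaulted
  std::printf("%d\n", ResolveLastStreamId(5, 7));   // 5: caller's value kept
  return 0;
}
```
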
void Http2Session::Goaway(const FunctionCallbackInfo& args) { - Http2Session* session; Environment* env = Environment::GetCurrent(args); Local context = env->context(); + Http2Session* session; ASSIGN_OR_RETURN_UNWRAP(&session, args.Holder()); - uint32_t errorCode = args[0]->Uint32Value(context).ToChecked(); + uint32_t code = args[0]->Uint32Value(context).ToChecked(); int32_t lastStreamID = args[1]->Int32Value(context).ToChecked(); Local opaqueData = args[2]; - uint8_t* data = nullptr; size_t length = 0; - if (opaqueData->BooleanValue(context).ToChecked()) { - THROW_AND_RETURN_UNLESS_BUFFER(env, opaqueData); - SPREAD_BUFFER_ARG(opaqueData, buf); - data = reinterpret_cast(buf_data); - length = buf_length; + if (Buffer::HasInstance(opaqueData)) { + data = reinterpret_cast(Buffer::Data(opaqueData)); + length = Buffer::Length(opaqueData); } - int status = nghttp2_submit_goaway(session->session(), - NGHTTP2_FLAG_NONE, - lastStreamID, - errorCode, - data, length); - CHECK_NE(status, NGHTTP2_ERR_NOMEM); - args.GetReturnValue().Set(status); - DEBUG_HTTP2SESSION2(session, "immediate shutdown initiated with " - "last stream id %d, code %d, and opaque-data length %d", - lastStreamID, errorCode, length); + session->Goaway(code, lastStreamID, data, length); } - +// Update accounting of data chunks. This is used primarily to manage timeout +// logic when using the FD Provider. void Http2Session::UpdateChunksSent(const FunctionCallbackInfo& args) { Environment* env = Environment::GetCurrent(args); Isolate* isolate = env->isolate(); @@ -1807,18 +2497,21 @@ void Http2Session::UpdateChunksSent(const FunctionCallbackInfo& args) { args.GetReturnValue().Set(length); } - +// Submits an RST_STREAM frame effectively closing the Http2Stream. Note that +// this *WILL* alter the state of the stream, causing the OnStreamClose +// callback to be triggered. void Http2Stream::RstStream(const FunctionCallbackInfo& args) { Environment* env = Environment::GetCurrent(args); Local context = env->context(); Http2Stream* stream; ASSIGN_OR_RETURN_UNWRAP(&stream, args.Holder()); uint32_t code = args[0]->Uint32Value(context).ToChecked(); - args.GetReturnValue().Set(stream->SubmitRstStream(code)); - DEBUG_HTTP2STREAM2(stream, "rst_stream code %d sent", code); + DEBUG_HTTP2STREAM2(stream, "sending rst_stream with code %d", code); + stream->SubmitRstStream(code); } - +// Initiates a response on the Http2Stream using the StreamBase API to provide +// outbound DATA frames. void Http2Stream::Respond(const FunctionCallbackInfo& args) { Environment* env = Environment::GetCurrent(args); Local context = env->context(); @@ -1836,7 +2529,8 @@ void Http2Stream::Respond(const FunctionCallbackInfo& args) { DEBUG_HTTP2STREAM(stream, "response submitted"); } - +// Initiates a response on the Http2Stream using a file descriptor to provide +// outbound DATA frames.
void Http2Stream::RespondFD(const FunctionCallbackInfo& args) { Environment* env = Environment::GetCurrent(args); Local context = env->context(); @@ -1859,7 +2553,7 @@ void Http2Stream::RespondFD(const FunctionCallbackInfo& args) { DEBUG_HTTP2STREAM2(stream, "file response submitted for fd %d", fd); } - +// Submits informational headers on the Http2Stream void Http2Stream::Info(const FunctionCallbackInfo& args) { Environment* env = Environment::GetCurrent(args); Local context = env->context(); @@ -1875,14 +2569,14 @@ void Http2Stream::Info(const FunctionCallbackInfo& args) { headers->Length()); } - +// Grab the numeric id of the Http2Stream void Http2Stream::GetID(const FunctionCallbackInfo& args) { Http2Stream* stream; ASSIGN_OR_RETURN_UNWRAP(&stream, args.Holder()); args.GetReturnValue().Set(stream->id()); } - +// Destroy the Http2Stream, rendering it no longer usable void Http2Stream::Destroy(const FunctionCallbackInfo& args) { Http2Stream* stream; ASSIGN_OR_RETURN_UNWRAP(&stream, args.Holder()); @@ -1890,7 +2584,7 @@ void Http2Stream::Destroy(const FunctionCallbackInfo& args) { stream->Destroy(); } - +// Prompt the Http2Stream to begin sending data to JS land. void Http2Stream::FlushData(const FunctionCallbackInfo& args) { Http2Stream* stream; ASSIGN_OR_RETURN_UNWRAP(&stream, args.Holder()); @@ -1898,7 +2592,7 @@ void Http2Stream::FlushData(const FunctionCallbackInfo& args) { DEBUG_HTTP2STREAM(stream, "data flushed to js"); } - +// Initiate a Push Promise and create the associated Http2Stream void Http2Stream::PushPromise(const FunctionCallbackInfo& args) { Environment* env = Environment::GetCurrent(args); Local context = env->context(); @@ -1924,7 +2618,7 @@ void Http2Stream::PushPromise(const FunctionCallbackInfo& args) { args.GetReturnValue().Set(stream->object()); } - +// Send a PRIORITY frame void Http2Stream::Priority(const FunctionCallbackInfo& args) { Environment* env = Environment::GetCurrent(args); Local context = env->context(); @@ -1938,7 +2632,9 @@ void Http2Stream::Priority(const FunctionCallbackInfo& args) { DEBUG_HTTP2STREAM(stream, "priority submitted"); } - +// A TypedArray shared by C++ and JS land is used to communicate state +// information about the Http2Stream. This updates the values in that +// TypedArray so that the state can be read by JS. void Http2Stream::RefreshState(const FunctionCallbackInfo& args) { Environment* env = Environment::GetCurrent(args); Http2Stream* stream; @@ -1975,11 +2671,52 @@ void Http2Stream::RefreshState(const FunctionCallbackInfo& args) { } } +void Http2Session::AltSvc(int32_t id, + uint8_t* origin, + size_t origin_len, + uint8_t* value, + size_t value_len) { + Http2Scope h2scope(this); + CHECK_EQ(nghttp2_submit_altsvc(session_, NGHTTP2_FLAG_NONE, id, + origin, origin_len, value, value_len), 0); +} + +// Submits an AltSvc frame to be sent to the connected peer. +void Http2Session::AltSvc(const FunctionCallbackInfo& args) { + Environment* env = Environment::GetCurrent(args); + Http2Session* session; + ASSIGN_OR_RETURN_UNWRAP(&session, args.Holder()); + + int32_t id = args[0]->Int32Value(env->context()).ToChecked(); + + // origin and value are both required to be ASCII, handle them as such.
+ Local origin_str = args[1]->ToString(env->context()).ToLocalChecked(); + Local value_str = args[2]->ToString(env->context()).ToLocalChecked(); + + size_t origin_len = origin_str->Length(); + size_t value_len = value_str->Length(); + + CHECK_LE(origin_len + value_len, 16382); // Max permitted for ALTSVC + // Verify that origin len != 0 if stream id == 0, or + // that origin len == 0 if stream id != 0 + CHECK((origin_len != 0 && id == 0) || (origin_len == 0 && id != 0)); + + MaybeStackBuffer origin(origin_len); + MaybeStackBuffer value(value_len); + origin_str->WriteOneByte(*origin); + value_str->WriteOneByte(*value); + + session->AltSvc(id, *origin, origin_len, *value, value_len); +} + +// Submits a PING frame to be sent to the connected peer. void Http2Session::Ping(const FunctionCallbackInfo& args) { Environment* env = Environment::GetCurrent(args); Http2Session* session; ASSIGN_OR_RETURN_UNWRAP(&session, args.Holder()); + // A PING frame may have exactly 8 bytes of payload data. If not provided, + // then the current hrtime will be used as the payload. uint8_t* payload = nullptr; if (Buffer::HasInstance(args[0])) { payload = reinterpret_cast(Buffer::Data(args[0])); @@ -1990,20 +2727,48 @@ void Http2Session::Ping(const FunctionCallbackInfo& args) { Local obj = ping->object(); obj->Set(env->context(), env->ondone_string(), args[1]).FromJust(); + // To prevent abuse, we strictly limit the number of unacknowledged PING + // frames that may be sent at any given time. This is configurable in the + // Options when creating a Http2Session. if (!session->AddPing(ping)) { ping->Done(false); return args.GetReturnValue().Set(false); } + // The Ping itself is an Async resource. When the acknowledgement is received, + // the callback will be invoked and a notification sent out to JS land. The + // notification will include the duration of the ping, allowing the round + // trip to be measured. 
ping->Send(payload); args.GetReturnValue().Set(true); } +// Submits a SETTINGS frame for the Http2Session +void Http2Session::Settings(const FunctionCallbackInfo& args) { + Environment* env = Environment::GetCurrent(args); + Http2Session* session; + ASSIGN_OR_RETURN_UNWRAP(&session, args.Holder()); + + Http2Session::Http2Settings* settings = new Http2Settings(session); + Local obj = settings->object(); + obj->Set(env->context(), env->ondone_string(), args[0]).FromJust(); + + if (!session->AddSettings(settings)) { + settings->Done(false); + return args.GetReturnValue().Set(false); + } + + settings->Send(); + args.GetReturnValue().Set(true); +} + + Http2Session::Http2Ping* Http2Session::PopPing() { Http2Ping* ping = nullptr; if (!outstanding_pings_.empty()) { ping = outstanding_pings_.front(); outstanding_pings_.pop(); + DecrementCurrentSessionMemory(ping->self_size()); } return ping; } @@ -2012,6 +2777,25 @@ bool Http2Session::AddPing(Http2Session::Http2Ping* ping) { if (outstanding_pings_.size() == max_outstanding_pings_) return false; outstanding_pings_.push(ping); + IncrementCurrentSessionMemory(ping->self_size()); + return true; +} + +Http2Session::Http2Settings* Http2Session::PopSettings() { + Http2Settings* settings = nullptr; + if (!outstanding_settings_.empty()) { + settings = outstanding_settings_.front(); + outstanding_settings_.pop(); + DecrementCurrentSessionMemory(settings->self_size()); + } + return settings; +} + +bool Http2Session::AddSettings(Http2Session::Http2Settings* settings) { + if (outstanding_settings_.size() == max_outstanding_settings_) + return false; + outstanding_settings_.push(settings); + IncrementCurrentSessionMemory(settings->self_size()); return true; } @@ -2038,12 +2822,13 @@ void Http2Session::Http2Ping::Send(uint8_t* payload) { memcpy(&data, &startTime_, arraysize(data)); payload = data; } + Http2Scope h2scope(session_); CHECK_EQ(nghttp2_submit_ping(**session_, NGHTTP2_FLAG_NONE, payload), 0); } void Http2Session::Http2Ping::Done(bool ack, const uint8_t* payload) { - uint64_t end = uv_hrtime(); - double duration = (end - startTime_) / 1e6; + session_->statistics_.ping_rtt = (uv_hrtime() - startTime_); + double duration = (session_->statistics_.ping_rtt - startTime_) / 1e6; Local buf = Undefined(env()->isolate()); if (payload != nullptr) { @@ -2061,6 +2846,8 @@ void Http2Session::Http2Ping::Done(bool ack, const uint8_t* payload) { delete this; } + +// Set up the process.binding('http2') binding. 
void Initialize(Local target, Local unused, Local context, @@ -2089,6 +2876,10 @@ void Initialize(Local target, "settingsBuffer", state->settings_buffer.GetJSArray()); SET_STATE_TYPEDARRAY( "optionsBuffer", state->options_buffer.GetJSArray()); + SET_STATE_TYPEDARRAY( + "streamStats", state->stream_stats_buffer.GetJSArray()); + SET_STATE_TYPEDARRAY( + "sessionStats", state->session_stats_buffer.GetJSArray()); #undef SET_STATE_TYPEDARRAY env->set_http2_state(std::move(state)); @@ -2110,6 +2901,13 @@ void Initialize(Local target, pingt->SetInternalFieldCount(1); env->set_http2ping_constructor_template(pingt); + Local setting = FunctionTemplate::New(env->isolate()); + setting->SetClassName(FIXED_ONE_BYTE_STRING(env->isolate(), "Http2Setting")); + AsyncWrap::AddWrapMethods(env, setting); + Local settingt = setting->InstanceTemplate(); + settingt->SetInternalFieldCount(1); + env->set_http2settings_constructor_template(settingt); + Local stream = FunctionTemplate::New(env->isolate()); stream->SetClassName(FIXED_ONE_BYTE_STRING(env->isolate(), "Http2Stream")); env->SetProtoMethod(stream, "id", Http2Stream::GetID); @@ -2136,11 +2934,10 @@ void Initialize(Local target, session->SetClassName(http2SessionClassName); session->InstanceTemplate()->SetInternalFieldCount(1); AsyncWrap::AddWrapMethods(env, session); + env->SetProtoMethod(session, "altsvc", Http2Session::AltSvc); env->SetProtoMethod(session, "ping", Http2Session::Ping); env->SetProtoMethod(session, "consume", Http2Session::Consume); env->SetProtoMethod(session, "destroy", Http2Session::Destroy); - env->SetProtoMethod(session, "destroying", Http2Session::Destroying); - env->SetProtoMethod(session, "shutdownNotice", Http2Session::ShutdownNotice); env->SetProtoMethod(session, "goaway", Http2Session::Goaway); env->SetProtoMethod(session, "settings", Http2Session::Settings); env->SetProtoMethod(session, "request", Http2Session::Request); @@ -2223,6 +3020,7 @@ void Initialize(Local target, NODE_DEFINE_CONSTANT(constants, NGHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE); NODE_DEFINE_CONSTANT(constants, PADDING_STRATEGY_NONE); + NODE_DEFINE_CONSTANT(constants, PADDING_STRATEGY_ALIGNED); NODE_DEFINE_CONSTANT(constants, PADDING_STRATEGY_MAX); NODE_DEFINE_CONSTANT(constants, PADDING_STRATEGY_CALLBACK); diff --git a/src/node_http2.h b/src/node_http2.h index 429fbdcdf05e65..3deaf185996516 100644 --- a/src/node_http2.h +++ b/src/node_http2.h @@ -5,6 +5,7 @@ #include "nghttp2/nghttp2.h" #include "node_http2_state.h" +#include "node_perf.h" #include "stream_base-inl.h" #include "string_bytes.h" @@ -19,6 +20,8 @@ using v8::EscapableHandleScope; using v8::Isolate; using v8::MaybeLocal; +using performance::PerformanceEntry; + #ifdef NODE_DEBUG_HTTP2 // Adapted from nghttp2 own debug printer @@ -36,16 +39,20 @@ void inline debug_vfprintf(const char* format, ...) { #define DEBUG_HTTP2(...) debug_vfprintf(__VA_ARGS__); #define DEBUG_HTTP2SESSION(session, message) \ do { \ - DEBUG_HTTP2("Http2Session %s (%.0lf) " message "\n", \ - session->TypeName(), \ - session->get_async_id()); \ + if (session != nullptr) { \ + DEBUG_HTTP2("Http2Session %s (%.0lf) " message "\n", \ + session->TypeName(), \ + session->get_async_id()); \ + } \ } while (0) #define DEBUG_HTTP2SESSION2(session, message, ...) 
\ do { \ - DEBUG_HTTP2("Http2Session %s (%.0lf) " message "\n", \ - session->TypeName(), \ - session->get_async_id(), \ + if (session != nullptr) { \ + DEBUG_HTTP2("Http2Session %s (%.0lf) " message "\n", \ + session->TypeName(), \ + session->get_async_id(), \ __VA_ARGS__); \ + } \ } while (0) #define DEBUG_HTTP2STREAM(stream, message) \ do { \ @@ -70,7 +77,19 @@ void inline debug_vfprintf(const char* format, ...) { #define DEBUG_HTTP2STREAM2(...) do {} while (0) #endif +// We strictly limit the number of outstanding unacknowledged PINGS a user +// may send in order to prevent abuse. The current default cap is 10. The +// user may set a different limit using a per Http2Session configuration +// option. #define DEFAULT_MAX_PINGS 10 + +// Also strictly limit the number of outstanding SETTINGS frames a user sends +#define DEFAULT_MAX_SETTINGS 10 + +// Default maximum total memory cap for Http2Session. +#define DEFAULT_MAX_SESSION_MEMORY 1e7; + +// These are the standard HTTP/2 defaults as specified by the RFC #define DEFAULT_SETTINGS_HEADER_TABLE_SIZE 4096 #define DEFAULT_SETTINGS_ENABLE_PUSH 1 #define DEFAULT_SETTINGS_INITIAL_WINDOW_SIZE 65535 @@ -83,8 +102,6 @@ void inline debug_vfprintf(const char* format, ...) { #define MAX_MAX_HEADER_LIST_SIZE 16777215u #define DEFAULT_MAX_HEADER_LIST_PAIRS 128u -struct nghttp2_stream_write_t; - #define MAX_BUFFER_COUNT 16 enum nghttp2_session_type { @@ -109,24 +126,25 @@ enum nghttp2_stream_flags { // Stream is destroyed NGHTTP2_STREAM_FLAG_DESTROYED = 0x10, // Stream has trailers - NGHTTP2_STREAM_FLAG_TRAILERS = 0x20 + NGHTTP2_STREAM_FLAG_TRAILERS = 0x20, + // Stream has received all the data it can + NGHTTP2_STREAM_FLAG_EOS = 0x40 }; enum nghttp2_stream_options { + // Stream is not going to have any DATA frames STREAM_OPTION_EMPTY_PAYLOAD = 0x1, + // Stream might have trailing headers STREAM_OPTION_GET_TRAILERS = 0x2, }; -// Callbacks -typedef void (*nghttp2_stream_write_cb)( - nghttp2_stream_write_t* req, - int status); - struct nghttp2_stream_write { - unsigned int nbufs = 0; - nghttp2_stream_write_t* req = nullptr; - nghttp2_stream_write_cb cb = nullptr; - MaybeStackBuffer bufs; + WriteWrap* req_wrap = nullptr; + uv_buf_t buf; + + inline explicit nghttp2_stream_write(uv_buf_t buf_) : buf(buf_) {} + inline nghttp2_stream_write(WriteWrap* req, uv_buf_t buf_) : + req_wrap(req), buf(buf_) {} }; struct nghttp2_header { @@ -136,12 +154,6 @@ struct nghttp2_header { }; - -struct nghttp2_stream_write_t { - void* data; - int status; -}; - // Unlike the HTTP/1 implementation, the HTTP/2 implementation is not limited // to a fixed number of known supported HTTP methods. These constants, therefore // are provided strictly as a convenience to users and are exposed via the @@ -353,6 +365,8 @@ HTTP_STATUS_CODES(V) enum padding_strategy_type { // No padding strategy. This is the default. PADDING_STRATEGY_NONE, + // Attempts to ensure that the frame is 8-byte aligned + PADDING_STRATEGY_ALIGNED, // Padding will ensure all data frames are maxFrameSize PADDING_STRATEGY_MAX, // Padding will be determined via a JS callback. 
Note that this can be @@ -416,7 +430,10 @@ const char* nghttp2_errname(int rv) { enum session_state_flags { SESSION_STATE_NONE = 0x0, - SESSION_STATE_DESTROYING = 0x1 + SESSION_STATE_HAS_SCOPE = 0x1, + SESSION_STATE_WRITE_SCHEDULED = 0x2, + SESSION_STATE_CLOSED = 0x4, + SESSION_STATE_SENDING = 0x8, }; // This allows for 4 default-sized frames with their frame headers @@ -428,6 +445,20 @@ typedef uint32_t(*get_setting)(nghttp2_session* session, class Http2Session; class Http2Stream; +// This scope should be present when any call into nghttp2 that may schedule +// data to be written to the underlying transport is made, and schedules +// such a write automatically once the scope is exited. +class Http2Scope { + public: + explicit Http2Scope(Http2Stream* stream); + explicit Http2Scope(Http2Session* session); + ~Http2Scope(); + + private: + Http2Session* session_ = nullptr; + Local session_handle_; +}; + // The Http2Options class is used to parse the options object passed in to // a Http2Session object and convert those into an appropriate nghttp2_option // struct. This is the primary mechanism by which the Http2Session object is @@ -468,41 +499,29 @@ class Http2Options { return max_outstanding_pings_; } - private: - nghttp2_option* options_; - uint32_t max_header_pairs_ = DEFAULT_MAX_HEADER_LIST_PAIRS; - padding_strategy_type padding_strategy_ = PADDING_STRATEGY_NONE; - size_t max_outstanding_pings_ = DEFAULT_MAX_PINGS; -}; - -// The Http2Settings class is used to parse the settings passed in for -// an Http2Session, converting those into an array of nghttp2_settings_entry -// structs. -class Http2Settings { - public: - explicit Http2Settings(Environment* env); - - size_t length() const { return count_; } - - nghttp2_settings_entry* operator*() { - return *entries_; + void SetMaxOutstandingSettings(size_t max) { + max_outstanding_settings_ = max; } - // Returns a Buffer instance with the serialized SETTINGS payload - inline Local Pack(); + size_t GetMaxOutstandingSettings() { + return max_outstanding_settings_; + } - // Resets the default values in the settings buffer - static inline void RefreshDefaults(Environment* env); + void SetMaxSessionMemory(uint64_t max) { + max_session_memory_ = max; + } - // Update the local or remote settings for the given session - static inline void Update(Environment* env, - Http2Session* session, - get_setting fn); + uint64_t GetMaxSessionMemory() { + return max_session_memory_; + } private: - Environment* env_; - size_t count_ = 0; - MaybeStackBuffer entries_; + nghttp2_option* options_; + uint64_t max_session_memory_ = DEFAULT_MAX_SESSION_MEMORY; + uint32_t max_header_pairs_ = DEFAULT_MAX_HEADER_LIST_PAIRS; + padding_strategy_type padding_strategy_ = PADDING_STRATEGY_NONE; + size_t max_outstanding_pings_ = DEFAULT_MAX_PINGS; + size_t max_outstanding_settings_ = DEFAULT_MAX_SETTINGS; }; class Http2Priority { @@ -532,16 +551,7 @@ class Http2Stream : public AsyncWrap, Http2Session* session() { return session_; } - // Queue outbound chunks of data to be sent on this stream - inline int Write( - nghttp2_stream_write_t* req, - const uv_buf_t bufs[], - unsigned int nbufs, - nghttp2_stream_write_cb cb); - - inline void AddChunk(const uint8_t* data, size_t len); - - inline void FlushDataChunks(); + inline void EmitStatistics(); // Process a Data Chunk void OnDataChunk(uv_buf_t* chunk); @@ -576,7 +586,7 @@ class Http2Stream : public AsyncWrap, bool silent = false); // Submits an RST_STREAM frame using the given code - inline int SubmitRstStream(const uint32_t code); + 
inline void SubmitRstStream(const uint32_t code); // Submits a PUSH_PROMISE frame with this stream as the parent. inline Http2Stream* SubmitPushPromise( @@ -628,6 +638,9 @@ class Http2Stream : public AsyncWrap, // Returns the stream identifier for this stream inline int32_t id() const { return id_; } + inline void IncrementAvailableOutboundLength(size_t amount); + inline void DecrementAvailableOutboundLength(size_t amount); + inline bool AddHeader(nghttp2_rcbuf* name, nghttp2_rcbuf* value, uint8_t flags); @@ -697,6 +710,18 @@ class Http2Stream : public AsyncWrap, class Provider; + struct Statistics { + uint64_t start_time; + uint64_t end_time; + uint64_t first_header; // Time first header was received + uint64_t first_byte; // Time first DATA frame byte was received + uint64_t first_byte_sent; // Time first DATA frame byte was sent + uint64_t sent_bytes; + uint64_t received_bytes; + }; + + Statistics statistics_ = {}; + private: Http2Session* session_; // The Parent HTTP/2 Session int32_t id_; // The Stream Identifier @@ -713,16 +738,20 @@ class Http2Stream : public AsyncWrap, uint32_t current_headers_length_ = 0; // total number of octets std::vector current_headers_; - // Inbound Data... This is the data received via DATA frames for this stream. - std::queue data_chunks_; + // This keeps track of the amount of data read from the socket while the + // socket was in paused mode. When `ReadStart()` is called (and not before + // then), we tell nghttp2 that we consumed that data to get proper + // backpressure handling. + size_t inbound_consumed_data_while_paused_ = 0; // Outbound Data... This is the data written by the JS layer that is // waiting to be written out to the socket. - std::queue queue_; - unsigned int queue_index_ = 0; - size_t queue_offset_ = 0; + std::queue queue_; + size_t available_outbound_length_ = 0; int64_t fd_offset_ = 0; int64_t fd_length_ = -1; + + friend class Http2Session; }; class Http2Stream::Provider { @@ -781,12 +810,22 @@ class Http2Session : public AsyncWrap { ~Http2Session() override; class Http2Ping; + class Http2Settings; + + inline void EmitStatistics(); void Start(); void Stop(); - void Close(); + void Close(uint32_t code = NGHTTP2_NO_ERROR, + bool socket_closed = false); void Consume(Local external); void Unconsume(); + void Goaway(uint32_t code, int32_t lastStreamID, uint8_t* data, size_t len); + void AltSvc(int32_t id, + uint8_t* origin, + size_t origin_len, + uint8_t* value, + size_t value_len); bool Ping(v8::Local function); @@ -812,23 +851,26 @@ class Http2Session : public AsyncWrap { inline const char* TypeName(); - inline void MarkDestroying() { flags_ |= SESSION_STATE_DESTROYING; } - inline bool IsDestroying() { return flags_ & SESSION_STATE_DESTROYING; } + inline bool IsDestroyed() { + return (flags_ & SESSION_STATE_CLOSED) || session_ == nullptr; + } + + // Schedule a write if nghttp2 indicates it wants to write to the socket. + void MaybeScheduleWrite(); // Returns pointer to the stream, or nullptr if stream does not exist inline Http2Stream* FindStream(int32_t id); + inline bool CanAddStream(); + // Adds a stream instance to this session inline void AddStream(Http2Stream* stream); // Removes a stream instance from this session - inline void RemoveStream(int32_t id); - - // Sends a notice to the connected peer that the session is shutting down. - inline void SubmitShutdownNotice(); + inline void RemoveStream(Http2Stream* stream); - // Submits a SETTINGS frame to the connected peer. 
- inline void Settings(const nghttp2_settings_entry iv[], size_t niv); + // Indicates whether there currently exist outgoing buffers for this stream. + bool HasWritesOnSocketForStream(Http2Stream* stream); // Write data to the session inline ssize_t Write(const uv_buf_t* bufs, size_t nbufs); @@ -850,6 +892,8 @@ class Http2Session : public AsyncWrap { const uv_buf_t* bufs, uv_handle_type pending, void* ctx); + static void OnStreamAfterWriteImpl(WriteWrap* w, int status, void* ctx); + static void OnStreamDestructImpl(void* ctx); // The JavaScript API static void New(const FunctionCallbackInfo& args); @@ -860,11 +904,11 @@ class Http2Session : public AsyncWrap { static void Settings(const FunctionCallbackInfo& args); static void Request(const FunctionCallbackInfo& args); static void SetNextStreamID(const FunctionCallbackInfo& args); - static void ShutdownNotice(const FunctionCallbackInfo& args); static void Goaway(const FunctionCallbackInfo& args); static void UpdateChunksSent(const FunctionCallbackInfo& args); static void RefreshState(const FunctionCallbackInfo& args); static void Ping(const FunctionCallbackInfo& args); + static void AltSvc(const FunctionCallbackInfo& args); template static void RefreshSettings(const FunctionCallbackInfo& args); @@ -872,7 +916,6 @@ class Http2Session : public AsyncWrap { template static void GetSettings(const FunctionCallbackInfo& args); - void Send(WriteWrap* req, char* buf, size_t length); WriteWrap* AllocateSend(); uv_loop_t* event_loop() const { @@ -882,8 +925,52 @@ class Http2Session : public AsyncWrap { Http2Ping* PopPing(); bool AddPing(Http2Ping* ping); + Http2Settings* PopSettings(); + bool AddSettings(Http2Settings* settings); + + void IncrementCurrentSessionMemory(uint64_t amount) { + current_session_memory_ += amount; + } + + void DecrementCurrentSessionMemory(uint64_t amount) { + current_session_memory_ -= amount; + } + + // Returns the current session memory including the current size of both + // the inflate and deflate hpack headers, the current outbound storage + // queue, and pending writes. 
+ uint64_t GetCurrentSessionMemory() { + uint64_t total = current_session_memory_ + sizeof(Http2Session); + total += nghttp2_session_get_hd_deflate_dynamic_table_size(session_); + total += nghttp2_session_get_hd_inflate_dynamic_table_size(session_); + total += outgoing_storage_.size(); + return total; + } + + // Return true if current_session_memory + amount is less than the max + bool IsAvailableSessionMemory(uint64_t amount) { + return GetCurrentSessionMemory() + amount <= max_session_memory_; + } + + struct Statistics { + uint64_t start_time; + uint64_t end_time; + uint64_t ping_rtt; + uint64_t data_sent; + uint64_t data_received; + uint32_t frame_count; + uint32_t frame_sent; + int32_t stream_count; + size_t max_concurrent_streams; + double stream_average_duration; + }; + + Statistics statistics_ = {}; + private: // Frame Padding Strategies + inline ssize_t OnDWordAlignedPadding(size_t frameLength, + size_t maxPayloadLen); inline ssize_t OnMaxFrameSizePadding(size_t frameLength, size_t maxPayloadLen); inline ssize_t OnCallbackPadding(size_t frame, @@ -896,6 +983,7 @@ class Http2Session : public AsyncWrap { inline void HandlePriorityFrame(const nghttp2_frame* frame); inline void HandleSettingsFrame(const nghttp2_frame* frame); inline void HandlePingFrame(const nghttp2_frame* frame); + inline void HandleAltSvcFrame(const nghttp2_frame* frame); // nghttp2 callbacks static inline int OnBeginHeadersCallback( @@ -918,6 +1006,10 @@ class Http2Session : public AsyncWrap { const nghttp2_frame* frame, int error_code, void* user_data); + static inline int OnFrameSent( + nghttp2_session* session, + const nghttp2_frame* frame, + void* user_data); static inline int OnStreamClose( nghttp2_session* session, int32_t id, @@ -947,23 +1039,17 @@ class Http2Session : public AsyncWrap { const char* message, size_t len, void* user_data); - - - static inline ssize_t OnStreamReadFD( + static inline int OnSendData( nghttp2_session* session, - int32_t id, - uint8_t* buf, + nghttp2_frame* frame, + const uint8_t* framehd, size_t length, - uint32_t* flags, nghttp2_data_source* source, void* user_data); - static inline ssize_t OnStreamRead( + static inline int OnInvalidFrame( nghttp2_session* session, - int32_t id, - uint8_t* buf, - size_t length, - uint32_t* flags, - nghttp2_data_source* source, + const nghttp2_frame *frame, + int lib_error_code, void* user_data); struct Callbacks { @@ -985,6 +1071,10 @@ class Http2Session : public AsyncWrap { // The maximum number of header pairs permitted for streams on this session uint32_t max_header_pairs_ = DEFAULT_MAX_HEADER_LIST_PAIRS; + // The maximum amount of memory allocated for this session + uint64_t max_session_memory_ = DEFAULT_MAX_SESSION_MEMORY; + uint64_t current_session_memory_ = 0; + // The collection of active Http2Streams associated with this session std::unordered_map streams_; @@ -999,11 +1089,104 @@ class Http2Session : public AsyncWrap { // use this to allow timeout tracking during long-lasting writes uint32_t chunks_sent_since_last_write_ = 0; - uv_prepare_t* prep_ = nullptr; - char stream_buf_[kAllocBufferSize]; + char* stream_buf_ = nullptr; + size_t stream_buf_size_ = 0; + v8::Local stream_buf_ab_; size_t max_outstanding_pings_ = DEFAULT_MAX_PINGS; std::queue outstanding_pings_; + + size_t max_outstanding_settings_ = DEFAULT_MAX_SETTINGS; + std::queue outstanding_settings_; + + std::vector outgoing_buffers_; + std::vector outgoing_storage_; + + void CopyDataIntoOutgoing(const uint8_t* src, size_t src_length); + void ClearOutgoing(int status); + + 
friend class Http2Scope; +}; + +class Http2SessionPerformanceEntry : public PerformanceEntry { + public: + Http2SessionPerformanceEntry( + Environment* env, + const Http2Session::Statistics& stats, + nghttp2_session_type type) : + PerformanceEntry(env, "Http2Session", "http2", + stats.start_time, + stats.end_time), + ping_rtt_(stats.ping_rtt), + data_sent_(stats.data_sent), + data_received_(stats.data_received), + frame_count_(stats.frame_count), + frame_sent_(stats.frame_sent), + stream_count_(stats.stream_count), + max_concurrent_streams_(stats.max_concurrent_streams), + stream_average_duration_(stats.stream_average_duration), + session_type_(type) { } + + uint64_t ping_rtt() const { return ping_rtt_; } + uint64_t data_sent() const { return data_sent_; } + uint64_t data_received() const { return data_received_; } + uint32_t frame_count() const { return frame_count_; } + uint32_t frame_sent() const { return frame_sent_; } + int32_t stream_count() const { return stream_count_; } + size_t max_concurrent_streams() const { return max_concurrent_streams_; } + double stream_average_duration() const { return stream_average_duration_; } + nghttp2_session_type type() const { return session_type_; } + + void Notify(Local obj) { + PerformanceEntry::Notify(env(), kind(), obj); + } + + private: + uint64_t ping_rtt_; + uint64_t data_sent_; + uint64_t data_received_; + uint32_t frame_count_; + uint32_t frame_sent_; + int32_t stream_count_; + size_t max_concurrent_streams_; + double stream_average_duration_; + nghttp2_session_type session_type_; +}; + +class Http2StreamPerformanceEntry : public PerformanceEntry { + public: + Http2StreamPerformanceEntry( + Environment* env, + int32_t id, + const Http2Stream::Statistics& stats) : + PerformanceEntry(env, "Http2Stream", "http2", + stats.start_time, + stats.end_time), + id_(id), + first_header_(stats.first_header), + first_byte_(stats.first_byte), + first_byte_sent_(stats.first_byte_sent), + sent_bytes_(stats.sent_bytes), + received_bytes_(stats.received_bytes) { } + + int32_t id() const { return id_; } + uint64_t first_header() const { return first_header_; } + uint64_t first_byte() const { return first_byte_; } + uint64_t first_byte_sent() const { return first_byte_sent_; } + uint64_t sent_bytes() const { return sent_bytes_; } + uint64_t received_bytes() const { return received_bytes_; } + + void Notify(Local obj) { + PerformanceEntry::Notify(env(), kind(), obj); + } + + private: + int32_t id_; + uint64_t first_header_; + uint64_t first_byte_; + uint64_t first_byte_sent_; + uint64_t sent_bytes_; + uint64_t received_bytes_; }; class Http2Session::Http2Ping : public AsyncWrap { @@ -1019,6 +1202,47 @@ class Http2Session::Http2Ping : public AsyncWrap { private: Http2Session* session_; uint64_t startTime_; + + friend class Http2Session; +}; + +// The Http2Settings class is used to parse the settings passed in for +// an Http2Session, converting those into an array of nghttp2_settings_entry +// structs. 
+class Http2Session::Http2Settings : public AsyncWrap { + public: + explicit Http2Settings(Environment* env); + explicit Http2Settings(Http2Session* session); + ~Http2Settings(); + + size_t self_size() const override { return sizeof(*this); } + + void Send(); + void Done(bool ack); + + size_t length() const { return count_; } + + nghttp2_settings_entry* operator*() { + return *entries_; + } + + // Returns a Buffer instance with the serialized SETTINGS payload + inline Local Pack(); + + // Resets the default values in the settings buffer + static inline void RefreshDefaults(Environment* env); + + // Update the local or remote settings for the given session + static inline void Update(Environment* env, + Http2Session* session, + get_setting fn); + + private: + void Init(); + Http2Session* session_; + uint64_t startTime_; + size_t count_ = 0; + MaybeStackBuffer entries_; }; class ExternalHeader : diff --git a/src/node_http2_state.h b/src/node_http2_state.h index a7ad23fb519886..ed88f068a04b16 100644 --- a/src/node_http2_state.h +++ b/src/node_http2_state.h @@ -49,6 +49,8 @@ namespace http2 { IDX_OPTIONS_PADDING_STRATEGY, IDX_OPTIONS_MAX_HEADER_LIST_PAIRS, IDX_OPTIONS_MAX_OUTSTANDING_PINGS, + IDX_OPTIONS_MAX_OUTSTANDING_SETTINGS, + IDX_OPTIONS_MAX_SESSION_MEMORY, IDX_OPTIONS_FLAGS }; @@ -59,6 +61,29 @@ namespace http2 { PADDING_BUF_FIELD_COUNT }; + enum Http2StreamStatisticsIndex { + IDX_STREAM_STATS_ID, + IDX_STREAM_STATS_TIMETOFIRSTBYTE, + IDX_STREAM_STATS_TIMETOFIRSTHEADER, + IDX_STREAM_STATS_TIMETOFIRSTBYTESENT, + IDX_STREAM_STATS_SENTBYTES, + IDX_STREAM_STATS_RECEIVEDBYTES, + IDX_STREAM_STATS_COUNT + }; + + enum Http2SessionStatisticsIndex { + IDX_SESSION_STATS_TYPE, + IDX_SESSION_STATS_PINGRTT, + IDX_SESSION_STATS_FRAMESRECEIVED, + IDX_SESSION_STATS_FRAMESSENT, + IDX_SESSION_STATS_STREAMCOUNT, + IDX_SESSION_STATS_STREAMAVERAGEDURATION, + IDX_SESSION_STATS_DATA_SENT, + IDX_SESSION_STATS_DATA_RECEIVED, + IDX_SESSION_STATS_MAX_CONCURRENT_STREAMS, + IDX_SESSION_STATS_COUNT + }; + class http2_state { public: explicit http2_state(v8::Isolate* isolate) : @@ -75,6 +100,16 @@ class http2_state { offsetof(http2_state_internal, stream_state_buffer), IDX_STREAM_STATE_COUNT, root_buffer), + stream_stats_buffer( + isolate, + offsetof(http2_state_internal, stream_stats_buffer), + IDX_STREAM_STATS_COUNT, + root_buffer), + session_stats_buffer( + isolate, + offsetof(http2_state_internal, session_stats_buffer), + IDX_SESSION_STATS_COUNT, + root_buffer), padding_buffer( isolate, offsetof(http2_state_internal, padding_buffer), @@ -95,6 +130,8 @@ class http2_state { AliasedBuffer root_buffer; AliasedBuffer session_state_buffer; AliasedBuffer stream_state_buffer; + AliasedBuffer stream_stats_buffer; + AliasedBuffer session_stats_buffer; AliasedBuffer padding_buffer; AliasedBuffer options_buffer; AliasedBuffer settings_buffer; @@ -104,6 +141,8 @@ class http2_state { // doubles first so that they are always sizeof(double)-aligned double session_state_buffer[IDX_SESSION_STATE_COUNT]; double stream_state_buffer[IDX_STREAM_STATE_COUNT]; + double stream_stats_buffer[IDX_STREAM_STATS_COUNT]; + double session_stats_buffer[IDX_SESSION_STATS_COUNT]; uint32_t padding_buffer[PADDING_BUF_FIELD_COUNT]; uint32_t options_buffer[IDX_OPTIONS_FLAGS + 1]; uint32_t settings_buffer[IDX_SETTINGS_COUNT + 1]; diff --git a/src/node_internals.h b/src/node_internals.h index 8f3fb4fb9aa27d..b2cef8c9abcad5 100644 --- a/src/node_internals.h +++ b/src/node_internals.h @@ -218,6 +218,11 @@ void GetSockOrPeerName(const 
v8::FunctionCallbackInfo& args) { args.GetReturnValue().Set(err); } +void FatalException(v8::Isolate* isolate, + v8::Local error, + v8::Local message); + + void SignalExit(int signo); #ifdef __POSIX__ void RegisterSignalHandler(int signal, @@ -325,6 +330,19 @@ v8::MaybeLocal New(Environment* env, // Mixing operator new and free() is undefined behavior so don't do that. v8::MaybeLocal New(Environment* env, char* data, size_t length); +inline +v8::MaybeLocal New(Environment* env, + v8::Local ab, + size_t byte_offset, + size_t length) { + v8::Local ui = v8::Uint8Array::New(ab, byte_offset, length); + v8::Maybe mb = + ui->SetPrototype(env->context(), env->buffer_prototype_object()); + if (mb.IsNothing()) + return v8::MaybeLocal(); + return ui; +} + // Construct a Buffer from a MaybeStackBuffer (and also its subclasses like // Utf8Value and TwoByteValue). // If |buf| is invalidated, an empty MaybeLocal is returned, and nothing is diff --git a/src/node_perf.cc b/src/node_perf.cc index 02145eeffdba12..d37f8ad002ec22 100644 --- a/src/node_perf.cc +++ b/src/node_perf.cc @@ -17,9 +17,8 @@ using v8::Integer; using v8::Isolate; using v8::Local; using v8::Name; +using v8::Number; using v8::Object; -using v8::ObjectTemplate; -using v8::PropertyCallbackInfo; using v8::String; using v8::Value; @@ -30,37 +29,78 @@ uint64_t performance_v8_start; uint64_t performance_last_gc_start_mark_ = 0; v8::GCType performance_last_gc_type_ = v8::GCType::kGCTypeAll; +// Initialize the performance entry object properties +inline void InitObject(const PerformanceEntry& entry, Local obj) { + Environment* env = entry.env(); + Isolate* isolate = env->isolate(); + Local context = env->context(); + v8::PropertyAttribute attr = + static_cast(v8::ReadOnly | v8::DontDelete); + obj->DefineOwnProperty(context, + env->name_string(), + String::NewFromUtf8(isolate, + entry.name().c_str(), + String::kNormalString), + attr).FromJust(); + obj->DefineOwnProperty(context, + FIXED_ONE_BYTE_STRING(isolate, "entryType"), + String::NewFromUtf8(isolate, + entry.type().c_str(), + String::kNormalString), + attr).FromJust(); + obj->DefineOwnProperty(context, + FIXED_ONE_BYTE_STRING(isolate, "startTime"), + Number::New(isolate, entry.startTime()), + attr).FromJust(); + obj->DefineOwnProperty(context, + FIXED_ONE_BYTE_STRING(isolate, "duration"), + Number::New(isolate, entry.duration()), + attr).FromJust(); +} + +// Create a new PerformanceEntry object +const Local PerformanceEntry::ToObject() const { + Local obj = + env_->performance_entry_template() + ->NewInstance(env_->context()).ToLocalChecked(); + InitObject(*this, obj); + return obj; +} + +// Allow creating a PerformanceEntry object from JavaScript void PerformanceEntry::New(const FunctionCallbackInfo& args) { Environment* env = Environment::GetCurrent(args); Isolate* isolate = env->isolate(); Utf8Value name(isolate, args[0]); Utf8Value type(isolate, args[1]); uint64_t now = PERFORMANCE_NOW(); - new PerformanceEntry(env, args.This(), *name, *type, now, now); + PerformanceEntry entry(env, *name, *type, now, now); + Local obj = args.This(); + InitObject(entry, obj); + PerformanceEntry::Notify(env, entry.kind(), obj); } -void PerformanceEntry::NotifyObservers(Environment* env, - PerformanceEntry* entry) { +// Pass the PerformanceEntry object to the PerformanceObservers +void PerformanceEntry::Notify(Environment* env, + PerformanceEntryType type, + Local object) { + Context::Scope scope(env->context()); AliasedBuffer& observers = env->performance_state()->observers; - PerformanceEntryType type = 
ToPerformanceEntryTypeEnum(entry->type().c_str()); - if (type == NODE_PERFORMANCE_ENTRY_TYPE_INVALID || - !observers[type]) { - return; + if (type != NODE_PERFORMANCE_ENTRY_TYPE_INVALID && + observers[type]) { + node::MakeCallback(env->isolate(), + env->process_object(), + env->performance_entry_callback(), + 1, &object); } - Local context = env->context(); - Isolate* isolate = env->isolate(); - Local argv = entry->object(); - env->performance_entry_callback()->Call(context, - v8::Undefined(isolate), - 1, &argv).ToLocalChecked(); } +// Create a User Timing Mark void Mark(const FunctionCallbackInfo& args) { Environment* env = Environment::GetCurrent(args); - Local context = env->context(); - Isolate* isolate = env->isolate(); - Utf8Value name(isolate, args[0]); + HandleScope scope(env->isolate()); + Utf8Value name(env->isolate(), args[0]); uint64_t now = PERFORMANCE_NOW(); auto marks = env->performance_marks(); (*marks)[*name] = now; @@ -68,25 +108,27 @@ void Mark(const FunctionCallbackInfo& args) { // TODO(jasnell): Once Tracing API is fully implemented, this should // record a trace event also. - Local fn = env->performance_entry_template(); - Local obj = fn->NewInstance(context).ToLocalChecked(); - new PerformanceEntry(env, obj, *name, "mark", now, now); + PerformanceEntry entry(env, *name, "mark", now, now); + Local obj = entry.ToObject(); + PerformanceEntry::Notify(env, entry.kind(), obj); args.GetReturnValue().Set(obj); } + inline uint64_t GetPerformanceMark(Environment* env, std::string name) { auto marks = env->performance_marks(); auto res = marks->find(name); return res != marks->end() ? res->second : 0; } +// Create a User Timing Measure. A Measure is a PerformanceEntry that +// measures the duration between two distinct user timing marks void Measure(const FunctionCallbackInfo& args) { Environment* env = Environment::GetCurrent(args); - Local context = env->context(); - Isolate* isolate = env->isolate(); - Utf8Value name(isolate, args[0]); - Utf8Value startMark(isolate, args[1]); - Utf8Value endMark(isolate, args[2]); + HandleScope scope(env->isolate()); + Utf8Value name(env->isolate(), args[0]); + Utf8Value startMark(env->isolate(), args[1]); + Utf8Value endMark(env->isolate(), args[2]); AliasedBuffer& milestones = env->performance_state()->milestones; @@ -114,45 +156,13 @@ void Measure(const FunctionCallbackInfo& args) { // TODO(jasnell): Once Tracing API is fully implemented, this should // record a trace event also. 
- Local fn = env->performance_entry_template(); - Local obj = fn->NewInstance(context).ToLocalChecked(); - new PerformanceEntry(env, obj, *name, "measure", - startTimestamp, endTimestamp); + PerformanceEntry entry(env, *name, "measure", startTimestamp, endTimestamp); + Local obj = entry.ToObject(); + PerformanceEntry::Notify(env, entry.kind(), obj); args.GetReturnValue().Set(obj); } -void GetPerformanceEntryName(const Local prop, - const PropertyCallbackInfo& info) { - Isolate* isolate = info.GetIsolate(); - PerformanceEntry* entry; - ASSIGN_OR_RETURN_UNWRAP(&entry, info.Holder()); - info.GetReturnValue().Set( - String::NewFromUtf8(isolate, entry->name().c_str(), String::kNormalString)); -} - -void GetPerformanceEntryType(const Local prop, - const PropertyCallbackInfo& info) { - Isolate* isolate = info.GetIsolate(); - PerformanceEntry* entry; - ASSIGN_OR_RETURN_UNWRAP(&entry, info.Holder()); - info.GetReturnValue().Set( - String::NewFromUtf8(isolate, entry->type().c_str(), String::kNormalString)); -} - -void GetPerformanceEntryStartTime(const Local prop, - const PropertyCallbackInfo& info) { - PerformanceEntry* entry; - ASSIGN_OR_RETURN_UNWRAP(&entry, info.Holder()); - info.GetReturnValue().Set(entry->startTime()); -} - -void GetPerformanceEntryDuration(const Local prop, - const PropertyCallbackInfo& info) { - PerformanceEntry* entry; - ASSIGN_OR_RETURN_UNWRAP(&entry, info.Holder()); - info.GetReturnValue().Set(entry->duration()); -} - +// Allows specific Node.js lifecycle milestones to be set from JavaScript void MarkMilestone(const FunctionCallbackInfo& args) { Environment* env = Environment::GetCurrent(args); Local context = env->context(); @@ -166,45 +176,36 @@ void MarkMilestone(const FunctionCallbackInfo& args) { } } + void SetupPerformanceObservers(const FunctionCallbackInfo& args) { Environment* env = Environment::GetCurrent(args); CHECK(args[0]->IsFunction()); env->set_performance_entry_callback(args[0].As()); } -void PerformanceGCCallback(uv_async_t* handle) { - PerformanceEntry::Data* data = - static_cast(handle->data); - Environment* env = data->env(); - Isolate* isolate = env->isolate(); - HandleScope scope(isolate); +// Creates a GC Performance Entry and passes it to observers +void PerformanceGCCallback(Environment* env, void* ptr) { + GCPerformanceEntry* entry = static_cast(ptr); + HandleScope scope(env->isolate()); Local context = env->context(); - Context::Scope context_scope(context); - Local fn; - Local obj; - PerformanceGCKind kind = static_cast(data->data()); AliasedBuffer& observers = env->performance_state()->observers; - if (!observers[NODE_PERFORMANCE_ENTRY_TYPE_GC]) { - goto cleanup; + if (observers[NODE_PERFORMANCE_ENTRY_TYPE_GC]) { + Local obj = entry->ToObject(); + v8::PropertyAttribute attr = + static_cast(v8::ReadOnly | v8::DontDelete); + obj->DefineOwnProperty(context, + FIXED_ONE_BYTE_STRING(env->isolate(), "kind"), + Integer::New(env->isolate(), entry->gckind()), + attr).FromJust(); + PerformanceEntry::Notify(env, entry->kind(), obj); } - fn = env->performance_entry_template(); - obj = fn->NewInstance(context).ToLocalChecked(); - obj->Set(context, - FIXED_ONE_BYTE_STRING(isolate, "kind"), - Integer::New(isolate, kind)).FromJust(); - new PerformanceEntry(env, obj, data); - - cleanup: - delete data; - auto closeCB = [](uv_handle_t* handle) { - delete reinterpret_cast(handle); - }; - uv_close(reinterpret_cast(handle), closeCB); + delete entry; } +// Marks the start of a GC cycle void MarkGarbageCollectionStart(Isolate* isolate, v8::GCType type, 
v8::GCCallbackFlags flags) { @@ -212,28 +213,27 @@ void MarkGarbageCollectionStart(Isolate* isolate, performance_last_gc_type_ = type; } +// Marks the end of a GC cycle void MarkGarbageCollectionEnd(Isolate* isolate, v8::GCType type, v8::GCCallbackFlags flags, void* data) { Environment* env = static_cast(data); - uv_async_t* async = new uv_async_t(); // coverity[leaked_storage] - if (uv_async_init(env->event_loop(), async, PerformanceGCCallback)) - return delete async; - uv_unref(reinterpret_cast(async)); - async->data = - new PerformanceEntry::Data(env, "gc", "gc", - performance_last_gc_start_mark_, - PERFORMANCE_NOW(), type); - CHECK_EQ(0, uv_async_send(async)); + env->SetImmediate(PerformanceGCCallback, + new GCPerformanceEntry(env, + static_cast(type), + performance_last_gc_start_mark_, + PERFORMANCE_NOW())); } + inline void SetupGarbageCollectionTracking(Environment* env) { env->isolate()->AddGCPrologueCallback(MarkGarbageCollectionStart); env->isolate()->AddGCEpilogueCallback(MarkGarbageCollectionEnd, static_cast(env)); } +// Gets the name of a function inline Local GetName(Local fn) { Local val = fn->GetDebugName(); if (val.IsEmpty() || val->IsUndefined()) { @@ -245,6 +245,9 @@ inline Local GetName(Local fn) { return val; } +// Executes a wrapped Function and captures timing information, causing a +// Function PerformanceEntry to be emitted to PerformanceObservers after +// execution. void TimerFunctionCall(const FunctionCallbackInfo& args) { Isolate* isolate = args.GetIsolate(); HandleScope scope(isolate); @@ -254,9 +257,8 @@ void TimerFunctionCall(const FunctionCallbackInfo& args) { size_t count = args.Length(); size_t idx; std::vector> call_args; - for (size_t i = 0; i < count; ++i) { + for (size_t i = 0; i < count; ++i) call_args.push_back(args[i]); - } Utf8Value name(isolate, GetName(fn)); @@ -293,15 +295,14 @@ void TimerFunctionCall(const FunctionCallbackInfo& args) { if (!observers[NODE_PERFORMANCE_ENTRY_TYPE_FUNCTION]) return; - Local ctor = env->performance_entry_template(); - v8::MaybeLocal instance = ctor->NewInstance(context); - Local obj = instance.ToLocalChecked(); - for (idx = 0; idx < count; idx++) { - obj->Set(context, idx, args[idx]).ToChecked(); - } - new PerformanceEntry(env, obj, *name, "function", start, end); + PerformanceEntry entry(env, *name, "function", start, end); + Local obj = entry.ToObject(); + for (idx = 0; idx < count; idx++) + obj->Set(context, idx, args[idx]).FromJust(); + PerformanceEntry::Notify(env, entry.kind(), obj); } +// Wraps a Function in a TimerFunctionCall void Timerify(const FunctionCallbackInfo& args) { Environment* env = Environment::GetCurrent(args); Local context = env->context(); @@ -314,6 +315,7 @@ void Timerify(const FunctionCallbackInfo& args) { args.GetReturnValue().Set(wrap); } + void Init(Local target, Local unused, Local context) { @@ -332,19 +334,10 @@ void Init(Local target, Local performanceEntryString = FIXED_ONE_BYTE_STRING(isolate, "PerformanceEntry"); - Local pe = env->NewFunctionTemplate(PerformanceEntry::New); - pe->InstanceTemplate()->SetInternalFieldCount(1); + Local pe = FunctionTemplate::New(isolate); pe->SetClassName(performanceEntryString); - Local ot = pe->InstanceTemplate(); - ot->SetAccessor(env->name_string(), GetPerformanceEntryName); - ot->SetAccessor(FIXED_ONE_BYTE_STRING(isolate, "entryType"), - GetPerformanceEntryType); - ot->SetAccessor(FIXED_ONE_BYTE_STRING(isolate, "startTime"), - GetPerformanceEntryStartTime); - ot->SetAccessor(FIXED_ONE_BYTE_STRING(isolate, "duration"), - 
GetPerformanceEntryDuration); Local fn = pe->GetFunction(); - target->Set(performanceEntryString, fn); + target->Set(context, performanceEntryString, fn).FromJust(); env->set_performance_entry_template(fn); env->SetMethod(target, "mark", Mark); diff --git a/src/node_perf.h b/src/node_perf.h index ba7a326471d695..6091c0752cd493 100644 --- a/src/node_perf.h +++ b/src/node_perf.h @@ -42,120 +42,51 @@ static inline PerformanceEntryType ToPerformanceEntryTypeEnum( NODE_EXTERN inline void MarkPerformanceMilestone( Environment* env, PerformanceMilestone milestone) { - env->performance_state()->milestones[milestone] = PERFORMANCE_NOW(); - } + env->performance_state()->milestones[milestone] = PERFORMANCE_NOW(); +} -class PerformanceEntry : public BaseObject { +class PerformanceEntry { public: - // Used for temporary storage of performance entry details when the - // object cannot be created immediately. - class Data { - public: - Data( - Environment* env, - const char* name, - const char* type, - uint64_t startTime, - uint64_t endTime, - int data = 0) : - env_(env), - name_(name), - type_(type), - startTime_(startTime), - endTime_(endTime), - data_(data) {} - - Environment* env() const { - return env_; - } - - const std::string& name() const { - return name_; - } - - const std::string& type() const { - return type_; - } - - uint64_t startTime() const { - return startTime_; - } - - uint64_t endTime() const { - return endTime_; - } - - int data() const { - return data_; - } - - private: - Environment* const env_; - const std::string name_; - const std::string type_; - const uint64_t startTime_; - const uint64_t endTime_; - const int data_; - }; - - static void NotifyObservers(Environment* env, PerformanceEntry* entry); + static void Notify(Environment* env, + PerformanceEntryType type, + Local object); static void New(const FunctionCallbackInfo& args); PerformanceEntry(Environment* env, - Local wrap, const char* name, const char* type, uint64_t startTime, - uint64_t endTime) : - BaseObject(env, wrap), - name_(name), - type_(type), - startTime_(startTime), - endTime_(endTime) { - MakeWeak(this); - NotifyObservers(env, this); - } + uint64_t endTime) : env_(env), + name_(name), + type_(type), + startTime_(startTime), + endTime_(endTime) { } - PerformanceEntry(Environment* env, - Local wrap, - Data* data) : - BaseObject(env, wrap), - name_(data->name()), - type_(data->type()), - startTime_(data->startTime()), - endTime_(data->endTime()) { - MakeWeak(this); - NotifyObservers(env, this); - } + virtual ~PerformanceEntry() { } - ~PerformanceEntry() {} + virtual const Local ToObject() const; - const std::string& name() const { - return name_; - } + Environment* env() const { return env_; } - const std::string& type() const { - return type_; - } + const std::string& name() const { return name_; } - double startTime() const { - return startTime_ / 1e6; - } + const std::string& type() const { return type_; } - double duration() const { - return durationNano() / 1e6; + PerformanceEntryType kind() { + return ToPerformanceEntryTypeEnum(type().c_str()); } - uint64_t startTimeNano() const { - return startTime_; - } + double startTime() const { return startTime_ / 1e6; } - uint64_t durationNano() const { - return endTime_ - startTime_; - } + double duration() const { return durationNano() / 1e6; } + + uint64_t startTimeNano() const { return startTime_; } + + uint64_t durationNano() const { return endTime_ - startTime_; } private: + Environment* env_; const std::string name_; const std::string type_; const uint64_t 
startTime_; @@ -169,6 +100,21 @@ enum PerformanceGCKind { NODE_PERFORMANCE_GC_WEAKCB = GCType::kGCTypeProcessWeakCallbacks }; +class GCPerformanceEntry : public PerformanceEntry { + public: + GCPerformanceEntry(Environment* env, + PerformanceGCKind gckind, + uint64_t startTime, + uint64_t endTime) : + PerformanceEntry(env, "gc", "gc", startTime, endTime), + gckind_(gckind) { } + + PerformanceGCKind gckind() const { return gckind_; } + + private: + PerformanceGCKind gckind_; +}; + } // namespace performance } // namespace node diff --git a/src/node_perf_common.h b/src/node_perf_common.h index 02c3cc2d6650f0..435a4cffe5a753 100644 --- a/src/node_perf_common.h +++ b/src/node_perf_common.h @@ -38,7 +38,8 @@ extern uint64_t performance_v8_start; V(MARK, "mark") \ V(MEASURE, "measure") \ V(GC, "gc") \ - V(FUNCTION, "function") + V(FUNCTION, "function") \ + V(HTTP2, "http2") enum PerformanceMilestone { #define V(name, _) NODE_PERFORMANCE_MILESTONE_##name, diff --git a/src/node_url.cc b/src/node_url.cc index d248454f9ca0d2..0f7992264ecf0f 100644 --- a/src/node_url.cc +++ b/src/node_url.cc @@ -92,6 +92,16 @@ class URLHost { Value value_; HostType type_ = HostType::H_FAILED; + inline void Reset() { + using string = std::string; + switch (type_) { + case HostType::H_DOMAIN: value_.domain.~string(); break; + case HostType::H_OPAQUE: value_.opaque.~string(); break; + default: break; + } + type_ = HostType::H_FAILED; + } + // Setting the string members of the union with = is brittle because // it relies on them being initialized to a state that requires no // destruction of old data. @@ -101,23 +111,20 @@ class URLHost { // These helpers are the easiest solution but we might want to consider // just not forcing strings into an union. inline void SetOpaque(std::string&& string) { + Reset(); type_ = HostType::H_OPAQUE; new(&value_.opaque) std::string(std::move(string)); } inline void SetDomain(std::string&& string) { + Reset(); type_ = HostType::H_DOMAIN; new(&value_.domain) std::string(std::move(string)); } }; URLHost::~URLHost() { - using string = std::string; - switch (type_) { - case HostType::H_DOMAIN: value_.domain.~string(); break; - case HostType::H_OPAQUE: value_.opaque.~string(); break; - default: break; - } + Reset(); } #define ARGS(XX) \ diff --git a/src/node_util.cc b/src/node_util.cc index a40401b71926b3..1e51fbfff4b6ae 100644 --- a/src/node_util.cc +++ b/src/node_util.cc @@ -36,7 +36,6 @@ using v8::Value; #define V(_, ucname) \ static void ucname(const FunctionCallbackInfo& args) { \ - CHECK_EQ(1, args.Length()); \ args.GetReturnValue().Set(args[0]->ucname()); \ } @@ -44,7 +43,6 @@ using v8::Value; #undef V static void IsAnyArrayBuffer(const FunctionCallbackInfo& args) { - CHECK_EQ(1, args.Length()); args.GetReturnValue().Set( args[0]->IsArrayBuffer() || args[0]->IsSharedArrayBuffer()); } diff --git a/src/node_version.h b/src/node_version.h index 5bb35d4f4df5f8..80125c2384e083 100644 --- a/src/node_version.h +++ b/src/node_version.h @@ -29,7 +29,7 @@ #define NODE_VERSION_IS_LTS 1 #define NODE_VERSION_LTS_CODENAME "Carbon" -#define NODE_VERSION_IS_RELEASE 0 +#define NODE_VERSION_IS_RELEASE 1 #ifndef NODE_STRINGIFY #define NODE_STRINGIFY(n) NODE_STRINGIFY_HELPER(n) @@ -107,4 +107,7 @@ */ #define NODE_MODULE_VERSION 57 +// the NAPI_VERSION provided by this version of the runtime +#define NAPI_VERSION 3 + #endif // SRC_NODE_VERSION_H_ diff --git a/src/stream_base-inl.h b/src/stream_base-inl.h index eacdb3832c0662..cdcff67cc55e66 100644 --- a/src/stream_base-inl.h +++ b/src/stream_base-inl.h @@ 
-11,7 +11,7 @@ namespace node { -using v8::AccessorSignature; +using v8::Signature; using v8::External; using v8::FunctionCallbackInfo; using v8::FunctionTemplate; @@ -34,31 +34,41 @@ void StreamBase::AddMethods(Environment* env, enum PropertyAttribute attributes = static_cast( v8::ReadOnly | v8::DontDelete | v8::DontEnum); - Local signature = - AccessorSignature::New(env->isolate(), t); - t->PrototypeTemplate()->SetAccessor(env->fd_string(), - GetFD, - nullptr, - env->as_external(), - v8::DEFAULT, - attributes, - signature); - - t->PrototypeTemplate()->SetAccessor(env->external_stream_string(), - GetExternal, - nullptr, - env->as_external(), - v8::DEFAULT, - attributes, - signature); - - t->PrototypeTemplate()->SetAccessor(env->bytes_read_string(), - GetBytesRead, - nullptr, - env->as_external(), - v8::DEFAULT, - attributes, - signature); + + Local signature = Signature::New(env->isolate(), t); + + Local get_fd_templ = + FunctionTemplate::New(env->isolate(), + GetFD, + env->as_external(), + signature); + + Local get_external_templ = + FunctionTemplate::New(env->isolate(), + GetExternal, + env->as_external(), + signature); + + Local get_bytes_read_templ = + FunctionTemplate::New(env->isolate(), + GetBytesRead, + env->as_external(), + signature); + + t->PrototypeTemplate()->SetAccessorProperty(env->fd_string(), + get_fd_templ, + Local(), + attributes); + + t->PrototypeTemplate()->SetAccessorProperty(env->external_stream_string(), + get_external_templ, + Local(), + attributes); + + t->PrototypeTemplate()->SetAccessorProperty(env->bytes_read_string(), + get_bytes_read_templ, + Local(), + attributes); env->SetProtoMethod(t, "readStart", JSMethod); env->SetProtoMethod(t, "readStop", JSMethod); @@ -85,8 +95,7 @@ void StreamBase::AddMethods(Environment* env, template -void StreamBase::GetFD(Local key, - const PropertyCallbackInfo& args) { +void StreamBase::GetFD(const FunctionCallbackInfo& args) { // Mimic implementation of StreamBase::GetFD() and UDPWrap::GetFD(). Base* handle; ASSIGN_OR_RETURN_UNWRAP(&handle, @@ -100,10 +109,8 @@ void StreamBase::GetFD(Local key, args.GetReturnValue().Set(wrap->GetFD()); } - template -void StreamBase::GetBytesRead(Local key, - const PropertyCallbackInfo& args) { +void StreamBase::GetBytesRead(const FunctionCallbackInfo& args) { // The handle instance hasn't been set. So no bytes could have been read. 
Base* handle; ASSIGN_OR_RETURN_UNWRAP(&handle, @@ -115,10 +122,8 @@ void StreamBase::GetBytesRead(Local key, args.GetReturnValue().Set(static_cast(wrap->bytes_read_)); } - template -void StreamBase::GetExternal(Local key, - const PropertyCallbackInfo& args) { +void StreamBase::GetExternal(const FunctionCallbackInfo& args) { Base* handle; ASSIGN_OR_RETURN_UNWRAP(&handle, args.This()); @@ -144,15 +149,19 @@ void StreamBase::JSMethod(const FunctionCallbackInfo& args) { } +inline void ShutdownWrap::OnDone(int status) { + stream()->AfterShutdown(this, status); +} + + WriteWrap* WriteWrap::New(Environment* env, Local obj, StreamBase* wrap, - DoneCb cb, size_t extra) { size_t storage_size = ROUND_UP(sizeof(WriteWrap), kAlignSize) + extra; char* storage = new char[storage_size]; - return new(storage) WriteWrap(env, obj, wrap, cb, storage_size); + return new(storage) WriteWrap(env, obj, wrap, storage_size); } @@ -172,6 +181,10 @@ size_t WriteWrap::ExtraSize() const { return storage_size_ - ROUND_UP(sizeof(*this), kAlignSize); } +inline void WriteWrap::OnDone(int status) { + stream()->AfterWrite(this, status); +} + } // namespace node #endif // defined(NODE_WANT_INTERNALS) && NODE_WANT_INTERNALS diff --git a/src/stream_base.cc b/src/stream_base.cc index 34564cadbe777a..4d9e1dfc6b2dba 100644 --- a/src/stream_base.cc +++ b/src/stream_base.cc @@ -55,8 +55,7 @@ int StreamBase::Shutdown(const FunctionCallbackInfo& args) { AsyncHooks::DefaultTriggerAsyncIdScope(env, wrap->get_async_id()); ShutdownWrap* req_wrap = new ShutdownWrap(env, req_wrap_obj, - this, - AfterShutdown); + this); int err = DoShutdown(req_wrap); if (err) @@ -66,7 +65,6 @@ int StreamBase::Shutdown(const FunctionCallbackInfo& args) { void StreamBase::AfterShutdown(ShutdownWrap* req_wrap, int status) { - StreamBase* wrap = req_wrap->wrap(); Environment* env = req_wrap->env(); // The wrap and request objects should still be there. 
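
`WriteWrap::New()` above keeps the single-allocation layout in which the request object and a caller-requested scratch area share one `char[]` buffer, with the object placement-new'd at the front. A simplified sketch of that storage trick, using illustrative names (`Request`, `kAlign`) rather than the real ones:

```cpp
#include <cstddef>
#include <new>

constexpr size_t kAlign = 16;  // must be a power of two
constexpr size_t RoundUp(size_t n, size_t a) { return (n + a - 1) & ~(a - 1); }

class Request {
 public:
  static Request* New(size_t extra) {
    // One allocation: the (aligned) object followed by `extra` scratch bytes.
    const size_t storage_size = RoundUp(sizeof(Request), kAlign) + extra;
    char* storage = new char[storage_size];
    return new (storage) Request(storage_size);  // placement new
  }

  // Pointer into the scratch area that lives directly behind the object.
  char* Extra(size_t offset = 0) {
    return reinterpret_cast<char*>(this) + RoundUp(sizeof(*this), kAlign) +
           offset;
  }

  size_t ExtraSize() const {
    return storage_size_ - RoundUp(sizeof(*this), kAlign);
  }

  void Dispose() {
    this->~Request();
    delete[] reinterpret_cast<char*>(this);  // frees object and scratch area
  }

 private:
  explicit Request(size_t storage_size) : storage_size_(storage_size) {}
  ~Request() = default;
  const size_t storage_size_;
};
```

Because `Dispose()` reconstructs the original `char*`, a stray `delete` on the object itself would corrupt the heap, which is why the real class makes `operator delete` unreachable.
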
@@ -78,7 +76,7 @@ void StreamBase::AfterShutdown(ShutdownWrap* req_wrap, int status) { Local req_wrap_obj = req_wrap->object(); Local argv[3] = { Integer::New(env->isolate(), status), - wrap->GetObject(), + GetObject(), req_wrap_obj }; @@ -159,8 +157,7 @@ int StreamBase::Writev(const FunctionCallbackInfo& args) { CHECK_NE(wrap, nullptr); AsyncHooks::DefaultTriggerAsyncIdScope trigger_scope(env, wrap->get_async_id()); - req_wrap = WriteWrap::New(env, req_wrap_obj, this, AfterWrite, - storage_size); + req_wrap = WriteWrap::New(env, req_wrap_obj, this, storage_size); } offset = 0; @@ -252,7 +249,7 @@ int StreamBase::WriteBuffer(const FunctionCallbackInfo& args) { CHECK_NE(wrap, nullptr); AsyncHooks::DefaultTriggerAsyncIdScope trigger_scope(env, wrap->get_async_id()); - req_wrap = WriteWrap::New(env, req_wrap_obj, this, AfterWrite); + req_wrap = WriteWrap::New(env, req_wrap_obj, this); } err = DoWrite(req_wrap, bufs, count, nullptr); @@ -338,8 +335,7 @@ int StreamBase::WriteString(const FunctionCallbackInfo& args) { CHECK_NE(wrap, nullptr); AsyncHooks::DefaultTriggerAsyncIdScope trigger_scope(env, wrap->get_async_id()); - req_wrap = WriteWrap::New(env, req_wrap_obj, this, AfterWrite, - storage_size); + req_wrap = WriteWrap::New(env, req_wrap_obj, this, storage_size); } data = req_wrap->Extra(); @@ -401,7 +397,6 @@ int StreamBase::WriteString(const FunctionCallbackInfo& args) { void StreamBase::AfterWrite(WriteWrap* req_wrap, int status) { - StreamBase* wrap = req_wrap->wrap(); Environment* env = req_wrap->env(); HandleScope handle_scope(env->isolate()); @@ -413,19 +408,19 @@ void StreamBase::AfterWrite(WriteWrap* req_wrap, int status) { // Unref handle property Local req_wrap_obj = req_wrap->object(); req_wrap_obj->Delete(env->context(), env->handle_string()).FromJust(); - wrap->OnAfterWrite(req_wrap); + OnAfterWrite(req_wrap, status); Local argv[] = { Integer::New(env->isolate(), status), - wrap->GetObject(), + GetObject(), req_wrap_obj, Undefined(env->isolate()) }; - const char* msg = wrap->Error(); + const char* msg = Error(); if (msg != nullptr) { argv[3] = OneByteString(env->isolate(), msg); - wrap->ClearError(); + ClearError(); } if (req_wrap_obj->Has(env->context(), env->oncomplete_string()).FromJust()) diff --git a/src/stream_base.h b/src/stream_base.h index 94e4bfd73961da..8c0a302d76dd2d 100644 --- a/src/stream_base.h +++ b/src/stream_base.h @@ -16,27 +16,27 @@ namespace node { // Forward declarations class StreamBase; -template +template class StreamReq { public: - typedef void (*DoneCb)(Req* req, int status); - - explicit StreamReq(DoneCb cb) : cb_(cb) { + explicit StreamReq(StreamBase* stream) : stream_(stream) { } inline void Done(int status, const char* error_str = nullptr) { - Req* req = static_cast(this); + Base* req = static_cast(this); Environment* env = req->env(); if (error_str != nullptr) { req->object()->Set(env->error_string(), OneByteString(env->isolate(), error_str)); } - cb_(req, status); + req->OnDone(status); } + inline StreamBase* stream() const { return stream_; } + private: - DoneCb cb_; + StreamBase* const stream_; }; class ShutdownWrap : public ReqWrap, @@ -44,11 +44,9 @@ class ShutdownWrap : public ReqWrap, public: ShutdownWrap(Environment* env, v8::Local req_wrap_obj, - StreamBase* wrap, - DoneCb cb) + StreamBase* stream) : ReqWrap(env, req_wrap_obj, AsyncWrap::PROVIDER_SHUTDOWNWRAP), - StreamReq(cb), - wrap_(wrap) { + StreamReq(stream) { Wrap(req_wrap_obj, this); } @@ -60,27 +58,22 @@ class ShutdownWrap : public ReqWrap, return 
ContainerOf(&ShutdownWrap::req_, req); } - inline StreamBase* wrap() const { return wrap_; } size_t self_size() const override { return sizeof(*this); } - private: - StreamBase* const wrap_; + inline void OnDone(int status); // Just calls stream()->AfterShutdown() }; -class WriteWrap: public ReqWrap, - public StreamReq { +class WriteWrap : public ReqWrap, + public StreamReq { public: static inline WriteWrap* New(Environment* env, v8::Local obj, - StreamBase* wrap, - DoneCb cb, + StreamBase* stream, size_t extra = 0); inline void Dispose(); inline char* Extra(size_t offset = 0); inline size_t ExtraSize() const; - inline StreamBase* wrap() const { return wrap_; } - size_t self_size() const override { return storage_size_; } static WriteWrap* from_req(uv_write_t* req) { @@ -91,24 +84,22 @@ class WriteWrap: public ReqWrap, WriteWrap(Environment* env, v8::Local obj, - StreamBase* wrap, - DoneCb cb) + StreamBase* stream) : ReqWrap(env, obj, AsyncWrap::PROVIDER_WRITEWRAP), - StreamReq(cb), - wrap_(wrap), + StreamReq(stream), storage_size_(0) { Wrap(obj, this); } + inline void OnDone(int status); // Just calls stream()->AfterWrite() + protected: WriteWrap(Environment* env, v8::Local obj, - StreamBase* wrap, - DoneCb cb, + StreamBase* stream, size_t storage_size) : ReqWrap(env, obj, AsyncWrap::PROVIDER_WRITEWRAP), - StreamReq(cb), - wrap_(wrap), + StreamReq(stream), storage_size_(storage_size) { Wrap(obj, this); } @@ -129,7 +120,6 @@ class WriteWrap: public ReqWrap, // WriteWrap. Ensure this never happens. void operator delete(void* ptr) { UNREACHABLE(); } - StreamBase* const wrap_; const size_t storage_size_; }; @@ -151,7 +141,7 @@ class StreamResource { void* ctx; }; - typedef void (*AfterWriteCb)(WriteWrap* w, void* ctx); + typedef void (*AfterWriteCb)(WriteWrap* w, int status, void* ctx); typedef void (*AllocCb)(size_t size, uv_buf_t* buf, void* ctx); typedef void (*ReadCb)(ssize_t nread, const uv_buf_t* buf, @@ -176,9 +166,9 @@ class StreamResource { virtual void ClearError(); // Events - inline void OnAfterWrite(WriteWrap* w) { + inline void OnAfterWrite(WriteWrap* w, int status) { if (!after_write_cb_.is_empty()) - after_write_cb_.fn(w, after_write_cb_.ctx); + after_write_cb_.fn(w, status, after_write_cb_.ctx); } inline void OnAlloc(size_t size, uv_buf_t* buf) { @@ -208,14 +198,12 @@ class StreamResource { inline Callback read_cb() { return read_cb_; } inline Callback destruct_cb() { return destruct_cb_; } - private: + protected: Callback after_write_cb_; Callback alloc_cb_; Callback read_cb_; Callback destruct_cb_; uint64_t bytes_read_; - - friend class StreamBase; }; class StreamBase : public StreamResource { @@ -257,6 +245,10 @@ class StreamBase : public StreamResource { v8::Local buf, v8::Local handle); + // These are called by the respective {Write,Shutdown}Wrap class. 
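
In the rewritten `StreamReq` above, the stored `DoneCb` function pointer is gone: each request now remembers its `StreamBase*`, and `Done()` dispatches to an `OnDone(int status)` implemented by the concrete request class, which in turn calls back into the stream. A compilable sketch of that dispatch shape, with simplified stand-in names (`Req`, `Stream`, `WriteReq`) rather than the real classes:

```cpp
#include <iostream>

class Stream;    // stands in for StreamBase
class WriteReq;  // concrete request type, defined below

template <class Base>
class Req {
 public:
  explicit Req(Stream* stream) : stream_(stream) {}
  Stream* stream() const { return stream_; }

  // No stored callback: forward to the derived type's OnDone().
  void Done(int status) { static_cast<Base*>(this)->OnDone(status); }

 private:
  Stream* const stream_;
};

class Stream {
 public:
  void AfterWrite(WriteReq* req, int status) {
    std::cout << "write finished, status " << status << "\n";
  }
};

class WriteReq : public Req<WriteReq> {
 public:
  explicit WriteReq(Stream* stream) : Req<WriteReq>(stream) {}
  void OnDone(int status) { stream()->AfterWrite(this, status); }
};

int main() {
  Stream s;
  WriteReq w(&s);
  w.Done(0);  // prints "write finished, status 0"
  return 0;
}
```

Making `AfterWrite()`/`AfterShutdown()` virtual members of the stream, as the header changes below do, lets subclasses such as `LibuvStreamWrap` hook completion without the `void* ctx` plumbing.
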
+ virtual void AfterShutdown(ShutdownWrap* req, int status); + virtual void AfterWrite(WriteWrap* req, int status); + protected: explicit StreamBase(Environment* env) : env_(env), consumed_(false) { } @@ -267,10 +259,6 @@ class StreamBase : public StreamResource { virtual AsyncWrap* GetAsyncWrap() = 0; virtual v8::Local GetObject(); - // Libuv callbacks - static void AfterShutdown(ShutdownWrap* req, int status); - static void AfterWrite(WriteWrap* req, int status); - // JS Methods int ReadStart(const v8::FunctionCallbackInfo& args); int ReadStop(const v8::FunctionCallbackInfo& args); @@ -281,16 +269,13 @@ class StreamBase : public StreamResource { int WriteString(const v8::FunctionCallbackInfo& args); template - static void GetFD(v8::Local key, - const v8::PropertyCallbackInfo& args); + static void GetFD(const v8::FunctionCallbackInfo& args); template - static void GetExternal(v8::Local key, - const v8::PropertyCallbackInfo& args); + static void GetExternal(const v8::FunctionCallbackInfo& args); template - static void GetBytesRead(v8::Local key, - const v8::PropertyCallbackInfo& args); + static void GetBytesRead(const v8::FunctionCallbackInfo& args); template & args) { int LibuvStreamWrap::DoShutdown(ShutdownWrap* req_wrap) { int err; - err = uv_shutdown(req_wrap->req(), stream(), AfterShutdown); + err = uv_shutdown(req_wrap->req(), stream(), AfterUvShutdown); req_wrap->Dispatched(); return err; } -void LibuvStreamWrap::AfterShutdown(uv_shutdown_t* req, int status) { +void LibuvStreamWrap::AfterUvShutdown(uv_shutdown_t* req, int status) { ShutdownWrap* req_wrap = ShutdownWrap::from_req(req); CHECK_NE(req_wrap, nullptr); HandleScope scope(req_wrap->env()->isolate()); @@ -360,9 +359,9 @@ int LibuvStreamWrap::DoWrite(WriteWrap* w, uv_stream_t* send_handle) { int r; if (send_handle == nullptr) { - r = uv_write(w->req(), stream(), bufs, count, AfterWrite); + r = uv_write(w->req(), stream(), bufs, count, AfterUvWrite); } else { - r = uv_write2(w->req(), stream(), bufs, count, send_handle, AfterWrite); + r = uv_write2(w->req(), stream(), bufs, count, send_handle, AfterUvWrite); } if (!r) { @@ -383,7 +382,7 @@ int LibuvStreamWrap::DoWrite(WriteWrap* w, } -void LibuvStreamWrap::AfterWrite(uv_write_t* req, int status) { +void LibuvStreamWrap::AfterUvWrite(uv_write_t* req, int status) { WriteWrap* req_wrap = WriteWrap::from_req(req); CHECK_NE(req_wrap, nullptr); HandleScope scope(req_wrap->env()->isolate()); @@ -392,9 +391,9 @@ void LibuvStreamWrap::AfterWrite(uv_write_t* req, int status) { } -void LibuvStreamWrap::OnAfterWriteImpl(WriteWrap* w, void* ctx) { - LibuvStreamWrap* wrap = static_cast(ctx); - wrap->UpdateWriteQueueSize(); +void LibuvStreamWrap::AfterWrite(WriteWrap* w, int status) { + StreamBase::AfterWrite(w, status); + UpdateWriteQueueSize(); } } // namespace node diff --git a/src/stream_wrap.h b/src/stream_wrap.h index 43df504e81b86e..df7349b093f3c2 100644 --- a/src/stream_wrap.h +++ b/src/stream_wrap.h @@ -103,17 +103,18 @@ class LibuvStreamWrap : public HandleWrap, public StreamBase { static void OnRead(uv_stream_t* handle, ssize_t nread, const uv_buf_t* buf); - static void AfterWrite(uv_write_t* req, int status); - static void AfterShutdown(uv_shutdown_t* req, int status); + static void AfterUvWrite(uv_write_t* req, int status); + static void AfterUvShutdown(uv_shutdown_t* req, int status); // Resource interface implementation - static void OnAfterWriteImpl(WriteWrap* w, void* ctx); static void OnAllocImpl(size_t size, uv_buf_t* buf, void* ctx); static void OnReadImpl(ssize_t nread, 
const uv_buf_t* buf, uv_handle_type pending, void* ctx); + void AfterWrite(WriteWrap* req_wrap, int status) override; + uv_stream_t* const stream_; }; diff --git a/src/tls_wrap.cc b/src/tls_wrap.cc index 3f5ed2c57580ff..c15cf166fb29d8 100644 --- a/src/tls_wrap.cc +++ b/src/tls_wrap.cc @@ -328,8 +328,7 @@ void TLSWrap::EncOut() { ->NewInstance(env()->context()).ToLocalChecked(); WriteWrap* write_req = WriteWrap::New(env(), req_wrap_obj, - this, - EncOutCb); + stream_); uv_buf_t buf[arraysize(data)]; for (size_t i = 0; i < count; i++) @@ -346,34 +345,31 @@ void TLSWrap::EncOut() { } -void TLSWrap::EncOutCb(WriteWrap* req_wrap, int status) { - TLSWrap* wrap = req_wrap->wrap()->Cast(); - req_wrap->Dispose(); - +void TLSWrap::EncOutAfterWrite(WriteWrap* req_wrap, int status) { // We should not be getting here after `DestroySSL`, because all queued writes // must be invoked with UV_ECANCELED - CHECK_NE(wrap->ssl_, nullptr); + CHECK_NE(ssl_, nullptr); // Handle error if (status) { // Ignore errors after shutdown - if (wrap->shutdown_) + if (shutdown_) return; // Notify about error - wrap->InvokeQueued(status); + InvokeQueued(status); return; } // Commit - crypto::NodeBIO::FromBIO(wrap->enc_out_)->Read(nullptr, wrap->write_size_); + crypto::NodeBIO::FromBIO(enc_out_)->Read(nullptr, write_size_); // Ensure that the progress will be made and `InvokeQueued` will be called. - wrap->ClearIn(); + ClearIn(); // Try writing more data - wrap->write_size_ = 0; - wrap->EncOut(); + write_size_ = 0; + EncOut(); } @@ -681,9 +677,9 @@ int TLSWrap::DoWrite(WriteWrap* w, } -void TLSWrap::OnAfterWriteImpl(WriteWrap* w, void* ctx) { +void TLSWrap::OnAfterWriteImpl(WriteWrap* w, int status, void* ctx) { TLSWrap* wrap = static_cast(ctx); - wrap->UpdateWriteQueueSize(); + wrap->EncOutAfterWrite(w, status); } diff --git a/src/tls_wrap.h b/src/tls_wrap.h index bd5a4d4028a408..96820e9b7201b1 100644 --- a/src/tls_wrap.h +++ b/src/tls_wrap.h @@ -112,7 +112,7 @@ class TLSWrap : public AsyncWrap, static void SSLInfoCallback(const SSL* ssl_, int where, int ret); void InitSSL(); void EncOut(); - static void EncOutCb(WriteWrap* req_wrap, int status); + void EncOutAfterWrite(WriteWrap* req_wrap, int status); bool ClearIn(); void ClearOut(); void MakePending(); @@ -135,7 +135,7 @@ class TLSWrap : public AsyncWrap, uint32_t UpdateWriteQueueSize(uint32_t write_queue_size = 0); // Resource implementation - static void OnAfterWriteImpl(WriteWrap* w, void* ctx); + static void OnAfterWriteImpl(WriteWrap* w, int status, void* ctx); static void OnAllocImpl(size_t size, uv_buf_t* buf, void* ctx); static void OnReadImpl(ssize_t nread, const uv_buf_t* buf, diff --git a/src/udp_wrap.cc b/src/udp_wrap.cc index 784b36fce95b21..5b3b7f9abc9bad 100644 --- a/src/udp_wrap.cc +++ b/src/udp_wrap.cc @@ -42,7 +42,7 @@ using v8::Integer; using v8::Local; using v8::Object; using v8::PropertyAttribute; -using v8::PropertyCallbackInfo; +using v8::Signature; using v8::String; using v8::Uint32; using v8::Undefined; @@ -111,12 +111,19 @@ void UDPWrap::Initialize(Local target, enum PropertyAttribute attributes = static_cast(v8::ReadOnly | v8::DontDelete); - t->PrototypeTemplate()->SetAccessor(env->fd_string(), - UDPWrap::GetFD, - nullptr, - env->as_external(), - v8::DEFAULT, - attributes); + + Local signature = Signature::New(env->isolate(), t); + + Local get_fd_templ = + FunctionTemplate::New(env->isolate(), + UDPWrap::GetFD, + env->as_external(), + signature); + + t->PrototypeTemplate()->SetAccessorProperty(env->fd_string(), + get_fd_templ, + Local(), + 
attributes); env->SetProtoMethod(t, "bind", Bind); env->SetProtoMethod(t, "send", Send); @@ -164,7 +171,7 @@ void UDPWrap::New(const FunctionCallbackInfo& args) { } -void UDPWrap::GetFD(Local, const PropertyCallbackInfo& args) { +void UDPWrap::GetFD(const FunctionCallbackInfo& args) { int fd = UV_EBADF; #if !defined(_WIN32) UDPWrap* wrap = Unwrap(args.This()); diff --git a/src/udp_wrap.h b/src/udp_wrap.h index f4cf3ad7f566df..15d46b3ebb8e90 100644 --- a/src/udp_wrap.h +++ b/src/udp_wrap.h @@ -41,8 +41,7 @@ class UDPWrap: public HandleWrap { static void Initialize(v8::Local target, v8::Local unused, v8::Local context); - static void GetFD(v8::Local, - const v8::PropertyCallbackInfo&); + static void GetFD(const v8::FunctionCallbackInfo& args); static void New(const v8::FunctionCallbackInfo& args); static void Bind(const v8::FunctionCallbackInfo& args); static void Send(const v8::FunctionCallbackInfo& args); diff --git a/test/abort/test-http-parser-consume.js b/test/abort/test-http-parser-consume.js index 9115aba70dbf17..673e04cfa3a573 100644 --- a/test/abort/test-http-parser-consume.js +++ b/test/abort/test-http-parser-consume.js @@ -11,12 +11,12 @@ if (process.argv[2] === 'child') { const rr = get({ port: server.address().port }, common.mustCall(() => { // This bad input (0) should abort the parser and the process rr.parser.consume(0); - // This line should be unreachanble. + // This line should be unreachable. assert.fail('this should be unreachable'); })); })); } else { - // super-proces + // super-process const child = spawn(process.execPath, [__filename, 'child']); child.stdout.on('data', common.mustNotCall()); diff --git a/test/addons-napi/8_passing_wrapped/binding.cc b/test/addons-napi/8_passing_wrapped/binding.cc index c284c85f9b4936..48e94f10ec4838 100644 --- a/test/addons-napi/8_passing_wrapped/binding.cc +++ b/test/addons-napi/8_passing_wrapped/binding.cc @@ -1,7 +1,9 @@ #include "myobject.h" #include "../common.h" -napi_value CreateObject(napi_env env, napi_callback_info info) { +extern size_t finalize_count; + +static napi_value CreateObject(napi_env env, napi_callback_info info) { size_t argc = 1; napi_value args[1]; NAPI_CALL(env, napi_get_cb_info(env, info, &argc, args, nullptr, nullptr)); @@ -12,7 +14,7 @@ napi_value CreateObject(napi_env env, napi_callback_info info) { return instance; } -napi_value Add(napi_env env, napi_callback_info info) { +static napi_value Add(napi_env env, napi_callback_info info) { size_t argc = 2; napi_value args[2]; NAPI_CALL(env, napi_get_cb_info(env, info, &argc, args, nullptr, nullptr)); @@ -29,12 +31,19 @@ napi_value Add(napi_env env, napi_callback_info info) { return sum; } -napi_value Init(napi_env env, napi_value exports) { +static napi_value FinalizeCount(napi_env env, napi_callback_info info) { + napi_value return_value; + NAPI_CALL(env, napi_create_uint32(env, finalize_count, &return_value)); + return return_value; +} + +static napi_value Init(napi_env env, napi_value exports) { MyObject::Init(env); napi_property_descriptor desc[] = { DECLARE_NAPI_PROPERTY("createObject", CreateObject), DECLARE_NAPI_PROPERTY("add", Add), + DECLARE_NAPI_PROPERTY("finalizeCount", FinalizeCount), }; NAPI_CALL(env, diff --git a/test/addons-napi/8_passing_wrapped/myobject.cc b/test/addons-napi/8_passing_wrapped/myobject.cc index 19cc7dd2a29493..0c9ca90f52f8f3 100644 --- a/test/addons-napi/8_passing_wrapped/myobject.cc +++ b/test/addons-napi/8_passing_wrapped/myobject.cc @@ -1,9 +1,14 @@ #include "myobject.h" #include "../common.h" +size_t finalize_count = 0; 
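
myobject.cc now bumps a global `finalize_count` from the native destructor so the JavaScript test can assert, after `global.gc()`, that the wrapped object was actually finalized. The same bookkeeping can also be done with a plain `napi_wrap` finalize callback; the sketch below is only an illustration of that idea (the helpers `Wrap` and `FinalizeCount` are made-up names, not part of this addon):

```cpp
#include <node_api.h>

static size_t finalize_count = 0;

// Runs when the wrapped JS object is garbage collected.
static void Finalize(napi_env env, void* data, void* hint) {
  ++finalize_count;
  delete static_cast<int*>(data);
}

static napi_value Wrap(napi_env env, napi_callback_info info) {
  size_t argc = 1;
  napi_value obj;
  if (napi_get_cb_info(env, info, &argc, &obj, nullptr, nullptr) != napi_ok)
    return nullptr;
  // Attach a small native payload; no napi_ref is requested here.
  napi_wrap(env, obj, new int(42), Finalize, nullptr, nullptr);
  return obj;
}

static napi_value FinalizeCount(napi_env env, napi_callback_info info) {
  napi_value result;
  napi_create_uint32(env, static_cast<uint32_t>(finalize_count), &result);
  return result;
}
```

A test would then drop its reference, call `global.gc()`, and compare the reported count, which is exactly what the updated test.js below does with the real binding.
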
+ MyObject::MyObject() : env_(nullptr), wrapper_(nullptr) {} -MyObject::~MyObject() { napi_delete_reference(env_, wrapper_); } +MyObject::~MyObject() { + finalize_count++; + napi_delete_reference(env_, wrapper_); +} void MyObject::Destructor( napi_env env, void* nativeObject, void* /*finalize_hint*/) { @@ -45,6 +50,11 @@ napi_value MyObject::New(napi_env env, napi_callback_info info) { } obj->env_ = env; + + // It is important that the below call to napi_wrap() be such that we request + // a reference to the wrapped object via the out-parameter, because this + // ensures that we test the code path that deals with a reference that is + // destroyed from its own finalizer. NAPI_CALL(env, napi_wrap(env, _this, obj, diff --git a/test/addons-napi/8_passing_wrapped/test.js b/test/addons-napi/8_passing_wrapped/test.js index 3d24fa5d9fdaa7..7793133f7750ba 100644 --- a/test/addons-napi/8_passing_wrapped/test.js +++ b/test/addons-napi/8_passing_wrapped/test.js @@ -1,9 +1,16 @@ 'use strict'; +// Flags: --expose-gc + const common = require('../../common'); const assert = require('assert'); const addon = require(`./build/${common.buildType}/binding`); -const obj1 = addon.createObject(10); +let obj1 = addon.createObject(10); const obj2 = addon.createObject(20); const result = addon.add(obj1, obj2); assert.strictEqual(result, 30); + +// Make sure the native destructor gets called. +obj1 = null; +global.gc(); +assert.strictEqual(addon.finalizeCount(), 1); diff --git a/test/addons-napi/test_async/test-uncaught.js b/test/addons-napi/test_async/test-uncaught.js new file mode 100644 index 00000000000000..fdcb3203f54410 --- /dev/null +++ b/test/addons-napi/test_async/test-uncaught.js @@ -0,0 +1,18 @@ +'use strict'; +const common = require('../../common'); +const assert = require('assert'); +const test_async = require(`./build/${common.buildType}/test_async`); + +process.on('uncaughtException', common.mustCall(function(err) { + try { + throw new Error('should not fail'); + } catch (err) { + assert.strictEqual(err.message, 'should not fail'); + } + assert.strictEqual(err.message, 'uncaught'); +})); + +// Successful async execution and completion callback. 
+test_async.Test(5, {}, common.mustCall(function() { + throw new Error('uncaught'); +})); diff --git a/test/addons-napi/test_buffer/test.js b/test/addons-napi/test_buffer/test.js index 713966775df18b..740b0474a79c60 100644 --- a/test/addons-napi/test_buffer/test.js +++ b/test/addons-napi/test_buffer/test.js @@ -9,14 +9,13 @@ assert.strictEqual(binding.newBuffer().toString(), binding.theText); assert.strictEqual(binding.newExternalBuffer().toString(), binding.theText); console.log('gc1'); global.gc(); -assert.strictEqual(binding.getDeleterCallCount(), 1, 'deleter was not called'); +assert.strictEqual(binding.getDeleterCallCount(), 1); assert.strictEqual(binding.copyBuffer().toString(), binding.theText); let buffer = binding.staticBuffer(); -assert.strictEqual(binding.bufferHasInstance(buffer), true, - 'buffer type checking fails'); +assert.strictEqual(binding.bufferHasInstance(buffer), true); assert.strictEqual(binding.bufferInfo(buffer), true); buffer = null; global.gc(); console.log('gc2'); -assert.strictEqual(binding.getDeleterCallCount(), 2, 'deleter was not called'); +assert.strictEqual(binding.getDeleterCallCount(), 2); diff --git a/test/addons-napi/test_callback_scope/binding.cc b/test/addons-napi/test_callback_scope/binding.cc new file mode 100644 index 00000000000000..e6631b6ac7bb52 --- /dev/null +++ b/test/addons-napi/test_callback_scope/binding.cc @@ -0,0 +1,138 @@ +#include "node_api.h" +#include "uv.h" +#include "../common.h" + +namespace { + +// the test needs to fake out the async structure, so we need to use +// the raw structure here and then cast as done behind the scenes +// in napi calls. +struct async_context { + double async_id; + double trigger_async_id; +}; + + +napi_value RunInCallbackScope(napi_env env, napi_callback_info info) { + size_t argc; + napi_value args[4]; + + NAPI_CALL(env, napi_get_cb_info(env, info, &argc, nullptr, nullptr, nullptr)); + NAPI_ASSERT(env, argc == 4 , "Wrong number of arguments"); + + NAPI_CALL(env, napi_get_cb_info(env, info, &argc, args, nullptr, nullptr)); + + napi_valuetype valuetype; + NAPI_CALL(env, napi_typeof(env, args[0], &valuetype)); + NAPI_ASSERT(env, valuetype == napi_object, + "Wrong type of arguments. Expects an object as first argument."); + + NAPI_CALL(env, napi_typeof(env, args[1], &valuetype)); + NAPI_ASSERT(env, valuetype == napi_number, + "Wrong type of arguments. Expects a number as second argument."); + + NAPI_CALL(env, napi_typeof(env, args[2], &valuetype)); + NAPI_ASSERT(env, valuetype == napi_number, + "Wrong type of arguments. Expects a number as third argument."); + + NAPI_CALL(env, napi_typeof(env, args[3], &valuetype)); + NAPI_ASSERT(env, valuetype == napi_function, + "Wrong type of arguments. 
Expects a function as third argument."); + + struct async_context context; + NAPI_CALL(env, napi_get_value_double(env, args[1], &context.async_id)); + NAPI_CALL(env, + napi_get_value_double(env, args[2], &context.trigger_async_id)); + + napi_callback_scope scope = nullptr; + NAPI_CALL( + env, + napi_open_callback_scope(env, + args[0], + reinterpret_cast(&context), + &scope)); + + // if the function has an exception pending after the call that is ok + // so we don't use NAPI_CALL as we must close the callback scope regardless + napi_value result = nullptr; + napi_status function_call_result = + napi_call_function(env, args[0], args[3], 0, nullptr, &result); + if (function_call_result != napi_ok) { + GET_AND_THROW_LAST_ERROR((env)); + } + + NAPI_CALL(env, napi_close_callback_scope(env, scope)); + + return result; +} + +static napi_env shared_env = nullptr; +static napi_deferred deferred = nullptr; + +static void Callback(uv_work_t* req, int ignored) { + napi_env env = shared_env; + + napi_handle_scope handle_scope = nullptr; + NAPI_CALL_RETURN_VOID(env, napi_open_handle_scope(env, &handle_scope)); + + napi_value resource_name; + NAPI_CALL_RETURN_VOID(env, napi_create_string_utf8( + env, "test", NAPI_AUTO_LENGTH, &resource_name)); + napi_async_context context; + NAPI_CALL_RETURN_VOID(env, + napi_async_init(env, nullptr, resource_name, &context)); + + napi_value resource_object; + NAPI_CALL_RETURN_VOID(env, napi_create_object(env, &resource_object)); + + napi_value undefined_value; + NAPI_CALL_RETURN_VOID(env, napi_get_undefined(env, &undefined_value)); + + napi_callback_scope scope = nullptr; + NAPI_CALL_RETURN_VOID(env, napi_open_callback_scope(env, + resource_object, + context, + &scope)); + + NAPI_CALL_RETURN_VOID(env, + napi_resolve_deferred(env, deferred, undefined_value)); + + NAPI_CALL_RETURN_VOID(env, napi_close_callback_scope(env, scope)); + + NAPI_CALL_RETURN_VOID(env, napi_close_handle_scope(env, handle_scope)); + delete req; +} + +napi_value TestResolveAsync(napi_env env, napi_callback_info info) { + napi_value promise = nullptr; + if (deferred == nullptr) { + shared_env = env; + NAPI_CALL(env, napi_create_promise(env, &deferred, &promise)); + + uv_loop_t* loop = nullptr; + NAPI_CALL(env, napi_get_uv_event_loop(env, &loop)); + + uv_work_t* req = new uv_work_t(); + uv_queue_work(loop, + req, + [](uv_work_t*) {}, + Callback); + } + return promise; +} + +napi_value Init(napi_env env, napi_value exports) { + napi_property_descriptor descriptors[] = { + DECLARE_NAPI_PROPERTY("runInCallbackScope", RunInCallbackScope), + DECLARE_NAPI_PROPERTY("testResolveAsync", TestResolveAsync) + }; + + NAPI_CALL(env, napi_define_properties( + env, exports, sizeof(descriptors) / sizeof(*descriptors), descriptors)); + + return exports; +} + +} // anonymous namespace + +NAPI_MODULE(NODE_GYP_MODULE_NAME, Init) diff --git a/test/addons-napi/test_callback_scope/binding.gyp b/test/addons-napi/test_callback_scope/binding.gyp new file mode 100644 index 00000000000000..7ede63d94a0d77 --- /dev/null +++ b/test/addons-napi/test_callback_scope/binding.gyp @@ -0,0 +1,9 @@ +{ + 'targets': [ + { + 'target_name': 'binding', + 'defines': [ 'V8_DEPRECATION_WARNINGS=1' ], + 'sources': [ 'binding.cc' ] + } + ] +} diff --git a/test/addons-napi/test_callback_scope/test-async-hooks.js b/test/addons-napi/test_callback_scope/test-async-hooks.js new file mode 100644 index 00000000000000..1a11bf60398f9b --- /dev/null +++ b/test/addons-napi/test_callback_scope/test-async-hooks.js @@ -0,0 +1,29 @@ +'use strict'; + +const common = 
require('../../common'); +const assert = require('assert'); +const async_hooks = require('async_hooks'); + +// The async_hook that we enable would register the process.emitWarning() +// call from loading the N-API addon as asynchronous activity because +// it contains a process.nextTick() call. Monkey patch it to be a no-op +// before we load the addon in order to avoid this. +process.emitWarning = () => {}; + +const { runInCallbackScope } = require(`./build/${common.buildType}/binding`); + +let insideHook = false; +async_hooks.createHook({ + before: common.mustCall((id) => { + assert.strictEqual(id, 1000); + insideHook = true; + }), + after: common.mustCall((id) => { + assert.strictEqual(id, 1000); + insideHook = false; + }) +}).enable(); + +runInCallbackScope({}, 1000, 1000, () => { + assert(insideHook); +}); diff --git a/test/addons-napi/test_callback_scope/test-resolve-async.js b/test/addons-napi/test_callback_scope/test-resolve-async.js new file mode 100644 index 00000000000000..77f25c9dde533f --- /dev/null +++ b/test/addons-napi/test_callback_scope/test-resolve-async.js @@ -0,0 +1,6 @@ +'use strict'; + +const common = require('../../common'); +const { testResolveAsync } = require(`./build/${common.buildType}/binding`); + +testResolveAsync().then(common.mustCall()); diff --git a/test/addons-napi/test_callback_scope/test.js b/test/addons-napi/test_callback_scope/test.js new file mode 100644 index 00000000000000..2f2efe5f47b98a --- /dev/null +++ b/test/addons-napi/test_callback_scope/test.js @@ -0,0 +1,17 @@ +'use strict'; + +const common = require('../../common'); +const assert = require('assert'); +const { runInCallbackScope } = require(`./build/${common.buildType}/binding`); + +assert.strictEqual(runInCallbackScope({}, 0, 0, () => 42), 42); + +{ + process.once('uncaughtException', common.mustCall((err) => { + assert.strictEqual(err.message, 'foo'); + })); + + runInCallbackScope({}, 0, 0, () => { + throw new Error('foo'); + }); +} diff --git a/test/addons-napi/test_constructor/test_constructor.c b/test/addons-napi/test_constructor/test_constructor.c index 70f53ec5a92f55..4ee8323dd6ed40 100644 --- a/test/addons-napi/test_constructor/test_constructor.c +++ b/test/addons-napi/test_constructor/test_constructor.c @@ -3,7 +3,6 @@ static double value_ = 1; static double static_value_ = 10; -napi_ref constructor_; napi_value GetValue(napi_env env, napi_callback_info info) { size_t argc = 0; @@ -80,8 +79,6 @@ napi_value Init(napi_env env, napi_value exports) { NAPI_CALL(env, napi_define_class(env, "MyObject", NAPI_AUTO_LENGTH, New, NULL, sizeof(properties)/sizeof(*properties), properties, &cons)); - NAPI_CALL(env, napi_create_reference(env, cons, 1, &constructor_)); - return cons; } diff --git a/test/addons-napi/test_dataview/test.js b/test/addons-napi/test_dataview/test.js index 711ab01ddb3cb6..a6be58494069e5 100644 --- a/test/addons-napi/test_dataview/test.js +++ b/test/addons-napi/test_dataview/test.js @@ -5,10 +5,20 @@ const assert = require('assert'); // Testing api calls for arrays const test_dataview = require(`./build/${common.buildType}/test_dataview`); -//create dataview -const buffer = new ArrayBuffer(128); -const template = Reflect.construct(DataView, [buffer]); +// Test for creating dataview +{ + const buffer = new ArrayBuffer(128); + const template = Reflect.construct(DataView, [buffer]); -const theDataview = test_dataview.CreateDataView(template); -assert.ok(theDataview instanceof DataView, - `Expect ${theDataview} to be a DataView`); + const theDataview = 
test_dataview.CreateDataViewFromJSDataView(template); + assert.ok(theDataview instanceof DataView, + `Expect ${theDataview} to be a DataView`); +} + +// Test for creating dataview with invalid range +{ + const buffer = new ArrayBuffer(128); + assert.throws(() => { + test_dataview.CreateDataView(buffer, 10, 200); + }, RangeError); +} diff --git a/test/addons-napi/test_dataview/test_dataview.c b/test/addons-napi/test_dataview/test_dataview.c index 5f95eef0f38032..4d29ed07e9e6f7 100644 --- a/test/addons-napi/test_dataview/test_dataview.c +++ b/test/addons-napi/test_dataview/test_dataview.c @@ -3,6 +3,53 @@ #include "../common.h" napi_value CreateDataView(napi_env env, napi_callback_info info) { + size_t argc = 3; + napi_value args [3]; + NAPI_CALL(env, napi_get_cb_info(env, info, &argc, args, NULL, NULL)); + + NAPI_ASSERT(env, argc == 3, "Wrong number of arguments"); + + napi_valuetype valuetype0; + napi_value arraybuffer = args[0]; + + NAPI_CALL(env, napi_typeof(env, arraybuffer, &valuetype0)); + NAPI_ASSERT(env, valuetype0 == napi_object, + "Wrong type of arguments. Expects a ArrayBuffer as the first " + "argument."); + + bool is_arraybuffer; + NAPI_CALL(env, napi_is_arraybuffer(env, arraybuffer, &is_arraybuffer)); + NAPI_ASSERT(env, is_arraybuffer, + "Wrong type of arguments. Expects a ArrayBuffer as the first " + "argument."); + + napi_valuetype valuetype1; + NAPI_CALL(env, napi_typeof(env, args[1], &valuetype1)); + + NAPI_ASSERT(env, valuetype1 == napi_number, + "Wrong type of arguments. Expects a number as second argument."); + + size_t byte_offset = 0; + NAPI_CALL(env, napi_get_value_uint32(env, args[1], (uint32_t*)(&byte_offset))); + + napi_valuetype valuetype2; + NAPI_CALL(env, napi_typeof(env, args[2], &valuetype2)); + + NAPI_ASSERT(env, valuetype2 == napi_number, + "Wrong type of arguments. 
Expects a number as third argument."); + + size_t length = 0; + NAPI_CALL(env, napi_get_value_uint32(env, args[2], (uint32_t*)(&length))); + + napi_value output_dataview; + NAPI_CALL(env, + napi_create_dataview(env, length, arraybuffer, + byte_offset, &output_dataview)); + + return output_dataview; +} + +napi_value CreateDataViewFromJSDataView(napi_env env, napi_callback_info info) { size_t argc = 1; napi_value args [1]; NAPI_CALL(env, napi_get_cb_info(env, info, &argc, args, NULL, NULL)); @@ -34,12 +81,15 @@ napi_value CreateDataView(napi_env env, napi_callback_info info) { napi_create_dataview(env, length, buffer, byte_offset, &output_dataview)); + return output_dataview; } napi_value Init(napi_env env, napi_value exports) { napi_property_descriptor descriptors[] = { - DECLARE_NAPI_PROPERTY("CreateDataView", CreateDataView) + DECLARE_NAPI_PROPERTY("CreateDataView", CreateDataView), + DECLARE_NAPI_PROPERTY("CreateDataViewFromJSDataView", + CreateDataViewFromJSDataView) }; NAPI_CALL(env, napi_define_properties( diff --git a/test/addons-napi/test_error/test.js b/test/addons-napi/test_error/test.js index bc2b7a8db75d3e..d5c92cb4c3cd2e 100644 --- a/test/addons-napi/test_error/test.js +++ b/test/addons-napi/test_error/test.js @@ -83,56 +83,34 @@ common.expectsError( let error = test_error.createError(); assert.ok(error instanceof Error, 'expected error to be an instance of Error'); -assert.strictEqual(error.message, 'error', 'expected message to be "error"'); +assert.strictEqual(error.message, 'error'); error = test_error.createRangeError(); assert.ok(error instanceof RangeError, 'expected error to be an instance of RangeError'); -assert.strictEqual(error.message, - 'range error', - 'expected message to be "range error"'); +assert.strictEqual(error.message, 'range error'); error = test_error.createTypeError(); assert.ok(error instanceof TypeError, 'expected error to be an instance of TypeError'); -assert.strictEqual(error.message, - 'type error', - 'expected message to be "type error"'); +assert.strictEqual(error.message, 'type error'); error = test_error.createErrorCode(); assert.ok(error instanceof Error, 'expected error to be an instance of Error'); -assert.strictEqual(error.code, - 'ERR_TEST_CODE', - 'expected code to be "ERR_TEST_CODE"'); -assert.strictEqual(error.message, - 'Error [error]', - 'expected message to be "Error [error]"'); -assert.strictEqual(error.name, - 'Error [ERR_TEST_CODE]', - 'expected name to be "Error [ERR_TEST_CODE]"'); +assert.strictEqual(error.code, 'ERR_TEST_CODE'); +assert.strictEqual(error.message, 'Error [error]'); +assert.strictEqual(error.name, 'Error [ERR_TEST_CODE]'); error = test_error.createRangeErrorCode(); assert.ok(error instanceof RangeError, 'expected error to be an instance of RangeError'); -assert.strictEqual(error.message, - 'RangeError [range error]', - 'expected message to be "RangeError [range error]"'); -assert.strictEqual(error.code, - 'ERR_TEST_CODE', - 'expected code to be "ERR_TEST_CODE"'); -assert.strictEqual(error.name, - 'RangeError [ERR_TEST_CODE]', - 'expected name to be "RangeError[ERR_TEST_CODE]"'); +assert.strictEqual(error.message, 'RangeError [range error]'); +assert.strictEqual(error.code, 'ERR_TEST_CODE'); +assert.strictEqual(error.name, 'RangeError [ERR_TEST_CODE]'); error = test_error.createTypeErrorCode(); assert.ok(error instanceof TypeError, 'expected error to be an instance of TypeError'); -assert.strictEqual(error.message, - 'TypeError [type error]', - 'expected message to be "TypeError [type error]"'); 
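
The new `CreateDataView` C function above forwards a caller-supplied byte offset and length to `napi_create_dataview`, and the JavaScript test relies on an out-of-range view (offset 10, length 200 over a 128-byte buffer) leaving a pending `RangeError`. A trimmed sketch of that call pattern, with an illustrative helper name and hard-coded offsets:

```cpp
#include <node_api.h>

// Creates a 16-byte DataView at offset 8 into the ArrayBuffer passed as the
// first argument. If offset + length exceeded the buffer size, the call would
// fail and leave a RangeError pending, as the "invalid range" test exercises.
static napi_value MakeView(napi_env env, napi_callback_info info) {
  size_t argc = 1;
  napi_value arraybuffer;
  if (napi_get_cb_info(env, info, &argc, &arraybuffer, nullptr, nullptr) !=
      napi_ok) {
    return nullptr;
  }

  napi_value dataview;
  if (napi_create_dataview(env, 16, arraybuffer, 8, &dataview) != napi_ok)
    return nullptr;  // a JS exception (e.g. RangeError) is now pending
  return dataview;
}
```
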
-assert.strictEqual(error.code, - 'ERR_TEST_CODE', - 'expected code to be "ERR_TEST_CODE"'); -assert.strictEqual(error.name, - 'TypeError [ERR_TEST_CODE]', - 'expected name to be "TypeError[ERR_TEST_CODE]"'); +assert.strictEqual(error.message, 'TypeError [type error]'); +assert.strictEqual(error.code, 'ERR_TEST_CODE'); +assert.strictEqual(error.name, 'TypeError [ERR_TEST_CODE]'); diff --git a/test/addons-napi/test_exception/test.js b/test/addons-napi/test_exception/test.js index 8bd2f50b12b15f..b9311add6c92d7 100644 --- a/test/addons-napi/test_exception/test.js +++ b/test/addons-napi/test_exception/test.js @@ -1,52 +1,80 @@ 'use strict'; +// Flags: --expose-gc const common = require('../../common'); -const test_exception = require(`./build/${common.buildType}/test_exception`); const assert = require('assert'); const theError = new Error('Some error'); -function throwTheError() { - throw theError; + +// The test module throws an error during Init, but in order for its exports to +// not be lost, it attaches them to the error's "bindings" property. This way, +// we can make sure that exceptions thrown during the module initialization +// phase are propagated through require() into JavaScript. +// https://github.com/nodejs/node/issues/19437 +const test_exception = (function() { + let resultingException; + try { + require(`./build/${common.buildType}/test_exception`); + } catch (anException) { + resultingException = anException; + } + assert.strictEqual(resultingException.message, 'Error during Init'); + return resultingException.binding; +})(); + +{ + const throwTheError = () => { throw theError; }; + + // Test that the native side successfully captures the exception + let returnedError = test_exception.returnException(throwTheError); + assert.strictEqual(theError, returnedError); + + // Test that the native side passes the exception through + assert.throws( + () => { test_exception.allowException(throwTheError); }, + (err) => err === theError + ); + + // Test that the exception thrown above was marked as pending + // before it was handled on the JS side + const exception_pending = test_exception.wasPending(); + assert.strictEqual(exception_pending, true, + 'Exception not pending as expected,' + + ` .wasPending() returned ${exception_pending}`); + + // Test that the native side does not capture a non-existing exception + returnedError = test_exception.returnException(common.mustCall()); + assert.strictEqual(returnedError, undefined, + 'Returned error should be undefined when no exception is' + + ` thrown, but ${returnedError} was passed`); } -let caughtError; - -// Test that the native side successfully captures the exception -let returnedError = test_exception.returnException(throwTheError); -assert.strictEqual(theError, returnedError); - -// Test that the native side passes the exception through -assert.throws( - () => { - test_exception.allowException(throwTheError); - }, - function(err) { - return err === theError; - }, - 'Thrown exception was allowed to pass through unhindered' -); - -// Test that the exception thrown above was marked as pending -// before it was handled on the JS side -assert.strictEqual(test_exception.wasPending(), true, - 'VM was marked as having an exception pending' + - ' when it was allowed through'); - -// Test that the native side does not capture a non-existing exception -returnedError = test_exception.returnException(common.mustCall()); -assert.strictEqual(undefined, returnedError, - 'Returned error should be undefined when no exception is' + - ` thrown, but 
${returnedError} was passed`); - -// Test that no exception appears that was not thrown by us -try { - test_exception.allowException(common.mustCall()); -} catch (anError) { - caughtError = anError; + +{ + // Test that no exception appears that was not thrown by us + let caughtError; + try { + test_exception.allowException(common.mustCall()); + } catch (anError) { + caughtError = anError; + } + assert.strictEqual(caughtError, undefined, + 'No exception originated on the native side, but' + + ` ${caughtError} was passed`); + + // Test that the exception state remains clear when no exception is thrown + const exception_pending = test_exception.wasPending(); + assert.strictEqual(exception_pending, false, + 'Exception state did not remain clear as expected,' + + ` .wasPending() returned ${exception_pending}`); +} + +// Make sure that exceptions that occur during finalization are propagated. +function testFinalize(binding) { + let x = test_exception[binding](); + x = null; + assert.throws(() => { global.gc(); }, /Error during Finalize/); + + // To assuage the linter's concerns. + (function() {})(x); } -assert.strictEqual(undefined, caughtError, - 'No exception originated on the native side, but' + - ` ${caughtError} was passed`); - -// Test that the exception state remains clear when no exception is thrown -assert.strictEqual(test_exception.wasPending(), false, - 'VM was not marked as having an exception pending' + - ' when none was allowed through'); +testFinalize('createExternal'); +testFinalize('createExternalBuffer'); diff --git a/test/addons-napi/test_exception/test_exception.c b/test/addons-napi/test_exception/test_exception.c index 8b664210e902a5..61116d0603bfae 100644 --- a/test/addons-napi/test_exception/test_exception.c +++ b/test/addons-napi/test_exception/test_exception.c @@ -3,7 +3,7 @@ static bool exceptionWasPending = false; -napi_value returnException(napi_env env, napi_callback_info info) { +static napi_value returnException(napi_env env, napi_callback_info info) { size_t argc = 1; napi_value args[1]; NAPI_CALL(env, napi_get_cb_info(env, info, &argc, args, NULL, NULL)); @@ -22,7 +22,7 @@ napi_value returnException(napi_env env, napi_callback_info info) { return NULL; } -napi_value allowException(napi_env env, napi_callback_info info) { +static napi_value allowException(napi_env env, napi_callback_info info) { size_t argc = 1; napi_value args[1]; NAPI_CALL(env, napi_get_cb_info(env, info, &argc, args, NULL, NULL)); @@ -38,23 +38,55 @@ napi_value allowException(napi_env env, napi_callback_info info) { return NULL; } -napi_value wasPending(napi_env env, napi_callback_info info) { +static napi_value wasPending(napi_env env, napi_callback_info info) { napi_value result; NAPI_CALL(env, napi_get_boolean(env, exceptionWasPending, &result)); return result; } -napi_value Init(napi_env env, napi_value exports) { +static void finalizer(napi_env env, void *data, void *hint) { + NAPI_CALL_RETURN_VOID(env, + napi_throw_error(env, NULL, "Error during Finalize")); +} + +static napi_value createExternal(napi_env env, napi_callback_info info) { + napi_value external; + + NAPI_CALL(env, + napi_create_external(env, NULL, finalizer, NULL, &external)); + + return external; +} + +static char buffer_data[12]; + +static napi_value createExternalBuffer(napi_env env, napi_callback_info info) { + napi_value buffer; + NAPI_CALL(env, napi_create_external_buffer(env, sizeof(buffer_data), + buffer_data, finalizer, NULL, &buffer)); + return buffer; +} + +static napi_value Init(napi_env env, napi_value exports) 
{ napi_property_descriptor descriptors[] = { DECLARE_NAPI_PROPERTY("returnException", returnException), DECLARE_NAPI_PROPERTY("allowException", allowException), DECLARE_NAPI_PROPERTY("wasPending", wasPending), + DECLARE_NAPI_PROPERTY("createExternal", createExternal), + DECLARE_NAPI_PROPERTY("createExternalBuffer", createExternalBuffer), }; - NAPI_CALL(env, napi_define_properties( env, exports, sizeof(descriptors) / sizeof(*descriptors), descriptors)); + napi_value error, code, message; + NAPI_CALL(env, napi_create_string_utf8(env, "Error during Init", + NAPI_AUTO_LENGTH, &message)); + NAPI_CALL(env, napi_create_string_utf8(env, "", NAPI_AUTO_LENGTH, &code)); + NAPI_CALL(env, napi_create_error(env, code, message, &error)); + NAPI_CALL(env, napi_set_named_property(env, error, "binding", exports)); + NAPI_CALL(env, napi_throw(env, error)); + return exports; } diff --git a/test/addons-napi/test_fatal_exception/binding.gyp b/test/addons-napi/test_fatal_exception/binding.gyp new file mode 100644 index 00000000000000..f4dc0a71ea2817 --- /dev/null +++ b/test/addons-napi/test_fatal_exception/binding.gyp @@ -0,0 +1,8 @@ +{ + "targets": [ + { + "target_name": "test_fatal_exception", + "sources": [ "test_fatal_exception.c" ] + } + ] +} diff --git a/test/addons-napi/test_fatal_exception/test.js b/test/addons-napi/test_fatal_exception/test.js new file mode 100644 index 00000000000000..f02b9bce1e8169 --- /dev/null +++ b/test/addons-napi/test_fatal_exception/test.js @@ -0,0 +1,11 @@ +'use strict'; +const common = require('../../common'); +const assert = require('assert'); +const test_fatal = require(`./build/${common.buildType}/test_fatal_exception`); + +process.on('uncaughtException', common.mustCall(function(err) { + assert.strictEqual(err.message, 'fatal error'); +})); + +const err = new Error('fatal error'); +test_fatal.Test(err); diff --git a/test/addons-napi/test_fatal_exception/test_fatal_exception.c b/test/addons-napi/test_fatal_exception/test_fatal_exception.c new file mode 100644 index 00000000000000..fd81c56d856db8 --- /dev/null +++ b/test/addons-napi/test_fatal_exception/test_fatal_exception.c @@ -0,0 +1,26 @@ +#include +#include "../common.h" + +napi_value Test(napi_env env, napi_callback_info info) { + napi_value err; + size_t argc = 1; + + NAPI_CALL(env, napi_get_cb_info(env, info, &argc, &err, NULL, NULL)); + + NAPI_CALL(env, napi_fatal_exception(env, err)); + + return NULL; +} + +napi_value Init(napi_env env, napi_value exports) { + napi_property_descriptor properties[] = { + DECLARE_NAPI_PROPERTY("Test", Test), + }; + + NAPI_CALL(env, napi_define_properties( + env, exports, sizeof(properties) / sizeof(*properties), properties)); + + return exports; +} + +NAPI_MODULE(NODE_GYP_MODULE_NAME, Init) diff --git a/test/addons-napi/test_general/test.js b/test/addons-napi/test_general/test.js index ee6618c8121289..4faf508d5db145 100644 --- a/test/addons-napi/test_general/test.js +++ b/test/addons-napi/test_general/test.js @@ -28,13 +28,13 @@ assert.strictEqual(test_general.testGetPrototype(baseObject), Object.getPrototypeOf(baseObject)); assert.strictEqual(test_general.testGetPrototype(extendedObject), Object.getPrototypeOf(extendedObject)); -assert.ok(test_general.testGetPrototype(baseObject) !== - test_general.testGetPrototype(extendedObject), - 'Prototypes for base and extended should be different'); +// Prototypes for base and extended should be different. 
+assert.notStrictEqual(test_general.testGetPrototype(baseObject), + test_general.testGetPrototype(extendedObject)); // test version management functions -// expected version is currently 1 -assert.strictEqual(test_general.testGetVersion(), 2); +// expected version is currently 3 +assert.strictEqual(test_general.testGetVersion(), 3); const [ major, minor, patch, release ] = test_general.testGetNodeVersion(); assert.strictEqual(process.version.split('-')[0], @@ -63,24 +63,25 @@ let w = {}; test_general.wrap(w); w = null; global.gc(); -assert.strictEqual(test_general.derefItemWasCalled(), true, +const derefItemWasCalled = test_general.derefItemWasCalled(); +assert.strictEqual(derefItemWasCalled, true, 'deref_item() was called upon garbage collecting a ' + - 'wrapped object'); + 'wrapped object. test_general.derefItemWasCalled() ' + + `returned ${derefItemWasCalled}`); + // Assert that wrapping twice fails. const x = {}; test_general.wrap(x); -assert.throws(function() { - test_general.wrap(x); -}, Error); +common.expectsError(() => test_general.wrap(x), + { type: Error, message: 'Invalid argument' }); // Ensure that wrapping, removing the wrap, and then wrapping again works. const y = {}; test_general.wrap(y); test_general.removeWrap(y); -assert.doesNotThrow(function() { - test_general.wrap(y); -}, Error, 'Wrapping twice succeeds if a remove_wrap() separates the instances'); +// Wrapping twice succeeds if a remove_wrap() separates the instances +assert.doesNotThrow(() => test_general.wrap(y)); // Ensure that removing a wrap and garbage collecting does not fire the // finalize callback. @@ -89,8 +90,11 @@ test_general.testFinalizeWrap(z); test_general.removeWrap(z); z = null; global.gc(); -assert.strictEqual(test_general.finalizeWasCalled(), false, - 'finalize callback was not called upon garbage collection'); +const finalizeWasCalled = test_general.finalizeWasCalled(); +assert.strictEqual(finalizeWasCalled, false, + 'finalize callback was not called upon garbage collection.' 
+ + ' test_general.finalizeWasCalled() ' + + `returned ${finalizeWasCalled}`); // test napi_adjust_external_memory const adjustedValue = test_general.testAdjustExternalMemory(); diff --git a/test/addons-napi/test_make_callback/binding.cc b/test/addons-napi/test_make_callback/binding.c similarity index 80% rename from test/addons-napi/test_make_callback/binding.cc rename to test/addons-napi/test_make_callback/binding.c index 952dfcc1cb5bec..23750f56b838fc 100644 --- a/test/addons-napi/test_make_callback/binding.cc +++ b/test/addons-napi/test_make_callback/binding.c @@ -1,13 +1,13 @@ #include #include "../common.h" -#include -namespace { +#define MAX_ARGUMENTS 10 +static napi_value MakeCallback(napi_env env, napi_callback_info info) { - const int kMaxArgs = 10; - size_t argc = kMaxArgs; - napi_value args[kMaxArgs]; + size_t argc = MAX_ARGUMENTS; + size_t n; + napi_value args[MAX_ARGUMENTS]; // NOLINTNEXTLINE (readability/null_usage) NAPI_CALL(env, napi_get_cb_info(env, info, &argc, args, NULL, NULL)); @@ -16,9 +16,9 @@ napi_value MakeCallback(napi_env env, napi_callback_info info) { napi_value recv = args[0]; napi_value func = args[1]; - std::vector argv; - for (size_t n = 2; n < argc; n += 1) { - argv.push_back(args[n]); + napi_value argv[MAX_ARGUMENTS - 2]; + for (n = 2; n < argc; n += 1) { + argv[n - 2] = args[n]; } napi_valuetype func_type; @@ -35,7 +35,7 @@ napi_value MakeCallback(napi_env env, napi_callback_info info) { napi_value result; if (func_type == napi_function) { NAPI_CALL(env, napi_make_callback( - env, context, recv, func, argv.size(), argv.data(), &result)); + env, context, recv, func, argc - 2, argv, &result)); } else { NAPI_ASSERT(env, false, "Unexpected argument type"); } @@ -45,6 +45,7 @@ napi_value MakeCallback(napi_env env, napi_callback_info info) { return result; } +static napi_value Init(napi_env env, napi_value exports) { napi_value fn; NAPI_CALL(env, napi_create_function( @@ -54,6 +55,4 @@ napi_value Init(napi_env env, napi_value exports) { return exports; } -} // namespace - NAPI_MODULE(NODE_GYP_MODULE_NAME, Init) diff --git a/test/addons-napi/test_make_callback/binding.gyp b/test/addons-napi/test_make_callback/binding.gyp index 7ede63d94a0d77..23daf507916ff6 100644 --- a/test/addons-napi/test_make_callback/binding.gyp +++ b/test/addons-napi/test_make_callback/binding.gyp @@ -3,7 +3,7 @@ { 'target_name': 'binding', 'defines': [ 'V8_DEPRECATION_WARNINGS=1' ], - 'sources': [ 'binding.cc' ] + 'sources': [ 'binding.c' ] } ] } diff --git a/test/addons-napi/test_make_callback/test-async-hooks.js b/test/addons-napi/test_make_callback/test-async-hooks.js new file mode 100644 index 00000000000000..755a2389c68591 --- /dev/null +++ b/test/addons-napi/test_make_callback/test-async-hooks.js @@ -0,0 +1,44 @@ +'use strict'; + +const common = require('../../common'); +const assert = require('assert'); +const async_hooks = require('async_hooks'); +const binding = require(`./build/${common.buildType}/binding`); +const makeCallback = binding.makeCallback; + +// Check async hooks integration using async context. 
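
The async-hooks coverage moves into its own file, test-async-hooks.js (added below), which drives `makeCallback` and expects init/before/after/destroy events for an async resource of type `'test'`. One way to produce those events from native code is the `napi_async_init` / `napi_make_callback` / `napi_async_destroy` sequence; the sketch below is a hedged illustration of that sequence, not a copy of binding.c, and the helper name `CallWithContext` is made up:

```cpp
#include <node_api.h>

// Invokes args[1] with args[0] as the receiver, inside an async context named
// "test", so async_hooks observes init/before/after for the call and destroy
// once the context is torn down (on a later tick, hence the setImmediate in
// the test).
static napi_value CallWithContext(napi_env env, napi_callback_info info) {
  size_t argc = 2;
  napi_value args[2];
  if (napi_get_cb_info(env, info, &argc, args, nullptr, nullptr) != napi_ok)
    return nullptr;

  napi_value name;
  napi_create_string_utf8(env, "test", NAPI_AUTO_LENGTH, &name);

  napi_async_context context;
  napi_async_init(env, nullptr, name, &context);

  napi_value result = nullptr;
  napi_make_callback(env, context, args[0], args[1], 0, nullptr, &result);

  napi_async_destroy(env, context);
  return result;
}
```
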
+const hook_result = { + id: null, + init_called: false, + before_called: false, + after_called: false, + destroy_called: false, +}; +const test_hook = async_hooks.createHook({ + init: (id, type) => { + if (type === 'test') { + hook_result.id = id; + hook_result.init_called = true; + } + }, + before: (id) => { + if (id === hook_result.id) hook_result.before_called = true; + }, + after: (id) => { + if (id === hook_result.id) hook_result.after_called = true; + }, + destroy: (id) => { + if (id === hook_result.id) hook_result.destroy_called = true; + }, +}); + +test_hook.enable(); +makeCallback(process, function() {}); + +assert.strictEqual(hook_result.init_called, true); +assert.strictEqual(hook_result.before_called, true); +assert.strictEqual(hook_result.after_called, true); +setImmediate(() => { + assert.strictEqual(hook_result.destroy_called, true); + test_hook.disable(); +}); diff --git a/test/addons-napi/test_make_callback/test.js b/test/addons-napi/test_make_callback/test.js index 0e94caf1d975f2..56e2b3f4e2b6c6 100644 --- a/test/addons-napi/test_make_callback/test.js +++ b/test/addons-napi/test_make_callback/test.js @@ -2,7 +2,6 @@ const common = require('../../common'); const assert = require('assert'); -const async_hooks = require('async_hooks'); const vm = require('vm'); const binding = require(`./build/${common.buildType}/binding`); const makeCallback = binding.makeCallback; @@ -81,40 +80,3 @@ function endpoint($Object) { } assert.strictEqual(Object, makeCallback(process, forward, endpoint)); - -// Check async hooks integration using async context. -const hook_result = { - id: null, - init_called: false, - before_called: false, - after_called: false, - destroy_called: false, -}; -const test_hook = async_hooks.createHook({ - init: (id, type) => { - if (type === 'test') { - hook_result.id = id; - hook_result.init_called = true; - } - }, - before: (id) => { - if (id === hook_result.id) hook_result.before_called = true; - }, - after: (id) => { - if (id === hook_result.id) hook_result.after_called = true; - }, - destroy: (id) => { - if (id === hook_result.id) hook_result.destroy_called = true; - }, -}); - -test_hook.enable(); -makeCallback(process, function() {}); - -assert.strictEqual(hook_result.init_called, true); -assert.strictEqual(hook_result.before_called, true); -assert.strictEqual(hook_result.after_called, true); -setImmediate(() => { - assert.strictEqual(hook_result.destroy_called, true); - test_hook.disable(); -}); diff --git a/test/addons-napi/test_make_callback_recurse/binding.cc b/test/addons-napi/test_make_callback_recurse/binding.cc index b99c583d31d9f9..bcf81d14e86e20 100644 --- a/test/addons-napi/test_make_callback_recurse/binding.cc +++ b/test/addons-napi/test_make_callback_recurse/binding.cc @@ -13,9 +13,22 @@ napi_value MakeCallback(napi_env env, napi_callback_info info) { napi_value recv = args[0]; napi_value func = args[1]; - napi_make_callback(env, nullptr /* async_context */, + napi_status status = napi_make_callback(env, nullptr /* async_context */, recv, func, 0 /* argc */, nullptr /* argv */, nullptr /* result */); + bool isExceptionPending; + NAPI_CALL(env, napi_is_exception_pending(env, &isExceptionPending)); + if (isExceptionPending && !(status == napi_pending_exception)) { + // if there is an exception pending we don't expect any + // other error + napi_value pending_error; + status = napi_get_and_clear_last_exception(env, &pending_error); + NAPI_CALL(env, + napi_throw_error((env), + nullptr, + "error when only pending exception expected")); + } + return 
recv; } diff --git a/test/addons-napi/test_new_target/binding.c b/test/addons-napi/test_new_target/binding.c new file mode 100644 index 00000000000000..a74d4bb2f877be --- /dev/null +++ b/test/addons-napi/test_new_target/binding.c @@ -0,0 +1,69 @@ +#include +#include "../common.h" + +napi_value BaseClass(napi_env env, napi_callback_info info) { + napi_value newTargetArg; + NAPI_CALL(env, napi_get_new_target(env, info, &newTargetArg)); + napi_value thisArg; + NAPI_CALL(env, napi_get_cb_info(env, info, NULL, NULL, &thisArg, NULL)); + napi_value undefined; + NAPI_CALL(env, napi_get_undefined(env, &undefined)); + + // this !== new.target since we are being invoked through super() + bool result; + NAPI_CALL(env, napi_strict_equals(env, newTargetArg, thisArg, &result)); + NAPI_ASSERT(env, !result, "this !== new.target"); + + // new.target !== undefined because we should be called as a new expression + NAPI_ASSERT(env, newTargetArg != NULL, "newTargetArg != NULL"); + NAPI_CALL(env, napi_strict_equals(env, newTargetArg, undefined, &result)); + NAPI_ASSERT(env, !result, "new.target !== undefined"); + + return thisArg; +} + +napi_value Constructor(napi_env env, napi_callback_info info) { + bool result; + napi_value newTargetArg; + NAPI_CALL(env, napi_get_new_target(env, info, &newTargetArg)); + size_t argc = 1; + napi_value argv; + napi_value thisArg; + NAPI_CALL(env, napi_get_cb_info(env, info, &argc, &argv, &thisArg, NULL)); + napi_value undefined; + NAPI_CALL(env, napi_get_undefined(env, &undefined)); + + // new.target !== undefined because we should be called as a new expression + NAPI_ASSERT(env, newTargetArg != NULL, "newTargetArg != NULL"); + NAPI_CALL(env, napi_strict_equals(env, newTargetArg, undefined, &result)); + NAPI_ASSERT(env, !result, "new.target !== undefined"); + + // arguments[0] should be Constructor itself (test harness passed it) + NAPI_CALL(env, napi_strict_equals(env, newTargetArg, argv, &result)); + NAPI_ASSERT(env, result, "new.target === Constructor"); + + return thisArg; +} + +napi_value OrdinaryFunction(napi_env env, napi_callback_info info) { + napi_value newTargetArg; + NAPI_CALL(env, napi_get_new_target(env, info, &newTargetArg)); + + NAPI_ASSERT(env, newTargetArg == NULL, "newTargetArg == NULL"); + + napi_value _true; + NAPI_CALL(env, napi_get_boolean(env, true, &_true)); + return _true; +} + +napi_value Init(napi_env env, napi_value exports) { + const napi_property_descriptor desc[] = { + DECLARE_NAPI_PROPERTY("BaseClass", BaseClass), + DECLARE_NAPI_PROPERTY("OrdinaryFunction", OrdinaryFunction), + DECLARE_NAPI_PROPERTY("Constructor", Constructor) + }; + NAPI_CALL(env, napi_define_properties(env, exports, 3, desc)); + return exports; +} + +NAPI_MODULE(NODE_GYP_MODULE_NAME, Init) diff --git a/test/addons-napi/test_new_target/binding.gyp b/test/addons-napi/test_new_target/binding.gyp new file mode 100644 index 00000000000000..23daf507916ff6 --- /dev/null +++ b/test/addons-napi/test_new_target/binding.gyp @@ -0,0 +1,9 @@ +{ + 'targets': [ + { + 'target_name': 'binding', + 'defines': [ 'V8_DEPRECATION_WARNINGS=1' ], + 'sources': [ 'binding.c' ] + } + ] +} diff --git a/test/addons-napi/test_new_target/test.js b/test/addons-napi/test_new_target/test.js new file mode 100644 index 00000000000000..702e8ca8b4387d --- /dev/null +++ b/test/addons-napi/test_new_target/test.js @@ -0,0 +1,21 @@ +'use strict'; + +const common = require('../../common'); +const assert = require('assert'); +const binding = require(`./build/${common.buildType}/binding`); + +class Class extends 
binding.BaseClass { + constructor() { + super(); + this.method(); + } + method() { + this.ok = true; + } +} + +assert.ok(new Class() instanceof binding.BaseClass); +assert.ok(new Class().ok); +assert.ok(binding.OrdinaryFunction()); +assert.ok( + new binding.Constructor(binding.Constructor) instanceof binding.Constructor); diff --git a/test/addons-napi/test_number/test.js b/test/addons-napi/test_number/test.js index b8ce6b7f995b66..3dbbb64932e502 100644 --- a/test/addons-napi/test_number/test.js +++ b/test/addons-napi/test_number/test.js @@ -5,43 +5,113 @@ const test_number = require(`./build/${common.buildType}/test_number`); // testing api calls for number -assert.strictEqual(0, test_number.Test(0)); -assert.strictEqual(1, test_number.Test(1)); -assert.strictEqual(-1, test_number.Test(-1)); -assert.strictEqual(100, test_number.Test(100)); -assert.strictEqual(2121, test_number.Test(2121)); -assert.strictEqual(-1233, test_number.Test(-1233)); -assert.strictEqual(986583, test_number.Test(986583)); -assert.strictEqual(-976675, test_number.Test(-976675)); +function testNumber(num) { + assert.strictEqual(num, test_number.Test(num)); +} -const num1 = 98765432213456789876546896323445679887645323232436587988766545658; -assert.strictEqual(num1, test_number.Test(num1)); +testNumber(0); +testNumber(-0); +testNumber(1); +testNumber(-1); +testNumber(100); +testNumber(2121); +testNumber(-1233); +testNumber(986583); +testNumber(-976675); -const num2 = -4350987086545760976737453646576078997096876957864353245245769809; -assert.strictEqual(num2, test_number.Test(num2)); +testNumber( + 98765432213456789876546896323445679887645323232436587988766545658); +testNumber( + -4350987086545760976737453646576078997096876957864353245245769809); +testNumber(Number.MIN_SAFE_INTEGER); +testNumber(Number.MAX_SAFE_INTEGER); +testNumber(Number.MAX_SAFE_INTEGER + 10); -const num3 = Number.MAX_SAFE_INTEGER; -assert.strictEqual(num3, test_number.Test(num3)); +testNumber(Number.MIN_VALUE); +testNumber(Number.MAX_VALUE); +testNumber(Number.MAX_VALUE + 10); -const num4 = Number.MAX_SAFE_INTEGER + 10; -assert.strictEqual(num4, test_number.Test(num4)); +testNumber(Number.POSITIVE_INFINITY); +testNumber(Number.NEGATIVE_INFINITY); +assert(Object.is(NaN, test_number.Test(NaN))); -const num5 = Number.MAX_VALUE; -assert.strictEqual(num5, test_number.Test(num5)); +// validate documented behavior when value is retrieved as 32-bit integer with +// `napi_get_value_int32` +function testInt32(input, expected = input) { + assert.strictEqual(expected, test_number.TestInt32Truncation(input)); +} -const num6 = Number.MAX_VALUE + 10; -assert.strictEqual(num6, test_number.Test(num6)); +// Test zero +testInt32(0.0, 0); +testInt32(-0.0, 0); -const num7 = Number.POSITIVE_INFINITY; -assert.strictEqual(num7, test_number.Test(num7)); +// Test min/max int32 range +testInt32(-Math.pow(2, 31)); +testInt32(Math.pow(2, 31) - 1); -const num8 = Number.NEGATIVE_INFINITY; -assert.strictEqual(num8, test_number.Test(num8)); +// Test overflow scenarios +testInt32(4294967297, 1); +testInt32(4294967296, 0); +testInt32(4294967295, -1); +testInt32(4294967296 * 5 + 3, 3); +// Test min/max safe integer range +testInt32(Number.MIN_SAFE_INTEGER, 1); +testInt32(Number.MAX_SAFE_INTEGER, -1); -// validate documented behaviour when value is retrieved -// as 32 bit integer with napi_get_value_int32 -assert.strictEqual(1, test_number.TestInt32Truncation(4294967297)); -assert.strictEqual(0, test_number.TestInt32Truncation(4294967296)); -assert.strictEqual(-1, 
test_number.TestInt32Truncation(4294967295)); -assert.strictEqual(3, test_number.TestInt32Truncation(4294967296 * 5 + 3)); +// Test within int64_t range (with precision loss) +testInt32(-Math.pow(2, 63) + (Math.pow(2, 9) + 1), 1024); +testInt32(Math.pow(2, 63) - (Math.pow(2, 9) + 1), -1024); + +// Test min/max double value +testInt32(-Number.MIN_VALUE, 0); +testInt32(Number.MIN_VALUE, 0); +testInt32(-Number.MAX_VALUE, 0); +testInt32(Number.MAX_VALUE, 0); + +// Test outside int64_t range +testInt32(-Math.pow(2, 63) + (Math.pow(2, 9)), 0); +testInt32(Math.pow(2, 63) - (Math.pow(2, 9)), 0); + +// Test non-finite numbers +testInt32(Number.POSITIVE_INFINITY, 0); +testInt32(Number.NEGATIVE_INFINITY, 0); +testInt32(Number.NaN, 0); + +// validate documented behavior when value is retrieved as 64-bit integer with +// `napi_get_value_int64` +function testInt64(input, expected = input) { + assert.strictEqual(expected, test_number.TestInt64Truncation(input)); +} + +// Both V8 and ChakraCore return a sentinel value of `0x8000000000000000` when +// the conversion goes out of range, but V8 treats it as unsigned in some cases. +const RANGEERROR_POSITIVE = Math.pow(2, 63); +const RANGEERROR_NEGATIVE = -Math.pow(2, 63); + +// Test zero +testInt64(0.0, 0); +testInt64(-0.0, 0); + +// Test min/max safe integer range +testInt64(Number.MIN_SAFE_INTEGER); +testInt64(Number.MAX_SAFE_INTEGER); + +// Test within int64_t range (with precision loss) +testInt64(-Math.pow(2, 63) + (Math.pow(2, 9) + 1)); +testInt64(Math.pow(2, 63) - (Math.pow(2, 9) + 1)); + +// Test min/max double value +testInt64(-Number.MIN_VALUE, 0); +testInt64(Number.MIN_VALUE, 0); +testInt64(-Number.MAX_VALUE, RANGEERROR_NEGATIVE); +testInt64(Number.MAX_VALUE, RANGEERROR_POSITIVE); + +// Test outside int64_t range +testInt64(-Math.pow(2, 63) + (Math.pow(2, 9)), RANGEERROR_NEGATIVE); +testInt64(Math.pow(2, 63) - (Math.pow(2, 9)), RANGEERROR_POSITIVE); + +// Test non-finite numbers +testInt64(Number.POSITIVE_INFINITY, 0); +testInt64(Number.NEGATIVE_INFINITY, 0); +testInt64(Number.NaN, 0); diff --git a/test/addons-napi/test_number/test_number.c b/test/addons-napi/test_number/test_number.c index 3707f1ee57d713..a1a70950083324 100644 --- a/test/addons-napi/test_number/test_number.c +++ b/test/addons-napi/test_number/test_number.c @@ -45,10 +45,33 @@ napi_value TestInt32Truncation(napi_env env, napi_callback_info info) { return output; } +napi_value TestInt64Truncation(napi_env env, napi_callback_info info) { + size_t argc = 1; + napi_value args[1]; + NAPI_CALL(env, napi_get_cb_info(env, info, &argc, args, NULL, NULL)); + + NAPI_ASSERT(env, argc >= 1, "Wrong number of arguments"); + + napi_valuetype valuetype0; + NAPI_CALL(env, napi_typeof(env, args[0], &valuetype0)); + + NAPI_ASSERT(env, valuetype0 == napi_number, + "Wrong type of arguments. 
Expects a number as first argument."); + + int64_t input; + NAPI_CALL(env, napi_get_value_int64(env, args[0], &input)); + + napi_value output; + NAPI_CALL(env, napi_create_int64(env, input, &output)); + + return output; +} + napi_value Init(napi_env env, napi_value exports) { napi_property_descriptor descriptors[] = { DECLARE_NAPI_PROPERTY("Test", Test), DECLARE_NAPI_PROPERTY("TestInt32Truncation", TestInt32Truncation), + DECLARE_NAPI_PROPERTY("TestInt64Truncation", TestInt64Truncation), }; NAPI_CALL(env, napi_define_properties( diff --git a/test/addons-napi/test_object/test_object.c b/test/addons-napi/test_object/test_object.c index 49a90dd3f99f45..ccf1573114a6f1 100644 --- a/test/addons-napi/test_object/test_object.c +++ b/test/addons-napi/test_object/test_object.c @@ -1,7 +1,6 @@ #include #include "../common.h" #include -#include static int test_value = 3; @@ -199,9 +198,7 @@ napi_value Wrap(napi_env env, napi_callback_info info) { napi_value arg; NAPI_CALL(env, napi_get_cb_info(env, info, &argc, &arg, NULL, NULL)); - int32_t* data = malloc(sizeof(int32_t)); - *data = test_value; - NAPI_CALL(env, napi_wrap(env, arg, data, NULL, NULL, NULL)); + NAPI_CALL(env, napi_wrap(env, arg, &test_value, NULL, NULL, NULL)); return NULL; } diff --git a/test/addons-napi/test_promise/test.js b/test/addons-napi/test_promise/test.js index b43ecd87363e44..477ceb75969d51 100644 --- a/test/addons-napi/test_promise/test.js +++ b/test/addons-napi/test_promise/test.js @@ -7,6 +7,8 @@ const common = require('../../common'); const assert = require('assert'); const test_promise = require(`./build/${common.buildType}/test_promise`); +common.crashOnUnhandledRejection(); + // A resolution { const expected_result = 42; @@ -43,8 +45,17 @@ const test_promise = require(`./build/${common.buildType}/test_promise`); test_promise.concludeCurrentPromise(Promise.resolve('chained answer'), true); } -assert.strictEqual(test_promise.isPromise(test_promise.createPromise()), true); -assert.strictEqual(test_promise.isPromise(Promise.reject(-1)), true); +const promiseTypeTestPromise = test_promise.createPromise(); +assert.strictEqual(test_promise.isPromise(promiseTypeTestPromise), true); +test_promise.concludeCurrentPromise(undefined, true); + +const rejectPromise = Promise.reject(-1); +const expected_reason = -1; +assert.strictEqual(test_promise.isPromise(rejectPromise), true); +rejectPromise.catch((reason) => { + assert.strictEqual(reason, expected_reason); +}); + assert.strictEqual(test_promise.isPromise(2.4), false); assert.strictEqual(test_promise.isPromise('I promise!'), false); assert.strictEqual(test_promise.isPromise(undefined), false); diff --git a/test/addons-napi/test_reference/test_reference.c b/test/addons-napi/test_reference/test_reference.c index f0ede447814e17..f34adc6693b6fe 100644 --- a/test/addons-napi/test_reference/test_reference.c +++ b/test/addons-napi/test_reference/test_reference.c @@ -1,6 +1,5 @@ #include #include "../common.h" -#include static int test_value = 1; static int finalize_count = 0; @@ -13,7 +12,9 @@ napi_value GetFinalizeCount(napi_env env, napi_callback_info info) { } void FinalizeExternal(napi_env env, void* data, void* hint) { - free(data); + int *actual_value = data; + NAPI_ASSERT_RETURN_VOID(env, actual_value == &test_value, + "The correct pointer was passed to the finalizer"); finalize_count++; } @@ -33,13 +34,10 @@ napi_value CreateExternal(napi_env env, napi_callback_info info) { } napi_value CreateExternalWithFinalize(napi_env env, napi_callback_info info) { - int* data = 
malloc(sizeof(int)); - *data = test_value; - napi_value result; NAPI_CALL(env, napi_create_external(env, - data, + &test_value, FinalizeExternal, NULL, /* finalize_hint */ &result)); diff --git a/test/addons-napi/test_symbol/test_symbol.c b/test/addons-napi/test_symbol/test_symbol.c index 6acc4c55db0b2b..c91b6ae54f4932 100644 --- a/test/addons-napi/test_symbol/test_symbol.c +++ b/test/addons-napi/test_symbol/test_symbol.c @@ -12,7 +12,7 @@ napi_value Test(napi_env env, napi_callback_info info) { NAPI_CALL(env, napi_typeof(env, args[0], &valuetype)); NAPI_ASSERT(env, valuetype == napi_symbol, - "Wrong type of argments. Expects a symbol."); + "Wrong type of arguments. Expects a symbol."); char buffer[128]; size_t buffer_size = 128; diff --git a/test/addons-napi/test_typedarray/test.js b/test/addons-napi/test_typedarray/test.js index 27ef054fe4635e..4c139d92200fe3 100644 --- a/test/addons-napi/test_typedarray/test.js +++ b/test/addons-napi/test_typedarray/test.js @@ -55,3 +55,21 @@ arrayTypes.forEach((currentType) => { assert.notStrictEqual(theArray, template); assert.strictEqual(theArray.buffer, buffer); }); + +arrayTypes.forEach((currentType) => { + const template = Reflect.construct(currentType, buffer); + assert.throws(() => { + test_typedarray.CreateTypedArray(template, buffer, 0, 136); + }, RangeError); +}); + +const nonByteArrayTypes = [ Int16Array, Uint16Array, Int32Array, Uint32Array, + Float32Array, Float64Array ]; +nonByteArrayTypes.forEach((currentType) => { + const template = Reflect.construct(currentType, buffer); + assert.throws(() => { + test_typedarray.CreateTypedArray(template, buffer, + currentType.BYTES_PER_ELEMENT + 1, 1); + console.log(`start of offset ${currentType}`); + }, RangeError); +}); diff --git a/test/addons-napi/test_typedarray/test_typedarray.c b/test/addons-napi/test_typedarray/test_typedarray.c index 0325faedd09f8e..2758a6f53298fe 100644 --- a/test/addons-napi/test_typedarray/test_typedarray.c +++ b/test/addons-napi/test_typedarray/test_typedarray.c @@ -13,20 +13,20 @@ napi_value Multiply(napi_env env, napi_callback_info info) { NAPI_CALL(env, napi_typeof(env, args[0], &valuetype0)); NAPI_ASSERT(env, valuetype0 == napi_object, - "Wrong type of argments. Expects a typed array as first argument."); + "Wrong type of arguments. Expects a typed array as first argument."); napi_value input_array = args[0]; bool is_typedarray; NAPI_CALL(env, napi_is_typedarray(env, input_array, &is_typedarray)); NAPI_ASSERT(env, is_typedarray, - "Wrong type of argments. Expects a typed array as first argument."); + "Wrong type of arguments. Expects a typed array as first argument."); napi_valuetype valuetype1; NAPI_CALL(env, napi_typeof(env, args[1], &valuetype1)); NAPI_ASSERT(env, valuetype1 == napi_number, - "Wrong type of argments. Expects a number as second argument."); + "Wrong type of arguments. 
Expects a number as second argument."); double multiplier; NAPI_CALL(env, napi_get_value_double(env, args[1], &multiplier)); @@ -97,37 +97,37 @@ napi_value External(napi_env env, napi_callback_info info) { } napi_value CreateTypedArray(napi_env env, napi_callback_info info) { - size_t argc = 2; - napi_value args[2]; + size_t argc = 4; + napi_value args[4]; NAPI_CALL(env, napi_get_cb_info(env, info, &argc, args, NULL, NULL)); - NAPI_ASSERT(env, argc == 2, "Wrong number of arguments"); + NAPI_ASSERT(env, argc == 2 || argc == 4, "Wrong number of arguments"); napi_value input_array = args[0]; napi_valuetype valuetype0; NAPI_CALL(env, napi_typeof(env, input_array, &valuetype0)); NAPI_ASSERT(env, valuetype0 == napi_object, - "Wrong type of argments. Expects a typed array as first argument."); + "Wrong type of arguments. Expects a typed array as first argument."); bool is_typedarray; NAPI_CALL(env, napi_is_typedarray(env, input_array, &is_typedarray)); NAPI_ASSERT(env, is_typedarray, - "Wrong type of argments. Expects a typed array as first argument."); + "Wrong type of arguments. Expects a typed array as first argument."); napi_valuetype valuetype1; napi_value input_buffer = args[1]; NAPI_CALL(env, napi_typeof(env, input_buffer, &valuetype1)); NAPI_ASSERT(env, valuetype1 == napi_object, - "Wrong type of argments. Expects an array buffer as second argument."); + "Wrong type of arguments. Expects an array buffer as second argument."); bool is_arraybuffer; NAPI_CALL(env, napi_is_arraybuffer(env, input_buffer, &is_arraybuffer)); NAPI_ASSERT(env, is_arraybuffer, - "Wrong type of argments. Expects an array buffer as second argument."); + "Wrong type of arguments. Expects an array buffer as second argument."); napi_typedarray_type type; napi_value in_array_buffer; @@ -136,6 +136,28 @@ napi_value CreateTypedArray(napi_env env, napi_callback_info info) { NAPI_CALL(env, napi_get_typedarray_info( env, input_array, &type, &length, NULL, &in_array_buffer, &byte_offset)); + if (argc == 4) { + napi_valuetype valuetype2; + NAPI_CALL(env, napi_typeof(env, args[2], &valuetype2)); + + NAPI_ASSERT(env, valuetype2 == napi_number, + "Wrong type of arguments. Expects a number as third argument."); + + uint32_t uint32_length; + NAPI_CALL(env, napi_get_value_uint32(env, args[2], &uint32_length)); + length = uint32_length; + + napi_valuetype valuetype3; + NAPI_CALL(env, napi_typeof(env, args[3], &valuetype3)); + + NAPI_ASSERT(env, valuetype3 == napi_number, + "Wrong type of arguments. Expects a number as third argument."); + + uint32_t uint32_byte_offset; + NAPI_CALL(env, napi_get_value_uint32(env, args[3], &uint32_byte_offset)); + byte_offset = uint32_byte_offset; + } + napi_value output_array; NAPI_CALL(env, napi_create_typedarray( env, type, length, input_buffer, byte_offset, &output_array)); diff --git a/test/addons-napi/test_uv_loop/test_uv_loop.cc b/test/addons-napi/test_uv_loop/test_uv_loop.cc index 44819f72bb6b9d..048e25af9ddfb3 100644 --- a/test/addons-napi/test_uv_loop/test_uv_loop.cc +++ b/test/addons-napi/test_uv_loop/test_uv_loop.cc @@ -24,6 +24,15 @@ void* SetImmediate(napi_env env, T&& cb) { assert(cb() != nullptr); }); + // Idle handle is needed only to stop the event loop from blocking in poll. 
+ uv_idle_t* idle = new uv_idle_t; + uv_idle_init(loop, idle); + uv_idle_start(idle, [](uv_idle_t* idle) { + uv_close(reinterpret_cast(idle), [](uv_handle_t* handle) { + delete reinterpret_cast(handle); + }); + }); + return nullptr; } diff --git a/test/addons/load-long-path/test.js b/test/addons/load-long-path/test.js index accb90d2638274..ee09230676b5e8 100644 --- a/test/addons/load-long-path/test.js +++ b/test/addons/load-long-path/test.js @@ -7,12 +7,13 @@ const fs = require('fs'); const path = require('path'); const assert = require('assert'); -common.refreshTmpDir(); +const tmpdir = require('../../common/tmpdir'); +tmpdir.refresh(); // make a path that is more than 260 chars long. // Any given folder cannot have a name longer than 260 characters, // so create 10 nested folders each with 30 character long names. -let addonDestinationDir = path.resolve(common.tmpDir); +let addonDestinationDir = path.resolve(tmpdir.path); for (let i = 0; i < 10; i++) { addonDestinationDir = path.join(addonDestinationDir, 'x'.repeat(30)); diff --git a/test/addons/openssl-binding/binding.gyp b/test/addons/openssl-binding/binding.gyp index 425b38caa3f659..3bfe93350fe3bf 100644 --- a/test/addons/openssl-binding/binding.gyp +++ b/test/addons/openssl-binding/binding.gyp @@ -1,8 +1,4 @@ { - 'includes': ['../../../config.gypi'], - 'variables': { - 'node_target_type%': '', - }, 'targets': [ { 'target_name': 'binding', @@ -10,13 +6,6 @@ ['node_use_openssl=="true"', { 'sources': ['binding.cc'], 'include_dirs': ['../../../deps/openssl/openssl/include'], - 'conditions': [ - ['OS=="win" and node_target_type=="static_library"', { - 'libraries': [ - '../../../../$(Configuration)/lib/<(OPENSSL_PRODUCT)' - ], - }], - ], }] ] }, diff --git a/test/addons/symlinked-module/test.js b/test/addons/symlinked-module/test.js index d9455c027bd36b..53306399cb520b 100644 --- a/test/addons/symlinked-module/test.js +++ b/test/addons/symlinked-module/test.js @@ -12,10 +12,11 @@ const assert = require('assert'); // This test should pass in Node.js v4 and v5. This test will pass in Node.js // with https://github.com/nodejs/node/pull/5950 reverted. -common.refreshTmpDir(); +const tmpdir = require('../../common/tmpdir'); +tmpdir.refresh(); const addonPath = path.join(__dirname, 'build', common.buildType); -const addonLink = path.join(common.tmpDir, 'addon'); +const addonLink = path.join(tmpdir.path, 'addon'); try { fs.symlinkSync(addonPath, addonLink); diff --git a/test/addons/zlib-binding/binding.gyp b/test/addons/zlib-binding/binding.gyp index 24c3ae78a2440a..60a9bb82661820 100644 --- a/test/addons/zlib-binding/binding.gyp +++ b/test/addons/zlib-binding/binding.gyp @@ -1,22 +1,9 @@ { - 'includes': ['../../../config.gypi'], - 'variables': { - 'node_target_type%': '', - }, 'targets': [ { 'target_name': 'binding', 'sources': ['binding.cc'], 'include_dirs': ['../../../deps/zlib'], - 'conditions': [ - ['node_target_type=="static_library"', { - 'conditions': [ - ['OS=="win"', { - 'libraries': ['../../../../$(Configuration)/lib/zlib.lib'], - }], - ], - }], - ], }, ] } diff --git a/test/async-hooks/hook-checks.js b/test/async-hooks/hook-checks.js index 60f505a24a95de..2abed61555a158 100644 --- a/test/async-hooks/hook-checks.js +++ b/test/async-hooks/hook-checks.js @@ -11,7 +11,7 @@ require('../common'); * @param {Object} activity including timestamps for each life time event, * i.e. init, before ... 
* @param {Object} hooks the expected life time event invocations with a count - * indicating how oftn they should have been invoked, + * indicating how often they should have been invoked, * i.e. `{ init: 1, before: 2, after: 2 }` * @param {String} stage the name of the stage in the test at which we are * checking the invocations diff --git a/test/async-hooks/test-callback-error.js b/test/async-hooks/test-callback-error.js index 09eb2e0b478a6e..e50e069d64af24 100644 --- a/test/async-hooks/test-callback-error.js +++ b/test/async-hooks/test-callback-error.js @@ -94,7 +94,7 @@ assert.ok(!arg); assert.strictEqual(code, null); // most posix systems will show 'SIGABRT', but alpine34 does not if (signal !== 'SIGABRT') { - console.log(`parent recived signal ${signal}\nchild's stderr:`); + console.log(`parent received signal ${signal}\nchild's stderr:`); console.log(stderr); process.exit(1); } diff --git a/test/async-hooks/test-graph.pipeconnect.js b/test/async-hooks/test-graph.pipeconnect.js index b3ea5c6e4219e9..03d2902c835d48 100644 --- a/test/async-hooks/test-graph.pipeconnect.js +++ b/test/async-hooks/test-graph.pipeconnect.js @@ -6,7 +6,8 @@ const verifyGraph = require('./verify-graph'); const net = require('net'); -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); const hooks = initHooks(); hooks.enable(); diff --git a/test/async-hooks/test-graph.tcp.js b/test/async-hooks/test-graph.tcp.js index c2458ef1def769..c2253486ee651e 100644 --- a/test/async-hooks/test-graph.tcp.js +++ b/test/async-hooks/test-graph.tcp.js @@ -15,7 +15,7 @@ const server = net .createServer(common.mustCall(onconnection)) .on('listening', common.mustCall(onlistening)); -server.listen(common.PORT); +server.listen(0); net.connect({ port: server.address().port, host: '::1' }, common.mustCall(onconnected)); diff --git a/test/async-hooks/test-graph.tls-write.js b/test/async-hooks/test-graph.tls-write.js index 0c725d153d731b..26fe1ce41e955c 100644 --- a/test/async-hooks/test-graph.tls-write.js +++ b/test/async-hooks/test-graph.tls-write.js @@ -25,14 +25,14 @@ const server = tls }) .on('listening', common.mustCall(onlistening)) .on('secureConnection', common.mustCall(onsecureConnection)) - .listen(common.PORT); + .listen(0); function onlistening() { // // Creating client and connecting it to server // tls - .connect(common.PORT, { rejectUnauthorized: false }) + .connect(server.address().port, { rejectUnauthorized: false }) .on('secureConnect', common.mustCall(onsecureConnect)); } diff --git a/test/async-hooks/test-internal-nexttick-default-trigger.js b/test/async-hooks/test-internal-nexttick-default-trigger.js index ad352a8c147247..ed541868542206 100644 --- a/test/async-hooks/test-internal-nexttick-default-trigger.js +++ b/test/async-hooks/test-internal-nexttick-default-trigger.js @@ -3,7 +3,7 @@ const common = require('../common'); // This tests ensures that the triggerId of both the internal and external -// nexTick function sets the triggerAsyncId correctly. +// nextTick function sets the triggerAsyncId correctly. 
const assert = require('assert'); const async_hooks = require('async_hooks'); diff --git a/test/async-hooks/test-pipeconnectwrap.js b/test/async-hooks/test-pipeconnectwrap.js index 81a5abd42a3004..df4b8110e67ec4 100644 --- a/test/async-hooks/test-pipeconnectwrap.js +++ b/test/async-hooks/test-pipeconnectwrap.js @@ -8,7 +8,8 @@ const { checkInvocations } = require('./hook-checks'); const net = require('net'); -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); const hooks = initHooks(); hooks.enable(); @@ -53,7 +54,7 @@ function onlisten() { const awaitOnconnectCalls = new Set(['server', 'client']); function maybeOnconnect(source) { // both server and client must call onconnect. On most OS's waiting for - // the client is sufficient, but on CertOS 5 the sever needs to respond too. + // the client is sufficient, but on CentOS 5 the sever needs to respond too. assert.ok(awaitOnconnectCalls.size > 0); awaitOnconnectCalls.delete(source); if (awaitOnconnectCalls.size > 0) return; diff --git a/test/async-hooks/test-tcpwrap.js b/test/async-hooks/test-tcpwrap.js index e7d879caf70551..f3100aba0f0aaa 100644 --- a/test/async-hooks/test-tcpwrap.js +++ b/test/async-hooks/test-tcpwrap.js @@ -24,7 +24,7 @@ const server = net // Calling server.listen creates a TCPWRAP synchronously { - server.listen(common.PORT); + server.listen(0); const tcpsservers = hooks.activitiesOfTypes('TCPSERVERWRAP'); const tcpconnects = hooks.activitiesOfTypes('TCPCONNECTWRAP'); assert.strictEqual(tcpsservers.length, 1); diff --git a/test/async-hooks/test-writewrap.js b/test/async-hooks/test-writewrap.js index 65f7b6175fb63a..d349f635665ddd 100644 --- a/test/async-hooks/test-writewrap.js +++ b/test/async-hooks/test-writewrap.js @@ -23,7 +23,7 @@ const server = tls }) .on('listening', common.mustCall(onlistening)) .on('secureConnection', common.mustCall(onsecureConnection)) - .listen(common.PORT); + .listen(0); assert.strictEqual(hooks.activitiesOfTypes('WRITEWRAP').length, 0); @@ -33,7 +33,7 @@ function onlistening() { // Creating client and connecting it to server // tls - .connect(common.PORT, { rejectUnauthorized: false }) + .connect(server.address().port, { rejectUnauthorized: false }) .on('secureConnect', common.mustCall(onsecureConnect)); assert.strictEqual(hooks.activitiesOfTypes('WRITEWRAP').length, 0); diff --git a/test/async-hooks/verify-graph.js b/test/async-hooks/verify-graph.js index 451550d33fc8e1..95c55c15958b2c 100644 --- a/test/async-hooks/verify-graph.js +++ b/test/async-hooks/verify-graph.js @@ -32,7 +32,7 @@ function pruneTickObjects(activities) { foundTickObject = true; // point all triggerAsyncIds that point to the tickObject - // to its triggerAsyncId and findally remove it from the activities + // to its triggerAsyncId and finally remove it from the activities const tickObject = activities[tickObjectIdx]; const newTriggerId = tickObject.triggerAsyncId; const oldTriggerId = tickObject.uid; diff --git a/test/cctest/node_test_fixture.h b/test/cctest/node_test_fixture.h index 9de8d9e2f62ee2..f6f851973bb778 100644 --- a/test/cctest/node_test_fixture.h +++ b/test/cctest/node_test_fixture.h @@ -61,10 +61,6 @@ class NodeTestFixture : public ::testing::Test { protected: v8::Isolate* isolate_; - ~NodeTestFixture() { - TearDown(); - } - virtual void SetUp() { CHECK_EQ(0, uv_loop_init(¤t_loop)); platform_ = new node::NodePlatform(8, ¤t_loop, nullptr); @@ -76,7 +72,6 @@ class NodeTestFixture : public ::testing::Test { } virtual void TearDown() { - if (platform_ == nullptr) return; 
platform_->Shutdown(); while (uv_loop_alive(¤t_loop)) { uv_run(¤t_loop, UV_RUN_ONCE); diff --git a/test/cctest/test_aliased_buffer.cc b/test/cctest/test_aliased_buffer.cc index 8dfb02c181cffe..0eaddf773555db 100644 --- a/test/cctest/test_aliased_buffer.cc +++ b/test/cctest/test_aliased_buffer.cc @@ -5,16 +5,7 @@ using node::AliasedBuffer; -class AliasBufferTest : public NodeTestFixture { - protected: - void SetUp() override { - NodeTestFixture::SetUp(); - } - - void TearDown() override { - NodeTestFixture::TearDown(); - } -}; +class AliasBufferTest : public NodeTestFixture {}; template void CreateOracleValues(NativeT* buf, size_t count) { diff --git a/test/common/README.md b/test/common/README.md index 4026dc2de46b49..236b7e5515f042 100644 --- a/test/common/README.md +++ b/test/common/README.md @@ -10,6 +10,9 @@ This directory contains modules used to test the Node.js implementation. * [DNS module](#dns-module) * [Duplex pair helper](#duplex-pair-helper) * [Fixtures module](#fixtures-module) +* [HTTP2 module](#http2-module) +* [Internet module](#internet-module) +* [tmpdir module](#tmpdir-module) * [WPT module](#wpt-module) ## Benchmark Module @@ -259,6 +262,17 @@ fail. If `fn` is not provided, an empty function will be used. +### mustCallAsync([fn][, exact]) +* `fn` [<Function>] +* `exact` [<Number>] default = 1 +* return [<Function>] + +The same as `mustCall()`, except that it is also checked that the Promise +returned by the function is fulfilled for each invocation of the function. + +The return value of the wrapped function is the return value of the original +function, if necessary wrapped as a promise. + ### mustCallAtLeast([fn][, minimum]) * `fn` [<Function>] default = () => {} * `minimum` [<Number>] default = 1 @@ -312,11 +326,6 @@ A port number for tests to use if one is needed. Logs '1..0 # Skipped: ' + `msg` -### refreshTmpDir() -* return [<String>] - -Deletes the testing 'tmp' directory and recreates it. - ### restoreStderr() Restore the original `process.stderr.write`. Used to restore `stderr` to its @@ -369,11 +378,6 @@ Platform normalizes the `pwd` command. Synchronous version of `spawnPwd`. -### tmpDir -* [<String>] - -The realpath of the 'tmp' directory. - ## Countdown Module The `Countdown` module provides a simple countdown mechanism for tests that @@ -412,7 +416,26 @@ called before the callback is invoked. ## DNS Module -The `DNS` module provides a naïve DNS parser/serializer. +The `DNS` module provides utilities related to the `dns` built-in module. + +### errorLookupMock(code, syscall) + +* `code` [<String>] Defaults to `dns.mockedErrorCode`. +* `syscall` [<String>] Defaults to `dns.mockedSysCall`. +* return [<Function>] + + +A mock for the `lookup` option of `net.connect()` that would result in an error +with the `code` and the `syscall` specified. Returns a function that has the +same signature as `dns.lookup()`. + +### mockedErrorCode + +The default `code` of errors generated by `errorLookupMock`. + +### mockedSysCall + +The default `syscall` of errors generated by `errorLookupMock`. ### readDomainFromPacket(buffer, offset) @@ -492,6 +515,151 @@ Returns the result of Returns the result of `fs.readFileSync(path.join(fixtures.fixturesDir, 'keys', arg), 'enc')`. 
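+For example, a test might read one of the bundled PEM keys roughly as follows.
+This is only a sketch: the `agent1-key.pem` file name and the relative require
+path assume a test file sitting one directory below `test/`.
+
+```js
+const fixtures = require('../common/fixtures');
+
+// Reads test/fixtures/keys/agent1-key.pem as UTF-8 text.
+const key = fixtures.readKey('agent1-key.pem', 'utf8');
+```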
+## HTTP/2 Module + +The http2.js module provides a handful of utilities for creating mock HTTP/2 +frames for testing of HTTP/2 endpoints + + +```js +const http2 = require('../common/http2'); +``` + +### Class: Frame + +The `http2.Frame` is a base class that creates a `Buffer` containing a +serialized HTTP/2 frame header. + + +```js +// length is a 24-bit unsigned integer +// type is an 8-bit unsigned integer identifying the frame type +// flags is an 8-bit unsigned integer containing the flag bits +// id is the 32-bit stream identifier, if any. +const frame = new http2.Frame(length, type, flags, id); + +// Write the frame data to a socket +socket.write(frame.data); +``` + +The serialized `Buffer` may be retrieved using the `frame.data` property. + +### Class: DataFrame extends Frame + +The `http2.DataFrame` is a subclass of `http2.Frame` that serializes a `DATA` +frame. + + +```js +// id is the 32-bit stream identifier +// payload is a Buffer containing the DATA payload +// padlen is an 8-bit integer giving the number of padding bytes to include +// final is a boolean indicating whether the End-of-stream flag should be set, +// defaults to false. +const frame = new http2.DataFrame(id, payload, padlen, final); + +socket.write(frame.data); +``` + +### Class: HeadersFrame + +The `http2.HeadersFrame` is a subclass of `http2.Frame` that serializes a +`HEADERS` frame. + + +```js +// id is the 32-bit stream identifier +// payload is a Buffer containing the HEADERS payload (see either +// http2.kFakeRequestHeaders or http2.kFakeResponseHeaders). +// padlen is an 8-bit integer giving the number of padding bytes to include +// final is a boolean indicating whether the End-of-stream flag should be set, +// defaults to false. +const frame = new http2.HeadersFrame(id, payload, padlen, final); + +socket.write(frame.data); +``` + +### Class: SettingsFrame + +The `http2.SettingsFrame` is a subclass of `http2.Frame` that serializes an +empty `SETTINGS` frame. + + +```js +// ack is a boolean indicating whether or not to set the ACK flag. +const frame = new http2.SettingsFrame(ack); + +socket.write(frame.data); +``` + +### http2.kFakeRequestHeaders + +Set to a `Buffer` instance that contains a minimal set of serialized HTTP/2 +request headers to be used as the payload of a `http2.HeadersFrame`. + + +```js +const frame = new http2.HeadersFrame(1, http2.kFakeRequestHeaders, 0, true); + +socket.write(frame.data); +``` + +### http2.kFakeResponseHeaders + +Set to a `Buffer` instance that contains a minimal set of serialized HTTP/2 +response headers to be used as the payload a `http2.HeadersFrame`. + + +```js +const frame = new http2.HeadersFrame(1, http2.kFakeResponseHeaders, 0, true); + +socket.write(frame.data); +``` + +### http2.kClientMagic + +Set to a `Buffer` containing the preamble bytes an HTTP/2 client must send +upon initial establishment of a connection. + + +```js +socket.write(http2.kClientMagic); +``` + +## Internet Module + +The `common/internet` module provides utilities for working with +internet-related tests. 
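+As a rough sketch, a DNS test could use one of the configurable hosts from the
+`internet.addresses` list documented below instead of hard-coding a name (the
+callback body here is illustrative):
+
+```js
+const { addresses } = require('../common/internet');
+const dns = require('dns');
+
+// Resolve the configurable IPv4 test host rather than a hard-coded hostname.
+dns.resolve4(addresses.INET4_HOST, (err, ips) => {
+  if (err) throw err;
+  console.log(ips);
+});
+```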
+ +### internet.addresses + +* [<Object>] + * `INET_HOST` [<String>] A generic host that has registered common + DNS records, supports both IPv4 and IPv6, and provides basic HTTP/HTTPS + services + * `INET4_HOST` [<String>] A host that provides IPv4 services + * `INET6_HOST` [<String>] A host that provides IPv6 services + * `INET4_IP` [<String>] An accessible IPv4 IP, defaults to the + Google Public DNS IPv4 address + * `INET6_IP` [<String>] An accessible IPv6 IP, defaults to the + Google Public DNS IPv6 address + * `INVALID_HOST` [<String>] An invalid host that cannot be resolved + * `MX_HOST` [<String>] A host with MX records registered + * `SRV_HOST` [<String>] A host with SRV records registered + * `PTR_HOST` [<String>] A host with PTR records registered + * `NAPTR_HOST` [<String>] A host with NAPTR records registered + * `SOA_HOST` [<String>] A host with SOA records registered + * `CNAME_HOST` [<String>] A host with CNAME records registered + * `NS_HOST` [<String>] A host with NS records registered + * `TXT_HOST` [<String>] A host with TXT records registered + * `DNS4_SERVER` [<String>] An accessible IPv4 DNS server + * `DNS6_SERVER` [<String>] An accessible IPv6 DNS server + +A set of addresses for internet-related tests. All properties are configurable +via `NODE_TEST_*` environment variables. For example, to configure +`internet.addresses.INET_HOST`, set the environment +variable `NODE_TEST_INET_HOST` to a specified host. + ## WPT Module The wpt.js module is a port of parts of @@ -501,6 +669,7 @@ Node.js implementation with tests from [W3C Web Platform Tests](https://github.com/w3c/web-platform-tests). + [<Array>]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Array [<ArrayBufferView[]>]: https://developer.mozilla.org/en-US/docs/Web/API/ArrayBufferView [<Boolean>]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Data_structures#Boolean_type diff --git a/test/common/dns.js b/test/common/dns.js index 6ab3fcbc91278b..69c67ac541cf98 100644 --- a/test/common/dns.js +++ b/test/common/dns.js @@ -1,8 +1,6 @@ /* eslint-disable required-modules */ 'use strict'; -// Naïve DNS parser/serializer. - const assert = require('assert'); const os = require('os'); @@ -22,6 +20,8 @@ const classes = { IN: 1 }; +// Naïve DNS parser/serializer. + function readDomainFromPacket(buffer, offset) { assert.ok(offset < buffer.length); const length = buffer[offset]; @@ -287,4 +287,26 @@ function writeDNSPacket(parsed) { })); } -module.exports = { types, classes, writeDNSPacket, parseDNSPacket }; +const mockedErrorCode = 'ENOTFOUND'; +const mockedSysCall = 'getaddrinfo'; + +function errorLookupMock(code = mockedErrorCode, syscall = mockedSysCall) { + return function lookupWithError(host, dnsopts, cb) { + const err = new Error(`${syscall} ${code} ${host}`); + err.code = code; + err.errno = code; + err.syscall = syscall; + err.hostname = host; + cb(err); + }; +} + +module.exports = { + types, + classes, + writeDNSPacket, + parseDNSPacket, + errorLookupMock, + mockedErrorCode, + mockedSysCall +}; diff --git a/test/common/http2.js b/test/common/http2.js new file mode 100644 index 00000000000000..1d4c269fffd5b5 --- /dev/null +++ b/test/common/http2.js @@ -0,0 +1,139 @@ +/* eslint-disable required-modules */ +'use strict'; + +// An HTTP/2 testing tool used to create mock frames for direct testing +// of HTTP/2 endpoints. 
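+// A rough usage sketch, mirroring the examples in test/common/README.md
+// (`socket` stands for a plain net.Socket obtained elsewhere in the test):
+//
+//   const http2 = require('../common/http2');
+//   socket.write(http2.kClientMagic);
+//   socket.write(new http2.SettingsFrame().data);
+//   socket.write(
+//     new http2.HeadersFrame(1, http2.kFakeRequestHeaders, 0, true).data);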
+ +const kFrameData = Symbol('frame-data'); +const FLAG_EOS = 0x1; +const FLAG_ACK = 0x1; +const FLAG_EOH = 0x4; +const FLAG_PADDED = 0x8; +const PADDING = Buffer.alloc(255); + +const kClientMagic = Buffer.from('505249202a20485454502f322' + + 'e300d0a0d0a534d0d0a0d0a', 'hex'); + +const kFakeRequestHeaders = Buffer.from('828684410f7777772e65' + + '78616d706c652e636f6d', 'hex'); + + +const kFakeResponseHeaders = Buffer.from('4803333032580770726976617465611d' + + '4d6f6e2c203231204f63742032303133' + + '2032303a31333a323120474d546e1768' + + '747470733a2f2f7777772e6578616d70' + + '6c652e636f6d', 'hex'); + +function isUint32(val) { + return val >>> 0 === val; +} + +function isUint24(val) { + return val >>> 0 === val && val <= 0xFFFFFF; +} + +function isUint8(val) { + return val >>> 0 === val && val <= 0xFF; +} + +function write32BE(array, pos, val) { + if (!isUint32(val)) + throw new RangeError('val is not a 32-bit number'); + array[pos++] = (val >> 24) & 0xff; + array[pos++] = (val >> 16) & 0xff; + array[pos++] = (val >> 8) & 0xff; + array[pos++] = val & 0xff; +} + +function write24BE(array, pos, val) { + if (!isUint24(val)) + throw new RangeError('val is not a 24-bit number'); + array[pos++] = (val >> 16) & 0xff; + array[pos++] = (val >> 8) & 0xff; + array[pos++] = val & 0xff; +} + +function write8(array, pos, val) { + if (!isUint8(val)) + throw new RangeError('val is not an 8-bit number'); + array[pos] = val; +} + +class Frame { + constructor(length, type, flags, id) { + this[kFrameData] = Buffer.alloc(9); + write24BE(this[kFrameData], 0, length); + write8(this[kFrameData], 3, type); + write8(this[kFrameData], 4, flags); + write32BE(this[kFrameData], 5, id); + } + + get data() { + return this[kFrameData]; + } +} + +class SettingsFrame extends Frame { + constructor(ack = false) { + let flags = 0; + if (ack) + flags |= FLAG_ACK; + super(0, 4, flags, 0); + } +} + +class DataFrame extends Frame { + constructor(id, payload, padlen = 0, final = false) { + let len = payload.length; + let flags = 0; + if (final) flags |= FLAG_EOS; + const buffers = [payload]; + if (padlen > 0) { + buffers.unshift(Buffer.from([padlen])); + buffers.push(PADDING.slice(0, padlen)); + len += padlen + 1; + flags |= FLAG_PADDED; + } + super(len, 0, flags, id); + buffers.unshift(this[kFrameData]); + this[kFrameData] = Buffer.concat(buffers); + } +} + +class HeadersFrame extends Frame { + constructor(id, payload, padlen = 0, final = false) { + let len = payload.length; + let flags = FLAG_EOH; + if (final) flags |= FLAG_EOS; + const buffers = [payload]; + if (padlen > 0) { + buffers.unshift(Buffer.from([padlen])); + buffers.push(PADDING.slice(0, padlen)); + len += padlen + 1; + flags |= FLAG_PADDED; + } + super(len, 1, flags, id); + buffers.unshift(this[kFrameData]); + this[kFrameData] = Buffer.concat(buffers); + } +} + +class PingFrame extends Frame { + constructor(ack = false) { + const buffers = [Buffer.alloc(8)]; + super(8, 6, ack ? 
1 : 0, 0); + buffers.unshift(this[kFrameData]); + this[kFrameData] = Buffer.concat(buffers); + } +} + +module.exports = { + Frame, + DataFrame, + HeadersFrame, + SettingsFrame, + PingFrame, + kFakeRequestHeaders, + kFakeResponseHeaders, + kClientMagic +}; diff --git a/test/common/index.js b/test/common/index.js index 5bebdcedff60ef..cb82cd6a93ee3a 100644 --- a/test/common/index.js +++ b/test/common/index.js @@ -30,15 +30,10 @@ const stream = require('stream'); const util = require('util'); const Timer = process.binding('timer_wrap').Timer; const { fixturesDir } = require('./fixtures'); - -const testRoot = process.env.NODE_TEST_DIR ? - fs.realpathSync(process.env.NODE_TEST_DIR) : path.resolve(__dirname, '..'); +const tmpdir = require('./tmpdir'); const noop = () => {}; -// Using a `.` prefixed name, which is the convention for "hidden" on POSIX, -// gets tools to ignore it by default or by simple rules, especially eslint. -let tmpDirName = '.tmp'; // PORT should match the definition in test/testpy/__init__.py. exports.PORT = +process.env.NODE_COMMON_PORT || 12346; exports.isWindows = process.platform === 'win32'; @@ -50,6 +45,7 @@ exports.isLinuxPPCBE = (process.platform === 'linux') && (os.endianness() === 'BE'); exports.isSunOS = process.platform === 'sunos'; exports.isFreeBSD = process.platform === 'freebsd'; +exports.isOpenBSD = process.platform === 'openbsd'; exports.isLinux = process.platform === 'linux'; exports.isOSX = process.platform === 'darwin'; @@ -82,7 +78,7 @@ if (process.env.NODE_TEST_WITH_ASYNC_HOOKS) { const async_wrap = process.binding('async_wrap'); process.on('exit', () => { - // itterate through handles to make sure nothing crashes + // iterate through handles to make sure nothing crashes for (const k in initHandles) util.inspect(initHandles[k]); }); @@ -121,63 +117,6 @@ if (process.env.NODE_TEST_WITH_ASYNC_HOOKS) { }).enable(); } -function rimrafSync(p) { - let st; - try { - st = fs.lstatSync(p); - } catch (e) { - if (e.code === 'ENOENT') - return; - } - - try { - if (st && st.isDirectory()) - rmdirSync(p, null); - else - fs.unlinkSync(p); - } catch (e) { - if (e.code === 'ENOENT') - return; - if (e.code === 'EPERM') - return rmdirSync(p, e); - if (e.code !== 'EISDIR') - throw e; - rmdirSync(p, e); - } -} - -function rmdirSync(p, originalEr) { - try { - fs.rmdirSync(p); - } catch (e) { - if (e.code === 'ENOTDIR') - throw originalEr; - if (e.code === 'ENOTEMPTY' || e.code === 'EEXIST' || e.code === 'EPERM') { - const enc = exports.isLinux ? 'buffer' : 'utf8'; - fs.readdirSync(p, enc).forEach((f) => { - if (f instanceof Buffer) { - const buf = Buffer.concat([Buffer.from(p), Buffer.from(path.sep), f]); - rimrafSync(buf); - } else { - rimrafSync(path.join(p, f)); - } - }); - fs.rmdirSync(p); - } - } -} - -exports.refreshTmpDir = function() { - rimrafSync(exports.tmpDir); - fs.mkdirSync(exports.tmpDir); -}; - -if (process.env.TEST_THREAD_ID) { - exports.PORT += process.env.TEST_THREAD_ID * 100; - tmpDirName += `.${process.env.TEST_THREAD_ID}`; -} -exports.tmpDir = path.join(testRoot, tmpDirName); - let opensslCli = null; let inFreeBSDJail = null; let localhostIPv4 = null; @@ -271,7 +210,7 @@ Object.defineProperty(exports, 'hasFipsCrypto', { }); { - const localRelative = path.relative(process.cwd(), `${exports.tmpDir}/`); + const localRelative = path.relative(process.cwd(), `${tmpdir.path}/`); const pipePrefix = exports.isWindows ? 
'\\\\.\\pipe\\' : localRelative; const pipeName = `node-test.${process.pid}.sock`; exports.PIPE = path.join(pipePrefix, pipeName); @@ -503,6 +442,12 @@ exports.mustCallAtLeast = function(fn, minimum) { return _mustCallInner(fn, minimum, 'minimum'); }; +exports.mustCallAsync = function(fn, exact) { + return exports.mustCall((...args) => { + return Promise.resolve(fn(...args)).then(exports.mustCall((val) => val)); + }, exact); +}; + function _mustCallInner(fn, criteria = 1, field) { if (process._exiting) throw new Error('Cannot use common.mustCall*() in process exit handler'); diff --git a/test/common/internet.js b/test/common/internet.js new file mode 100644 index 00000000000000..48b532ca8e6606 --- /dev/null +++ b/test/common/internet.js @@ -0,0 +1,54 @@ +/* eslint-disable required-modules */ +'use strict'; + +// Utilities for internet-related tests + +const addresses = { + // A generic host that has registered common DNS records, + // supports both IPv4 and IPv6, and provides basic HTTP/HTTPS services + INET_HOST: 'nodejs.org', + // A host that provides IPv4 services + INET4_HOST: 'nodejs.org', + // A host that provides IPv6 services + INET6_HOST: 'nodejs.org', + // An accessible IPv4 IP, + // defaults to the Google Public DNS IPv4 address + INET4_IP: '8.8.8.8', + // An accessible IPv6 IP, + // defaults to the Google Public DNS IPv6 address + INET6_IP: '2001:4860:4860::8888', + // An invalid host that cannot be resolved + // See https://tools.ietf.org/html/rfc2606#section-2 + INVALID_HOST: 'something.invalid', + // A host with MX records registered + MX_HOST: 'nodejs.org', + // A host with SRV records registered + SRV_HOST: '_jabber._tcp.google.com', + // A host with PTR records registered + PTR_HOST: '8.8.8.8.in-addr.arpa', + // A host with NAPTR records registered + NAPTR_HOST: 'sip2sip.info', + // A host with SOA records registered + SOA_HOST: 'nodejs.org', + // A host with CNAME records registered + CNAME_HOST: 'blog.nodejs.org', + // A host with NS records registered + NS_HOST: 'nodejs.org', + // A host with TXT records registered + TXT_HOST: 'nodejs.org', + // An accessible IPv4 DNS server + DNS4_SERVER: '8.8.8.8', + // An accessible IPv4 DNS server + DNS6_SERVER: '2001:4860:4860::8888' +}; + +for (const key of Object.keys(addresses)) { + const envName = `NODE_TEST_${key}`; + if (process.env[envName]) { + addresses[key] = process.env[envName]; + } +} + +module.exports = { + addresses +}; diff --git a/test/common/tmpdir.js b/test/common/tmpdir.js new file mode 100644 index 00000000000000..ed731b3e7a1ffb --- /dev/null +++ b/test/common/tmpdir.js @@ -0,0 +1,67 @@ +/* eslint-disable required-modules */ +'use strict'; + +const fs = require('fs'); +const path = require('path'); + +function rimrafSync(p) { + let st; + try { + st = fs.lstatSync(p); + } catch (e) { + if (e.code === 'ENOENT') + return; + } + + try { + if (st && st.isDirectory()) + rmdirSync(p, null); + else + fs.unlinkSync(p); + } catch (e) { + if (e.code === 'ENOENT') + return; + if (e.code === 'EPERM') + return rmdirSync(p, e); + if (e.code !== 'EISDIR') + throw e; + rmdirSync(p, e); + } +} + +function rmdirSync(p, originalEr) { + try { + fs.rmdirSync(p); + } catch (e) { + if (e.code === 'ENOTDIR') + throw originalEr; + if (e.code === 'ENOTEMPTY' || e.code === 'EEXIST' || e.code === 'EPERM') { + const enc = process.platform === 'linux' ? 
'buffer' : 'utf8'; + fs.readdirSync(p, enc).forEach((f) => { + if (f instanceof Buffer) { + const buf = Buffer.concat([Buffer.from(p), Buffer.from(path.sep), f]); + rimrafSync(buf); + } else { + rimrafSync(path.join(p, f)); + } + }); + fs.rmdirSync(p); + } + } +} + +const testRoot = process.env.NODE_TEST_DIR ? + fs.realpathSync(process.env.NODE_TEST_DIR) : path.resolve(__dirname, '..'); + +// Using a `.` prefixed name, which is the convention for "hidden" on POSIX, +// gets tools to ignore it by default or by simple rules, especially eslint. +let tmpdirName = '.tmp'; +if (process.env.TEST_THREAD_ID) { + tmpdirName += `.${process.env.TEST_THREAD_ID}`; +} +exports.path = path.join(testRoot, tmpdirName); + +exports.refresh = () => { + rimrafSync(exports.path); + fs.mkdirSync(exports.path); +}; diff --git a/test/es-module/test-esm-preserve-symlinks.js b/test/es-module/test-esm-preserve-symlinks.js index eea5bf061b2fa3..e8473c36473bd2 100644 --- a/test/es-module/test-esm-preserve-symlinks.js +++ b/test/es-module/test-esm-preserve-symlinks.js @@ -7,8 +7,9 @@ const assert = require('assert'); const path = require('path'); const fs = require('fs'); -common.refreshTmpDir(); -const tmpDir = common.tmpDir; +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); +const tmpDir = tmpdir.path; const entry = path.join(tmpDir, 'entry.js'); const real = path.join(tmpDir, 'real.js'); diff --git a/test/es-module/test-esm-symlink.js b/test/es-module/test-esm-symlink.js index 3b7d689bf8f5f2..074230ac06c4b5 100644 --- a/test/es-module/test-esm-symlink.js +++ b/test/es-module/test-esm-symlink.js @@ -6,8 +6,9 @@ const assert = require('assert'); const path = require('path'); const fs = require('fs'); -common.refreshTmpDir(); -const tmpDir = common.tmpDir; +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); +const tmpDir = tmpdir.path; const entry = path.join(tmpDir, 'entry.mjs'); const real = path.join(tmpDir, 'index.mjs'); diff --git a/test/fixtures/loop.js b/test/fixtures/loop.js index 461fb393583e68..1f093bdf574660 100644 --- a/test/fixtures/loop.js +++ b/test/fixtures/loop.js @@ -4,7 +4,7 @@ console.log('A message', 5); while (t > 0) { if (t++ === 1000) { t = 0; - console.log(`Outputed message #${k++}`); + console.log(`Outputted message #${k++}`); } } process.exit(55); diff --git a/test/fixtures/net-fd-passing-receiver.js b/test/fixtures/net-fd-passing-receiver.js index 8559f116c54956..7d328ac28c9029 100644 --- a/test/fixtures/net-fd-passing-receiver.js +++ b/test/fixtures/net-fd-passing-receiver.js @@ -45,7 +45,7 @@ receiver = net.createServer(function(socket) { }); }); -/* To signal the test runne we're up and listening */ +/* To signal the test runner we're up and listening */ receiver.on('listening', function() { console.log('ready'); }); diff --git a/test/internet/test-dns-any.js b/test/internet/test-dns-any.js index dd80e48bf44e91..a83040801f38f4 100644 --- a/test/internet/test-dns-any.js +++ b/test/internet/test-dns-any.js @@ -59,9 +59,6 @@ const checkers = { checkTXT(r) { assert.ok(Array.isArray(r.entries)); assert.ok(r.entries.length > 0); - r.entries.forEach((txt) => { - assert(txt.startsWith('v=spf1')); - }); assert.strictEqual(r.type, 'TXT'); }, checkSOA(r) { diff --git a/test/internet/test-dns-cares-domains.js b/test/internet/test-dns-cares-domains.js index 62c1847ea29adf..6609758a7daf17 100644 --- a/test/internet/test-dns-cares-domains.js +++ b/test/internet/test-dns-cares-domains.js @@ -1,5 +1,6 @@ 'use strict'; -require('../common'); +const common = require('../common'); 
+const { addresses } = require('../common/internet'); const assert = require('assert'); const dns = require('dns'); const domain = require('domain'); @@ -20,8 +21,8 @@ const methods = [ methods.forEach(function(method) { const d = domain.create(); d.run(function() { - dns[method]('google.com', function() { + dns[method](addresses.INET_HOST, common.mustCall(() => { assert.strictEqual(process.domain, d, `${method} retains domain`); - }); + })); }); }); diff --git a/test/internet/test-dns-ipv4.js b/test/internet/test-dns-ipv4.js index d3d5ba22009675..4c6e0ae6865e3f 100644 --- a/test/internet/test-dns-ipv4.js +++ b/test/internet/test-dns-ipv4.js @@ -1,5 +1,6 @@ 'use strict'; const common = require('../common'); +const { addresses } = require('../common/internet'); const assert = require('assert'); const dns = require('dns'); const net = require('net'); @@ -38,68 +39,72 @@ function checkWrap(req) { } TEST(function test_resolve4(done) { - const req = dns.resolve4('www.google.com', - common.mustCall((err, ips) => { - assert.ifError(err); + const req = dns.resolve4( + addresses.INET4_HOST, + common.mustCall((err, ips) => { + assert.ifError(err); - assert.ok(ips.length > 0); + assert.ok(ips.length > 0); - for (let i = 0; i < ips.length; i++) { - assert.ok(isIPv4(ips[i])); - } + for (let i = 0; i < ips.length; i++) { + assert.ok(isIPv4(ips[i])); + } - done(); - })); + done(); + })); checkWrap(req); }); TEST(function test_reverse_ipv4(done) { - const req = dns.reverse('8.8.8.8', - common.mustCall((err, domains) => { - assert.ifError(err); + const req = dns.reverse( + addresses.INET4_IP, + common.mustCall((err, domains) => { + assert.ifError(err); - assert.ok(domains.length > 0); + assert.ok(domains.length > 0); - for (let i = 0; i < domains.length; i++) { - assert.ok(domains[i]); - assert.ok(typeof domains[i] === 'string'); - } + for (let i = 0; i < domains.length; i++) { + assert.ok(domains[i]); + assert.ok(typeof domains[i] === 'string'); + } - done(); - })); + done(); + })); checkWrap(req); }); TEST(function test_lookup_ipv4_explicit(done) { - const req = dns.lookup('www.google.com', 4, - common.mustCall((err, ip, family) => { - assert.ifError(err); - assert.ok(net.isIPv4(ip)); - assert.strictEqual(family, 4); + const req = dns.lookup( + addresses.INET4_HOST, 4, + common.mustCall((err, ip, family) => { + assert.ifError(err); + assert.ok(net.isIPv4(ip)); + assert.strictEqual(family, 4); - done(); - })); + done(); + })); checkWrap(req); }); TEST(function test_lookup_ipv4_implicit(done) { - const req = dns.lookup('www.google.com', - common.mustCall((err, ip, family) => { - assert.ifError(err); - assert.ok(net.isIPv4(ip)); - assert.strictEqual(family, 4); + const req = dns.lookup( + addresses.INET4_HOST, + common.mustCall((err, ip, family) => { + assert.ifError(err); + assert.ok(net.isIPv4(ip)); + assert.strictEqual(family, 4); - done(); - })); + done(); + })); checkWrap(req); }); TEST(function test_lookup_ipv4_explicit_object(done) { - const req = dns.lookup('www.google.com', { + const req = dns.lookup(addresses.INET4_HOST, { family: 4 }, common.mustCall((err, ip, family) => { assert.ifError(err); @@ -113,7 +118,7 @@ TEST(function test_lookup_ipv4_explicit_object(done) { }); TEST(function test_lookup_ipv4_hint_addrconfig(done) { - const req = dns.lookup('www.google.com', { + const req = dns.lookup(addresses.INET4_HOST, { hints: dns.ADDRCONFIG }, common.mustCall((err, ip, family) => { assert.ifError(err); @@ -154,7 +159,7 @@ TEST(function test_lookup_localhost_ipv4(done) { TEST(function 
test_lookup_all_ipv4(done) { const req = dns.lookup( - 'www.google.com', + addresses.INET4_HOST, { all: true, family: 4 }, common.mustCall((err, ips) => { assert.ifError(err); diff --git a/test/internet/test-dns-ipv6.js b/test/internet/test-dns-ipv6.js index a91b108456c027..8b1a8936802729 100644 --- a/test/internet/test-dns-ipv6.js +++ b/test/internet/test-dns-ipv6.js @@ -1,5 +1,6 @@ 'use strict'; const common = require('../common'); +const { addresses } = require('../common/internet'); if (!common.hasIPv6) common.skip('this test, no IPv6 support'); @@ -38,53 +39,57 @@ function checkWrap(req) { } TEST(function test_resolve6(done) { - const req = dns.resolve6('ipv6.google.com', - common.mustCall((err, ips) => { - assert.ifError(err); + const req = dns.resolve6( + addresses.INET6_HOST, + common.mustCall((err, ips) => { + assert.ifError(err); - assert.ok(ips.length > 0); + assert.ok(ips.length > 0); - for (let i = 0; i < ips.length; i++) - assert.ok(isIPv6(ips[i])); + for (let i = 0; i < ips.length; i++) + assert.ok(isIPv6(ips[i])); - done(); - })); + done(); + })); checkWrap(req); }); TEST(function test_reverse_ipv6(done) { - const req = dns.reverse('2001:4860:4860::8888', - common.mustCall((err, domains) => { - assert.ifError(err); + const req = dns.reverse( + addresses.INET6_IP, + common.mustCall((err, domains) => { + assert.ifError(err); - assert.ok(domains.length > 0); + assert.ok(domains.length > 0); - for (let i = 0; i < domains.length; i++) - assert.ok(typeof domains[i] === 'string'); + for (let i = 0; i < domains.length; i++) + assert.ok(typeof domains[i] === 'string'); - done(); - })); + done(); + })); checkWrap(req); }); TEST(function test_lookup_ipv6_explicit(done) { - const req = dns.lookup('ipv6.google.com', 6, - common.mustCall((err, ip, family) => { - assert.ifError(err); - assert.ok(isIPv6(ip)); - assert.strictEqual(family, 6); + const req = dns.lookup( + addresses.INET6_HOST, + 6, + common.mustCall((err, ip, family) => { + assert.ifError(err); + assert.ok(isIPv6(ip)); + assert.strictEqual(family, 6); - done(); - })); + done(); + })); checkWrap(req); }); /* This ends up just being too problematic to test TEST(function test_lookup_ipv6_implicit(done) { - var req = dns.lookup('ipv6.google.com', function(err, ip, family) { + var req = dns.lookup(addresses.INET6_HOST, function(err, ip, family) { assert.ifError(err); assert.ok(net.isIPv6(ip)); assert.strictEqual(family, 6); @@ -97,7 +102,7 @@ TEST(function test_lookup_ipv6_implicit(done) { */ TEST(function test_lookup_ipv6_explicit_object(done) { - const req = dns.lookup('ipv6.google.com', { + const req = dns.lookup(addresses.INET6_HOST, { family: 6 }, common.mustCall((err, ip, family) => { assert.ifError(err); @@ -111,7 +116,7 @@ TEST(function test_lookup_ipv6_explicit_object(done) { }); TEST(function test_lookup_ipv6_hint(done) { - const req = dns.lookup('www.google.com', { + const req = dns.lookup(addresses.INET6_HOST, { family: 6, hints: dns.V4MAPPED }, common.mustCall((err, ip, family) => { @@ -120,7 +125,7 @@ TEST(function test_lookup_ipv6_hint(done) { if (common.isFreeBSD) { assert(err instanceof Error); assert.strictEqual(err.code, 'EAI_BADFLAGS'); - assert.strictEqual(err.hostname, 'www.google.com'); + assert.strictEqual(err.hostname, addresses.INET_HOST); assert.ok(/getaddrinfo EAI_BADFLAGS/.test(err.message)); done(); return; @@ -139,21 +144,22 @@ TEST(function test_lookup_ipv6_hint(done) { }); TEST(function test_lookup_ip_ipv6(done) { - const req = dns.lookup('::1', - common.mustCall((err, ip, family) => { - 
assert.ifError(err); - assert.ok(isIPv6(ip)); - assert.strictEqual(family, 6); + const req = dns.lookup( + '::1', + common.mustCall((err, ip, family) => { + assert.ifError(err); + assert.ok(isIPv6(ip)); + assert.strictEqual(family, 6); - done(); - })); + done(); + })); checkWrap(req); }); TEST(function test_lookup_all_ipv6(done) { const req = dns.lookup( - 'www.google.com', + addresses.INET6_HOST, { all: true, family: 6 }, common.mustCall((err, ips) => { assert.ifError(err); diff --git a/test/internet/test-dns-setserver-in-callback-of-resolve4.js b/test/internet/test-dns-setserver-in-callback-of-resolve4.js index 222ac4dcc8ee31..58b3327efe9475 100644 --- a/test/internet/test-dns-setserver-in-callback-of-resolve4.js +++ b/test/internet/test-dns-setserver-in-callback-of-resolve4.js @@ -5,11 +5,14 @@ // a crash or not. If it doesn't crash, the test succeeded. const common = require('../common'); +const { addresses } = require('../common/internet'); const dns = require('dns'); -dns.resolve4('google.com', common.mustCall(function(/* err, nameServers */) { - dns.setServers([ '8.8.8.8' ]); -})); +dns.resolve4( + addresses.INET4_HOST, + common.mustCall(function(/* err, nameServers */) { + dns.setServers([ addresses.DNS4_SERVER ]); + })); // Test https://github.com/nodejs/node/issues/14734 -dns.resolve4('google.com', common.mustCall()); +dns.resolve4(addresses.INET4_HOST, common.mustCall()); diff --git a/test/internet/test-dns.js b/test/internet/test-dns.js index e2214433c51436..054de88dd45f90 100644 --- a/test/internet/test-dns.js +++ b/test/internet/test-dns.js @@ -21,6 +21,7 @@ 'use strict'; const common = require('../common'); +const { addresses } = require('../common/internet'); const assert = require('assert'); const dns = require('dns'); const net = require('net'); @@ -74,7 +75,9 @@ TEST(function test_reverse_bogus(done) { }); TEST(function test_resolve4_ttl(done) { - const req = dns.resolve4('google.com', { ttl: true }, function(err, result) { + const req = dns.resolve4(addresses.INET4_HOST, { + ttl: true + }, function(err, result) { assert.ifError(err); assert.ok(result.length > 0); @@ -95,7 +98,9 @@ TEST(function test_resolve4_ttl(done) { }); TEST(function test_resolve6_ttl(done) { - const req = dns.resolve6('google.com', { ttl: true }, function(err, result) { + const req = dns.resolve6(addresses.INET6_HOST, { + ttl: true + }, function(err, result) { assert.ifError(err); assert.ok(result.length > 0); @@ -116,7 +121,7 @@ TEST(function test_resolve6_ttl(done) { }); TEST(function test_resolveMx(done) { - const req = dns.resolveMx('gmail.com', function(err, result) { + const req = dns.resolveMx(addresses.MX_HOST, function(err, result) { assert.ifError(err); assert.ok(result.length > 0); @@ -138,7 +143,7 @@ TEST(function test_resolveMx(done) { }); TEST(function test_resolveMx_failure(done) { - const req = dns.resolveMx('something.invalid', function(err, result) { + const req = dns.resolveMx(addresses.INVALID_HOST, function(err, result) { assert.ok(err instanceof Error); assert.strictEqual(err.errno, 'ENOTFOUND'); @@ -151,7 +156,7 @@ TEST(function test_resolveMx_failure(done) { }); TEST(function test_resolveNs(done) { - const req = dns.resolveNs('rackspace.com', function(err, names) { + const req = dns.resolveNs(addresses.NS_HOST, function(err, names) { assert.ifError(err); assert.ok(names.length > 0); @@ -168,7 +173,7 @@ TEST(function test_resolveNs(done) { }); TEST(function test_resolveNs_failure(done) { - const req = dns.resolveNs('something.invalid', function(err, result) { + const req = 
dns.resolveNs(addresses.INVALID_HOST, function(err, result) { assert.ok(err instanceof Error); assert.strictEqual(err.errno, 'ENOTFOUND'); @@ -181,7 +186,7 @@ TEST(function test_resolveNs_failure(done) { }); TEST(function test_resolveSrv(done) { - const req = dns.resolveSrv('_jabber._tcp.google.com', function(err, result) { + const req = dns.resolveSrv(addresses.SRV_HOST, function(err, result) { assert.ifError(err); assert.ok(result.length > 0); @@ -205,7 +210,7 @@ TEST(function test_resolveSrv(done) { }); TEST(function test_resolveSrv_failure(done) { - const req = dns.resolveSrv('something.invalid', function(err, result) { + const req = dns.resolveSrv(addresses.INVALID_HOST, function(err, result) { assert.ok(err instanceof Error); assert.strictEqual(err.errno, 'ENOTFOUND'); @@ -218,7 +223,7 @@ TEST(function test_resolveSrv_failure(done) { }); TEST(function test_resolvePtr(done) { - const req = dns.resolvePtr('8.8.8.8.in-addr.arpa', function(err, result) { + const req = dns.resolvePtr(addresses.PTR_HOST, function(err, result) { assert.ifError(err); assert.ok(result.length > 0); @@ -235,7 +240,7 @@ TEST(function test_resolvePtr(done) { }); TEST(function test_resolvePtr_failure(done) { - const req = dns.resolvePtr('something.invalid', function(err, result) { + const req = dns.resolvePtr(addresses.INVALID_HOST, function(err, result) { assert.ok(err instanceof Error); assert.strictEqual(err.errno, 'ENOTFOUND'); @@ -248,7 +253,7 @@ TEST(function test_resolvePtr_failure(done) { }); TEST(function test_resolveNaptr(done) { - const req = dns.resolveNaptr('sip2sip.info', function(err, result) { + const req = dns.resolveNaptr(addresses.NAPTR_HOST, function(err, result) { assert.ifError(err); assert.ok(result.length > 0); @@ -272,7 +277,7 @@ TEST(function test_resolveNaptr(done) { }); TEST(function test_resolveNaptr_failure(done) { - const req = dns.resolveNaptr('something.invalid', function(err, result) { + const req = dns.resolveNaptr(addresses.INVALID_HOST, function(err, result) { assert.ok(err instanceof Error); assert.strictEqual(err.errno, 'ENOTFOUND'); @@ -285,7 +290,7 @@ TEST(function test_resolveNaptr_failure(done) { }); TEST(function test_resolveSoa(done) { - const req = dns.resolveSoa('nodejs.org', function(err, result) { + const req = dns.resolveSoa(addresses.SOA_HOST, function(err, result) { assert.ifError(err); assert.ok(result); assert.strictEqual(typeof result, 'object'); @@ -318,7 +323,7 @@ TEST(function test_resolveSoa(done) { }); TEST(function test_resolveSoa_failure(done) { - const req = dns.resolveSoa('something.invalid', function(err, result) { + const req = dns.resolveSoa(addresses.INVALID_HOST, function(err, result) { assert.ok(err instanceof Error); assert.strictEqual(err.errno, 'ENOTFOUND'); @@ -331,7 +336,7 @@ TEST(function test_resolveSoa_failure(done) { }); TEST(function test_resolveCname(done) { - const req = dns.resolveCname('www.microsoft.com', function(err, names) { + const req = dns.resolveCname(addresses.CNAME_HOST, function(err, names) { assert.ifError(err); assert.ok(names.length > 0); @@ -348,7 +353,7 @@ TEST(function test_resolveCname(done) { }); TEST(function test_resolveCname_failure(done) { - const req = dns.resolveCname('something.invalid', function(err, result) { + const req = dns.resolveCname(addresses.INVALID_HOST, function(err, result) { assert.ok(err instanceof Error); assert.strictEqual(err.errno, 'ENOTFOUND'); @@ -362,7 +367,7 @@ TEST(function test_resolveCname_failure(done) { TEST(function test_resolveTxt(done) { - const req = 
dns.resolveTxt('google.com', function(err, records) { + const req = dns.resolveTxt(addresses.TXT_HOST, function(err, records) { assert.ifError(err); assert.strictEqual(records.length, 1); assert.ok(util.isArray(records[0])); @@ -374,7 +379,7 @@ TEST(function test_resolveTxt(done) { }); TEST(function test_resolveTxt_failure(done) { - const req = dns.resolveTxt('something.invalid', function(err, result) { + const req = dns.resolveTxt(addresses.INVALID_HOST, function(err, result) { assert.ok(err instanceof Error); assert.strictEqual(err.errno, 'ENOTFOUND'); @@ -388,12 +393,12 @@ TEST(function test_resolveTxt_failure(done) { TEST(function test_lookup_failure(done) { - const req = dns.lookup('does.not.exist', 4, function(err, ip, family) { + const req = dns.lookup(addresses.INVALID_HOST, 4, (err, ip, family) => { assert.ok(err instanceof Error); assert.strictEqual(err.errno, dns.NOTFOUND); assert.strictEqual(err.errno, 'ENOTFOUND'); assert.ok(!/ENOENT/.test(err.message)); - assert.ok(/does\.not\.exist/.test(err.message)); + assert.ok(err.message.includes(addresses.INVALID_HOST)); done(); }); @@ -458,7 +463,9 @@ TEST(function test_lookup_null_all(done) { TEST(function test_lookup_all_mixed(done) { - const req = dns.lookup('www.google.com', { all: true }, function(err, ips) { + const req = dns.lookup(addresses.INET_HOST, { + all: true + }, function(err, ips) { assert.ifError(err); assert.ok(Array.isArray(ips)); assert.ok(ips.length > 0); @@ -508,11 +515,11 @@ TEST(function test_reverse_failure(done) { TEST(function test_lookup_failure(done) { - const req = dns.lookup('nosuchhostimsure', function(err) { + const req = dns.lookup(addresses.INVALID_HOST, (err) => { assert(err instanceof Error); assert.strictEqual(err.code, 'ENOTFOUND'); // Silly error code... 
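// Illustrative sketch, not part of this patch: the hunks above swap hard-coded
// hostnames for keys on `require('../common/internet').addresses`. That module's
// source is not shown in this section of the diff, so the following only sketches
// the shape such a module could take. The default values are the literals the
// hunks above remove; the NODE_TEST_* environment-variable override is an
// assumption, not something this patch defines.
'use strict';

const defaults = {
  INET_HOST: 'www.google.com',        // host with both A and AAAA records
  INET4_HOST: 'www.google.com',       // IPv4-reachable host
  INET6_HOST: 'ipv6.google.com',      // IPv6-reachable host
  INET6_IP: '2001:4860:4860::8888',
  INVALID_HOST: 'something.invalid',  // guaranteed not to resolve
  MX_HOST: 'gmail.com',
  TXT_HOST: 'google.com',
  DNS4_SERVER: '8.8.8.8'
};

// Hypothetical override hook so a CI environment could point the internet
// tests at its own infrastructure instead of the public defaults.
const addresses = {};
for (const [key, value] of Object.entries(defaults)) {
  addresses[key] = process.env[`NODE_TEST_${key}`] || value;
}

module.exports = { addresses };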
- assert.strictEqual(err.hostname, 'nosuchhostimsure'); - assert.ok(/nosuchhostimsure/.test(err.message)); + assert.strictEqual(err.hostname, addresses.INVALID_HOST); + assert.ok(err.message.includes(addresses.INVALID_HOST)); done(); }); @@ -522,7 +529,7 @@ TEST(function test_lookup_failure(done) { TEST(function test_resolve_failure(done) { - const req = dns.resolve4('nosuchhostimsure', function(err) { + const req = dns.resolve4(addresses.INVALID_HOST, (err) => { assert(err instanceof Error); switch (err.code) { @@ -534,8 +541,8 @@ TEST(function test_resolve_failure(done) { break; } - assert.strictEqual(err.hostname, 'nosuchhostimsure'); - assert.ok(/nosuchhostimsure/.test(err.message)); + assert.strictEqual(err.hostname, addresses.INVALID_HOST); + assert.ok(err.message.includes(addresses.INVALID_HOST)); done(); }); @@ -546,15 +553,16 @@ TEST(function test_resolve_failure(done) { let getaddrinfoCallbackCalled = false; -console.log('looking up nodejs.org...'); +console.log(`looking up ${addresses.INET4_HOST}..`); const cares = process.binding('cares_wrap'); const req = new cares.GetAddrInfoReqWrap(); -cares.getaddrinfo(req, 'nodejs.org', 4, /* hints */ 0, /* verbatim */ true); +cares.getaddrinfo(req, addresses.INET4_HOST, 4, + /* hints */ 0, /* verbatim */ true); req.oncomplete = function(err, domains) { assert.strictEqual(err, 0); - console.log('nodejs.org = ', domains); + console.log(`${addresses.INET4_HOST} = ${domains}`); assert.ok(Array.isArray(domains)); assert.ok(domains.length >= 1); assert.strictEqual(typeof domains[0], 'string'); @@ -569,10 +577,14 @@ process.on('exit', function() { }); -assert.doesNotThrow(() => dns.lookup('nodejs.org', 6, common.mustCall())); +assert.doesNotThrow(() => + dns.lookup(addresses.INET6_HOST, 6, common.mustCall())); -assert.doesNotThrow(() => dns.lookup('nodejs.org', {}, common.mustCall())); +assert.doesNotThrow(() => + dns.lookup(addresses.INET_HOST, {}, common.mustCall())); -assert.doesNotThrow(() => dns.lookupService('0.0.0.0', '0', common.mustCall())); +assert.doesNotThrow(() => + dns.lookupService('0.0.0.0', '0', common.mustCall())); -assert.doesNotThrow(() => dns.lookupService('0.0.0.0', 0, common.mustCall())); +assert.doesNotThrow(() => + dns.lookupService('0.0.0.0', 0, common.mustCall())); diff --git a/test/internet/test-http-https-default-ports.js b/test/internet/test-http-https-default-ports.js index 567b045dc6eacf..5f1b9eddb4f416 100644 --- a/test/internet/test-http-https-default-ports.js +++ b/test/internet/test-http-https-default-ports.js @@ -21,6 +21,7 @@ 'use strict'; const common = require('../common'); +const { addresses } = require('../common/internet'); if (!common.hasCrypto) common.skip('missing crypto'); @@ -29,10 +30,10 @@ const https = require('https'); const http = require('http'); -https.get('https://www.google.com/', common.mustCall(function(res) { +https.get(`https://${addresses.INET_HOST}/`, common.mustCall(function(res) { res.resume(); })); -http.get('http://www.google.com/', common.mustCall(function(res) { +http.get(`http://${addresses.INET_HOST}/`, common.mustCall(function(res) { res.resume(); })); diff --git a/test/known_issues/known_issues.status b/test/known_issues/known_issues.status index 46c8ed32741c7d..e21913e232c03f 100644 --- a/test/known_issues/known_issues.status +++ b/test/known_issues/known_issues.status @@ -7,8 +7,6 @@ prefix known_issues [true] # This section applies to all platforms [$system==win32] -test-stdout-buffer-flush-on-exit: SKIP -test-cluster-disconnect-handles: SKIP [$system==linux] diff --git 
a/test/known_issues/test-cwd-enoent-file.js b/test/known_issues/test-cwd-enoent-file.js index 01e6e8359fb522..0f75896134f7e3 100644 --- a/test/known_issues/test-cwd-enoent-file.js +++ b/test/known_issues/test-cwd-enoent-file.js @@ -17,8 +17,9 @@ const fs = require('fs'); if (process.argv[2] === 'child') { // Do nothing. } else { - common.refreshTmpDir(); - const dir = fs.mkdtempSync(`${common.tmpDir}/`); + const tmpdir = require('../common/tmpdir'); + tmpdir.refresh(); + const dir = fs.mkdtempSync(`${tmpdir.path}/`); process.chdir(dir); fs.rmdirSync(dir); assert.throws(process.cwd, diff --git a/test/known_issues/test-http2-client-http1-server.js b/test/known_issues/test-http2-client-http1-server.js index 53f7bf42c465e1..616427b3904e16 100644 --- a/test/known_issues/test-http2-client-http1-server.js +++ b/test/known_issues/test-http2-client-http1-server.js @@ -7,6 +7,7 @@ if (!common.hasCrypto) const http = require('http'); const http2 = require('http2'); +// Creating an http1 server here... const server = http.createServer(common.mustNotCall()); server.listen(0, common.mustCall(() => { @@ -15,13 +16,17 @@ server.listen(0, common.mustCall(() => { const req = client.request(); req.on('close', common.mustCall()); + req.on('error', common.expectsError({ + code: 'ERR_HTTP2_ERROR', + type: Error, + message: 'Protocol error' + })); + client.on('error', common.expectsError({ code: 'ERR_HTTP2_ERROR', type: Error, message: 'Protocol error' })); - client.on('close', (...args) => { - server.close(); - }); + client.on('close', common.mustCall(() => server.close())); })); diff --git a/test/known_issues/test-module-deleted-extensions.js b/test/known_issues/test-module-deleted-extensions.js index 45ec41ad6041ad..3a51e8725eec60 100644 --- a/test/known_issues/test-module-deleted-extensions.js +++ b/test/known_issues/test-module-deleted-extensions.js @@ -4,9 +4,10 @@ const common = require('../common'); const assert = require('assert'); const fs = require('fs'); const path = require('path'); -const file = path.join(common.tmpDir, 'test-extensions.foo.bar'); +const tmpdir = require('../common/tmpdir'); +const file = path.join(tmpdir.path, 'test-extensions.foo.bar'); -common.refreshTmpDir(); +tmpdir.refresh(); fs.writeFileSync(file, '', 'utf8'); require.extensions['.foo.bar'] = (module, path) => {}; delete require.extensions['.foo.bar']; @@ -14,4 +15,4 @@ require.extensions['.bar'] = common.mustCall((module, path) => { assert.strictEqual(module.id, file); assert.strictEqual(path, file); }); -require(path.join(common.tmpDir, 'test-extensions')); +require(path.join(tmpdir.path, 'test-extensions')); diff --git a/test/parallel/test-accessor-properties.js b/test/parallel/test-accessor-properties.js new file mode 100644 index 00000000000000..13535ceda9667f --- /dev/null +++ b/test/parallel/test-accessor-properties.js @@ -0,0 +1,78 @@ +'use strict'; + +const common = require('../common'); + +// This tests that the accessor properties do not raise assertions +// when called with incompatible receivers. 
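// Illustrative sketch, not part of this patch: a plain-JavaScript analogue of
// the "incompatible receiver" case the new test above exercises. A getter
// defined on a prototype runs with whatever object it is read from, so reading
// it directly off the prototype (no wrapped native object behind `this`)
// should throw a TypeError rather than hit an assertion. `Handle` below is
// hypothetical and only stands in for a native binding class.
'use strict';
const assert = require('assert');

class Handle {
  constructor() { this._fd = 3; }
  get fd() {
    // Mimic a native getter that validates its receiver instead of aborting.
    if (!(this instanceof Handle) ||
        !Object.prototype.hasOwnProperty.call(this, '_fd'))
      throw new TypeError('Illegal invocation');
    return this._fd;
  }
}

assert.strictEqual(new Handle().fd, 3);                // instance receiver: ok
assert.throws(() => Handle.prototype.fd, TypeError);   // prototype receiver: throws
// The descriptor is still inspectable without invoking the getter.
assert.strictEqual(
  typeof Object.getOwnPropertyDescriptor(Handle.prototype, 'fd'), 'object');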
+ +const assert = require('assert'); + +// Objects that call StreamBase::AddMethods, when setting up +// their prototype +const TTY = process.binding('tty_wrap').TTY; +const UDP = process.binding('udp_wrap').UDP; + +{ + // Should throw instead of raise assertions + assert.throws(() => { + TTY.prototype.bytesRead; + }, TypeError); + + assert.throws(() => { + TTY.prototype.fd; + }, TypeError); + + assert.throws(() => { + TTY.prototype._externalStream; + }, TypeError); + + assert.throws(() => { + UDP.prototype.fd; + }, TypeError); + + // Should not throw for Object.getOwnPropertyDescriptor + assert.strictEqual( + typeof Object.getOwnPropertyDescriptor(TTY.prototype, 'bytesRead'), + 'object' + ); + + assert.strictEqual( + typeof Object.getOwnPropertyDescriptor(TTY.prototype, 'fd'), + 'object' + ); + + assert.strictEqual( + typeof Object.getOwnPropertyDescriptor(TTY.prototype, '_externalStream'), + 'object' + ); + + assert.strictEqual( + typeof Object.getOwnPropertyDescriptor(UDP.prototype, 'fd'), + 'object' + ); + + if (common.hasCrypto) { // eslint-disable-line crypto-check + // There are accessor properties in crypto too + const crypto = process.binding('crypto'); + + assert.throws(() => { + crypto.SecureContext.prototype._external; + }, TypeError); + + assert.throws(() => { + crypto.Connection.prototype._external; + }, TypeError); + + assert.strictEqual( + typeof Object.getOwnPropertyDescriptor( + crypto.SecureContext.prototype, '_external'), + 'object' + ); + + assert.strictEqual( + typeof Object.getOwnPropertyDescriptor( + crypto.Connection.prototype, '_external'), + 'object' + ); + } +} diff --git a/test/parallel/test-assert.js b/test/parallel/test-assert.js index cc4f017dc5d0f7..cdb824c3cffa8f 100644 --- a/test/parallel/test-assert.js +++ b/test/parallel/test-assert.js @@ -586,7 +586,7 @@ testAssertionMessage({ a: undefined, b: null }, '{ a: undefined, b: null }'); testAssertionMessage({ a: NaN, b: Infinity, c: -Infinity }, '{ a: NaN, b: Infinity, c: -Infinity }'); -// #2893 +// https://github.com/nodejs/node-v0.x-archive/issues/2893 { let threw = false; try { @@ -601,7 +601,7 @@ testAssertionMessage({ a: NaN, b: Infinity, c: -Infinity }, assert.ok(threw); } -// #5292 +// https://github.com/nodejs/node-v0.x-archive/issues/5292 try { assert.strictEqual(1, 2); } catch (e) { diff --git a/test/parallel/test-async-hooks-http-agent.js b/test/parallel/test-async-hooks-http-agent.js index ff19d089a02099..e10820c3c202b6 100644 --- a/test/parallel/test-async-hooks-http-agent.js +++ b/test/parallel/test-async-hooks-http-agent.js @@ -8,7 +8,7 @@ const http = require('http'); // Checks that an http.Agent properly asyncReset()s a reused socket handle, and // re-assigns the fresh async id to the reused `net.Socket` instance. -// Make sure a single socket is transpartently reused for 2 requests. +// Make sure a single socket is transparently reused for 2 requests. const agent = new http.Agent({ keepAlive: true, keepAliveMsecs: Infinity, diff --git a/test/parallel/test-async-hooks-promise-enable-disable.js b/test/parallel/test-async-hooks-promise-enable-disable.js index 075b29e4e50a70..b7692c45cd9b2e 100644 --- a/test/parallel/test-async-hooks-promise-enable-disable.js +++ b/test/parallel/test-async-hooks-promise-enable-disable.js @@ -11,13 +11,13 @@ let p_inits = 0; common.crashOnUnhandledRejection(); // Not useful to place common.mustCall() around 'exit' event b/c it won't be -// able to check it anway. +// able to check it anyway. 
process.on('exit', (code) => { if (code !== 0) return; if (p_er !== null) throw p_er; - // Expecint exactly 2 PROMISE types to reach init. + // Expecting exactly 2 PROMISE types to reach init. assert.strictEqual(p_inits, EXPECTED_INITS); }); diff --git a/test/parallel/test-async-wrap-pop-id-during-load.js b/test/parallel/test-async-wrap-pop-id-during-load.js index 1017fc02a72b05..4f39a4fdf01b34 100644 --- a/test/parallel/test-async-wrap-pop-id-during-load.js +++ b/test/parallel/test-async-wrap-pop-id-during-load.js @@ -8,7 +8,7 @@ if (process.argv[2] === 'async') { throw new Error(); } (async function() { await fn(); })(); - // While the above should error, just in case it dosn't the script shouldn't + // While the above should error, just in case it doesn't the script shouldn't // fork itself indefinitely so return early. return; } diff --git a/test/parallel/test-benchmark-fs.js b/test/parallel/test-benchmark-fs.js index e960482a636e33..ad01b4e5803ce5 100644 --- a/test/parallel/test-benchmark-fs.js +++ b/test/parallel/test-benchmark-fs.js @@ -1,9 +1,10 @@ 'use strict'; -const common = require('../common'); +require('../common'); const runBenchmark = require('../common/benchmark'); -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); runBenchmark('fs', [ 'n=1', @@ -16,4 +17,4 @@ runBenchmark('fs', [ 'statSyncType=fstatSync', 'encodingType=buf', 'filesize=1024' -], { NODE_TMPDIR: common.tmpDir, NODEJS_BENCHMARK_ZERO_ALLOWED: 1 }); +], { NODE_TMPDIR: tmpdir.path, NODEJS_BENCHMARK_ZERO_ALLOWED: 1 }); diff --git a/test/parallel/test-buffer-alloc.js b/test/parallel/test-buffer-alloc.js index 6d7e5581ad665d..a9ca156810fb09 100644 --- a/test/parallel/test-buffer-alloc.js +++ b/test/parallel/test-buffer-alloc.js @@ -636,7 +636,8 @@ assert.strictEqual('', x.inspect()); } { - // #1210 Test UTF-8 string includes null character + // https://github.com/nodejs/node-v0.x-archive/pull/1210 + // Test UTF-8 string includes null character let buf = Buffer.from('\0'); assert.strictEqual(buf.length, 1); buf = Buffer.from('\0\0'); @@ -660,7 +661,8 @@ assert.strictEqual('', x.inspect()); } { - // #243 Test write() with maxLength + // https://github.com/nodejs/node-v0.x-archive/issues/243 + // Test write() with maxLength const buf = Buffer.allocUnsafe(4); buf.fill(0xFF); assert.strictEqual(buf.write('abcd', 1, 2, 'utf8'), 2); @@ -886,12 +888,14 @@ assert.throws(() => Buffer.allocUnsafe(8).writeFloatLE(0.0, -1), RangeError); assert.strictEqual(buf.readIntBE(0, 5), -0x0012000000); } -// Regression test for #5482: should throw but not assert in C++ land. +// Regression test for https://github.com/nodejs/node-v0.x-archive/issues/5482: +// should throw but not assert in C++ land. assert.throws(() => Buffer.from('', 'buffer'), /^TypeError: "encoding" must be a valid string encoding$/); -// Regression test for #6111. Constructing a buffer from another buffer -// should a) work, and b) not corrupt the source buffer. +// Regression test for https://github.com/nodejs/node-v0.x-archive/issues/6111. +// Constructing a buffer from another buffer should a) work, and b) not corrupt +// the source buffer. { const a = [...Array(128).keys()]; // [0, 1, 2, 3, ... 
126, 127] const b = Buffer.from(a); diff --git a/test/parallel/test-buffer-fill.js b/test/parallel/test-buffer-fill.js index 2ca79170538a87..f80020752d6431 100644 --- a/test/parallel/test-buffer-fill.js +++ b/test/parallel/test-buffer-fill.js @@ -344,7 +344,7 @@ Buffer.alloc(8, ''); return 0; } else { elseWasLast = true; - // Once buffer.js calls the C++ implemenation of fill, return -1 + // Once buffer.js calls the C++ implementation of fill, return -1 return -1; } } @@ -377,7 +377,7 @@ assert.throws(() => { return 1; } else { elseWasLast = true; - // Once buffer.js calls the C++ implemenation of fill, return -1 + // Once buffer.js calls the C++ implementation of fill, return -1 return -1; } } diff --git a/test/parallel/test-buffer-includes.js b/test/parallel/test-buffer-includes.js index 2f3a5ba6b63b6b..59380f5403b295 100644 --- a/test/parallel/test-buffer-includes.js +++ b/test/parallel/test-buffer-includes.js @@ -137,7 +137,7 @@ assert.strictEqual( ); -// test usc2 encoding +// test ucs2 encoding let twoByteString = Buffer.from('\u039a\u0391\u03a3\u03a3\u0395', 'ucs2'); assert(twoByteString.includes('\u0395', 4, 'ucs2')); diff --git a/test/parallel/test-buffer-indexof.js b/test/parallel/test-buffer-indexof.js index 4da6f9fcddc5d2..52edda39fa0e54 100644 --- a/test/parallel/test-buffer-indexof.js +++ b/test/parallel/test-buffer-indexof.js @@ -553,7 +553,7 @@ assert.strictEqual(511, longBufferString.lastIndexOf(pattern, 1534)); // "yolo swag swag yolo swag yolo yolo swag" ..., goes on for about 5MB. // This is hard to search because it all looks similar, but never repeats. -// countBits returns the number of bits in the binary reprsentation of n. +// countBits returns the number of bits in the binary representation of n. function countBits(n) { let count; for (count = 0; n > 0; count++) { diff --git a/test/parallel/test-buffer-read.js b/test/parallel/test-buffer-read.js index 5eac575ff5ab81..e8f3bad383fd69 100644 --- a/test/parallel/test-buffer-read.js +++ b/test/parallel/test-buffer-read.js @@ -20,11 +20,11 @@ function read(buff, funx, args, expected) { } -// testing basic functionality of readDoubleBE() and readDOubleLE() +// testing basic functionality of readDoubleBE() and readDoubleLE() read(buf, 'readDoubleBE', [1], -3.1827727774563287e+295); read(buf, 'readDoubleLE', [1], -6.966010051009108e+144); -// testing basic functionality of readFLoatBE() and readFloatLE() +// testing basic functionality of readFloatBE() and readFloatLE() read(buf, 'readFloatBE', [1], -1.6691549692541768e+37); read(buf, 'readFloatLE', [1], -7861303808); diff --git a/test/parallel/test-child-process-exec-timeout.js b/test/parallel/test-child-process-exec-timeout.js index ed25d9bff825a9..e08aff908522f2 100644 --- a/test/parallel/test-child-process-exec-timeout.js +++ b/test/parallel/test-child-process-exec-timeout.js @@ -16,15 +16,22 @@ const cmd = `"${process.execPath}" "${__filename}" child`; // Test the case where a timeout is set, and it expires. cp.exec(cmd, { timeout: 1 }, common.mustCall((err, stdout, stderr) => { + let sigterm = 'SIGTERM'; assert.strictEqual(err.killed, true); - assert.strictEqual(err.code, null); + // TODO OpenBSD returns a null signal and 143 for code + if (common.isOpenBSD) { + assert.strictEqual(err.code, 143); + sigterm = null; + } else { + assert.strictEqual(err.code, null); + } // At least starting with Darwin Kernel Version 16.4.0, sending a SIGTERM to a // process that is still starting up kills it with SIGKILL instead of SIGTERM. 
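// Illustrative usage sketch, not part of this patch: the API the exec-timeout
// test above exercises. `child_process.exec` with the `timeout` option kills
// the child once the timer fires and reports how it died on the error object;
// the exact `signal`/`code` values are platform-dependent, which is what the
// OpenBSD branch added above accounts for.
'use strict';
const { exec } = require('child_process');

// Spawn something that would outlive the 100 ms timeout.
exec(`"${process.execPath}" -e "setTimeout(() => {}, 10000)"`, { timeout: 100 },
     (err, stdout, stderr) => {
       if (err) {
         console.log(err.killed);   // true: the timeout killed the child
         console.log(err.signal);   // usually 'SIGTERM'; platform-dependent
         console.log(err.code);     // usually null when killed by a signal
       }
     });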
// See: https://github.com/libuv/libuv/issues/1226 if (common.isOSX) assert.ok(err.signal === 'SIGTERM' || err.signal === 'SIGKILL'); else - assert.strictEqual(err.signal, 'SIGTERM'); + assert.strictEqual(err.signal, sigterm); assert.strictEqual(err.cmd, cmd); assert.strictEqual(stdout.trim(), ''); assert.strictEqual(stderr.trim(), ''); diff --git a/test/parallel/test-child-process-execfile.js b/test/parallel/test-child-process-execfile.js index 62cc7f534dc86b..a64128d6a3ab6b 100644 --- a/test/parallel/test-child-process-execfile.js +++ b/test/parallel/test-child-process-execfile.js @@ -6,6 +6,7 @@ const uv = process.binding('uv'); const fixtures = require('../common/fixtures'); const fixture = fixtures.path('exit.js'); +const execOpts = { encoding: 'utf8', shell: true }; { execFile( @@ -38,3 +39,10 @@ const fixture = fixtures.path('exit.js'); child.kill(); child.emit('close', code, null); } + +{ + // Verify the shell option works properly + execFile(process.execPath, [fixture, 0], execOpts, common.mustCall((err) => { + assert.ifError(err); + })); +} diff --git a/test/parallel/test-child-process-fork-exec-path.js b/test/parallel/test-child-process-fork-exec-path.js index 06c6244eddaac4..42855cd663e826 100644 --- a/test/parallel/test-child-process-fork-exec-path.js +++ b/test/parallel/test-child-process-fork-exec-path.js @@ -24,9 +24,10 @@ const common = require('../common'); const assert = require('assert'); const fs = require('fs'); const path = require('path'); +const tmpdir = require('../common/tmpdir'); const msg = { test: 'this' }; const nodePath = process.execPath; -const copyPath = path.join(common.tmpDir, 'node-copy.exe'); +const copyPath = path.join(tmpdir.path, 'node-copy.exe'); if (process.env.FORK) { assert(process.send); @@ -34,7 +35,7 @@ if (process.env.FORK) { process.send(msg); process.exit(); } else { - common.refreshTmpDir(); + tmpdir.refresh(); try { fs.unlinkSync(copyPath); } catch (e) { diff --git a/test/parallel/test-child-process-internal.js b/test/parallel/test-child-process-internal.js index 4b22b4060307ed..c99010aeb8935a 100644 --- a/test/parallel/test-child-process-internal.js +++ b/test/parallel/test-child-process-internal.js @@ -32,7 +32,7 @@ if (process.argv[2] === 'child') { //send non-internal message containing PREFIX at a non prefix position process.send(normal); - //send inernal message + //send internal message process.send(internal); process.exit(0); diff --git a/test/parallel/test-child-process-send-returns-boolean.js b/test/parallel/test-child-process-send-returns-boolean.js index 2fbba1a4544ced..4c986e307e083a 100644 --- a/test/parallel/test-child-process-send-returns-boolean.js +++ b/test/parallel/test-child-process-send-returns-boolean.js @@ -14,7 +14,7 @@ const fixtures = require('../common/fixtures'); const subScript = fixtures.path('child-process-persistent.js'); { - // Test `send` return value on `fork` that opens and IPC by deafult. + // Test `send` return value on `fork` that opens and IPC by default. const n = fork(subScript); // `subprocess.send` should always return `true` for the first send. 
const rv = n.send({ h: 'w' }, (err) => { if (err) assert.fail(err); }); @@ -31,12 +31,12 @@ const subScript = fixtures.path('child-process-persistent.js'); const server = net.createServer(common.mustNotCall()).listen(0, () => { const handle = server._handle; - // Sending a handle and not giving the tickQueue time to acknoladge should + // Sending a handle and not giving the tickQueue time to acknowledge should // create the internal backlog, but leave it empty. const rv1 = s.send('one', handle, (err) => { if (err) assert.fail(err); }); assert.strictEqual(rv1, true); - // Since the first `send` included a handle (should be unackoladged), - // we can safly queue up only one more message. + // Since the first `send` included a handle (should be unacknowledged), + // we can safely queue up only one more message. const rv2 = s.send('two', (err) => { if (err) assert.fail(err); }); assert.strictEqual(rv2, true); // The backlog should now be indicate to backoff. diff --git a/test/parallel/test-cli-node-options-disallowed.js b/test/parallel/test-cli-node-options-disallowed.js index b55543bfa243bc..e4ae2d1aea28f2 100644 --- a/test/parallel/test-cli-node-options-disallowed.js +++ b/test/parallel/test-cli-node-options-disallowed.js @@ -8,8 +8,9 @@ if (process.config.variables.node_without_node_options) const assert = require('assert'); const exec = require('child_process').execFile; -common.refreshTmpDir(); -process.chdir(common.tmpDir); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); +process.chdir(tmpdir.path); disallow('--version'); disallow('-v'); diff --git a/test/parallel/test-cli-node-options.js b/test/parallel/test-cli-node-options.js index 85c35508566ff1..29358134c22c99 100644 --- a/test/parallel/test-cli-node-options.js +++ b/test/parallel/test-cli-node-options.js @@ -8,8 +8,9 @@ if (process.config.variables.node_without_node_options) const assert = require('assert'); const exec = require('child_process').execFile; -common.refreshTmpDir(); -process.chdir(common.tmpDir); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); +process.chdir(tmpdir.path); expect(`-r ${require.resolve('../fixtures/printA.js')}`, 'A\nB\n'); expect('--no-deprecation', 'B\n'); diff --git a/test/parallel/test-cluster-eaccess.js b/test/parallel/test-cluster-eaccess.js index ecf0862fa3bab5..c6a2a8ac25e251 100644 --- a/test/parallel/test-cluster-eaccess.js +++ b/test/parallel/test-cluster-eaccess.js @@ -33,7 +33,8 @@ const net = require('net'); if (cluster.isMaster && process.argv.length !== 3) { // cluster.isMaster - common.refreshTmpDir(); + const tmpdir = require('../common/tmpdir'); + tmpdir.refresh(); const PIPE_NAME = common.PIPE; const worker = cluster.fork({ PIPE_NAME }); diff --git a/test/parallel/test-cluster-http-pipe.js b/test/parallel/test-cluster-http-pipe.js index 96f741e80443b7..9e58fb297b28fe 100644 --- a/test/parallel/test-cluster-http-pipe.js +++ b/test/parallel/test-cluster-http-pipe.js @@ -32,7 +32,8 @@ const cluster = require('cluster'); const http = require('http'); if (cluster.isMaster) { - common.refreshTmpDir(); + const tmpdir = require('../common/tmpdir'); + tmpdir.refresh(); const worker = cluster.fork(); worker.on('message', common.mustCall((msg) => { assert.strictEqual(msg, 'DONE'); diff --git a/test/parallel/test-cluster-net-listen-relative-path.js b/test/parallel/test-cluster-net-listen-relative-path.js index 2f95d05203d424..ce9ead9e2eb958 100644 --- a/test/parallel/test-cluster-net-listen-relative-path.js +++ 
b/test/parallel/test-cluster-net-listen-relative-path.js @@ -6,6 +6,8 @@ const net = require('net'); const path = require('path'); const fs = require('fs'); +const tmpdir = require('../common/tmpdir'); + if (common.isWindows) common.skip('On Windows named pipes live in their own ' + 'filesystem and don\'t have a ~100 byte limit'); @@ -20,8 +22,8 @@ assert.strictEqual(path.resolve(socketDir, socketName).length > 100, true, if (cluster.isMaster) { // ensure that the worker exits peacefully - common.refreshTmpDir(); - process.chdir(common.tmpDir); + tmpdir.refresh(); + process.chdir(tmpdir.path); fs.mkdirSync(socketDir); cluster.fork().on('exit', common.mustCall(function(statusCode) { assert.strictEqual(statusCode, 0); diff --git a/test/parallel/test-cluster-worker-init.js b/test/parallel/test-cluster-worker-init.js index fd4e43fb863a20..47a884c1cdcc22 100644 --- a/test/parallel/test-cluster-worker-init.js +++ b/test/parallel/test-cluster-worker-init.js @@ -42,7 +42,7 @@ if (cluster.isMaster) { worker.send(msg); }); } else { - // GH #7998 + // https://github.com/nodejs/node-v0.x-archive/issues/7998 cluster.worker.on('message', (message) => { process.send(message === msg); }); diff --git a/test/parallel/test-crypto-binary-default.js b/test/parallel/test-crypto-binary-default.js index 55da9e67015660..b9312ccbe02ad3 100644 --- a/test/parallel/test-crypto-binary-default.js +++ b/test/parallel/test-crypto-binary-default.js @@ -411,7 +411,8 @@ fileStream.on('close', common.mustCall(function() { ); })); -// Issue #2227: unknown digest method should throw an error. +// Unknown digest method should throw an error: +// https://github.com/nodejs/node-v0.x-archive/issues/2227 assert.throws(function() { crypto.createHash('xyzzy'); }, /^Error: Digest method not supported$/); diff --git a/test/parallel/test-crypto-cipher-decipher.js b/test/parallel/test-crypto-cipher-decipher.js index b0d7feb1071a51..1752d903cfcb09 100644 --- a/test/parallel/test-crypto-cipher-decipher.js +++ b/test/parallel/test-crypto-cipher-decipher.js @@ -70,7 +70,8 @@ testCipher1(Buffer.from('MySecretKey123')); testCipher2('0123456789abcdef'); testCipher2(Buffer.from('0123456789abcdef')); -// Base64 padding regression test, see #4837. +// Base64 padding regression test, see +// https://github.com/nodejs/node-v0.x-archive/issues/4837. { const c = crypto.createCipher('aes-256-cbc', 'secret'); const s = c.update('test', 'utf8', 'base64') + c.final('base64'); @@ -78,7 +79,7 @@ testCipher2(Buffer.from('0123456789abcdef')); } // Calling Cipher.final() or Decipher.final() twice should error but -// not assert. See #4886. +// not assert. See https://github.com/nodejs/node-v0.x-archive/issues/4886. { const c = crypto.createCipher('aes-256-cbc', 'secret'); try { c.final('xxx'); } catch (e) { /* Ignore. */ } @@ -90,14 +91,16 @@ testCipher2(Buffer.from('0123456789abcdef')); try { d.final('xxx'); } catch (e) { /* Ignore. */ } } -// Regression test for #5482: string to Cipher#update() should not assert. +// Regression test for https://github.com/nodejs/node-v0.x-archive/issues/5482: +// string to Cipher#update() should not assert. { const c = crypto.createCipher('aes192', '0123456789abcdef'); c.update('update'); c.final(); } -// #5655 regression tests, 'utf-8' and 'utf8' are identical. +// https://github.com/nodejs/node-v0.x-archive/issues/5655 regression tests, +// 'utf-8' and 'utf8' are identical. { let c = crypto.createCipher('aes192', '0123456789abcdef'); c.update('update', ''); // Defaults to "utf8". 
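// Illustrative sketch, not part of this patch: many hunks in this change
// replace `common.tmpDir` / `common.refreshTmpDir()` with a dedicated
// `../common/tmpdir` module exposing `path` and `refresh()`. That module's
// source is not included in this section of the diff; the sketch below only
// shows the minimal surface the converted tests rely on. The directory
// location and the recursive-removal helper are assumptions, not the
// repository's actual implementation.
'use strict';
const fs = require('fs');
const path = require('path');

// Hypothetical location; the real tests derive this from the runner's config.
const tmpPath = path.join(__dirname, 'tmp');

function rimrafSync(target) {
  // Small recursive remove so refresh() can start from an empty directory.
  if (!fs.existsSync(target)) return;
  for (const entry of fs.readdirSync(target)) {
    const full = path.join(target, entry);
    if (fs.lstatSync(full).isDirectory()) rimrafSync(full);
    else fs.unlinkSync(full);
  }
  fs.rmdirSync(target);
}

module.exports = {
  path: tmpPath,
  refresh() {
    rimrafSync(tmpPath);
    fs.mkdirSync(tmpPath);
  }
};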
diff --git a/test/parallel/test-crypto-deprecated.js b/test/parallel/test-crypto-deprecated.js index 84f25316d49b61..acdd71301fbed0 100644 --- a/test/parallel/test-crypto-deprecated.js +++ b/test/parallel/test-crypto-deprecated.js @@ -14,7 +14,7 @@ common.expectWarning('DeprecationWarning', [ // Accessing the deprecated function is enough to trigger the warning event. // It does not need to be called. So the assert serves the purpose of both -// triggering the warning event and confirming that the deprected function is +// triggering the warning event and confirming that the deprecated function is // mapped to the correct non-deprecated function. assert.strictEqual(crypto.Credentials, tls.SecureContext); assert.strictEqual(crypto.createCredentials, tls.createSecureContext); diff --git a/test/parallel/test-crypto-fips.js b/test/parallel/test-crypto-fips.js index 755c6e20c26b2d..ed2654ec5b639c 100644 --- a/test/parallel/test-crypto-fips.js +++ b/test/parallel/test-crypto-fips.js @@ -91,7 +91,7 @@ testHelper( // to try to call the fips setter, to try to detect this situation, as // that would throw an error: // ("Error: Cannot set FIPS mode in a non-FIPS build."). -// Due to this uncertanty the following tests are skipped when configured +// Due to this uncertainty the following tests are skipped when configured // with --shared-openssl. if (!sharedOpenSSL()) { // OpenSSL config file should be able to turn on FIPS mode diff --git a/test/parallel/test-crypto-hash.js b/test/parallel/test-crypto-hash.js index 786cc8b71131b1..ef540681e2a46b 100644 --- a/test/parallel/test-crypto-hash.js +++ b/test/parallel/test-crypto-hash.js @@ -105,7 +105,8 @@ fileStream.on('close', common.mustCall(function() { 'Test SHA1 of sample.png'); })); -// Issue #2227: unknown digest method should throw an error. +// Issue https://github.com/nodejs/node-v0.x-archive/issues/2227: unknown digest +// method should throw an error. 
assert.throws(function() { crypto.createHash('xyzzy'); }, /Digest method not supported/); diff --git a/test/parallel/test-crypto-random.js b/test/parallel/test-crypto-random.js index d9fa7efd9b6a4a..8608eb04305d80 100644 --- a/test/parallel/test-crypto-random.js +++ b/test/parallel/test-crypto-random.js @@ -260,8 +260,9 @@ const expectedErrorRegexp = /^TypeError: size must be a number >= 0$/; } } -// #5126, "FATAL ERROR: v8::Object::SetIndexedPropertiesToExternalArrayData() -// length exceeds max acceptable value" +// https://github.com/nodejs/node-v0.x-archive/issues/5126, +// "FATAL ERROR: v8::Object::SetIndexedPropertiesToExternalArrayData() length +// exceeds max acceptable value" assert.throws(function() { crypto.randomBytes((-1 >>> 0) + 1); }, /^TypeError: size must be a number >= 0$/); diff --git a/test/parallel/test-crypto-sign-verify.js b/test/parallel/test-crypto-sign-verify.js index 2ecc405b8bb9af..4e6c2ee07837fd 100644 --- a/test/parallel/test-crypto-sign-verify.js +++ b/test/parallel/test-crypto-sign-verify.js @@ -255,11 +255,12 @@ const modSize = 1024; padding: crypto.constants.RSA_PKCS1_PSS_PADDING }); - common.refreshTmpDir(); + const tmpdir = require('../common/tmpdir'); + tmpdir.refresh(); - const sigfile = path.join(common.tmpDir, 's5.sig'); + const sigfile = path.join(tmpdir.path, 's5.sig'); fs.writeFileSync(sigfile, s5); - const msgfile = path.join(common.tmpDir, 's5.msg'); + const msgfile = path.join(tmpdir.path, 's5.msg'); fs.writeFileSync(msgfile, msg); const cmd = diff --git a/test/parallel/test-crypto.js b/test/parallel/test-crypto.js index 3b38fd47ad686e..ecdde91825112a 100644 --- a/test/parallel/test-crypto.js +++ b/test/parallel/test-crypto.js @@ -159,8 +159,8 @@ testImmutability(tls.getCiphers); testImmutability(crypto.getHashes); testImmutability(crypto.getCurves); -// Regression tests for #5725: hex input that's not a power of two should -// throw, not assert in C++ land. +// Regression tests for https://github.com/nodejs/node-v0.x-archive/pull/5725: +// hex input that's not a power of two should throw, not assert in C++ land. 
assert.throws(function() { crypto.createCipher('aes192', 'test').update('0', 'hex'); }, (err) => { diff --git a/test/parallel/test-cwd-enoent-preload.js b/test/parallel/test-cwd-enoent-preload.js index ec9f1fee754d9c..b83ff6ff883a05 100644 --- a/test/parallel/test-cwd-enoent-preload.js +++ b/test/parallel/test-cwd-enoent-preload.js @@ -8,10 +8,11 @@ const assert = require('assert'); const fs = require('fs'); const spawn = require('child_process').spawn; const fixtures = require('../common/fixtures'); +const tmpdir = require('../common/tmpdir'); -const dirname = `${common.tmpDir}/cwd-does-not-exist-${process.pid}`; +const dirname = `${tmpdir.path}/cwd-does-not-exist-${process.pid}`; const abspathFile = fixtures.path('a.js'); -common.refreshTmpDir(); +tmpdir.refresh(); fs.mkdirSync(dirname); process.chdir(dirname); fs.rmdirSync(dirname); diff --git a/test/parallel/test-cwd-enoent-repl.js b/test/parallel/test-cwd-enoent-repl.js index 8f846af9030c08..d42679d8688e06 100644 --- a/test/parallel/test-cwd-enoent-repl.js +++ b/test/parallel/test-cwd-enoent-repl.js @@ -8,8 +8,10 @@ const assert = require('assert'); const fs = require('fs'); const spawn = require('child_process').spawn; -const dirname = `${common.tmpDir}/cwd-does-not-exist-${process.pid}`; -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); + +const dirname = `${tmpdir.path}/cwd-does-not-exist-${process.pid}`; +tmpdir.refresh(); fs.mkdirSync(dirname); process.chdir(dirname); fs.rmdirSync(dirname); diff --git a/test/parallel/test-cwd-enoent.js b/test/parallel/test-cwd-enoent.js index c1b520aedd321d..e5d93f46ce60c0 100644 --- a/test/parallel/test-cwd-enoent.js +++ b/test/parallel/test-cwd-enoent.js @@ -8,8 +8,10 @@ const assert = require('assert'); const fs = require('fs'); const spawn = require('child_process').spawn; -const dirname = `${common.tmpDir}/cwd-does-not-exist-${process.pid}`; -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); + +const dirname = `${tmpdir.path}/cwd-does-not-exist-${process.pid}`; +tmpdir.refresh(); fs.mkdirSync(dirname); process.chdir(dirname); fs.rmdirSync(dirname); diff --git a/test/parallel/test-dgram-ref.js b/test/parallel/test-dgram-ref.js index 1a531db9aa194a..b3b8488297507c 100644 --- a/test/parallel/test-dgram-ref.js +++ b/test/parallel/test-dgram-ref.js @@ -23,7 +23,7 @@ const common = require('../common'); const dgram = require('dgram'); -// should not hang, see #1282 +// should not hang, see https://github.com/nodejs/node-v0.x-archive/issues/1282 dgram.createSocket('udp4'); dgram.createSocket('udp6'); diff --git a/test/parallel/test-dns-regress-7070.js b/test/parallel/test-dns-regress-7070.js index 1082a0ce699671..86a386ba45a806 100644 --- a/test/parallel/test-dns-regress-7070.js +++ b/test/parallel/test-dns-regress-7070.js @@ -24,7 +24,8 @@ require('../common'); const assert = require('assert'); const dns = require('dns'); -// Should not raise assertion error. Issue #7070 +// Should not raise assertion error. 
+// Issue https://github.com/nodejs/node-v0.x-archive/issues/7070 assert.throws(() => dns.resolveNs([]), // bad name /^Error: "name" argument must be a string$/); assert.throws(() => dns.resolveNs(''), // bad callback diff --git a/test/parallel/test-domain-throw-error-then-throw-from-uncaught-exception-handler.js b/test/parallel/test-domain-throw-error-then-throw-from-uncaught-exception-handler.js index 089300bc481c10..a2afebd838f410 100644 --- a/test/parallel/test-domain-throw-error-then-throw-from-uncaught-exception-handler.js +++ b/test/parallel/test-domain-throw-error-then-throw-from-uncaught-exception-handler.js @@ -25,7 +25,7 @@ if (process.argv[2] === 'child') { // is not properly flushed in V8's Isolate::Throw right before the // process aborts due to an uncaught exception, and thus the error // message representing the error that was thrown cannot be read by the - // parent process. So instead of parsing the child process' stdandard + // parent process. So instead of parsing the child process' standard // error, the parent process will check that in the case // --abort-on-uncaught-exception was passed, the process did not exit // with exit code RAN_UNCAUGHT_EXCEPTION_HANDLER_EXIT_CODE. diff --git a/test/parallel/test-eslint-prefer-assert-iferror.js b/test/parallel/test-eslint-prefer-assert-iferror.js index 790207bc30cd43..6ecf92adbef51a 100644 --- a/test/parallel/test-eslint-prefer-assert-iferror.js +++ b/test/parallel/test-eslint-prefer-assert-iferror.js @@ -16,12 +16,18 @@ new RuleTester().run('prefer-assert-iferror', rule, { ], invalid: [ { - code: 'if (err) throw err;', - errors: [{ message: 'Use assert.ifError(err) instead.' }] + code: 'require("assert");\n' + + 'if (err) throw err;', + errors: [{ message: 'Use assert.ifError(err) instead.' }], + output: 'require("assert");\n' + + 'assert.ifError(err);' }, { - code: 'if (error) { throw error; }', - errors: [{ message: 'Use assert.ifError(error) instead.' }] + code: 'require("assert");\n' + + 'if (error) { throw error; }', + errors: [{ message: 'Use assert.ifError(error) instead.' 
}], + output: 'require("assert");\n' + + 'assert.ifError(error);' } ] }); diff --git a/test/parallel/test-eslint-prefer-assert-methods.js b/test/parallel/test-eslint-prefer-assert-methods.js index ea1501ed8401eb..f5cf50b1ad32b3 100644 --- a/test/parallel/test-eslint-prefer-assert-methods.js +++ b/test/parallel/test-eslint-prefer-assert-methods.js @@ -9,31 +9,46 @@ const rule = require('../../tools/eslint-rules/prefer-assert-methods'); new RuleTester().run('prefer-assert-methods', rule, { valid: [ - 'assert.strictEqual(foo, bar)', - 'assert(foo === bar && baz)' + 'assert.strictEqual(foo, bar);', + 'assert(foo === bar && baz);', + 'assert.notStrictEqual(foo, bar);', + 'assert(foo !== bar && baz);', + 'assert.equal(foo, bar);', + 'assert(foo == bar && baz);', + 'assert.notEqual(foo, bar);', + 'assert(foo != bar && baz);', + 'assert.ok(foo);', + 'assert.ok(foo != bar);', + 'assert.ok(foo === bar && baz);' ], invalid: [ { - code: 'assert(foo == bar)', - errors: [{ message: "'assert.equal' should be used instead of '=='" }] + code: 'assert(foo == bar);', + errors: [{ + message: "'assert.equal' should be used instead of '=='" + }], + output: 'assert.equal(foo, bar);' }, { - code: 'assert(foo === bar)', + code: 'assert(foo === bar);', errors: [{ message: "'assert.strictEqual' should be used instead of '==='" - }] + }], + output: 'assert.strictEqual(foo, bar);' }, { - code: 'assert(foo != bar)', + code: 'assert(foo != bar);', errors: [{ message: "'assert.notEqual' should be used instead of '!='" - }] + }], + output: 'assert.notEqual(foo, bar);' }, { - code: 'assert(foo !== bar)', + code: 'assert(foo !== bar);', errors: [{ message: "'assert.notStrictEqual' should be used instead of '!=='" - }] - }, + }], + output: 'assert.notStrictEqual(foo, bar);' + } ] }); diff --git a/test/parallel/test-event-emitter-remove-listeners.js b/test/parallel/test-event-emitter-remove-listeners.js index d1c01e0bc3cfae..be7f68bcaecf0d 100644 --- a/test/parallel/test-event-emitter-remove-listeners.js +++ b/test/parallel/test-event-emitter-remove-listeners.js @@ -119,10 +119,10 @@ function listener2() {} // listener4 will still be called although it is removed by listener 3. ee.emit('hello'); - // This is so because the interal listener array at time of emit + // This is so because the internal listener array at time of emit // was [listener3,listener4] - // Interal listener array [listener3] + // Internal listener array [listener3] ee.emit('hello'); } diff --git a/test/parallel/test-file-write-stream.js b/test/parallel/test-file-write-stream.js index ed32a7791e03f0..fb584771aad089 100644 --- a/test/parallel/test-file-write-stream.js +++ b/test/parallel/test-file-write-stream.js @@ -20,13 +20,14 @@ // USE OR OTHER DEALINGS IN THE SOFTWARE. 'use strict'; -const common = require('../common'); +require('../common'); const assert = require('assert'); const path = require('path'); const fs = require('fs'); -const fn = path.join(common.tmpDir, 'write.txt'); -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); +const fn = path.join(tmpdir.path, 'write.txt'); +tmpdir.refresh(); const file = fs.createWriteStream(fn, { highWaterMark: 10 }); diff --git a/test/parallel/test-file-write-stream2.js b/test/parallel/test-file-write-stream2.js index 1f838f0869260a..2db06640e18dba 100644 --- a/test/parallel/test-file-write-stream2.js +++ b/test/parallel/test-file-write-stream2.js @@ -20,14 +20,16 @@ // USE OR OTHER DEALINGS IN THE SOFTWARE. 
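// Illustrative sketch, not part of this patch: the eslint-rule tests above add
// `output` fields, which tell ESLint's RuleTester to apply the rule's autofix
// and compare the rewritten source. Below is a self-contained example of that
// pattern using a throwaway fixable rule, not one of the repository's rules.
'use strict';
const { RuleTester } = require('eslint');

// Minimal fixable rule: remove `debugger;` statements.
const noDebuggerSketch = {
  meta: { fixable: 'code' },
  create(context) {
    return {
      DebuggerStatement(node) {
        context.report({
          node,
          message: 'Remove debugger statements.',
          fix: (fixer) => fixer.remove(node)
        });
      }
    };
  }
};

new RuleTester().run('no-debugger-sketch', noDebuggerSketch, {
  valid: ['foo();'],
  invalid: [{
    code: 'foo(); debugger;',
    errors: [{ message: 'Remove debugger statements.' }],
    // RuleTester runs the fixer and asserts the fixed source equals this.
    output: 'foo(); '
  }]
});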
'use strict'; -const common = require('../common'); +require('../common'); const assert = require('assert'); const path = require('path'); const fs = require('fs'); +const tmpdir = require('../common/tmpdir'); -const filepath = path.join(common.tmpDir, 'write.txt'); + +const filepath = path.join(tmpdir.path, 'write.txt'); const EXPECTED = '012345678910'; @@ -58,7 +60,7 @@ function removeTestFile() { } -common.refreshTmpDir(); +tmpdir.refresh(); // drain at 0, return false at 10. const file = fs.createWriteStream(filepath, { diff --git a/test/parallel/test-file-write-stream3.js b/test/parallel/test-file-write-stream3.js index 1d86d3d5237f56..979a72ff4c2ddd 100644 --- a/test/parallel/test-file-write-stream3.js +++ b/test/parallel/test-file-write-stream3.js @@ -25,8 +25,10 @@ const assert = require('assert'); const path = require('path'); const fs = require('fs'); +const tmpdir = require('../common/tmpdir'); -const filepath = path.join(common.tmpDir, 'write_pos.txt'); + +const filepath = path.join(tmpdir.path, 'write_pos.txt'); const cb_expected = 'write open close write open close write open close '; @@ -51,7 +53,7 @@ process.on('exit', function() { }); -common.refreshTmpDir(); +tmpdir.refresh(); function run_test_1() { diff --git a/test/parallel/test-fs-access.js b/test/parallel/test-fs-access.js index f378824cbcd950..52bc032f4a8fde 100644 --- a/test/parallel/test-fs-access.js +++ b/test/parallel/test-fs-access.js @@ -3,16 +3,18 @@ const common = require('../common'); const assert = require('assert'); const fs = require('fs'); const path = require('path'); -const doesNotExist = path.join(common.tmpDir, '__this_should_not_exist'); -const readOnlyFile = path.join(common.tmpDir, 'read_only_file'); -const readWriteFile = path.join(common.tmpDir, 'read_write_file'); + +const tmpdir = require('../common/tmpdir'); +const doesNotExist = path.join(tmpdir.path, '__this_should_not_exist'); +const readOnlyFile = path.join(tmpdir.path, 'read_only_file'); +const readWriteFile = path.join(tmpdir.path, 'read_write_file'); function createFileWithPerms(file, mode) { fs.writeFileSync(file, ''); fs.chmodSync(file, mode); } -common.refreshTmpDir(); +tmpdir.refresh(); createFileWithPerms(readOnlyFile, 0o444); createFileWithPerms(readWriteFile, 0o666); @@ -28,7 +30,7 @@ createFileWithPerms(readWriteFile, 0o666); * The change of user id is done after creating the fixtures files for the same * reason: the test may be run as the superuser within a directory in which * only the superuser can create files, and thus it may need superuser - * priviledges to create them. + * privileges to create them. 
* * There's not really any point in resetting the process' user id to 0 after * changing it to 'nobody', since in the case that the test runs without diff --git a/test/parallel/test-fs-append-file-sync.js b/test/parallel/test-fs-append-file-sync.js index 31e95c2e368656..b836d81bd5985c 100644 --- a/test/parallel/test-fs-append-file-sync.js +++ b/test/parallel/test-fs-append-file-sync.js @@ -36,10 +36,11 @@ const data = '南越国是前203年至前111年存在于岭南地区的一个国 '历经五代君主。南越国是岭南地区的第一个有记载的政权国家,采用封建制和郡县制并存的制度,' + '它的建立保证了秦末乱世岭南地区社会秩序的稳定,有效的改善了岭南地区落后的政治、##济现状。\n'; -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); // test that empty file will be created and have content added -const filename = join(common.tmpDir, 'append-sync.txt'); +const filename = join(tmpdir.path, 'append-sync.txt'); fs.appendFileSync(filename, data); @@ -48,7 +49,7 @@ const fileData = fs.readFileSync(filename); assert.strictEqual(Buffer.byteLength(data), fileData.length); // test that appends data to a non empty file -const filename2 = join(common.tmpDir, 'append-sync2.txt'); +const filename2 = join(tmpdir.path, 'append-sync2.txt'); fs.writeFileSync(filename2, currentFileData); fs.appendFileSync(filename2, data); @@ -59,7 +60,7 @@ assert.strictEqual(Buffer.byteLength(data) + currentFileData.length, fileData2.length); // test that appendFileSync accepts buffers -const filename3 = join(common.tmpDir, 'append-sync3.txt'); +const filename3 = join(tmpdir.path, 'append-sync3.txt'); fs.writeFileSync(filename3, currentFileData); const buf = Buffer.from(data, 'utf8'); @@ -70,7 +71,7 @@ const fileData3 = fs.readFileSync(filename3); assert.strictEqual(buf.length + currentFileData.length, fileData3.length); // test that appendFile accepts numbers. -const filename4 = join(common.tmpDir, 'append-sync4.txt'); +const filename4 = join(tmpdir.path, 'append-sync4.txt'); fs.writeFileSync(filename4, currentFileData, { mode: m }); fs.appendFileSync(filename4, num, { mode: m }); @@ -87,7 +88,7 @@ assert.strictEqual(Buffer.byteLength(String(num)) + currentFileData.length, fileData4.length); // test that appendFile accepts file descriptors -const filename5 = join(common.tmpDir, 'append-sync5.txt'); +const filename5 = join(tmpdir.path, 'append-sync5.txt'); fs.writeFileSync(filename5, currentFileData); const filename5fd = fs.openSync(filename5, 'a+', 0o600); diff --git a/test/parallel/test-fs-append-file.js b/test/parallel/test-fs-append-file.js index e3e4c273d3292d..8e9a0619b0a4fb 100644 --- a/test/parallel/test-fs-append-file.js +++ b/test/parallel/test-fs-append-file.js @@ -25,7 +25,9 @@ const assert = require('assert'); const fs = require('fs'); const join = require('path').join; -const filename = join(common.tmpDir, 'append.txt'); +const tmpdir = require('../common/tmpdir'); + +const filename = join(tmpdir.path, 'append.txt'); const currentFileData = 'ABCD'; @@ -40,7 +42,7 @@ const s = '南越国是前203年至前111年存在于岭南地区的一个国家 let ncallbacks = 0; -common.refreshTmpDir(); +tmpdir.refresh(); // test that empty file will be created and have content added fs.appendFile(filename, s, function(e) { @@ -56,7 +58,7 @@ fs.appendFile(filename, s, function(e) { }); // test that appends data to a non empty file -const filename2 = join(common.tmpDir, 'append2.txt'); +const filename2 = join(tmpdir.path, 'append2.txt'); fs.writeFileSync(filename2, currentFileData); fs.appendFile(filename2, s, function(e) { @@ -73,7 +75,7 @@ fs.appendFile(filename2, s, function(e) { }); // test that appendFile accepts buffers -const filename3 = join(common.tmpDir, 
'append3.txt'); +const filename3 = join(tmpdir.path, 'append3.txt'); fs.writeFileSync(filename3, currentFileData); const buf = Buffer.from(s, 'utf8'); @@ -91,7 +93,7 @@ fs.appendFile(filename3, buf, function(e) { }); // test that appendFile accepts numbers. -const filename4 = join(common.tmpDir, 'append4.txt'); +const filename4 = join(tmpdir.path, 'append4.txt'); fs.writeFileSync(filename4, currentFileData); const m = 0o600; @@ -115,7 +117,7 @@ fs.appendFile(filename4, n, { mode: m }, function(e) { }); // test that appendFile accepts file descriptors -const filename5 = join(common.tmpDir, 'append5.txt'); +const filename5 = join(tmpdir.path, 'append5.txt'); fs.writeFileSync(filename5, currentFileData); fs.open(filename5, 'a+', function(e, fd) { @@ -146,7 +148,7 @@ fs.open(filename5, 'a+', function(e, fd) { // test that a missing callback emits a warning, even if the last argument is a // function. -const filename6 = join(common.tmpDir, 'append6.txt'); +const filename6 = join(tmpdir.path, 'append6.txt'); const warn = 'Calling an asynchronous function without callback is deprecated.'; common.expectWarning('DeprecationWarning', warn); fs.appendFile(filename6, console.log); diff --git a/test/parallel/test-fs-buffer.js b/test/parallel/test-fs-buffer.js index 1cbead434469e9..84234e8a7a0bd7 100644 --- a/test/parallel/test-fs-buffer.js +++ b/test/parallel/test-fs-buffer.js @@ -6,16 +6,17 @@ const assert = require('assert'); const fs = require('fs'); const path = require('path'); -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); assert.doesNotThrow(() => { - fs.access(Buffer.from(common.tmpDir), common.mustCall((err) => { + fs.access(Buffer.from(tmpdir.path), common.mustCall((err) => { assert.ifError(err); })); }); assert.doesNotThrow(() => { - const buf = Buffer.from(path.join(common.tmpDir, 'a.txt')); + const buf = Buffer.from(path.join(tmpdir.path, 'a.txt')); fs.open(buf, 'w+', common.mustCall((err, fd) => { assert.ifError(err); assert(fd); diff --git a/test/parallel/test-fs-buffertype-writesync.js b/test/parallel/test-fs-buffertype-writesync.js index 73a6f211893aaf..d5257d214bdb81 100644 --- a/test/parallel/test-fs-buffertype-writesync.js +++ b/test/parallel/test-fs-buffertype-writesync.js @@ -1,5 +1,5 @@ 'use strict'; -const common = require('../common'); +require('../common'); // This test ensures that writeSync does support inputs which // are then correctly converted into string buffers. @@ -8,10 +8,12 @@ const assert = require('assert'); const fs = require('fs'); const path = require('path'); -const filePath = path.join(common.tmpDir, 'test_buffer_type'); +const tmpdir = require('../common/tmpdir'); + +const filePath = path.join(tmpdir.path, 'test_buffer_type'); const v = [true, false, 0, 1, Infinity, () => {}, {}, [], undefined, null]; -common.refreshTmpDir(); +tmpdir.refresh(); v.forEach((value) => { const fd = fs.openSync(filePath, 'w'); diff --git a/test/parallel/test-fs-chmod.js b/test/parallel/test-fs-chmod.js index 7d4b7a10dbd9b4..98190d1f75ee01 100644 --- a/test/parallel/test-fs-chmod.js +++ b/test/parallel/test-fs-chmod.js @@ -71,10 +71,11 @@ if (common.isWindows) { mode_sync = 0o644; } -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); -const file1 = path.join(common.tmpDir, 'a.js'); -const file2 = path.join(common.tmpDir, 'a1.js'); +const file1 = path.join(tmpdir.path, 'a.js'); +const file2 = path.join(tmpdir.path, 'a1.js'); // Create file1. 
fs.closeSync(fs.openSync(file1, 'w')); @@ -121,7 +122,7 @@ fs.open(file2, 'w', common.mustCall((err, fd) => { // lchmod if (fs.lchmod) { - const link = path.join(common.tmpDir, 'symbolic-link'); + const link = path.join(tmpdir.path, 'symbolic-link'); fs.symlinkSync(file2, link); diff --git a/test/parallel/test-fs-copyfile.js b/test/parallel/test-fs-copyfile.js index 7487633764ae9a..e8315ef9b25f16 100644 --- a/test/parallel/test-fs-copyfile.js +++ b/test/parallel/test-fs-copyfile.js @@ -1,11 +1,12 @@ 'use strict'; const common = require('../common'); const fixtures = require('../common/fixtures'); +const tmpdir = require('../common/tmpdir'); const assert = require('assert'); const fs = require('fs'); const path = require('path'); const src = fixtures.path('a.js'); -const dest = path.join(common.tmpDir, 'copyfile.out'); +const dest = path.join(tmpdir.path, 'copyfile.out'); const { COPYFILE_EXCL, UV_FS_COPYFILE_EXCL } = fs.constants; function verify(src, dest) { @@ -19,7 +20,7 @@ function verify(src, dest) { assert.strictEqual(srcStat.size, destStat.size); } -common.refreshTmpDir(); +tmpdir.refresh(); // Verify that flags are defined. assert.strictEqual(typeof COPYFILE_EXCL, 'number'); diff --git a/test/parallel/test-fs-fsync.js b/test/parallel/test-fs-fsync.js index c55056e501f648..4040d2d550764e 100644 --- a/test/parallel/test-fs-fsync.js +++ b/test/parallel/test-fs-fsync.js @@ -23,15 +23,16 @@ const common = require('../common'); const assert = require('assert'); const fixtures = require('../common/fixtures'); +const tmpdir = require('../common/tmpdir'); const fs = require('fs'); const path = require('path'); const fileFixture = fixtures.path('a.js'); -const fileTemp = path.join(common.tmpDir, 'a.js'); +const fileTemp = path.join(tmpdir.path, 'a.js'); // Copy fixtures to temp. -common.refreshTmpDir(); +tmpdir.refresh(); fs.copyFileSync(fileFixture, fileTemp); fs.open(fileTemp, 'a', 0o777, common.mustCall(function(err, fd) { diff --git a/test/parallel/test-fs-link.js b/test/parallel/test-fs-link.js index 525392aa2be01c..8ee9b29f1ee9da 100644 --- a/test/parallel/test-fs-link.js +++ b/test/parallel/test-fs-link.js @@ -4,11 +4,12 @@ const assert = require('assert'); const path = require('path'); const fs = require('fs'); -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); // test creating and reading hard link -const srcPath = path.join(common.tmpDir, 'hardlink-target.txt'); -const dstPath = path.join(common.tmpDir, 'link1.js'); +const srcPath = path.join(tmpdir.path, 'hardlink-target.txt'); +const dstPath = path.join(tmpdir.path, 'link1.js'); fs.writeFileSync(srcPath, 'hello world'); function callback(err) { diff --git a/test/parallel/test-fs-long-path.js b/test/parallel/test-fs-long-path.js index ae60b16f1a3f97..74f63868b81054 100644 --- a/test/parallel/test-fs-long-path.js +++ b/test/parallel/test-fs-long-path.js @@ -28,12 +28,14 @@ const fs = require('fs'); const path = require('path'); const assert = require('assert'); +const tmpdir = require('../common/tmpdir'); + // make a path that will be at least 260 chars long. 
-const fileNameLen = Math.max(260 - common.tmpDir.length - 1, 1); -const fileName = path.join(common.tmpDir, 'x'.repeat(fileNameLen)); +const fileNameLen = Math.max(260 - tmpdir.path.length - 1, 1); +const fileName = path.join(tmpdir.path, 'x'.repeat(fileNameLen)); const fullPath = path.resolve(fileName); -common.refreshTmpDir(); +tmpdir.refresh(); console.log({ filenameLength: fileName.length, diff --git a/test/parallel/test-fs-make-callback.js b/test/parallel/test-fs-make-callback.js index 8a19e1cc9601f4..ad2f21098b9609 100644 --- a/test/parallel/test-fs-make-callback.js +++ b/test/parallel/test-fs-make-callback.js @@ -8,12 +8,13 @@ const callbackThrowValues = [null, true, false, 0, 1, 'foo', /foo/, [], {}]; const { sep } = require('path'); const warn = 'Calling an asynchronous function without callback is deprecated.'; -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); function testMakeCallback(cb) { return function() { // fs.mkdtemp() calls makeCallback() on its third argument - fs.mkdtemp(`${common.tmpDir}${sep}`, {}, cb); + fs.mkdtemp(`${tmpdir.path}${sep}`, {}, cb); }; } diff --git a/test/parallel/test-fs-mkdir-rmdir.js b/test/parallel/test-fs-mkdir-rmdir.js index 8c22331e85e3cd..865a5dba951555 100644 --- a/test/parallel/test-fs-mkdir-rmdir.js +++ b/test/parallel/test-fs-mkdir-rmdir.js @@ -4,9 +4,10 @@ const common = require('../common'); const assert = require('assert'); const path = require('path'); const fs = require('fs'); -const d = path.join(common.tmpDir, 'dir'); +const tmpdir = require('../common/tmpdir'); +const d = path.join(tmpdir.path, 'dir'); -common.refreshTmpDir(); +tmpdir.refresh(); // Make sure the directory does not exist assert(!common.fileExists(d)); diff --git a/test/parallel/test-fs-mkdir.js b/test/parallel/test-fs-mkdir.js index 54585a3f12a64b..6b4f3c921d670b 100644 --- a/test/parallel/test-fs-mkdir.js +++ b/test/parallel/test-fs-mkdir.js @@ -24,10 +24,11 @@ const common = require('../common'); const assert = require('assert'); const fs = require('fs'); -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); { - const pathname = `${common.tmpDir}/test1`; + const pathname = `${tmpdir.path}/test1`; fs.mkdir(pathname, common.mustCall(function(err) { assert.strictEqual(err, null); @@ -36,7 +37,7 @@ common.refreshTmpDir(); } { - const pathname = `${common.tmpDir}/test2`; + const pathname = `${tmpdir.path}/test2`; fs.mkdir(pathname, 0o777, common.mustCall(function(err) { assert.strictEqual(err, null); @@ -45,7 +46,7 @@ common.refreshTmpDir(); } { - const pathname = `${common.tmpDir}/test3`; + const pathname = `${tmpdir.path}/test3`; fs.mkdirSync(pathname); diff --git a/test/parallel/test-fs-mkdtemp.js b/test/parallel/test-fs-mkdtemp.js index 5ce340afa5ad0f..c1f62fbf22780b 100644 --- a/test/parallel/test-fs-mkdtemp.js +++ b/test/parallel/test-fs-mkdtemp.js @@ -5,14 +5,15 @@ const assert = require('assert'); const fs = require('fs'); const path = require('path'); -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); -const tmpFolder = fs.mkdtempSync(path.join(common.tmpDir, 'foo.')); +const tmpFolder = fs.mkdtempSync(path.join(tmpdir.path, 'foo.')); assert.strictEqual(path.basename(tmpFolder).length, 'foo.XXXXXX'.length); assert(common.fileExists(tmpFolder)); -const utf8 = fs.mkdtempSync(path.join(common.tmpDir, '\u0222abc.')); +const utf8 = fs.mkdtempSync(path.join(tmpdir.path, '\u0222abc.')); assert.strictEqual(Buffer.byteLength(path.basename(utf8)), 
Buffer.byteLength('\u0222abc.XXXXXX')); assert(common.fileExists(utf8)); @@ -23,13 +24,13 @@ function handler(err, folder) { assert.strictEqual(this, null); } -fs.mkdtemp(path.join(common.tmpDir, 'bar.'), common.mustCall(handler)); +fs.mkdtemp(path.join(tmpdir.path, 'bar.'), common.mustCall(handler)); // Same test as above, but making sure that passing an options object doesn't // affect the way the callback function is handled. -fs.mkdtemp(path.join(common.tmpDir, 'bar.'), {}, common.mustCall(handler)); +fs.mkdtemp(path.join(tmpdir.path, 'bar.'), {}, common.mustCall(handler)); // Making sure that not passing a callback doesn't crash, as a default function // is passed internally. -assert.doesNotThrow(() => fs.mkdtemp(path.join(common.tmpDir, 'bar-'))); -assert.doesNotThrow(() => fs.mkdtemp(path.join(common.tmpDir, 'bar-'), {})); +assert.doesNotThrow(() => fs.mkdtemp(path.join(tmpdir.path, 'bar-'))); +assert.doesNotThrow(() => fs.mkdtemp(path.join(tmpdir.path, 'bar-'), {})); diff --git a/test/parallel/test-fs-non-number-arguments-throw.js b/test/parallel/test-fs-non-number-arguments-throw.js index 5e4deb12a8734b..3bc98a585c5b74 100644 --- a/test/parallel/test-fs-non-number-arguments-throw.js +++ b/test/parallel/test-fs-non-number-arguments-throw.js @@ -4,9 +4,10 @@ const common = require('../common'); const assert = require('assert'); const fs = require('fs'); const path = require('path'); -const tempFile = path.join(common.tmpDir, 'fs-non-number-arguments-throw'); +const tmpdir = require('../common/tmpdir'); +const tempFile = path.join(tmpdir.path, 'fs-non-number-arguments-throw'); -common.refreshTmpDir(); +tmpdir.refresh(); fs.writeFileSync(tempFile, 'abc\ndef'); // a sanity check when using numbers instead of strings diff --git a/test/parallel/test-fs-open-flags.js b/test/parallel/test-fs-open-flags.js index 0aa467c6e96f96..adc1f899bfb223 100644 --- a/test/parallel/test-fs-open-flags.js +++ b/test/parallel/test-fs-open-flags.js @@ -84,8 +84,9 @@ assert.throws( ); if (common.isLinux || common.isOSX) { - common.refreshTmpDir(); - const file = path.join(common.tmpDir, 'a.js'); + const tmpdir = require('../common/tmpdir'); + tmpdir.refresh(); + const file = path.join(tmpdir.path, 'a.js'); fs.copyFileSync(fixtures.path('a.js'), file); fs.open(file, O_DSYNC, common.mustCall(assert.ifError)); } diff --git a/test/parallel/test-fs-open-numeric-flags.js b/test/parallel/test-fs-open-numeric-flags.js index 1bd9a043927039..0e5ab6997e3342 100644 --- a/test/parallel/test-fs-open-numeric-flags.js +++ b/test/parallel/test-fs-open-numeric-flags.js @@ -1,14 +1,15 @@ 'use strict'; -const common = require('../common'); +require('../common'); const assert = require('assert'); const fs = require('fs'); const path = require('path'); -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); // O_WRONLY without O_CREAT shall fail with ENOENT -const pathNE = path.join(common.tmpDir, 'file-should-not-exist'); +const pathNE = path.join(tmpdir.path, 'file-should-not-exist'); assert.throws( () => fs.openSync(pathNE, fs.constants.O_WRONLY), (e) => e.code === 'ENOENT' diff --git a/test/parallel/test-fs-options-immutable.js b/test/parallel/test-fs-options-immutable.js index 9d88cf0fa42697..ca5079b07dab3f 100644 --- a/test/parallel/test-fs-options-immutable.js +++ b/test/parallel/test-fs-options-immutable.js @@ -14,7 +14,8 @@ const path = require('path'); const errHandler = (e) => assert.ifError(e); const options = Object.freeze({}); -common.refreshTmpDir(); +const tmpdir = 
require('../common/tmpdir'); +tmpdir.refresh(); { assert.doesNotThrow(() => @@ -31,8 +32,8 @@ common.refreshTmpDir(); } if (common.canCreateSymLink()) { - const sourceFile = path.resolve(common.tmpDir, 'test-readlink'); - const linkFile = path.resolve(common.tmpDir, 'test-readlink-link'); + const sourceFile = path.resolve(tmpdir.path, 'test-readlink'); + const linkFile = path.resolve(tmpdir.path, 'test-readlink-link'); fs.writeFileSync(sourceFile, ''); fs.symlinkSync(sourceFile, linkFile); @@ -44,7 +45,7 @@ if (common.canCreateSymLink()) { } { - const fileName = path.resolve(common.tmpDir, 'writeFile'); + const fileName = path.resolve(tmpdir.path, 'writeFile'); assert.doesNotThrow(() => fs.writeFileSync(fileName, 'ABCD', options)); assert.doesNotThrow(() => fs.writeFile(fileName, 'ABCD', options, common.mustCall(errHandler)) @@ -52,7 +53,7 @@ if (common.canCreateSymLink()) { } { - const fileName = path.resolve(common.tmpDir, 'appendFile'); + const fileName = path.resolve(tmpdir.path, 'appendFile'); assert.doesNotThrow(() => fs.appendFileSync(fileName, 'ABCD', options)); assert.doesNotThrow(() => fs.appendFile(fileName, 'ABCD', options, common.mustCall(errHandler)) @@ -82,7 +83,7 @@ if (common.canCreateSymLink()) { } { - const tempFileName = path.resolve(common.tmpDir, 'mkdtemp-'); + const tempFileName = path.resolve(tmpdir.path, 'mkdtemp-'); assert.doesNotThrow(() => fs.mkdtempSync(tempFileName, options)); assert.doesNotThrow(() => fs.mkdtemp(tempFileName, options, common.mustCall(errHandler)) @@ -90,7 +91,7 @@ if (common.canCreateSymLink()) { } { - const fileName = path.resolve(common.tmpDir, 'streams'); + const fileName = path.resolve(tmpdir.path, 'streams'); assert.doesNotThrow(() => { fs.WriteStream(fileName, options).once('open', common.mustCall(() => { assert.doesNotThrow(() => fs.ReadStream(fileName, options)); diff --git a/test/parallel/test-fs-promisified.js b/test/parallel/test-fs-promisified.js index ac6e22f9690821..13cf5e0e0f45f3 100644 --- a/test/parallel/test-fs-promisified.js +++ b/test/parallel/test-fs-promisified.js @@ -20,9 +20,10 @@ const exists = promisify(fs.exists); })); } -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); { - const filename = path.join(common.tmpDir, 'write-promise.txt'); + const filename = path.join(tmpdir.path, 'write-promise.txt'); const fd = fs.openSync(filename, 'w'); write(fd, Buffer.from('foobar')).then(common.mustCall((obj) => { assert.strictEqual(typeof obj.bytesWritten, 'number'); diff --git a/test/parallel/test-fs-read-stream-fd.js b/test/parallel/test-fs-read-stream-fd.js index c5ee6c05ef1e5a..7d4b264002b9f3 100644 --- a/test/parallel/test-fs-read-stream-fd.js +++ b/test/parallel/test-fs-read-stream-fd.js @@ -20,15 +20,16 @@ // USE OR OTHER DEALINGS IN THE SOFTWARE. 
'use strict'; -const common = require('../common'); +require('../common'); const fs = require('fs'); const assert = require('assert'); const path = require('path'); -const file = path.join(common.tmpDir, '/read_stream_fd_test.txt'); +const tmpdir = require('../common/tmpdir'); +const file = path.join(tmpdir.path, '/read_stream_fd_test.txt'); const input = 'hello world'; let output = ''; -common.refreshTmpDir(); +tmpdir.refresh(); fs.writeFileSync(file, input); const fd = fs.openSync(file, 'r'); diff --git a/test/parallel/test-fs-read-stream.js b/test/parallel/test-fs-read-stream.js index 6748c68b52596c..2600f74b5452cb 100644 --- a/test/parallel/test-fs-read-stream.js +++ b/test/parallel/test-fs-read-stream.js @@ -22,6 +22,7 @@ 'use strict'; const common = require('../common'); +const child_process = require('child_process'); const assert = require('assert'); const fs = require('fs'); const fixtures = require('../common/fixtures'); @@ -171,6 +172,32 @@ assert.throws(function() { })); } +if (!common.isWindows) { + // Verify that end works when start is not specified, and we do not try to + // use positioned reads. This makes sure that this keeps working for + // non-seekable file descriptors. + const tmpdir = require('../common/tmpdir'); + tmpdir.refresh(); + const filename = `${tmpdir.path}/foo.pipe`; + const mkfifoResult = child_process.spawnSync('mkfifo', [filename]); + if (!mkfifoResult.error) { + child_process.exec(`echo "xyz foobar" > '${filename}'`); + const stream = new fs.createReadStream(filename, { end: 1 }); + stream.data = ''; + + stream.on('data', function(chunk) { + stream.data += chunk; + }); + + stream.on('end', common.mustCall(function() { + assert.strictEqual('xy', stream.data); + fs.unlinkSync(filename); + })); + } else { + common.printSkipMessage('mkfifo not available'); + } +} + { // pause and then resume immediately. 
const pauseRes = fs.createReadStream(rangeFile); diff --git a/test/parallel/test-fs-readdir-stack-overflow.js b/test/parallel/test-fs-readdir-stack-overflow.js new file mode 100644 index 00000000000000..b7dea52cc37ec5 --- /dev/null +++ b/test/parallel/test-fs-readdir-stack-overflow.js @@ -0,0 +1,18 @@ +'use strict'; + +const common = require('../common'); + +const fs = require('fs'); + +function recurse() { + fs.readdirSync('.'); + recurse(); +} + +common.expectsError( + () => recurse(), + { + type: RangeError, + message: 'Maximum call stack size exceeded' + } +); diff --git a/test/parallel/test-fs-readdir-ucs2.js b/test/parallel/test-fs-readdir-ucs2.js index debcfb7750becd..b17dc8d7292664 100644 --- a/test/parallel/test-fs-readdir-ucs2.js +++ b/test/parallel/test-fs-readdir-ucs2.js @@ -8,9 +8,10 @@ const path = require('path'); const fs = require('fs'); const assert = require('assert'); -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); const filename = '\uD83D\uDC04'; -const root = Buffer.from(`${common.tmpDir}${path.sep}`); +const root = Buffer.from(`${tmpdir.path}${path.sep}`); const filebuff = Buffer.from(filename, 'ucs2'); const fullpath = Buffer.concat([root, filebuff]); @@ -22,7 +23,7 @@ try { throw e; } -fs.readdir(common.tmpDir, 'ucs2', common.mustCall((err, list) => { +fs.readdir(tmpdir.path, 'ucs2', common.mustCall((err, list) => { assert.ifError(err); assert.strictEqual(1, list.length); const fn = list[0]; diff --git a/test/parallel/test-fs-readdir.js b/test/parallel/test-fs-readdir.js index a5c7ebfe688c41..cc30007d70a78e 100644 --- a/test/parallel/test-fs-readdir.js +++ b/test/parallel/test-fs-readdir.js @@ -4,11 +4,13 @@ const common = require('../common'); const assert = require('assert'); const fs = require('fs'); -const readdirDir = common.tmpDir; +const tmpdir = require('../common/tmpdir'); + +const readdirDir = tmpdir.path; const files = ['empty', 'files', 'for', 'just', 'testing']; // Make sure tmp directory is clean -common.refreshTmpDir(); +tmpdir.refresh(); // Create the necessary files files.forEach(function(currentFile) { diff --git a/test/parallel/test-fs-readfile-pipe-large.js b/test/parallel/test-fs-readfile-pipe-large.js index 17831f81f6df83..740a3876a2d76c 100644 --- a/test/parallel/test-fs-readfile-pipe-large.js +++ b/test/parallel/test-fs-readfile-pipe-large.js @@ -18,9 +18,11 @@ if (process.argv[2] === 'child') { return; } -const filename = path.join(common.tmpDir, '/readfile_pipe_large_test.txt'); +const tmpdir = require('../common/tmpdir'); + +const filename = path.join(tmpdir.path, '/readfile_pipe_large_test.txt'); const dataExpected = 'a'.repeat(999999); -common.refreshTmpDir(); +tmpdir.refresh(); fs.writeFileSync(filename, dataExpected); const exec = require('child_process').exec; diff --git a/test/parallel/test-fs-readfile-unlink.js b/test/parallel/test-fs-readfile-unlink.js index 9ec2e849bee19f..1ed6fefb5ccfc7 100644 --- a/test/parallel/test-fs-readfile-unlink.js +++ b/test/parallel/test-fs-readfile-unlink.js @@ -20,7 +20,7 @@ // USE OR OTHER DEALINGS IN THE SOFTWARE. 'use strict'; -const common = require('../common'); +require('../common'); // Test that unlink succeeds immediately after readFile completes. 
@@ -28,10 +28,12 @@ const assert = require('assert'); const fs = require('fs'); const path = require('path'); -const fileName = path.resolve(common.tmpDir, 'test.bin'); +const tmpdir = require('../common/tmpdir'); + +const fileName = path.resolve(tmpdir.path, 'test.bin'); const buf = Buffer.alloc(512 * 1024, 42); -common.refreshTmpDir(); +tmpdir.refresh(); fs.writeFileSync(fileName, buf); diff --git a/test/parallel/test-fs-readfilesync-pipe-large.js b/test/parallel/test-fs-readfilesync-pipe-large.js index f9dea90d104fad..18a06b1ba117db 100644 --- a/test/parallel/test-fs-readfilesync-pipe-large.js +++ b/test/parallel/test-fs-readfilesync-pipe-large.js @@ -15,9 +15,11 @@ if (process.argv[2] === 'child') { return; } -const filename = path.join(common.tmpDir, '/readfilesync_pipe_large_test.txt'); +const tmpdir = require('../common/tmpdir'); + +const filename = path.join(tmpdir.path, '/readfilesync_pipe_large_test.txt'); const dataExpected = 'a'.repeat(999999); -common.refreshTmpDir(); +tmpdir.refresh(); fs.writeFileSync(filename, dataExpected); const exec = require('child_process').exec; diff --git a/test/parallel/test-fs-realpath.js b/test/parallel/test-fs-realpath.js index 1efd698fe20f3c..cdd196f6bbe399 100644 --- a/test/parallel/test-fs-realpath.js +++ b/test/parallel/test-fs-realpath.js @@ -22,6 +22,7 @@ 'use strict'; const common = require('../common'); const fixtures = require('../common/fixtures'); +const tmpdir = require('../common/tmpdir'); const assert = require('assert'); const fs = require('fs'); @@ -31,9 +32,9 @@ let async_completed = 0; let async_expected = 0; const unlink = []; let skipSymlinks = false; -const tmpDir = common.tmpDir; +const tmpDir = tmpdir.path; -common.refreshTmpDir(); +tmpdir.refresh(); let root = '/'; let assertEqualPath = assert.strictEqual; @@ -400,6 +401,8 @@ function test_up_multiple(cb) { cleanup(); } setup(); + const tmpdir = require('../common/tmpdir'); + tmpdir.refresh(); fs.mkdirSync(tmp('a'), 0o755); fs.mkdirSync(tmp('a/b'), 0o755); fs.symlinkSync('..', tmp('a/d'), 'dir'); diff --git a/test/parallel/test-fs-sir-writes-alot.js b/test/parallel/test-fs-sir-writes-alot.js index 3a3458a552ee7a..5d8c3dfec90aab 100644 --- a/test/parallel/test-fs-sir-writes-alot.js +++ b/test/parallel/test-fs-sir-writes-alot.js @@ -20,14 +20,16 @@ // USE OR OTHER DEALINGS IN THE SOFTWARE. 
'use strict'; -const common = require('../common'); +require('../common'); const fs = require('fs'); const assert = require('assert'); const join = require('path').join; -const filename = join(common.tmpDir, 'out.txt'); +const tmpdir = require('../common/tmpdir'); -common.refreshTmpDir(); +const filename = join(tmpdir.path, 'out.txt'); + +tmpdir.refresh(); const fd = fs.openSync(filename, 'w'); diff --git a/test/parallel/test-fs-stream-double-close.js b/test/parallel/test-fs-stream-double-close.js index 3a8086d0ac0a3b..8c0037b24312ff 100644 --- a/test/parallel/test-fs-stream-double-close.js +++ b/test/parallel/test-fs-stream-double-close.js @@ -23,15 +23,16 @@ const common = require('../common'); const fs = require('fs'); -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); test1(fs.createReadStream(__filename)); test2(fs.createReadStream(__filename)); test3(fs.createReadStream(__filename)); -test1(fs.createWriteStream(`${common.tmpDir}/dummy1`)); -test2(fs.createWriteStream(`${common.tmpDir}/dummy2`)); -test3(fs.createWriteStream(`${common.tmpDir}/dummy3`)); +test1(fs.createWriteStream(`${tmpdir.path}/dummy1`)); +test2(fs.createWriteStream(`${tmpdir.path}/dummy2`)); +test3(fs.createWriteStream(`${tmpdir.path}/dummy3`)); function test1(stream) { stream.destroy(); diff --git a/test/parallel/test-fs-symlink-dir-junction-relative.js b/test/parallel/test-fs-symlink-dir-junction-relative.js index 7cb50b0291d0d5..308ab040488471 100644 --- a/test/parallel/test-fs-symlink-dir-junction-relative.js +++ b/test/parallel/test-fs-symlink-dir-junction-relative.js @@ -28,12 +28,14 @@ const assert = require('assert'); const path = require('path'); const fs = require('fs'); -const linkPath1 = path.join(common.tmpDir, 'junction1'); -const linkPath2 = path.join(common.tmpDir, 'junction2'); +const tmpdir = require('../common/tmpdir'); + +const linkPath1 = path.join(tmpdir.path, 'junction1'); +const linkPath2 = path.join(tmpdir.path, 'junction2'); const linkTarget = fixtures.fixturesDir; const linkData = fixtures.fixturesDir; -common.refreshTmpDir(); +tmpdir.refresh(); // Test fs.symlink() fs.symlink(linkData, linkPath1, 'junction', common.mustCall(function(err) { diff --git a/test/parallel/test-fs-symlink-dir-junction.js b/test/parallel/test-fs-symlink-dir-junction.js index f7ba3a6d384923..cd9459bf44afe1 100644 --- a/test/parallel/test-fs-symlink-dir-junction.js +++ b/test/parallel/test-fs-symlink-dir-junction.js @@ -26,11 +26,13 @@ const assert = require('assert'); const path = require('path'); const fs = require('fs'); +const tmpdir = require('../common/tmpdir'); + // test creating and reading symbolic link const linkData = fixtures.path('cycles/'); -const linkPath = path.join(common.tmpDir, 'cycles_link'); +const linkPath = path.join(tmpdir.path, 'cycles_link'); -common.refreshTmpDir(); +tmpdir.refresh(); fs.symlink(linkData, linkPath, 'junction', common.mustCall(function(err) { assert.ifError(err); diff --git a/test/parallel/test-fs-symlink.js b/test/parallel/test-fs-symlink.js index 1de7532068c5e5..89142137dd23cf 100644 --- a/test/parallel/test-fs-symlink.js +++ b/test/parallel/test-fs-symlink.js @@ -32,11 +32,12 @@ const fs = require('fs'); let linkTime; let fileTime; -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); // test creating and reading symbolic link const linkData = fixtures.path('/cycles/root.js'); -const linkPath = path.join(common.tmpDir, 'symlink1.js'); +const linkPath = path.join(tmpdir.path, 'symlink1.js'); 
fs.symlink(linkData, linkPath, common.mustCall(function(err) { assert.ifError(err); diff --git a/test/parallel/test-fs-syncwritestream.js b/test/parallel/test-fs-syncwritestream.js index 236c412c45b543..a014277a6ba259 100644 --- a/test/parallel/test-fs-syncwritestream.js +++ b/test/parallel/test-fs-syncwritestream.js @@ -21,9 +21,10 @@ if (process.argv[2] === 'child') { return; } -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); -const filename = path.join(common.tmpDir, 'stdout'); +const filename = path.join(tmpdir.path, 'stdout'); const stdoutFd = fs.openSync(filename, 'w'); const proc = spawn(process.execPath, [__filename, 'child'], { diff --git a/test/parallel/test-fs-truncate-GH-6233.js b/test/parallel/test-fs-truncate-GH-6233.js index 07bd272024f99b..87663c63616ffa 100644 --- a/test/parallel/test-fs-truncate-GH-6233.js +++ b/test/parallel/test-fs-truncate-GH-6233.js @@ -24,9 +24,11 @@ const common = require('../common'); const assert = require('assert'); const fs = require('fs'); -const filename = `${common.tmpDir}/truncate-file.txt`; +const tmpdir = require('../common/tmpdir'); -common.refreshTmpDir(); +const filename = `${tmpdir.path}/truncate-file.txt`; + +tmpdir.refresh(); // Synchronous test. { diff --git a/test/parallel/test-fs-truncate-fd.js b/test/parallel/test-fs-truncate-fd.js index 526612870d9f73..4e001596d35efc 100644 --- a/test/parallel/test-fs-truncate-fd.js +++ b/test/parallel/test-fs-truncate-fd.js @@ -3,8 +3,9 @@ const common = require('../common'); const assert = require('assert'); const path = require('path'); const fs = require('fs'); -const tmp = common.tmpDir; -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); +const tmp = tmpdir.path; +tmpdir.refresh(); const filename = path.resolve(tmp, 'truncate-file.txt'); fs.writeFileSync(filename, 'hello world', 'utf8'); diff --git a/test/parallel/test-fs-truncate-sync.js b/test/parallel/test-fs-truncate-sync.js index a7ce2f4d97f3fe..66250cf4386b34 100644 --- a/test/parallel/test-fs-truncate-sync.js +++ b/test/parallel/test-fs-truncate-sync.js @@ -1,11 +1,12 @@ 'use strict'; -const common = require('../common'); +require('../common'); const assert = require('assert'); const path = require('path'); const fs = require('fs'); -const tmp = common.tmpDir; +const tmpdir = require('../common/tmpdir'); +const tmp = tmpdir.path; -common.refreshTmpDir(); +tmpdir.refresh(); const filename = path.resolve(tmp, 'truncate-sync-file.txt'); diff --git a/test/parallel/test-fs-truncate.js b/test/parallel/test-fs-truncate.js index a56a1a054ca718..67aa0da53fd620 100644 --- a/test/parallel/test-fs-truncate.js +++ b/test/parallel/test-fs-truncate.js @@ -24,11 +24,12 @@ const common = require('../common'); const assert = require('assert'); const path = require('path'); const fs = require('fs'); -const tmp = common.tmpDir; +const tmpdir = require('../common/tmpdir'); +const tmp = tmpdir.path; const filename = path.resolve(tmp, 'truncate-file.txt'); const data = Buffer.alloc(1024 * 16, 'x'); -common.refreshTmpDir(); +tmpdir.refresh(); let stat; diff --git a/test/parallel/test-fs-utimes.js b/test/parallel/test-fs-utimes.js index 9bcf6039cd60bc..3dc0bb59def6a2 100644 --- a/test/parallel/test-fs-utimes.js +++ b/test/parallel/test-fs-utimes.js @@ -25,7 +25,8 @@ const assert = require('assert'); const util = require('util'); const fs = require('fs'); -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); let tests_ok = 0; let tests_run = 0; @@ -73,8 +74,8 @@ 
function testIt(atime, mtime, callback) { // test synchronized code paths, these functions throw on failure // function syncTests() { - fs.utimesSync(common.tmpDir, atime, mtime); - expect_ok('utimesSync', common.tmpDir, undefined, atime, mtime); + fs.utimesSync(tmpdir.path, atime, mtime); + expect_ok('utimesSync', tmpdir.path, undefined, atime, mtime); tests_run++; // some systems don't have futimes @@ -109,17 +110,17 @@ function testIt(atime, mtime, callback) { // // test async code paths // - fs.utimes(common.tmpDir, atime, mtime, common.mustCall(function(err) { - expect_ok('utimes', common.tmpDir, err, atime, mtime); + fs.utimes(tmpdir.path, atime, mtime, common.mustCall(function(err) { + expect_ok('utimes', tmpdir.path, err, atime, mtime); fs.utimes('foobarbaz', atime, mtime, common.mustCall(function(err) { expect_errno('utimes', 'foobarbaz', err, 'ENOENT'); // don't close this fd if (common.isWindows) { - fd = fs.openSync(common.tmpDir, 'r+'); + fd = fs.openSync(tmpdir.path, 'r+'); } else { - fd = fs.openSync(common.tmpDir, 'r'); + fd = fs.openSync(tmpdir.path, 'r'); } fs.futimes(fd, atime, mtime, common.mustCall(function(err) { @@ -139,7 +140,7 @@ function testIt(atime, mtime, callback) { tests_run++; } -const stats = fs.statSync(common.tmpDir); +const stats = fs.statSync(tmpdir.path); // run tests const runTest = common.mustCall(testIt, 6); @@ -168,11 +169,11 @@ process.on('exit', function() { // Ref: https://github.com/nodejs/node/issues/13255 -const path = `${common.tmpDir}/test-utimes-precision`; +const path = `${tmpdir.path}/test-utimes-precision`; fs.writeFileSync(path, ''); -// test Y2K38 for all platforms [except 'arm', and 'SunOS'] -if (!process.arch.includes('arm') && !common.isSunOS) { +// test Y2K38 for all platforms [except 'arm', 'OpenBSD' and 'SunOS'] +if (!process.arch.includes('arm') && !common.isOpenBSD && !common.isSunOS) { // because 2 ** 31 doesn't look right // eslint-disable-next-line space-infix-ops const Y2K38_mtime = 2**31; diff --git a/test/parallel/test-fs-watch-encoding.js b/test/parallel/test-fs-watch-encoding.js index 5226899d2f33a8..1cea6255098360 100644 --- a/test/parallel/test-fs-watch-encoding.js +++ b/test/parallel/test-fs-watch-encoding.js @@ -22,10 +22,11 @@ if (common.isAIX) const fs = require('fs'); const path = require('path'); -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); const fn = '新建文夹件.txt'; -const a = path.join(common.tmpDir, fn); +const a = path.join(tmpdir.path, fn); const watchers = new Set(); @@ -42,7 +43,7 @@ function unregisterWatcher(watcher) { } const watcher1 = fs.watch( - common.tmpDir, + tmpdir.path, { encoding: 'hex' }, (event, filename) => { if (['e696b0e5bbbae69687e5a4b9e4bbb62e747874', null].includes(filename)) @@ -52,7 +53,7 @@ const watcher1 = fs.watch( registerWatcher(watcher1); const watcher2 = fs.watch( - common.tmpDir, + tmpdir.path, (event, filename) => { if ([fn, null].includes(filename)) done(watcher2); @@ -61,7 +62,7 @@ const watcher2 = fs.watch( registerWatcher(watcher2); const watcher3 = fs.watch( - common.tmpDir, + tmpdir.path, { encoding: 'buffer' }, (event, filename) => { if (filename instanceof Buffer && filename.toString('utf8') === fn) diff --git a/test/parallel/test-fs-watch-recursive.js b/test/parallel/test-fs-watch-recursive.js index 3e3746df1ef336..82d87aa2ecbb93 100644 --- a/test/parallel/test-fs-watch-recursive.js +++ b/test/parallel/test-fs-watch-recursive.js @@ -9,10 +9,12 @@ const assert = require('assert'); const path = require('path'); const fs = 
require('fs'); -const testDir = common.tmpDir; +const tmpdir = require('../common/tmpdir'); + +const testDir = tmpdir.path; const filenameOne = 'watch.txt'; -common.refreshTmpDir(); +tmpdir.refresh(); const testsubdir = fs.mkdtempSync(testDir + path.sep); const relativePathOne = path.join(path.basename(testsubdir), filenameOne); diff --git a/test/parallel/test-fs-watch.js b/test/parallel/test-fs-watch.js index bf5fc9a8e1ed75..37701c84b8932a 100644 --- a/test/parallel/test-fs-watch.js +++ b/test/parallel/test-fs-watch.js @@ -14,7 +14,7 @@ class WatchTestCase { this.field = field; this.shouldSkip = !shouldInclude; } - get dirPath() { return join(common.tmpDir, this.dirName); } + get dirPath() { return join(tmpdir.path, this.dirName); } get filePath() { return join(this.dirPath, this.fileName); } } @@ -35,7 +35,8 @@ const cases = [ ) ]; -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); for (const testCase of cases) { if (testCase.shouldSkip) continue; diff --git a/test/parallel/test-fs-watchfile.js b/test/parallel/test-fs-watchfile.js index d99de562bb2a8e..5a30981c33e882 100644 --- a/test/parallel/test-fs-watchfile.js +++ b/test/parallel/test-fs-watchfile.js @@ -5,6 +5,8 @@ const assert = require('assert'); const fs = require('fs'); const path = require('path'); +const tmpdir = require('../common/tmpdir'); + // Basic usage tests. assert.throws(function() { fs.watchFile('./some-file'); @@ -18,7 +20,7 @@ assert.throws(function() { fs.watchFile(new Object(), common.mustNotCall()); }, /Path must be a string/); -const enoentFile = path.join(common.tmpDir, 'non-existent-file'); +const enoentFile = path.join(tmpdir.path, 'non-existent-file'); const expectedStatObject = new fs.Stats( 0, // dev 0, // mode @@ -36,7 +38,7 @@ const expectedStatObject = new fs.Stats( Date.UTC(1970, 0, 1, 0, 0, 0) // birthtime ); -common.refreshTmpDir(); +tmpdir.refresh(); // If the file initially didn't exist, and gets created at a later point of // time, the callback should be invoked again with proper values in stat object @@ -67,7 +69,7 @@ fs.watchFile(enoentFile, { interval: 0 }, common.mustCall(function(curr, prev) { // Watch events should callback with a filename on supported systems. // Omitting AIX. It works but not reliably. 
if (common.isLinux || common.isOSX || common.isWindows) { - const dir = path.join(common.tmpDir, 'watch'); + const dir = path.join(tmpdir.path, 'watch'); fs.mkdir(dir, common.mustCall(function(err) { if (err) assert.fail(err); diff --git a/test/parallel/test-fs-write-buffer.js b/test/parallel/test-fs-write-buffer.js index ed998958ae060f..6e6154642a583d 100644 --- a/test/parallel/test-fs-write-buffer.js +++ b/test/parallel/test-fs-write-buffer.js @@ -26,11 +26,12 @@ const path = require('path'); const fs = require('fs'); const expected = Buffer.from('hello'); -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); // fs.write with all parameters provided: { - const filename = path.join(common.tmpDir, 'write1.txt'); + const filename = path.join(tmpdir.path, 'write1.txt'); fs.open(filename, 'w', 0o644, common.mustCall((err, fd) => { assert.ifError(err); @@ -50,7 +51,7 @@ common.refreshTmpDir(); // fs.write with a buffer, without the length parameter: { - const filename = path.join(common.tmpDir, 'write2.txt'); + const filename = path.join(tmpdir.path, 'write2.txt'); fs.open(filename, 'w', 0o644, common.mustCall((err, fd) => { assert.ifError(err); @@ -70,7 +71,7 @@ common.refreshTmpDir(); // fs.write with a buffer, without the offset and length parameters: { - const filename = path.join(common.tmpDir, 'write3.txt'); + const filename = path.join(tmpdir.path, 'write3.txt'); fs.open(filename, 'w', 0o644, common.mustCall(function(err, fd) { assert.ifError(err); @@ -90,7 +91,7 @@ common.refreshTmpDir(); // fs.write with the offset passed as undefined followed by the callback: { - const filename = path.join(common.tmpDir, 'write4.txt'); + const filename = path.join(tmpdir.path, 'write4.txt'); fs.open(filename, 'w', 0o644, common.mustCall(function(err, fd) { assert.ifError(err); @@ -110,7 +111,7 @@ common.refreshTmpDir(); // fs.write with offset and length passed as undefined followed by the callback: { - const filename = path.join(common.tmpDir, 'write5.txt'); + const filename = path.join(tmpdir.path, 'write5.txt'); fs.open(filename, 'w', 0o644, common.mustCall((err, fd) => { assert.ifError(err); @@ -130,7 +131,7 @@ common.refreshTmpDir(); // fs.write with a Uint8Array, without the offset and length parameters: { - const filename = path.join(common.tmpDir, 'write6.txt'); + const filename = path.join(tmpdir.path, 'write6.txt'); fs.open(filename, 'w', 0o644, common.mustCall((err, fd) => { assert.ifError(err); diff --git a/test/parallel/test-fs-write-file-buffer.js b/test/parallel/test-fs-write-file-buffer.js index f2039c87ab4f0e..82fb7ad69a443b 100644 --- a/test/parallel/test-fs-write-file-buffer.js +++ b/test/parallel/test-fs-write-file-buffer.js @@ -20,7 +20,7 @@ // USE OR OTHER DEALINGS IN THE SOFTWARE. 
'use strict'; -const common = require('../common'); +require('../common'); const join = require('path').join; const util = require('util'); const fs = require('fs'); @@ -46,9 +46,10 @@ let data = [ data = data.join('\n'); -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); const buf = Buffer.from(data, 'base64'); -fs.writeFileSync(join(common.tmpDir, 'test.jpg'), buf); +fs.writeFileSync(join(tmpdir.path, 'test.jpg'), buf); util.log('Done!'); diff --git a/test/parallel/test-fs-write-file-invalid-path.js b/test/parallel/test-fs-write-file-invalid-path.js index c45eaccf2bc193..a4c8ff5bf73a03 100644 --- a/test/parallel/test-fs-write-file-invalid-path.js +++ b/test/parallel/test-fs-write-file-invalid-path.js @@ -8,7 +8,8 @@ const path = require('path'); if (!common.isWindows) common.skip('This test is for Windows only.'); -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); const DATA_VALUE = 'hello'; @@ -17,7 +18,7 @@ const DATA_VALUE = 'hello'; const RESERVED_CHARACTERS = '<>"|?*'; [...RESERVED_CHARACTERS].forEach((ch) => { - const pathname = path.join(common.tmpDir, `somefile_${ch}`); + const pathname = path.join(tmpdir.path, `somefile_${ch}`); assert.throws( () => { fs.writeFileSync(pathname, DATA_VALUE); @@ -28,7 +29,7 @@ const RESERVED_CHARACTERS = '<>"|?*'; // Test for ':' (NTFS data streams). // Refs: https://msdn.microsoft.com/en-us/library/windows/desktop/bb540537.aspx -const pathname = path.join(common.tmpDir, 'foo:bar'); +const pathname = path.join(tmpdir.path, 'foo:bar'); fs.writeFileSync(pathname, DATA_VALUE); let content = ''; diff --git a/test/parallel/test-fs-write-file-sync.js b/test/parallel/test-fs-write-file-sync.js index aa3864962c90af..9a19b9f6e93439 100644 --- a/test/parallel/test-fs-write-file-sync.js +++ b/test/parallel/test-fs-write-file-sync.js @@ -46,10 +46,11 @@ if (common.isWindows) { mode = 0o755; } -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); // Test writeFileSync -const file1 = path.join(common.tmpDir, 'testWriteFileSync.txt'); +const file1 = path.join(tmpdir.path, 'testWriteFileSync.txt'); fs.writeFileSync(file1, '123', { mode }); @@ -59,7 +60,7 @@ assert.strictEqual(content, '123'); assert.strictEqual(fs.statSync(file1).mode & 0o777, mode); // Test appendFileSync -const file2 = path.join(common.tmpDir, 'testAppendFileSync.txt'); +const file2 = path.join(tmpdir.path, 'testAppendFileSync.txt'); fs.appendFileSync(file2, 'abc', { mode }); @@ -69,7 +70,7 @@ assert.strictEqual(content, 'abc'); assert.strictEqual(fs.statSync(file2).mode & mode, mode); // Test writeFileSync with file descriptor -const file3 = path.join(common.tmpDir, 'testWriteFileSyncFd.txt'); +const file3 = path.join(tmpdir.path, 'testWriteFileSyncFd.txt'); const fd = fs.openSync(file3, 'w+', mode); fs.writeFileSync(fd, '123'); diff --git a/test/parallel/test-fs-write-file-uint8array.js b/test/parallel/test-fs-write-file-uint8array.js index 219379c77a920d..592bdb05814e06 100644 --- a/test/parallel/test-fs-write-file-uint8array.js +++ b/test/parallel/test-fs-write-file-uint8array.js @@ -4,9 +4,10 @@ const assert = require('assert'); const fs = require('fs'); const join = require('path').join; -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); -const filename = join(common.tmpDir, 'test.txt'); +const filename = join(tmpdir.path, 'test.txt'); const s = '南越国是前203年至前111年存在于岭南地区的一个国家,国都位于番禺,疆域包括今天中国的广东、' + '广西两省区的大部份地区,福建省、湖南、贵州、云南的一小部份地区和越南的北部。' + 
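The hunks above and below all follow the same migration: tests stop using `common.tmpDir`/`common.refreshTmpDir()` and instead load the dedicated `../common/tmpdir` helper. A minimal sketch of the resulting test pattern, assuming the helper exposes only the `path` string and the `refresh()` function that these hunks use (the example file name is hypothetical and the snippet only runs inside the Node test tree, where `../common` and `../common/tmpdir` exist):

```js
'use strict';
require('../common');               // standard test-suite bootstrap
const assert = require('assert');
const fs = require('fs');
const path = require('path');

// The tmpdir helper replaces common.tmpDir / common.refreshTmpDir().
const tmpdir = require('../common/tmpdir');
tmpdir.refresh();                   // wipe and recreate the per-test temp directory

// Hypothetical file used only for illustration.
const filename = path.join(tmpdir.path, 'example.txt');
fs.writeFileSync(filename, 'hello');
assert.strictEqual(fs.readFileSync(filename, 'utf8'), 'hello');
```

Keeping the temp-directory state behind a single helper means each test declares its dependency on the temp directory explicitly instead of pulling it in implicitly through `common`.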
diff --git a/test/parallel/test-fs-write-file.js b/test/parallel/test-fs-write-file.js index 6dd1a58ecba832..b137e55547201e 100644 --- a/test/parallel/test-fs-write-file.js +++ b/test/parallel/test-fs-write-file.js @@ -25,9 +25,10 @@ const assert = require('assert'); const fs = require('fs'); const join = require('path').join; -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); -const filename = join(common.tmpDir, 'test.txt'); +const filename = join(tmpdir.path, 'test.txt'); const n = 220; const s = '南越国是前203年至前111年存在于岭南地区的一个国家,国都位于番禺,疆域包括今天中国的广东、' + @@ -48,7 +49,7 @@ fs.writeFile(filename, s, common.mustCall(function(e) { })); // test that writeFile accepts buffers -const filename2 = join(common.tmpDir, 'test2.txt'); +const filename2 = join(tmpdir.path, 'test2.txt'); const buf = Buffer.from(s, 'utf8'); fs.writeFile(filename2, buf, common.mustCall(function(e) { @@ -62,7 +63,7 @@ fs.writeFile(filename2, buf, common.mustCall(function(e) { })); // test that writeFile accepts numbers. -const filename3 = join(common.tmpDir, 'test3.txt'); +const filename3 = join(tmpdir.path, 'test3.txt'); const m = 0o600; fs.writeFile(filename3, n, { mode: m }, common.mustCall(function(e) { @@ -82,7 +83,7 @@ fs.writeFile(filename3, n, { mode: m }, common.mustCall(function(e) { })); // test that writeFile accepts file descriptors -const filename4 = join(common.tmpDir, 'test4.txt'); +const filename4 = join(tmpdir.path, 'test4.txt'); fs.open(filename4, 'w+', common.mustCall(function(e, fd) { assert.ifError(e); diff --git a/test/parallel/test-fs-write-stream-autoclose-option.js b/test/parallel/test-fs-write-stream-autoclose-option.js index df73d18b44210d..d632868f3eaeb4 100644 --- a/test/parallel/test-fs-write-stream-autoclose-option.js +++ b/test/parallel/test-fs-write-stream-autoclose-option.js @@ -4,8 +4,10 @@ const assert = require('assert'); const path = require('path'); const fs = require('fs'); -const file = path.join(common.tmpDir, 'write-autoclose-opt1.txt'); -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); + +const file = path.join(tmpdir.path, 'write-autoclose-opt1.txt'); +tmpdir.refresh(); let stream = fs.createWriteStream(file, { flags: 'w+', autoClose: false }); stream.write('Test1'); stream.end(); diff --git a/test/parallel/test-fs-write-stream-change-open.js b/test/parallel/test-fs-write-stream-change-open.js index 50860f2e405f18..8f79e59427e50d 100644 --- a/test/parallel/test-fs-write-stream-change-open.js +++ b/test/parallel/test-fs-write-stream-change-open.js @@ -20,14 +20,16 @@ // USE OR OTHER DEALINGS IN THE SOFTWARE. 
'use strict'; -const common = require('../common'); +require('../common'); const assert = require('assert'); const path = require('path'); const fs = require('fs'); -const file = path.join(common.tmpDir, 'write.txt'); +const tmpdir = require('../common/tmpdir'); -common.refreshTmpDir(); +const file = path.join(tmpdir.path, 'write.txt'); + +tmpdir.refresh(); const stream = fs.WriteStream(file); const _fs_close = fs.close; diff --git a/test/parallel/test-fs-write-stream-double-close.js b/test/parallel/test-fs-write-stream-double-close.js index c73c9c7d6ad9ae..b083d157bb8723 100644 --- a/test/parallel/test-fs-write-stream-double-close.js +++ b/test/parallel/test-fs-write-stream-double-close.js @@ -4,9 +4,10 @@ const common = require('../common'); const fs = require('fs'); const path = require('path'); -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); -const s = fs.createWriteStream(path.join(common.tmpDir, 'rw')); +const s = fs.createWriteStream(path.join(tmpdir.path, 'rw')); s.close(common.mustCall()); s.close(common.mustCall()); diff --git a/test/parallel/test-fs-write-stream-encoding.js b/test/parallel/test-fs-write-stream-encoding.js index 5fb810887721d9..5803d99fd7b33d 100644 --- a/test/parallel/test-fs-write-stream-encoding.js +++ b/test/parallel/test-fs-write-stream-encoding.js @@ -1,17 +1,18 @@ 'use strict'; -const common = require('../common'); +require('../common'); const assert = require('assert'); const fixtures = require('../common/fixtures'); const fs = require('fs'); const path = require('path'); const stream = require('stream'); +const tmpdir = require('../common/tmpdir'); const firstEncoding = 'base64'; const secondEncoding = 'latin1'; const examplePath = fixtures.path('x.txt'); -const dummyPath = path.join(common.tmpDir, 'x.txt'); +const dummyPath = path.join(tmpdir.path, 'x.txt'); -common.refreshTmpDir(); +tmpdir.refresh(); const exampleReadStream = fs.createReadStream(examplePath, { encoding: firstEncoding diff --git a/test/parallel/test-fs-write-stream-end.js b/test/parallel/test-fs-write-stream-end.js index 9c889b94e4cba2..36e7cb5504cab0 100644 --- a/test/parallel/test-fs-write-stream-end.js +++ b/test/parallel/test-fs-write-stream-end.js @@ -25,17 +25,18 @@ const assert = require('assert'); const path = require('path'); const fs = require('fs'); -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); { - const file = path.join(common.tmpDir, 'write-end-test0.txt'); + const file = path.join(tmpdir.path, 'write-end-test0.txt'); const stream = fs.createWriteStream(file); stream.end(); stream.on('close', common.mustCall()); } { - const file = path.join(common.tmpDir, 'write-end-test1.txt'); + const file = path.join(tmpdir.path, 'write-end-test1.txt'); const stream = fs.createWriteStream(file); stream.end('a\n', 'utf8'); stream.on('close', common.mustCall(function() { diff --git a/test/parallel/test-fs-write-stream-err.js b/test/parallel/test-fs-write-stream-err.js index 077bfb24b75cff..36bf9dbcfb068e 100644 --- a/test/parallel/test-fs-write-stream-err.js +++ b/test/parallel/test-fs-write-stream-err.js @@ -24,9 +24,10 @@ const common = require('../common'); const assert = require('assert'); const fs = require('fs'); -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); -const stream = fs.createWriteStream(`${common.tmpDir}/out`, { +const stream = fs.createWriteStream(`${tmpdir.path}/out`, { highWaterMark: 10 }); const err = new Error('BAM'); diff --git 
a/test/parallel/test-fs-write-stream-throw-type-error.js b/test/parallel/test-fs-write-stream-throw-type-error.js index 5652e9e5e697cc..e04840ee8f76c1 100644 --- a/test/parallel/test-fs-write-stream-throw-type-error.js +++ b/test/parallel/test-fs-write-stream-throw-type-error.js @@ -1,5 +1,5 @@ 'use strict'; -const common = require('../common'); +require('../common'); const assert = require('assert'); const fs = require('fs'); const path = require('path'); @@ -10,9 +10,11 @@ const numberError = const booleanError = /^TypeError: "options" must be a string or an object, got boolean instead\.$/; -const example = path.join(common.tmpDir, 'dummy'); +const tmpdir = require('../common/tmpdir'); -common.refreshTmpDir(); +const example = path.join(tmpdir.path, 'dummy'); + +tmpdir.refresh(); assert.doesNotThrow(() => { fs.createWriteStream(example, undefined); diff --git a/test/parallel/test-fs-write-stream.js b/test/parallel/test-fs-write-stream.js index bba2debf9156e5..6cf5ff3b69f533 100644 --- a/test/parallel/test-fs-write-stream.js +++ b/test/parallel/test-fs-write-stream.js @@ -20,14 +20,16 @@ // USE OR OTHER DEALINGS IN THE SOFTWARE. 'use strict'; -const common = require('../common'); +require('../common'); const assert = require('assert'); const path = require('path'); const fs = require('fs'); -const file = path.join(common.tmpDir, 'write.txt'); +const tmpdir = require('../common/tmpdir'); -common.refreshTmpDir(); +const file = path.join(tmpdir.path, 'write.txt'); + +tmpdir.refresh(); { const stream = fs.WriteStream(file); diff --git a/test/parallel/test-fs-write-string-coerce.js b/test/parallel/test-fs-write-string-coerce.js index 9356bc71850303..4581c319277a78 100644 --- a/test/parallel/test-fs-write-string-coerce.js +++ b/test/parallel/test-fs-write-string-coerce.js @@ -4,9 +4,10 @@ const assert = require('assert'); const path = require('path'); const fs = require('fs'); -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); -const fn = path.join(common.tmpDir, 'write-string-coerce.txt'); +const fn = path.join(tmpdir.path, 'write-string-coerce.txt'); const data = true; const expected = String(data); diff --git a/test/parallel/test-fs-write-sync.js b/test/parallel/test-fs-write-sync.js index 41a9f2c8887b32..4ca7a1dd570eb0 100644 --- a/test/parallel/test-fs-write-sync.js +++ b/test/parallel/test-fs-write-sync.js @@ -20,13 +20,14 @@ // USE OR OTHER DEALINGS IN THE SOFTWARE. 
'use strict'; -const common = require('../common'); +require('../common'); const assert = require('assert'); const path = require('path'); const fs = require('fs'); -const filename = path.join(common.tmpDir, 'write.txt'); +const tmpdir = require('../common/tmpdir'); +const filename = path.join(tmpdir.path, 'write.txt'); -common.refreshTmpDir(); +tmpdir.refresh(); // fs.writeSync with all parameters provided: { diff --git a/test/parallel/test-fs-write.js b/test/parallel/test-fs-write.js index ccf2d9b40f7934..5dd9ca0e902a1e 100644 --- a/test/parallel/test-fs-write.js +++ b/test/parallel/test-fs-write.js @@ -24,14 +24,16 @@ const common = require('../common'); const assert = require('assert'); const path = require('path'); const fs = require('fs'); -const fn = path.join(common.tmpDir, 'write.txt'); -const fn2 = path.join(common.tmpDir, 'write2.txt'); -const fn3 = path.join(common.tmpDir, 'write3.txt'); +const tmpdir = require('../common/tmpdir'); + +tmpdir.refresh(); + +const fn = path.join(tmpdir.path, 'write.txt'); +const fn2 = path.join(tmpdir.path, 'write2.txt'); +const fn3 = path.join(tmpdir.path, 'write3.txt'); const expected = 'ümlaut.'; const constants = fs.constants; -common.refreshTmpDir(); - fs.open(fn, 'w', 0o644, common.mustCall(function(err, fd) { assert.ifError(err); diff --git a/test/parallel/test-http-abort-before-end.js b/test/parallel/test-http-abort-before-end.js index 37d1291b074127..5577f256ca2ec9 100644 --- a/test/parallel/test-http-abort-before-end.js +++ b/test/parallel/test-http-abort-before-end.js @@ -22,25 +22,22 @@ 'use strict'; const common = require('../common'); const http = require('http'); -const assert = require('assert'); const server = http.createServer(common.mustNotCall()); -server.listen(0, function() { +server.listen(0, common.mustCall(() => { const req = http.request({ method: 'GET', host: '127.0.0.1', - port: this.address().port + port: server.address().port }); - req.on('error', function(ex) { - // https://github.com/joyent/node/issues/1399#issuecomment-2597359 - // abort() should emit an Error, not the net.Socket object - assert(ex instanceof Error); - }); + req.on('abort', common.mustCall(() => { + server.close(); + })); + + req.on('error', common.mustNotCall()); req.abort(); req.end(); - - server.close(); -}); +})); diff --git a/test/parallel/test-http-agent-getname.js b/test/parallel/test-http-agent-getname.js index 4b4e9ac26b44a5..31dc255ba558db 100644 --- a/test/parallel/test-http-agent-getname.js +++ b/test/parallel/test-http-agent-getname.js @@ -1,10 +1,12 @@ 'use strict'; -const common = require('../common'); +require('../common'); const assert = require('assert'); const http = require('http'); const path = require('path'); +const tmpdir = require('../common/tmpdir'); + const agent = new http.Agent(); // default to localhost @@ -33,7 +35,7 @@ assert.strictEqual( ); // unix socket -const socketPath = path.join(common.tmpDir, 'foo', 'bar'); +const socketPath = path.join(tmpdir.path, 'foo', 'bar'); assert.strictEqual( agent.getName({ socketPath diff --git a/test/parallel/test-http-agent-keepalive.js b/test/parallel/test-http-agent-keepalive.js index 2c8b6c1cc98d2d..8ac8d79df192b0 100644 --- a/test/parallel/test-http-agent-keepalive.js +++ b/test/parallel/test-http-agent-keepalive.js @@ -89,7 +89,7 @@ function remoteClose() { process.nextTick(common.mustCall(() => { assert.strictEqual(agent.sockets[name], undefined); assert.strictEqual(agent.freeSockets[name].length, 1); - // waitting remote server close the socket + // waiting remote server 
close the socket setTimeout(common.mustCall(() => { assert.strictEqual(agent.sockets[name], undefined); assert.strictEqual(agent.freeSockets[name], undefined, @@ -102,7 +102,7 @@ function remoteClose() { } function remoteError() { - // remove server will destroy ths socket + // remote server will destroy the socket const req = get('/error', common.mustNotCall()); req.on('error', common.mustCall((err) => { assert(err); diff --git a/test/parallel/test-http-chunk-problem.js b/test/parallel/test-http-chunk-problem.js index 46a7406e74595a..f999f055fc0a91 100644 --- a/test/parallel/test-http-chunk-problem.js +++ b/test/parallel/test-http-chunk-problem.js @@ -37,7 +37,9 @@ if (process.argv[2] === 'shasum') { const http = require('http'); const cp = require('child_process'); -const filename = require('path').join(common.tmpDir, 'big'); +const tmpdir = require('../common/tmpdir'); + +const filename = require('path').join(tmpdir.path, 'big'); let server; function executeRequest(cb) { @@ -59,7 +61,7 @@ function executeRequest(cb) { } -common.refreshTmpDir(); +tmpdir.refresh(); const ddcmd = common.ddCommand(filename, 10240); diff --git a/test/parallel/test-http-client-abort-keep-alive-queued-unix-socket.js b/test/parallel/test-http-client-abort-keep-alive-queued-unix-socket.js index efcbfe8dc5846f..745ed4ceeee887 100644 --- a/test/parallel/test-http-client-abort-keep-alive-queued-unix-socket.js +++ b/test/parallel/test-http-client-abort-keep-alive-queued-unix-socket.js @@ -16,7 +16,8 @@ class Agent extends http.Agent { const server = http.createServer((req, res) => res.end()); const socketPath = common.PIPE; -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); server.listen(socketPath, common.mustCall(() => { const agent = new Agent({ diff --git a/test/parallel/test-http-client-abort-unix-socket.js b/test/parallel/test-http-client-abort-unix-socket.js index 3fb2cd9b869f45..bf666b7935a3a6 100644 --- a/test/parallel/test-http-client-abort-unix-socket.js +++ b/test/parallel/test-http-client-abort-unix-socket.js @@ -12,7 +12,8 @@ class Agent extends http.Agent { } } -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); server.listen(common.PIPE, common.mustCall(() => { const req = http.get({ diff --git a/test/parallel/test-http-client-pipe-end.js b/test/parallel/test-http-client-pipe-end.js index 4b9f168e9907f2..9dcdbe4a49d83a 100644 --- a/test/parallel/test-http-client-pipe-end.js +++ b/test/parallel/test-http-client-pipe-end.js @@ -34,7 +34,8 @@ const server = http.createServer(function(req, res) { }); }); -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); server.listen(common.PIPE, function() { const req = http.request({ diff --git a/test/parallel/test-http-client-response-domain.js b/test/parallel/test-http-client-response-domain.js index ff73fd51cc5d73..0a32e929141e45 100644 --- a/test/parallel/test-http-client-response-domain.js +++ b/test/parallel/test-http-client-response-domain.js @@ -27,7 +27,8 @@ const domain = require('domain'); let d; -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); // first fire up a simple HTTP server const server = http.createServer(function(req, res) { diff --git a/test/parallel/test-http-connect.js b/test/parallel/test-http-connect.js index b019c61573e3fa..9499cd95275f8c 100644 --- a/test/parallel/test-http-connect.js +++ b/test/parallel/test-http-connect.js @@ -49,6 +49,10 @@ server.listen(0, common.mustCall(() => { path: 
'google.com:443' }, common.mustNotCall()); + req.on('socket', common.mustCall((socket) => { + assert.strictEqual(socket._httpMessage, req); + })); + req.on('close', common.mustCall()); req.on('connect', common.mustCall((res, socket, firstBodyChunk) => { @@ -60,8 +64,10 @@ server.listen(0, common.mustCall(() => { // Make sure this socket has detached. assert(!socket.ondata); assert(!socket.onend); + assert.strictEqual(socket._httpMessage, null); assert.strictEqual(socket.listeners('connect').length, 0); assert.strictEqual(socket.listeners('data').length, 0); + assert.strictEqual(socket.listeners('drain').length, 0); // the stream.Duplex onend listener // allow 0 here, so that i can run the same test on streams1 impl diff --git a/test/parallel/test-http-dns-error.js b/test/parallel/test-http-dns-error.js index 723b710647682b..06a15c89fb46b3 100644 --- a/test/parallel/test-http-dns-error.js +++ b/test/parallel/test-http-dns-error.js @@ -30,30 +30,45 @@ const http = require('http'); const https = require('https'); const host = '*'.repeat(256); +const MAX_TRIES = 5; -function do_not_call() { - throw new Error('This function should not have been called.'); -} - -function test(mod) { +let errCode = 'ENOTFOUND'; +if (common.isOpenBSD) + errCode = 'EAI_FAIL'; +function tryGet(mod, tries) { // Bad host name should not throw an uncatchable exception. // Ensure that there is time to attach an error listener. - const req1 = mod.get({ host: host, port: 42 }, do_not_call); - req1.on('error', common.mustCall(function(err) { - assert.strictEqual(err.code, 'ENOTFOUND'); + const req = mod.get({ host: host, port: 42 }, common.mustNotCall()); + req.on('error', common.mustCall(function(err) { + if (err.code === 'EAGAIN' && tries < MAX_TRIES) { + tryGet(mod, ++tries); + return; + } + assert.strictEqual(err.code, errCode); })); // http.get() called req1.end() for us +} - const req2 = mod.request({ +function tryRequest(mod, tries) { + const req = mod.request({ method: 'GET', host: host, port: 42 - }, do_not_call); - req2.on('error', common.mustCall(function(err) { - assert.strictEqual(err.code, 'ENOTFOUND'); + }, common.mustNotCall()); + req.on('error', common.mustCall(function(err) { + if (err.code === 'EAGAIN' && tries < MAX_TRIES) { + tryRequest(mod, ++tries); + return; + } + assert.strictEqual(err.code, errCode); })); - req2.end(); + req.end(); +} + +function test(mod) { + tryGet(mod, 0); + tryRequest(mod, 0); } if (common.hasCrypto) { diff --git a/test/parallel/test-http-extra-response.js b/test/parallel/test-http-extra-response.js index 7c9514232c7ba8..6d1a770487402d 100644 --- a/test/parallel/test-http-extra-response.js +++ b/test/parallel/test-http-extra-response.js @@ -51,7 +51,6 @@ const server = net.createServer(function(socket) { if (postBody.includes('\r\n')) { socket.write(fullResponse); - // omg, I wrote the response twice, what a terrible HTTP server I am. 
socket.end(fullResponse); } }); diff --git a/test/parallel/test-http-get-pipeline-problem.js b/test/parallel/test-http-get-pipeline-problem.js index 3182c1faacd802..b8b11e7e77c29a 100644 --- a/test/parallel/test-http-get-pipeline-problem.js +++ b/test/parallel/test-http-get-pipeline-problem.js @@ -32,7 +32,8 @@ const Countdown = require('../common/countdown'); http.globalAgent.maxSockets = 1; -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); const image = fixtures.readSync('/person.jpg'); @@ -68,7 +69,7 @@ server.listen(0, function() { http.get(opts, function(res) { console.error(`recv ${x}`); - const s = fs.createWriteStream(`${common.tmpDir}/${x}.jpg`); + const s = fs.createWriteStream(`${tmpdir.path}/${x}.jpg`); res.pipe(s); s.on('finish', function() { @@ -85,13 +86,13 @@ server.listen(0, function() { function checkFiles() { // Should see 1.jpg, 2.jpg, ..., 100.jpg in tmpDir - const files = fs.readdirSync(common.tmpDir); + const files = fs.readdirSync(tmpdir.path); assert(total <= files.length); for (let i = 0; i < total; i++) { const fn = `${i}.jpg`; assert.ok(files.includes(fn), `couldn't find '${fn}'`); - const stat = fs.statSync(`${common.tmpDir}/${fn}`); + const stat = fs.statSync(`${tmpdir.path}/${fn}`); assert.strictEqual( image.length, stat.size, `size doesn't match on '${fn}'. Got ${stat.size} bytes`); diff --git a/test/parallel/test-http-parser-freed-before-upgrade.js b/test/parallel/test-http-parser-freed-before-upgrade.js new file mode 100644 index 00000000000000..4ba1de9501681c --- /dev/null +++ b/test/parallel/test-http-parser-freed-before-upgrade.js @@ -0,0 +1,33 @@ +'use strict'; + +const common = require('../common'); +const assert = require('assert'); +const http = require('http'); + +const server = http.createServer(); + +server.on('upgrade', common.mustCall((request, socket) => { + assert.strictEqual(socket.parser, null); + socket.write([ + 'HTTP/1.1 101 Switching Protocols', + 'Connection: Upgrade', + 'Upgrade: WebSocket', + '\r\n' + ].join('\r\n')); +})); + +server.listen(common.mustCall(() => { + const request = http.get({ + port: server.address().port, + headers: { + Connection: 'Upgrade', + Upgrade: 'WebSocket' + } + }); + + request.on('upgrade', common.mustCall((response, socket) => { + assert.strictEqual(socket.parser, null); + socket.destroy(); + server.close(); + })); +})); diff --git a/test/parallel/test-http-pipe-fs.js b/test/parallel/test-http-pipe-fs.js index fd625bb4acc541..dfb44ff3b25d99 100644 --- a/test/parallel/test-http-pipe-fs.js +++ b/test/parallel/test-http-pipe-fs.js @@ -29,9 +29,10 @@ const NUMBER_OF_STREAMS = 2; const countdown = new Countdown(NUMBER_OF_STREAMS, () => server.close()); -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); -const file = path.join(common.tmpDir, 'http-pipe-fs-test.txt'); +const file = path.join(tmpdir.path, 'http-pipe-fs-test.txt'); const server = http.createServer(common.mustCall(function(req, res) { const stream = fs.createWriteStream(file); diff --git a/test/parallel/test-http-unix-socket-keep-alive.js b/test/parallel/test-http-unix-socket-keep-alive.js index 668c440325e0ec..11b3d9b39264a4 100644 --- a/test/parallel/test-http-unix-socket-keep-alive.js +++ b/test/parallel/test-http-unix-socket-keep-alive.js @@ -5,7 +5,8 @@ const http = require('http'); const server = http.createServer((req, res) => res.end()); -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); server.listen(common.PIPE, 
common.mustCall(() => asyncLoop(makeKeepAliveRequest, 10, common.mustCall(() => diff --git a/test/parallel/test-http-unix-socket.js b/test/parallel/test-http-unix-socket.js index 6d5897cacbecc2..08a533b26875ba 100644 --- a/test/parallel/test-http-unix-socket.js +++ b/test/parallel/test-http-unix-socket.js @@ -34,7 +34,8 @@ const server = http.createServer(function(req, res) { res.end(); }); -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); server.listen(common.PIPE, common.mustCall(function() { diff --git a/test/parallel/test-http-upgrade-binary.js b/test/parallel/test-http-upgrade-binary.js new file mode 100644 index 00000000000000..002ac9c564ad1e --- /dev/null +++ b/test/parallel/test-http-upgrade-binary.js @@ -0,0 +1,28 @@ +'use strict'; +const { mustCall } = require('../common'); +const assert = require('assert'); +const http = require('http'); +const net = require('net'); + +// https://github.com/nodejs/node/issues/17789 - a connection upgrade response +// that has a Transfer-Encoding header and a body whose first byte is > 127 +// triggers a bug where said byte is skipped over. +net.createServer(mustCall(function(conn) { + conn.write('HTTP/1.1 101 Switching Protocols\r\n' + + 'Connection: upgrade\r\n' + + 'Transfer-Encoding: chunked\r\n' + + 'Upgrade: websocket\r\n' + + '\r\n' + + '\u0080', 'latin1'); + this.close(); +})).listen(0, mustCall(function() { + http.get({ + host: this.address().host, + port: this.address().port, + headers: { 'Connection': 'upgrade', 'Upgrade': 'websocket' }, + }).on('upgrade', mustCall((res, conn, head) => { + assert.strictEqual(head.length, 1); + assert.strictEqual(head[0], 128); + conn.destroy(); + })); +})); diff --git a/test/parallel/test-http2-altsvc.js b/test/parallel/test-http2-altsvc.js new file mode 100644 index 00000000000000..9fd9a9fc278552 --- /dev/null +++ b/test/parallel/test-http2-altsvc.js @@ -0,0 +1,126 @@ +'use strict'; + +const common = require('../common'); +if (!common.hasCrypto) + common.skip('missing crypto'); + +const assert = require('assert'); +const http2 = require('http2'); +const { URL } = require('url'); +const Countdown = require('../common/countdown'); + +const server = http2.createServer(); +server.on('stream', common.mustCall((stream) => { + stream.session.altsvc('h2=":8000"', stream.id); + stream.respond(); + stream.end('ok'); +})); +server.on('session', common.mustCall((session) => { + // Origin may be specified by string, URL object, or object with an + // origin property. For string and URL object, origin is guaranteed + // to be an ASCII serialized origin. For object with an origin + // property, it is up to the user to ensure proper serialization. 
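An aside on the "ASCII serialized origin" mentioned in the comment above, before the three call forms that follow: for the string and URL-object forms, the origin comes from the WHATWG URL parser, which lower-cases the host, punycodes non-ASCII labels, keeps a non-default port and drops the path. A minimal illustration (plain `URL` behaviour, not part of the test):

```js
// Standard WHATWG URL origin serialization; this is the ASCII origin that
// the string and URL-object forms of session.altsvc() end up carrying.
const { URL } = require('url');

console.log(new URL('https://Example.org:8111/this').origin);
// => 'https://example.org:8111'   (host lower-cased, path dropped)

console.log(new URL('https://bücher.example:8111/').origin);
// => 'https://xn--bcher-kva.example:8111'   (non-ASCII label punycoded)
```

For the plain `{ origin }` object form, no such normalization happens, which is why the comment leaves serialization up to the caller.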
+ session.altsvc('h2=":8000"', 'https://example.org:8111/this'); + session.altsvc('h2=":8000"', new URL('https://example.org:8111/this')); + session.altsvc('h2=":8000"', { origin: 'https://example.org:8111' }); + + // Won't error, but won't send anything because the stream does not exist + session.altsvc('h2=":8000"', 3); + + // Will error because the numeric stream id is out of valid range + [0, -1, 1.1, 0xFFFFFFFF + 1, Infinity, -Infinity].forEach((i) => { + common.expectsError( + () => session.altsvc('h2=":8000"', i), + { + code: 'ERR_OUT_OF_RANGE', + type: RangeError + } + ); + }); + + // First argument must be a string + [0, {}, [], null, Infinity].forEach((i) => { + common.expectsError( + () => session.altsvc(i), + { + code: 'ERR_INVALID_ARG_TYPE', + type: TypeError + } + ); + }); + + ['\u0001', 'h2="\uff20"', '👀'].forEach((i) => { + common.expectsError( + () => session.altsvc(i), + { + code: 'ERR_INVALID_CHAR', + type: TypeError + } + ); + }); + + [{}, [], true].forEach((i) => { + common.expectsError( + () => session.altsvc('clear', i), + { + code: 'ERR_INVALID_ARG_TYPE', + type: TypeError + } + ); + }); + + [ + 'abc:', + new URL('abc:'), + { origin: 'null' }, + { origin: '' } + ].forEach((i) => { + common.expectsError( + () => session.altsvc('h2=":8000', i), + { + code: 'ERR_HTTP2_ALTSVC_INVALID_ORIGIN', + type: TypeError + } + ); + }); + + // arguments + origin are too long for an ALTSVC frame + common.expectsError( + () => { + session.altsvc('h2=":8000"', + `http://example.${'a'.repeat(17000)}.org:8000`); + }, + { + code: 'ERR_HTTP2_ALTSVC_LENGTH', + type: TypeError + } + ); +})); + +server.listen(0, common.mustCall(() => { + const client = http2.connect(`http://localhost:${server.address().port}`); + + const countdown = new Countdown(4, () => { + client.close(); + server.close(); + }); + + client.on('altsvc', common.mustCall((alt, origin, stream) => { + assert.strictEqual(alt, 'h2=":8000"'); + switch (stream) { + case 0: + assert.strictEqual(origin, 'https://example.org:8111'); + break; + case 1: + assert.strictEqual(origin, ''); + break; + default: + assert.fail('should not happen'); + } + countdown.dec(); + }, 4)); + + const req = client.request(); + req.resume(); + req.on('close', common.mustCall()); +})); diff --git a/test/parallel/test-http2-backpressure.js b/test/parallel/test-http2-backpressure.js new file mode 100644 index 00000000000000..9b69dddbfd2e26 --- /dev/null +++ b/test/parallel/test-http2-backpressure.js @@ -0,0 +1,49 @@ +'use strict'; + +// Verifies that a full HTTP2 pipeline handles backpressure. 
+ +const common = require('../common'); +if (!common.hasCrypto) + common.skip('missing crypto'); +const assert = require('assert'); +const http2 = require('http2'); +const makeDuplexPair = require('../common/duplexpair'); + +common.crashOnUnhandledRejection(); + +{ + let req; + const server = http2.createServer(); + server.on('stream', common.mustCallAsync(async (stream, headers) => { + stream.respond({ + 'content-type': 'text/html', + ':status': 200 + }); + req._readableState.highWaterMark = 20; + stream._writableState.highWaterMark = 20; + assert.strictEqual(stream.write('A'.repeat(5)), true); + assert.strictEqual(stream.write('A'.repeat(40)), false); + assert.strictEqual(await event(req, 'data'), 'A'.repeat(5)); + assert.strictEqual(await event(req, 'data'), 'A'.repeat(40)); + await event(stream, 'drain'); + assert.strictEqual(stream.write('A'.repeat(5)), true); + assert.strictEqual(stream.write('A'.repeat(40)), false); + })); + + const { clientSide, serverSide } = makeDuplexPair(); + server.emit('connection', serverSide); + + const client = http2.connect('http://localhost:80', { + createConnection: common.mustCall(() => clientSide) + }); + + req = client.request({ ':path': '/' }); + req.setEncoding('utf8'); + req.end(); +} + +function event(ee, eventName) { + return new Promise((resolve) => { + ee.once(eventName, common.mustCall(resolve)); + }); +} diff --git a/test/parallel/test-http2-client-data-end.js b/test/parallel/test-http2-client-data-end.js index 43665029630c12..2f251692d5c412 100644 --- a/test/parallel/test-http2-client-data-end.js +++ b/test/parallel/test-http2-client-data-end.js @@ -5,53 +5,37 @@ if (!common.hasCrypto) common.skip('missing crypto'); const assert = require('assert'); const http2 = require('http2'); +const Countdown = require('../common/countdown'); const server = http2.createServer(); server.on('stream', common.mustCall((stream, headers, flags) => { - const port = server.address().port; if (headers[':path'] === '/') { - stream.pushStream({ - ':scheme': 'http', - ':path': '/foobar', - ':authority': `localhost:${port}`, - }, (push, headers) => { + stream.pushStream({ ':path': '/foobar' }, (err, push, headers) => { + assert.ifError(err); push.respond({ 'content-type': 'text/html', - ':status': 200, 'x-push-data': 'pushed by server', }); push.write('pushed by server '); - // Sending in next immediate ensures that a second data frame - // will be sent to the client, which will cause the 'data' event - // to fire multiple times. 
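Stepping back from this hunk for a moment: several refactors in this section replace a hand-rolled `remaining`/`maybeClose()` counter with the shared `Countdown` helper from `test/common/countdown`. For readers following along, this is roughly the shape of that helper, inferred from how the tests use it (a sketch only; the real module may differ in details):

```js
// Approximate shape of test/common/countdown, inferred from usage:
// new Countdown(n, done) runs `done` after the nth call to dec().
const assert = require('assert');

class Countdown {
  constructor(limit, done) {
    assert.strictEqual(typeof limit, 'number');
    assert.strictEqual(typeof done, 'function');
    this.limit = limit;
    this.done = done;
  }

  dec() {
    assert(this.limit > 0, 'Countdown expired');
    if (--this.limit === 0)
      this.done();
    return this.limit;
  }

  get remaining() {
    return this.limit;
  }
}

module.exports = Countdown;
```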
- setImmediate(() => { - push.end('data'); - }); + setImmediate(() => push.end('data')); stream.end('st'); }); } - stream.respond({ - 'content-type': 'text/html', - ':status': 200 - }); + stream.respond({ 'content-type': 'text/html' }); stream.write('te'); })); server.listen(0, common.mustCall(() => { const port = server.address().port; - const headers = { ':path': '/' }; const client = http2.connect(`http://localhost:${port}`); - const req = client.request(headers); + const req = client.request(); - let expected = 2; - function maybeClose() { - if (--expected === 0) { - server.close(); - client.destroy(); - } - } + const countdown = new Countdown(2, () => { + server.close(); + client.close(); + }); req.on('response', common.mustCall((headers) => { assert.strictEqual(headers[':status'], 200); @@ -70,13 +54,11 @@ server.listen(0, common.mustCall(() => { stream.setEncoding('utf8'); let pushData = ''; - stream.on('data', common.mustCall((d) => { - pushData += d; - }, 2)); + stream.on('data', (d) => pushData += d); stream.on('end', common.mustCall(() => { assert.strictEqual(pushData, 'pushed by server data'); - maybeClose(); })); + stream.on('close', () => countdown.dec()); })); let data = ''; @@ -85,7 +67,6 @@ server.listen(0, common.mustCall(() => { req.on('data', common.mustCallAtLeast((d) => data += d)); req.on('end', common.mustCall(() => { assert.strictEqual(data, 'test'); - maybeClose(); })); - req.end(); + req.on('close', () => countdown.dec()); })); diff --git a/test/parallel/test-http2-client-destroy.js b/test/parallel/test-http2-client-destroy.js index bb93366247aef7..fab8a4fc24d652 100644 --- a/test/parallel/test-http2-client-destroy.js +++ b/test/parallel/test-http2-client-destroy.js @@ -8,139 +8,115 @@ if (!common.hasCrypto) const assert = require('assert'); const h2 = require('http2'); const { kSocket } = require('internal/http2/util'); +const Countdown = require('../common/countdown'); { const server = h2.createServer(); - server.listen( - 0, - common.mustCall(() => { - const destroyCallbacks = [ - (client) => client.destroy(), - (client) => client[kSocket].destroy() - ]; - - let remaining = destroyCallbacks.length; - - destroyCallbacks.forEach((destroyCallback) => { - const client = h2.connect(`http://localhost:${server.address().port}`); - client.on( - 'connect', - common.mustCall(() => { - const socket = client[kSocket]; - - assert(socket, 'client session has associated socket'); - assert( - !client.destroyed, - 'client has not been destroyed before destroy is called' - ); - assert( - !socket.destroyed, - 'socket has not been destroyed before destroy is called' - ); - - // Ensure that 'close' event is emitted - client.on('close', common.mustCall()); - - destroyCallback(client); - - assert( - !client[kSocket], - 'client.socket undefined after destroy is called' - ); - - // Must must be closed - client.on( - 'close', - common.mustCall(() => { - assert(client.destroyed); - }) - ); - - // socket will close on process.nextTick - socket.on( - 'close', - common.mustCall(() => { - assert(socket.destroyed); - }) - ); - - if (--remaining === 0) { - server.close(); - } - }) + server.listen(0, common.mustCall(() => { + const destroyCallbacks = [ + (client) => client.destroy(), + (client) => client[kSocket].destroy() + ]; + + const countdown = new Countdown(destroyCallbacks.length, () => { + server.close(); + }); + + destroyCallbacks.forEach((destroyCallback) => { + const client = h2.connect(`http://localhost:${server.address().port}`); + client.on('connect', common.mustCall(() => { + 
const socket = client[kSocket]; + + assert(socket, 'client session has associated socket'); + assert( + !client.destroyed, + 'client has not been destroyed before destroy is called' ); - }); - }) - ); + assert( + !socket.destroyed, + 'socket has not been destroyed before destroy is called' + ); + + destroyCallback(client); + + client.on('close', common.mustCall(() => { + assert(client.destroyed); + })); + + countdown.dec(); + })); + }); + })); } // test destroy before client operations { const server = h2.createServer(); - server.listen( - 0, - common.mustCall(() => { - const client = h2.connect(`http://localhost:${server.address().port}`); - const req = client.request(); - client.destroy(); - - req.on('response', common.mustNotCall()); - req.resume(); - - const sessionError = { - type: Error, - code: 'ERR_HTTP2_INVALID_SESSION', - message: 'The session has been destroyed' - }; - + server.listen(0, common.mustCall(() => { + const client = h2.connect(`http://localhost:${server.address().port}`); + const socket = client[kSocket]; + socket.on('close', common.mustCall(() => { + assert(socket.destroyed); + })); + + + const req = client.request(); + req.on('error', common.expectsError({ + code: 'ERR_HTTP2_STREAM_CANCEL', + type: Error, + message: 'The pending stream has been canceled' + })); + + client.destroy(); + + req.on('response', common.mustNotCall()); + + const sessionError = { + type: Error, + code: 'ERR_HTTP2_INVALID_SESSION', + message: 'The session has been destroyed' + }; + + common.expectsError(() => client.request(), sessionError); + common.expectsError(() => client.settings({}), sessionError); + client.close(); // should be a non-op at this point + + // Wait for setImmediate call from destroy() to complete + // so that state.destroyed is set to true + setImmediate(() => { common.expectsError(() => client.request(), sessionError); common.expectsError(() => client.settings({}), sessionError); - common.expectsError(() => client.shutdown(), sessionError); - - // Wait for setImmediate call from destroy() to complete - // so that state.destroyed is set to true - setImmediate(() => { - common.expectsError(() => client.request(), sessionError); - common.expectsError(() => client.settings({}), sessionError); - common.expectsError(() => client.shutdown(), sessionError); - }); - - req.on( - 'end', - common.mustCall(() => { - server.close(); - }) - ); - req.end(); - }) - ); + client.close(); // should be a non-op at this point + }); + + req.resume(); + req.on('end', common.mustCall()); + req.on('close', common.mustCall(() => server.close())); + })); } // test destroy before goaway { const server = h2.createServer(); - server.on( - 'stream', - common.mustCall((stream) => { - stream.on('error', common.mustCall()); - stream.session.shutdown(); - }) - ); - server.listen( - 0, - common.mustCall(() => { - const client = h2.connect(`http://localhost:${server.address().port}`); + server.on('stream', common.mustCall((stream) => { + stream.session.destroy(); + })); + + server.listen(0, common.mustCall(() => { + const client = h2.connect(`http://localhost:${server.address().port}`); + // On some platforms (e.g. windows), an ECONNRESET may occur at this + // point -- or it may not. 
Do not make this a mustCall + client.on('error', () => {}); + + client.on('close', () => { + server.close(); + // calling destroy in here should not matter + client.destroy(); + }); - client.on( - 'goaway', - common.mustCall(() => { - // We ought to be able to destroy the client in here without an error - server.close(); - client.destroy(); - }) - ); - - client.request(); - }) - ); + const req = client.request(); + // On some platforms (e.g. windows), an ECONNRESET may occur at this + // point -- or it may not. Do not make this a mustCall + req.on('error', () => {}); + })); } diff --git a/test/parallel/test-http2-client-onconnect-errors.js b/test/parallel/test-http2-client-onconnect-errors.js index 08007753654878..af67a0d0ae27db 100644 --- a/test/parallel/test-http2-client-onconnect-errors.js +++ b/test/parallel/test-http2-client-onconnect-errors.js @@ -1,13 +1,14 @@ 'use strict'; +const common = require('../common'); +if (!common.hasCrypto) + common.skip('missing crypto'); + const { constants, Http2Session, nghttp2ErrorString } = process.binding('http2'); -const common = require('../common'); -if (!common.hasCrypto) - common.skip('missing crypto'); const http2 = require('http2'); // tests error handling within requestOnConnect @@ -69,6 +70,8 @@ server.listen(0, common.mustCall(() => runTest(tests.shift()))); function runTest(test) { const client = http2.connect(`http://localhost:${server.address().port}`); + client.on('close', common.mustCall()); + const req = client.request({ ':method': 'POST' }); currentError = test.ngError; @@ -83,15 +86,20 @@ function runTest(test) { if (test.type === 'stream') { client.on('error', errorMustNotCall); req.on('error', errorMustCall); - req.on('error', common.mustCall(() => { - client.destroy(); - })); } else { client.on('error', errorMustCall); - req.on('error', errorMustNotCall); + req.on('error', (err) => { + common.expectsError({ + code: 'ERR_HTTP2_STREAM_CANCEL' + })(err); + common.expectsError({ + code: 'ERR_HTTP2_ERROR' + })(err.cause); + }); } - req.on('end', common.mustCall(() => { + req.on('end', common.mustCall()); + req.on('close', common.mustCall(() => { client.destroy(); if (!tests.length) { diff --git a/test/parallel/test-http2-client-port-80.js b/test/parallel/test-http2-client-port-80.js index a9d19eb5b9f008..fc82e231f6af8f 100644 --- a/test/parallel/test-http2-client-port-80.js +++ b/test/parallel/test-http2-client-port-80.js @@ -16,4 +16,10 @@ net.connect = common.mustCall((...args) => { }); const client = http2.connect('http://localhost:80'); -client.destroy(); + +// A socket error may or may not occur depending on whether there is something +// currently listening on port 80. Keep this as a non-op and not a mustCall or +// mustNotCall. 
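The comment above leans on the distinction between the three kinds of listener used throughout these tests: `common.mustCall(fn)` must be invoked (the harness checks at process exit), `common.mustNotCall()` fails immediately if invoked, and the bare `() => {}` that the comment is attached to tolerates either outcome, which is exactly what these may-or-may-not socket errors need. A simplified sketch of the two helpers (the real `test/common` implementation also records call sites and supports call counts):

```js
// Simplified versions of the helpers; illustration only.
const assert = require('assert');

function mustCall(fn = () => {}, expected = 1) {
  let actual = 0;
  process.on('exit', () => assert.strictEqual(actual, expected));
  return function(...args) {
    actual++;
    return fn.apply(this, args);
  };
}

function mustNotCall(msg = 'function should not have been called') {
  return () => assert.fail(msg);
}
```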
+client.on('error', () => {}); + +client.close(); diff --git a/test/parallel/test-http2-client-priority-before-connect.js b/test/parallel/test-http2-client-priority-before-connect.js index b062107e4ab7f7..a9615d2cd69f12 100644 --- a/test/parallel/test-http2-client-priority-before-connect.js +++ b/test/parallel/test-http2-client-priority-before-connect.js @@ -8,31 +8,21 @@ const h2 = require('http2'); const server = h2.createServer(); // we use the lower-level API here -server.on('stream', common.mustCall(onStream)); - -function onStream(stream, headers, flags) { - stream.respond({ - 'content-type': 'text/html', - ':status': 200 - }); - stream.end('hello world'); -} - -server.listen(0); - -server.on('listening', common.mustCall(() => { +server.on('stream', common.mustCall((stream) => { + stream.respond(); + stream.end('ok'); +})); +server.listen(0, common.mustCall(() => { const client = h2.connect(`http://localhost:${server.address().port}`); - - const req = client.request({ ':path': '/' }); + const req = client.request(); req.priority({}); req.on('response', common.mustCall()); req.resume(); - req.on('end', common.mustCall(() => { + req.on('end', common.mustCall()); + req.on('close', common.mustCall(() => { server.close(); - client.destroy(); + client.close(); })); - req.end(); - })); diff --git a/test/parallel/test-http2-client-promisify-connect.js b/test/parallel/test-http2-client-promisify-connect.js index b66827c1507302..2eb7da3b9cfd85 100644 --- a/test/parallel/test-http2-client-promisify-connect.js +++ b/test/parallel/test-http2-client-promisify-connect.js @@ -15,7 +15,6 @@ server.on('stream', common.mustCall((stream) => { stream.end('ok'); })); server.listen(0, common.mustCall(() => { - const connect = util.promisify(http2.connect); connect(`http://localhost:${server.address().port}`) @@ -28,7 +27,7 @@ server.listen(0, common.mustCall(() => { req.on('data', (chunk) => data += chunk); req.on('end', common.mustCall(() => { assert.strictEqual(data, 'ok'); - client.destroy(); + client.close(); server.close(); })); })); diff --git a/test/parallel/test-http2-client-request-options-errors.js b/test/parallel/test-http2-client-request-options-errors.js index 5d3fc0ab5a1fd8..3ad808cb1fbe23 100644 --- a/test/parallel/test-http2-client-request-options-errors.js +++ b/test/parallel/test-http2-client-request-options-errors.js @@ -3,7 +3,6 @@ const common = require('../common'); if (!common.hasCrypto) common.skip('missing crypto'); -const assert = require('assert'); const http2 = require('http2'); // Check if correct errors are emitted when wrong type of data is passed @@ -34,29 +33,27 @@ server.listen(0, common.mustCall(() => { const port = server.address().port; const client = http2.connect(`http://localhost:${port}`); - Object.keys(optionsToTest).forEach((option) => { - Object.keys(types).forEach((type) => { - if (type === optionsToTest[option]) { - return; - } - - assert.throws( - () => client.request({ - ':method': 'CONNECT', - ':authority': `localhost:${port}` - }, { - [option]: types[type] - }), - common.expectsError({ - type: TypeError, - code: 'ERR_INVALID_OPT_VALUE', - message: `The value "${String(types[type])}" is invalid ` + - `for option "${option}"` - }) - ); + client.on('connect', () => { + Object.keys(optionsToTest).forEach((option) => { + Object.keys(types).forEach((type) => { + if (type === optionsToTest[option]) + return; + + common.expectsError( + () => client.request({ + ':method': 'CONNECT', + ':authority': `localhost:${port}` + }, { + [option]: types[type] + }), { + type: 
TypeError, + code: 'ERR_INVALID_OPT_VALUE', + message: `The value "${String(types[type])}" is invalid ` + + `for option "${option}"` + }); + }); }); + server.close(); + client.close(); }); - - server.close(); - client.destroy(); })); diff --git a/test/parallel/test-http2-client-rststream-before-connect.js b/test/parallel/test-http2-client-rststream-before-connect.js index eb3a0087d7893c..aeb31949db074e 100644 --- a/test/parallel/test-http2-client-rststream-before-connect.js +++ b/test/parallel/test-http2-client-rststream-before-connect.js @@ -8,33 +8,61 @@ const h2 = require('http2'); const server = h2.createServer(); server.on('stream', (stream) => { + stream.on('close', common.mustCall()); stream.respond(); stream.end('ok'); }); -server.listen(0); +server.listen(0, common.mustCall(() => { + const client = h2.connect(`http://localhost:${server.address().port}`); + const req = client.request(); + const closeCode = 1; -server.on('listening', common.mustCall(() => { + common.expectsError( + () => req.close(2 ** 32), + { + type: RangeError, + code: 'ERR_OUT_OF_RANGE', + message: 'The "code" argument is out of range' + } + ); + assert.strictEqual(req.closed, false); - const client = h2.connect(`http://localhost:${server.address().port}`); + [true, 1, {}, [], null, 'test'].forEach((notFunction) => { + common.expectsError( + () => req.close(closeCode, notFunction), + { + type: TypeError, + code: 'ERR_INVALID_CALLBACK', + message: 'callback must be a function' + } + ); + assert.strictEqual(req.closed, false); + }); - const req = client.request({ ':path': '/' }); - req.rstStream(0); + req.close(closeCode, common.mustCall()); + assert.strictEqual(req.closed, true); // make sure that destroy is called req._destroy = common.mustCall(req._destroy.bind(req)); - // second call doesn't do anything - assert.doesNotThrow(() => req.rstStream(8)); + // Second call doesn't do anything. 
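The `close(closeCode + 1)` call that this comment introduces, together with the assertions around it, boils down to: closing an `Http2Stream` is idempotent, and the RST_STREAM code from the first `close()` call is the one that sticks. A condensed, standalone sketch of that behaviour (an illustrative companion to the test, not part of the diff):

```js
'use strict';
const http2 = require('http2');

const server = http2.createServer();   // no handlers needed for this sketch
server.listen(0, () => {
  const client = http2.connect(`http://localhost:${server.address().port}`);
  const req = client.request();

  req.close(1);                // NGHTTP2_PROTOCOL_ERROR
  req.close(2);                // no-op: the stream is already closing

  // Closing with a non-zero code may surface as a stream error; ignore it here.
  req.on('error', () => {});
  req.on('close', () => {
    console.log(req.rstCode);  // 1, the code from the first close() call
    client.close();
    server.close();
  });
});
```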
+ req.close(closeCode + 1); req.on('close', common.mustCall((code) => { assert.strictEqual(req.destroyed, true); - assert.strictEqual(code, 0); + assert.strictEqual(code, closeCode); server.close(); - client.destroy(); + client.close(); + })); + + req.on('error', common.expectsError({ + code: 'ERR_HTTP2_STREAM_ERROR', + type: Error, + message: `Stream closed with error code ${closeCode}` })); - req.on('response', common.mustNotCall()); + req.on('response', common.mustCall()); req.resume(); req.on('end', common.mustCall()); req.end(); diff --git a/test/parallel/test-http2-client-set-priority.js b/test/parallel/test-http2-client-set-priority.js index f3e1d7afa50d7e..64b7b56dfa2543 100644 --- a/test/parallel/test-http2-client-set-priority.js +++ b/test/parallel/test-http2-client-set-priority.js @@ -10,26 +10,20 @@ const checkWeight = (actual, expect) => { const server = http2.createServer(); server.on('stream', common.mustCall((stream, headers, flags) => { assert.strictEqual(stream.state.weight, expect); - stream.respond({ - 'content-type': 'text/html', - ':status': 200 - }); + stream.respond(); stream.end('test'); })); server.listen(0, common.mustCall(() => { - const port = server.address().port; - const client = http2.connect(`http://localhost:${port}`); + const client = http2.connect(`http://localhost:${server.address().port}`); + const req = client.request({}, { weight: actual }); - const headers = { ':path': '/' }; - const req = client.request(headers, { weight: actual }); - - req.on('data', common.mustCall(() => {})); - req.on('end', common.mustCall(() => { + req.on('data', common.mustCall()); + req.on('end', common.mustCall()); + req.on('close', common.mustCall(() => { server.close(); - client.destroy(); + client.close(); })); - req.end(); })); }; diff --git a/test/parallel/test-http2-client-settings-before-connect.js b/test/parallel/test-http2-client-settings-before-connect.js index 27caa9e601897b..4642bf5220f554 100644 --- a/test/parallel/test-http2-client-settings-before-connect.js +++ b/test/parallel/test-http2-client-settings-before-connect.js @@ -3,62 +3,53 @@ const common = require('../common'); if (!common.hasCrypto) common.skip('missing crypto'); -const assert = require('assert'); const h2 = require('http2'); const server = h2.createServer(); // we use the lower-level API here -server.on('stream', common.mustCall(onStream)); - -function onStream(stream, headers, flags) { - stream.respond({ - 'content-type': 'text/html', - ':status': 200 - }); - stream.end('hello world'); -} - -server.listen(0); - -server.on('listening', common.mustCall(() => { +server.on('stream', common.mustCall((stream, headers, flags) => { + stream.respond(); + stream.end('ok'); +})); +server.on('session', common.mustCall((session) => { + session.on('remoteSettings', common.mustCall(2)); +})); +server.listen(0, common.mustCall(() => { const client = h2.connect(`http://localhost:${server.address().port}`); - assert.throws(() => client.settings({ headerTableSize: -1 }), - RangeError); - assert.throws(() => client.settings({ headerTableSize: 2 ** 32 }), - RangeError); - assert.throws(() => client.settings({ initialWindowSize: -1 }), - RangeError); - assert.throws(() => client.settings({ initialWindowSize: 2 ** 32 }), - RangeError); - assert.throws(() => client.settings({ maxFrameSize: 1 }), - RangeError); - assert.throws(() => client.settings({ maxFrameSize: 2 ** 24 }), - RangeError); - assert.throws(() => client.settings({ maxConcurrentStreams: -1 }), - RangeError); - assert.throws(() => client.settings({ 
maxConcurrentStreams: 2 ** 31 }), - RangeError); - assert.throws(() => client.settings({ maxHeaderListSize: -1 }), - RangeError); - assert.throws(() => client.settings({ maxHeaderListSize: 2 ** 32 }), - RangeError); - ['a', 1, 0, null, {}].forEach((i) => { - assert.throws(() => client.settings({ enablePush: i }), TypeError); + [ + ['headerTableSize', -1, RangeError], + ['headerTableSize', 2 ** 32, RangeError], + ['initialWindowSize', -1, RangeError], + ['initialWindowSize', 2 ** 32, RangeError], + ['maxFrameSize', 1, RangeError], + ['maxFrameSize', 2 ** 24, RangeError], + ['maxConcurrentStreams', -1, RangeError], + ['maxConcurrentStreams', 2 ** 31, RangeError], + ['maxHeaderListSize', -1, RangeError], + ['maxHeaderListSize', 2 ** 32, RangeError], + ['enablePush', 'a', TypeError], + ['enablePush', 1, TypeError], + ['enablePush', 0, TypeError], + ['enablePush', null, TypeError], + ['enablePush', {}, TypeError] + ].forEach((i) => { + common.expectsError( + () => client.settings({ [i[0]]: i[1] }), + { + code: 'ERR_HTTP2_INVALID_SETTING_VALUE', + type: i[2] }); }); client.settings({ maxFrameSize: 1234567 }); - const req = client.request({ ':path': '/' }); - + const req = client.request(); req.on('response', common.mustCall()); req.resume(); - req.on('end', common.mustCall(() => { + req.on('close', common.mustCall(() => { server.close(); - client.destroy(); + client.close(); })); - req.end(); - })); diff --git a/test/parallel/test-http2-client-shutdown-before-connect.js b/test/parallel/test-http2-client-shutdown-before-connect.js index 4fed0ee3ad0703..bd971ebf7de69c 100644 --- a/test/parallel/test-http2-client-shutdown-before-connect.js +++ b/test/parallel/test-http2-client-shutdown-before-connect.js @@ -10,15 +10,7 @@ const server = h2.createServer(); // we use the lower-level API here server.on('stream', common.mustNotCall()); -server.listen(0); - -server.on('listening', common.mustCall(() => { - +server.listen(0, common.mustCall(() => { const client = h2.connect(`http://localhost:${server.address().port}`); - - client.shutdown({ graceful: true }, common.mustCall(() => { - server.close(); - client.destroy(); - })); - + client.close(common.mustCall(() => server.close())); })); diff --git a/test/parallel/test-http2-client-socket-destroy.js b/test/parallel/test-http2-client-socket-destroy.js index faf4643b0304e3..3eb7e898edcfc1 100644 --- a/test/parallel/test-http2-client-socket-destroy.js +++ b/test/parallel/test-http2-client-socket-destroy.js @@ -14,38 +14,27 @@ const body = const server = h2.createServer(); // we use the lower-level API here -server.on('stream', common.mustCall(onStream)); - -function onStream(stream) { - // The stream aborted event must have been triggered +server.on('stream', common.mustCall((stream) => { stream.on('aborted', common.mustCall()); - - stream.respond({ - 'content-type': 'text/html', - ':status': 200 - }); + stream.on('close', common.mustCall()); + stream.respond(); stream.write(body); -} - -server.listen(0); + // purposefully do not end() +})); -server.on('listening', common.mustCall(function() { +server.listen(0, common.mustCall(function() { const client = h2.connect(`http://localhost:${this.address().port}`); - - const req = client.request({ ':path': '/' }); + const req = client.request(); req.on('response', common.mustCall(() => { // send a premature socket close client[kSocket].destroy(); })); - req.on('data', common.mustNotCall()); - req.on('end', common.mustCall(() => { - server.close(); - })); + req.resume(); + req.on('end', common.mustCall()); + 
req.on('close', common.mustCall(() => server.close())); // On the client, the close event must call client.on('close', common.mustCall()); - req.end(); - })); diff --git a/test/parallel/test-http2-client-stream-destroy-before-connect.js b/test/parallel/test-http2-client-stream-destroy-before-connect.js index 06afbf3ce8ceb2..a2412b9f1d646a 100644 --- a/test/parallel/test-http2-client-stream-destroy-before-connect.js +++ b/test/parallel/test-http2-client-stream-destroy-before-connect.js @@ -20,36 +20,29 @@ server.on('stream', (stream) => { assert.strictEqual(err.code, 'ERR_HTTP2_STREAM_ERROR'); assert.strictEqual(err.message, 'Stream closed with error code 2'); }); - stream.respond({}); + stream.respond(); stream.end(); }); -server.listen(0); - -server.on('listening', common.mustCall(() => { - +server.listen(0, common.mustCall(() => { const client = h2.connect(`http://localhost:${server.address().port}`); - const req = client.request({ ':path': '/' }); - const err = new Error('test'); - req.destroy(err); + const req = client.request(); + req.destroy(new Error('test')); - req.on('error', common.mustCall((err) => { - common.expectsError({ - type: Error, - message: 'test' - })(err); + req.on('error', common.expectsError({ + type: Error, + message: 'test' })); req.on('close', common.mustCall((code) => { assert.strictEqual(req.rstCode, NGHTTP2_INTERNAL_ERROR); assert.strictEqual(code, NGHTTP2_INTERNAL_ERROR); server.close(); - client.destroy(); + client.close(); })); req.on('response', common.mustNotCall()); req.resume(); req.on('end', common.mustCall()); - })); diff --git a/test/parallel/test-http2-client-unescaped-path.js b/test/parallel/test-http2-client-unescaped-path.js index adfbd61fe762b4..190f8ce75e8917 100644 --- a/test/parallel/test-http2-client-unescaped-path.js +++ b/test/parallel/test-http2-client-unescaped-path.js @@ -4,6 +4,7 @@ const common = require('../common'); if (!common.hasCrypto) common.skip('missing crypto'); const http2 = require('http2'); +const Countdown = require('../common/countdown'); const server = http2.createServer(); @@ -13,14 +14,12 @@ const count = 32; server.listen(0, common.mustCall(() => { const client = http2.connect(`http://localhost:${server.address().port}`); + client.setMaxListeners(33); - let remaining = count + 1; - function maybeClose() { - if (--remaining === 0) { - server.close(); - client.destroy(); - } - } + const countdown = new Countdown(count + 1, () => { + server.close(); + client.close(); + }); // nghttp2 will catch the bad header value for us. 
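The comment above is the whole point of this test: a raw unescaped character in the `:path` pseudo-header is rejected at the nghttp2 layer, the stream is closed with error code 1, and the client side only has to assert on the resulting stream error. For application code the practical takeaway is simply to percent-encode the path first, e.g.:

```js
// Percent-encoding the path avoids the protocol error the test provokes
// on purpose (general illustration, not part of the test).
console.log(encodeURI('/caf\u00e9/menu item'));
// => '/caf%C3%A9/menu%20item'
```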
function doTest(i) { @@ -30,7 +29,7 @@ server.listen(0, common.mustCall(() => { type: Error, message: 'Stream closed with error code 1' })); - req.on('close', common.mustCall(maybeClose)); + req.on('close', common.mustCall(() => countdown.dec())); } for (let i = 0; i <= count; i += 1) diff --git a/test/parallel/test-http2-client-upload.js b/test/parallel/test-http2-client-upload.js index 8fb5f369ca4cb7..70a8ff3ced01c6 100644 --- a/test/parallel/test-http2-client-upload.js +++ b/test/parallel/test-http2-client-upload.js @@ -9,6 +9,7 @@ const assert = require('assert'); const http2 = require('http2'); const fs = require('fs'); const fixtures = require('../common/fixtures'); +const Countdown = require('../common/countdown'); const loc = fixtures.path('person.jpg'); let fileData; @@ -34,20 +35,21 @@ fs.readFile(loc, common.mustCall((err, data) => { server.listen(0, common.mustCall(() => { const client = http2.connect(`http://localhost:${server.address().port}`); - let remaining = 2; - function maybeClose() { - if (--remaining === 0) { - server.close(); - client.shutdown(); - } - } + const countdown = new Countdown(2, () => { + server.close(); + client.close(); + }); const req = client.request({ ':method': 'POST' }); req.on('response', common.mustCall()); + req.resume(); - req.on('end', common.mustCall(maybeClose)); + req.on('end', common.mustCall()); + + req.on('finish', () => countdown.dec()); const str = fs.createReadStream(loc); - req.on('finish', common.mustCall(maybeClose)); + str.on('end', common.mustCall()); + str.on('close', () => countdown.dec()); str.pipe(req); })); })); diff --git a/test/parallel/test-http2-client-write-before-connect.js b/test/parallel/test-http2-client-write-before-connect.js index 26674dcad369e3..6588d7dccd139d 100644 --- a/test/parallel/test-http2-client-write-before-connect.js +++ b/test/parallel/test-http2-client-write-before-connect.js @@ -8,47 +8,30 @@ const h2 = require('http2'); const server = h2.createServer(); -const { - HTTP2_HEADER_PATH, - HTTP2_HEADER_METHOD, - HTTP2_METHOD_POST -} = h2.constants; - // we use the lower-level API here -server.on('stream', common.mustCall(onStream)); - -function onStream(stream, headers, flags) { +server.on('stream', common.mustCall((stream, headers, flags) => { let data = ''; stream.setEncoding('utf8'); stream.on('data', (chunk) => data += chunk); stream.on('end', common.mustCall(() => { assert.strictEqual(data, 'some data more data'); })); - stream.respond({ - 'content-type': 'text/html', - ':status': 200 - }); - stream.end('hello world'); -} - -server.listen(0); - -server.on('listening', common.mustCall(() => { + stream.respond(); + stream.end('ok'); +})); +server.listen(0, common.mustCall(() => { const client = h2.connect(`http://localhost:${server.address().port}`); - const req = client.request({ - [HTTP2_HEADER_PATH]: '/', - [HTTP2_HEADER_METHOD]: HTTP2_METHOD_POST }); + const req = client.request({ ':method': 'POST' }); req.write('some data '); - req.write('more data'); + req.end('more data'); req.on('response', common.mustCall()); req.resume(); - req.on('end', common.mustCall(() => { + req.on('end', common.mustCall()); + req.on('close', common.mustCall(() => { server.close(); - client.destroy(); + client.close(); })); - req.end(); - })); diff --git a/test/parallel/test-http2-client-write-empty-string.js b/test/parallel/test-http2-client-write-empty-string.js new file mode 100644 index 00000000000000..c10698d417038d --- /dev/null +++ b/test/parallel/test-http2-client-write-empty-string.js @@ -0,0 +1,54 @@ +'use 
strict'; + +const assert = require('assert'); +const http2 = require('http2'); + +const common = require('../common'); +if (!common.hasCrypto) + common.skip('missing crypto'); + +for (const chunkSequence of [ + [ '' ], + [ '', '' ] +]) { + const server = http2.createServer(); + server.on('stream', common.mustCall((stream, headers, flags) => { + stream.respond({ 'content-type': 'text/html' }); + + let data = ''; + stream.on('data', common.mustNotCall((chunk) => { + data += chunk.toString(); + })); + stream.on('end', common.mustCall(() => { + stream.end(`"${data}"`); + })); + })); + + server.listen(0, common.mustCall(() => { + const port = server.address().port; + const client = http2.connect(`http://localhost:${port}`); + + const req = client.request({ + ':method': 'POST', + ':path': '/' + }); + + req.on('response', common.mustCall((headers) => { + assert.strictEqual(headers[':status'], 200); + assert.strictEqual(headers['content-type'], 'text/html'); + })); + + let data = ''; + req.setEncoding('utf8'); + req.on('data', common.mustCallAtLeast((d) => data += d)); + req.on('end', common.mustCall(() => { + assert.strictEqual(data, '""'); + server.close(); + client.close(); + })); + + for (const chunk of chunkSequence) + req.write(chunk); + req.end(); + })); +} diff --git a/test/parallel/test-http2-compat-errors.js b/test/parallel/test-http2-compat-errors.js index 5774d1a922bd52..c84318bad68e35 100644 --- a/test/parallel/test-http2-compat-errors.js +++ b/test/parallel/test-http2-compat-errors.js @@ -4,9 +4,7 @@ const common = require('../common'); if (!common.hasCrypto) common.skip('missing crypto'); -const assert = require('assert'); const h2 = require('http2'); -const { Http2Stream } = require('internal/http2/core'); // Errors should not be reported both in Http2ServerRequest // and Http2ServerResponse @@ -14,6 +12,7 @@ const { Http2Stream } = require('internal/http2/core'); let expected = null; const server = h2.createServer(common.mustCall(function(req, res) { + res.stream.on('error', common.mustCall()); req.on('error', common.mustNotCall()); res.on('error', common.mustNotCall()); req.on('aborted', common.mustCall()); @@ -26,27 +25,12 @@ const server = h2.createServer(common.mustCall(function(req, res) { server.close(); })); -server.on('streamError', common.mustCall(function(err, stream) { - assert.strictEqual(err, expected); - assert.strictEqual(stream instanceof Http2Stream, true); -})); - server.listen(0, common.mustCall(function() { - const port = server.address().port; - - const url = `http://localhost:${port}`; - const client = h2.connect(url, common.mustCall(function() { - const headers = { - ':path': '/foobar', - ':method': 'GET', - ':scheme': 'http', - ':authority': `localhost:${port}`, - }; - const request = client.request(headers); - request.on('data', common.mustCall(function(chunk) { - // cause an error on the server side + const url = `http://localhost:${server.address().port}`; + const client = h2.connect(url, common.mustCall(() => { + const request = client.request(); + request.on('data', common.mustCall((chunk) => { client.destroy(); })); - request.end(); })); })); diff --git a/test/parallel/test-http2-compat-expect-continue-check.js b/test/parallel/test-http2-compat-expect-continue-check.js index 800df1c432944f..6aded8b52935c1 100644 --- a/test/parallel/test-http2-compat-expect-continue-check.js +++ b/test/parallel/test-http2-compat-expect-continue-check.js @@ -12,74 +12,47 @@ const testResBody = 'other stuff!\n'; // through server receiving it, triggering 'checkContinue' 
custom handler, // writing the rest of the request to finally the client receiving to. -function handler(req, res) { - console.error('Server sent full response'); +const server = http2.createServer( + common.mustNotCall('Full request received before 100 Continue') +); - res.writeHead(200, { - 'content-type': 'text/plain', - 'abcd': '1' - }); +server.on('checkContinue', common.mustCall((req, res) => { + res.writeContinue(); + res.writeHead(200, {}); res.end(testResBody); // should simply return false if already too late to write assert.strictEqual(res.writeContinue(), false); res.on('finish', common.mustCall( () => process.nextTick(() => assert.strictEqual(res.writeContinue(), false)) )); -} - -const server = http2.createServer( - common.mustNotCall('Full request received before 100 Continue') -); - -server.on('checkContinue', common.mustCall((req, res) => { - console.error('Server received Expect: 100-continue'); - - res.writeContinue(); - - // timeout so that we allow the client to receive continue first - setTimeout( - common.mustCall(() => handler(req, res)), - common.platformTimeout(100) - ); })); -server.listen(0); - -server.on('listening', common.mustCall(() => { +server.listen(0, common.mustCall(() => { let body = ''; - const port = server.address().port; - const client = http2.connect(`http://localhost:${port}`); + const client = http2.connect(`http://localhost:${server.address().port}`); const req = client.request({ ':method': 'POST', - ':path': '/world', expect: '100-continue' }); - console.error('Client sent request'); let gotContinue = false; req.on('continue', common.mustCall(() => { - console.error('Client received 100-continue'); gotContinue = true; })); req.on('response', common.mustCall((headers) => { - console.error('Client received response headers'); - assert.strictEqual(gotContinue, true); assert.strictEqual(headers[':status'], 200); - assert.strictEqual(headers['abcd'], '1'); + req.end(); })); req.setEncoding('utf-8'); req.on('data', common.mustCall((chunk) => { body += chunk; })); req.on('end', common.mustCall(() => { - console.error('Client received full response'); - assert.strictEqual(body, testResBody); - - client.destroy(); + client.close(); server.close(); })); })); diff --git a/test/parallel/test-http2-compat-expect-continue.js b/test/parallel/test-http2-compat-expect-continue.js index 6f08e813ef385a..42fa80ae4e8620 100644 --- a/test/parallel/test-http2-compat-expect-continue.js +++ b/test/parallel/test-http2-compat-expect-continue.js @@ -17,8 +17,6 @@ const server = http2.createServer(); let sentResponse = false; server.on('request', common.mustCall((req, res) => { - console.error('Server sent full response'); - res.end(testResBody); sentResponse = true; })); @@ -28,38 +26,29 @@ server.listen(0); server.on('listening', common.mustCall(() => { let body = ''; - const port = server.address().port; - const client = http2.connect(`http://localhost:${port}`); + const client = http2.connect(`http://localhost:${server.address().port}`); const req = client.request({ ':method': 'POST', - ':path': '/world', expect: '100-continue' }); - console.error('Client sent request'); let gotContinue = false; req.on('continue', common.mustCall(() => { - console.error('Client received 100-continue'); gotContinue = true; })); req.on('response', common.mustCall((headers) => { - console.error('Client received response headers'); - assert.strictEqual(gotContinue, true); assert.strictEqual(sentResponse, true); assert.strictEqual(headers[':status'], 200); + req.end(); })); 
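Both expect-continue tests in this stretch exercise the same handshake; stripped of the assertions, the flow they verify looks like this (a condensed sketch, not part of the diff):

```js
// Client asks permission up front, server grants it with writeContinue(),
// and only then does the client send the body.
const http2 = require('http2');

const server = http2.createServer();
server.on('checkContinue', (req, res) => {
  res.writeContinue();            // sends the interim 100 response
  req.on('data', () => {});       // body arrives only after the continue
  req.on('end', () => res.end('done'));
});

server.listen(0, () => {
  const client = http2.connect(`http://localhost:${server.address().port}`);
  const req = client.request({ ':method': 'POST', expect: '100-continue' });
  req.on('continue', () => req.end('the actual body'));
  req.resume();
  req.on('end', () => {
    client.close();
    server.close();
  });
});
```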
req.setEncoding('utf8'); req.on('data', common.mustCall((chunk) => { body += chunk; })); - req.on('end', common.mustCall(() => { - console.error('Client received full response'); - assert.strictEqual(body, testResBody); - - client.destroy(); + client.close(); server.close(); })); })); diff --git a/test/parallel/test-http2-compat-expect-handling.js b/test/parallel/test-http2-compat-expect-handling.js index 0a5de368c6cfff..f36032c972fc45 100644 --- a/test/parallel/test-http2-compat-expect-handling.js +++ b/test/parallel/test-http2-compat-expect-handling.js @@ -39,7 +39,7 @@ function nextTest(testsToRun) { })); req.on('end', common.mustCall(() => { - client.destroy(); + client.close(); nextTest(testsToRun - 1); })); } diff --git a/test/parallel/test-http2-compat-method-connect.js b/test/parallel/test-http2-compat-method-connect.js index 1f43b3891b24ed..21ad23e92ba65b 100644 --- a/test/parallel/test-http2-compat-method-connect.js +++ b/test/parallel/test-http2-compat-method-connect.js @@ -33,7 +33,7 @@ function testMethodConnect(testsToRun) { })); req.resume(); req.on('end', common.mustCall(() => { - client.destroy(); + client.close(); testMethodConnect(testsToRun - 1); })); req.end(); diff --git a/test/parallel/test-http2-compat-serverrequest-end.js b/test/parallel/test-http2-compat-serverrequest-end.js index b6bfd04089a103..d34372118582db 100644 --- a/test/parallel/test-http2-compat-serverrequest-end.js +++ b/test/parallel/test-http2-compat-serverrequest-end.js @@ -31,18 +31,11 @@ server.listen(0, common.mustCall(function() { })); const url = `http://localhost:${port}`; - const client = h2.connect(url, common.mustCall(function() { - const headers = { - ':path': '/foobar', - ':method': 'GET', - ':scheme': 'http', - ':authority': `localhost:${port}` - }; - const request = client.request(headers); + const client = h2.connect(url, common.mustCall(() => { + const request = client.request(); request.resume(); - request.on('end', common.mustCall(function() { - client.destroy(); + request.on('end', common.mustCall(() => { + client.close(); })); - request.end(); })); })); diff --git a/test/parallel/test-http2-compat-serverrequest-headers.js b/test/parallel/test-http2-compat-serverrequest-headers.js index 58cc52c64f6c91..5843104c019189 100644 --- a/test/parallel/test-http2-compat-serverrequest-headers.js +++ b/test/parallel/test-http2-compat-serverrequest-headers.js @@ -79,7 +79,7 @@ server.listen(0, common.mustCall(function() { }; const request = client.request(headers); request.on('end', common.mustCall(function() { - client.destroy(); + client.close(); })); request.end(); request.resume(); diff --git a/test/parallel/test-http2-compat-serverrequest-pause.js b/test/parallel/test-http2-compat-serverrequest-pause.js index f8494bb0ddee39..62a23997c75bd8 100644 --- a/test/parallel/test-http2-compat-serverrequest-pause.js +++ b/test/parallel/test-http2-compat-serverrequest-pause.js @@ -46,7 +46,7 @@ server.listen(0, common.mustCall(() => { request.resume(); request.end(testStr); request.on('end', common.mustCall(function() { - client.destroy(); + client.close(); server.close(); })); })); diff --git a/test/parallel/test-http2-compat-serverrequest-pipe.js b/test/parallel/test-http2-compat-serverrequest-pipe.js index 04c8cfe546f329..53e54cdf913b0e 100644 --- a/test/parallel/test-http2-compat-serverrequest-pipe.js +++ b/test/parallel/test-http2-compat-serverrequest-pipe.js @@ -11,9 +11,10 @@ const path = require('path'); // piping should work as expected with createWriteStream -common.refreshTmpDir(); +const 
tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); const loc = fixtures.path('url-tests.js'); -const fn = path.join(common.tmpDir, 'http2-url-tests.js'); +const fn = path.join(tmpdir.path, 'http2-url-tests.js'); const server = http2.createServer(); @@ -35,7 +36,7 @@ server.listen(0, common.mustCall(() => { function maybeClose() { if (--remaining === 0) { server.close(); - client.destroy(); + client.close(); } } diff --git a/test/parallel/test-http2-compat-serverrequest-settimeout.js b/test/parallel/test-http2-compat-serverrequest-settimeout.js index 460eb576bfd4f6..f7189161802301 100644 --- a/test/parallel/test-http2-compat-serverrequest-settimeout.js +++ b/test/parallel/test-http2-compat-serverrequest-settimeout.js @@ -12,7 +12,6 @@ const server = http2.createServer(); server.on('request', (req, res) => { req.setTimeout(msecs, common.mustCall(() => { res.end(); - req.setTimeout(msecs, common.mustNotCall()); })); res.on('finish', common.mustCall(() => { req.setTimeout(msecs, common.mustNotCall()); @@ -35,7 +34,7 @@ server.listen(0, common.mustCall(() => { ':authority': `localhost:${port}` }); req.on('end', common.mustCall(() => { - client.destroy(); + client.close(); })); req.resume(); req.end(); diff --git a/test/parallel/test-http2-compat-serverrequest-trailers.js b/test/parallel/test-http2-compat-serverrequest-trailers.js index b4d90281918d9e..285178cab66816 100644 --- a/test/parallel/test-http2-compat-serverrequest-trailers.js +++ b/test/parallel/test-http2-compat-serverrequest-trailers.js @@ -62,7 +62,7 @@ server.listen(0, common.mustCall(function() { request.resume(); request.on('end', common.mustCall(function() { server.close(); - client.destroy(); + client.close(); })); request.write('test\n'); request.end('test'); diff --git a/test/parallel/test-http2-compat-serverrequest.js b/test/parallel/test-http2-compat-serverrequest.js index edcd7a8f8cdea4..d92da61d943cb7 100644 --- a/test/parallel/test-http2-compat-serverrequest.js +++ b/test/parallel/test-http2-compat-serverrequest.js @@ -46,7 +46,7 @@ server.listen(0, common.mustCall(function() { }; const request = client.request(headers); request.on('end', common.mustCall(function() { - client.destroy(); + client.close(); })); request.end(); request.resume(); diff --git a/test/parallel/test-http2-compat-serverresponse-close.js b/test/parallel/test-http2-compat-serverresponse-close.js index 35e39b9670868e..0ff6bd3a83f600 100644 --- a/test/parallel/test-http2-compat-serverresponse-close.js +++ b/test/parallel/test-http2-compat-serverresponse-close.js @@ -16,26 +16,17 @@ const server = h2.createServer(common.mustCall((req, res) => { req.on('close', common.mustCall()); res.on('close', common.mustCall()); + req.on('error', common.mustNotCall()); })); server.listen(0); -server.on('listening', function() { - const port = server.address().port; - - const url = `http://localhost:${port}`; - const client = h2.connect(url, common.mustCall(function() { - const headers = { - ':path': '/foobar', - ':method': 'GET', - ':scheme': 'http', - ':authority': `localhost:${port}`, - }; - const request = client.request(headers); +server.on('listening', () => { + const url = `http://localhost:${server.address().port}`; + const client = h2.connect(url, common.mustCall(() => { + const request = client.request(); request.on('data', common.mustCall(function(chunk) { - // cause an error on the server side client.destroy(); server.close(); })); - request.end(); })); }); diff --git a/test/parallel/test-http2-compat-serverresponse-createpushresponse.js 
b/test/parallel/test-http2-compat-serverresponse-createpushresponse.js index da7c4adbbfff16..18b3ba15be841c 100755 --- a/test/parallel/test-http2-compat-serverresponse-createpushresponse.js +++ b/test/parallel/test-http2-compat-serverresponse-createpushresponse.js @@ -43,7 +43,7 @@ const server = h2.createServer((request, response) => { ':path': '/pushed', ':method': 'GET' }, common.mustCall((error) => { - assert.strictEqual(error.code, 'ERR_HTTP2_STREAM_CLOSED'); + assert.strictEqual(error.code, 'ERR_HTTP2_INVALID_STREAM'); })); }); })); @@ -61,7 +61,7 @@ server.listen(0, common.mustCall(() => { let remaining = 2; function maybeClose() { if (--remaining === 0) { - client.destroy(); + client.close(); server.close(); } } diff --git a/test/parallel/test-http2-compat-serverresponse-destroy.js b/test/parallel/test-http2-compat-serverresponse-destroy.js index 77e761b6227702..54214737840061 100644 --- a/test/parallel/test-http2-compat-serverresponse-destroy.js +++ b/test/parallel/test-http2-compat-serverresponse-destroy.js @@ -5,6 +5,7 @@ if (!common.hasCrypto) common.skip('missing crypto'); const assert = require('assert'); const http2 = require('http2'); +const Countdown = require('../common/countdown'); // Check that destroying the Http2ServerResponse stream produces // the expected result, including the ability to throw an error @@ -30,63 +31,54 @@ const server = http2.createServer(common.mustCall((req, res) => { if (req.url !== '/') { nextError = errors.shift(); } + res.destroy(nextError); }, 3)); -server.on( - 'streamError', - common.mustCall((err) => assert.strictEqual(err, nextError), 2) -); - server.listen(0, common.mustCall(() => { - const port = server.address().port; - const client = http2.connect(`http://localhost:${port}`); - const req = client.request({ - ':path': '/', - ':method': 'GET', - ':scheme': 'http', - ':authority': `localhost:${port}` - }); + const client = http2.connect(`http://localhost:${server.address().port}`); - req.on('response', common.mustNotCall()); - req.on('error', common.mustNotCall()); - req.on('end', common.mustCall()); + const countdown = new Countdown(3, () => { + server.close(); + client.close(); + }); - req.resume(); - req.end(); + { + const req = client.request(); + req.on('response', common.mustNotCall()); + req.on('error', common.mustNotCall()); + req.on('end', common.mustCall()); + req.on('close', common.mustCall(() => countdown.dec())); + req.resume(); + } - const req2 = client.request({ - ':path': '/error', - ':method': 'GET', - ':scheme': 'http', - ':authority': `localhost:${port}` - }); + { + const req = client.request({ ':path': '/error' }); - req2.on('response', common.mustNotCall()); - req2.on('error', common.mustNotCall()); - req2.on('end', common.mustCall()); + req.on('response', common.mustNotCall()); + req.on('error', common.expectsError({ + code: 'ERR_HTTP2_STREAM_ERROR', + type: Error, + message: 'Stream closed with error code 2' + })); + req.on('close', common.mustCall(() => countdown.dec())); - req2.resume(); - req2.end(); + req.resume(); + req.on('end', common.mustCall()); + } - const req3 = client.request({ - ':path': '/error', - ':method': 'GET', - ':scheme': 'http', - ':authority': `localhost:${port}` - }); + { + const req = client.request({ ':path': '/error' }); - req3.on('response', common.mustNotCall()); - req3.on('error', common.expectsError({ - code: 'ERR_HTTP2_STREAM_ERROR', - type: Error, - message: 'Stream closed with error code 2' - })); - req3.on('end', common.mustCall(() => { - server.close(); - client.destroy(); - 
})); + req.on('response', common.mustNotCall()); + req.on('error', common.expectsError({ + code: 'ERR_HTTP2_STREAM_ERROR', + type: Error, + message: 'Stream closed with error code 2' + })); + req.on('close', common.mustCall(() => countdown.dec())); - req3.resume(); - req3.end(); + req.resume(); + req.on('end', common.mustCall()); + } })); diff --git a/test/parallel/test-http2-compat-serverresponse-drain.js b/test/parallel/test-http2-compat-serverresponse-drain.js index e2465cfa00d1f6..7ccbb1f4d21209 100644 --- a/test/parallel/test-http2-compat-serverresponse-drain.js +++ b/test/parallel/test-http2-compat-serverresponse-drain.js @@ -37,7 +37,7 @@ server.listen(0, common.mustCall(() => { request.on('end', common.mustCall(function() { assert.strictEqual(data, testString.repeat(2)); - client.destroy(); + client.close(); server.close(); })); })); diff --git a/test/parallel/test-http2-compat-serverresponse-end.js b/test/parallel/test-http2-compat-serverresponse-end.js index 366d52321554fe..0e846a5948e3cc 100644 --- a/test/parallel/test-http2-compat-serverresponse-end.js +++ b/test/parallel/test-http2-compat-serverresponse-end.js @@ -52,7 +52,7 @@ const { request.on('data', (chunk) => (data += chunk)); request.on('end', mustCall(() => { strictEqual(data, 'end'); - client.destroy(); + client.close(); })); request.end(); request.resume(); @@ -83,7 +83,7 @@ const { request.on('data', (chunk) => (data += chunk)); request.on('end', mustCall(() => { strictEqual(data, 'test\uD83D\uDE00'); - client.destroy(); + client.close(); })); request.end(); request.resume(); @@ -110,7 +110,7 @@ const { }; const request = client.request(headers); request.on('data', mustNotCall()); - request.on('end', mustCall(() => client.destroy())); + request.on('end', mustCall(() => client.close())); request.end(); request.resume(); })); @@ -143,7 +143,7 @@ const { })); request.on('data', mustNotCall()); request.on('end', mustCall(() => { - client.destroy(); + client.close(); server.close(); })); request.end(); @@ -172,7 +172,7 @@ const { const request = client.request(headers); request.on('data', mustNotCall()); request.on('end', mustCall(() => { - client.destroy(); + client.close(); server.close(); })); request.end(); @@ -208,7 +208,7 @@ const { })); request.on('data', mustNotCall()); request.on('end', mustCall(() => { - client.destroy(); + client.close(); server.close(); })); request.end(); @@ -243,7 +243,7 @@ const { })); request.on('data', mustNotCall()); request.on('end', mustCall(() => { - client.destroy(); + client.close(); server.close(); })); request.end(); @@ -283,7 +283,7 @@ const { })); request.on('data', mustNotCall()); request.on('end', mustCall(() => { - client.destroy(); + client.close(); server.close(); })); request.end(); @@ -315,7 +315,7 @@ const { })); request.on('data', mustNotCall()); request.on('end', mustCall(() => { - client.destroy(); + client.close(); server.close(); })); request.end(); diff --git a/test/parallel/test-http2-compat-serverresponse-finished.js b/test/parallel/test-http2-compat-serverresponse-finished.js index b816b922202dd6..ceaa6eb5c3cf2c 100644 --- a/test/parallel/test-http2-compat-serverresponse-finished.js +++ b/test/parallel/test-http2-compat-serverresponse-finished.js @@ -39,7 +39,7 @@ server.listen(0, common.mustCall(function() { }; const request = client.request(headers); request.on('end', common.mustCall(function() { - client.destroy(); + client.close(); })); request.end(); request.resume(); diff --git a/test/parallel/test-http2-compat-serverresponse-flushheaders.js 
b/test/parallel/test-http2-compat-serverresponse-flushheaders.js index 68d4789f69be53..d155b07863d26c 100644 --- a/test/parallel/test-http2-compat-serverresponse-flushheaders.js +++ b/test/parallel/test-http2-compat-serverresponse-flushheaders.js @@ -51,7 +51,7 @@ server.listen(0, common.mustCall(function() { serverResponse.end(); }, 1)); request.on('end', common.mustCall(function() { - client.destroy(); + client.close(); })); request.end(); request.resume(); diff --git a/test/parallel/test-http2-compat-serverresponse-headers-after-destroy.js b/test/parallel/test-http2-compat-serverresponse-headers-after-destroy.js index 8bfd64ebec6dec..171eb7e27f32b1 100644 --- a/test/parallel/test-http2-compat-serverresponse-headers-after-destroy.js +++ b/test/parallel/test-http2-compat-serverresponse-headers-after-destroy.js @@ -14,8 +14,6 @@ const server = h2.createServer(); server.listen(0, common.mustCall(function() { const port = server.address().port; server.once('request', common.mustCall(function(request, response) { - response.destroy(); - response.on('finish', common.mustCall(() => { assert.strictEqual(response.headersSent, false); assert.doesNotThrow(() => response.setHeader('test', 'value')); @@ -28,6 +26,8 @@ server.listen(0, common.mustCall(function() { server.close(); }); })); + + response.destroy(); })); const url = `http://localhost:${port}`; @@ -40,7 +40,7 @@ server.listen(0, common.mustCall(function() { }; const request = client.request(headers); request.on('end', common.mustCall(function() { - client.destroy(); + client.close(); })); request.end(); request.resume(); diff --git a/test/parallel/test-http2-compat-serverresponse-headers.js b/test/parallel/test-http2-compat-serverresponse-headers.js index ec1071bc34ffa1..2b7252135917ac 100644 --- a/test/parallel/test-http2-compat-serverresponse-headers.js +++ b/test/parallel/test-http2-compat-serverresponse-headers.js @@ -179,7 +179,7 @@ server.listen(0, common.mustCall(function() { }; const request = client.request(headers); request.on('end', common.mustCall(function() { - client.destroy(); + client.close(); })); request.end(); request.resume(); diff --git a/test/parallel/test-http2-compat-serverresponse-settimeout.js b/test/parallel/test-http2-compat-serverresponse-settimeout.js index 6d06d07f7dc0ab..bb09633727ccf7 100644 --- a/test/parallel/test-http2-compat-serverresponse-settimeout.js +++ b/test/parallel/test-http2-compat-serverresponse-settimeout.js @@ -12,7 +12,6 @@ const server = http2.createServer(); server.on('request', (req, res) => { res.setTimeout(msecs, common.mustCall(() => { res.end(); - res.setTimeout(msecs, common.mustNotCall()); })); res.on('finish', common.mustCall(() => { res.setTimeout(msecs, common.mustNotCall()); @@ -35,7 +34,7 @@ server.listen(0, common.mustCall(() => { ':authority': `localhost:${port}` }); req.on('end', common.mustCall(() => { - client.destroy(); + client.close(); })); req.resume(); req.end(); diff --git a/test/parallel/test-http2-compat-serverresponse-statuscode.js b/test/parallel/test-http2-compat-serverresponse-statuscode.js index 96b033328f2855..0c2a96f55953b8 100644 --- a/test/parallel/test-http2-compat-serverresponse-statuscode.js +++ b/test/parallel/test-http2-compat-serverresponse-statuscode.js @@ -69,7 +69,7 @@ server.listen(0, common.mustCall(function() { }; const request = client.request(headers); request.on('end', common.mustCall(function() { - client.destroy(); + client.close(); })); request.end(); request.resume(); diff --git 
a/test/parallel/test-http2-compat-serverresponse-statusmessage-property-set.js b/test/parallel/test-http2-compat-serverresponse-statusmessage-property-set.js index 45a876d674313b..87e172402899f2 100644 --- a/test/parallel/test-http2-compat-serverresponse-statusmessage-property-set.js +++ b/test/parallel/test-http2-compat-serverresponse-statusmessage-property-set.js @@ -42,7 +42,7 @@ server.listen(0, common.mustCall(function() { assert.strictEqual(headers[':status'], 200); }, 1)); request.on('end', common.mustCall(function() { - client.destroy(); + client.close(); })); request.end(); request.resume(); diff --git a/test/parallel/test-http2-compat-serverresponse-statusmessage-property.js b/test/parallel/test-http2-compat-serverresponse-statusmessage-property.js index 21a5b6ea4e2820..8a083cf3ba1638 100644 --- a/test/parallel/test-http2-compat-serverresponse-statusmessage-property.js +++ b/test/parallel/test-http2-compat-serverresponse-statusmessage-property.js @@ -41,7 +41,7 @@ server.listen(0, common.mustCall(function() { assert.strictEqual(headers[':status'], 200); }, 1)); request.on('end', common.mustCall(function() { - client.destroy(); + client.close(); })); request.end(); request.resume(); diff --git a/test/parallel/test-http2-compat-serverresponse-statusmessage.js b/test/parallel/test-http2-compat-serverresponse-statusmessage.js index 841bafe724a7a8..dee916d1aeef54 100644 --- a/test/parallel/test-http2-compat-serverresponse-statusmessage.js +++ b/test/parallel/test-http2-compat-serverresponse-statusmessage.js @@ -45,7 +45,7 @@ server.listen(0, common.mustCall(function() { assert.strictEqual(headers['foo-bar'], 'abc123'); }, 1)); request.on('end', common.mustCall(function() { - client.destroy(); + client.close(); })); request.end(); request.resume(); diff --git a/test/parallel/test-http2-compat-serverresponse-trailers.js b/test/parallel/test-http2-compat-serverresponse-trailers.js index 7332f9e8d0b63d..66ad8843fa33b9 100755 --- a/test/parallel/test-http2-compat-serverresponse-trailers.js +++ b/test/parallel/test-http2-compat-serverresponse-trailers.js @@ -68,7 +68,7 @@ server.listen(0, common.mustCall(() => { })); request.resume(); request.on('end', common.mustCall(() => { - client.destroy(); + client.close(); server.close(); })); })); diff --git a/test/parallel/test-http2-compat-serverresponse-write-no-cb.js b/test/parallel/test-http2-compat-serverresponse-write-no-cb.js index 58a4ca053d222c..a62bb1b0ac78f1 100644 --- a/test/parallel/test-http2-compat-serverresponse-write-no-cb.js +++ b/test/parallel/test-http2-compat-serverresponse-write-no-cb.js @@ -6,44 +6,33 @@ const { mustCall, hasCrypto, skip } = require('../common'); if (!hasCrypto) skip('missing crypto'); -const { throws } = require('assert'); const { createServer, connect } = require('http2'); // Http2ServerResponse.write does not imply there is a callback -const expectedError = expectsError({ - code: 'ERR_HTTP2_STREAM_CLOSED', - message: 'The stream is already closed' -}, 2); - { const server = createServer(); server.listen(0, mustCall(() => { const port = server.address().port; const url = `http://localhost:${port}`; const client = connect(url, mustCall(() => { - const headers = { - ':path': '/', - ':method': 'GET', - ':scheme': 'http', - ':authority': `localhost:${port}` - }; - const request = client.request(headers); - request.end(); + const request = client.request(); request.resume(); + request.on('end', mustCall()); + request.on('close', mustCall(() => { + client.close(); + })); })); server.once('request', 
mustCall((request, response) => { client.destroy(); response.stream.session.on('close', mustCall(() => { response.on('error', mustNotCall()); - throws( + expectsError( () => { response.write('muahaha'); }, - expectsError({ - code: 'ERR_HTTP2_STREAM_CLOSED', - type: Error, - message: 'The stream is already closed' - }) + { + code: 'ERR_HTTP2_INVALID_STREAM' + } ); server.close(); })); @@ -57,21 +46,21 @@ const expectedError = expectsError({ const port = server.address().port; const url = `http://localhost:${port}`; const client = connect(url, mustCall(() => { - const headers = { - ':path': '/', - ':method': 'get', - ':scheme': 'http', - ':authority': `localhost:${port}` - }; - const request = client.request(headers); - request.end(); + const request = client.request(); request.resume(); + request.on('end', mustCall()); + request.on('close', mustCall(() => client.close())); })); server.once('request', mustCall((request, response) => { client.destroy(); response.stream.session.on('close', mustCall(() => { - response.write('muahaha', mustCall(expectedError)); + expectsError( + () => response.write('muahaha'), + { + code: 'ERR_HTTP2_INVALID_STREAM' + } + ); server.close(); })); })); @@ -84,20 +73,20 @@ const expectedError = expectsError({ const port = server.address().port; const url = `http://localhost:${port}`; const client = connect(url, mustCall(() => { - const headers = { - ':path': '/', - ':method': 'get', - ':scheme': 'http', - ':authority': `localhost:${port}` - }; - const request = client.request(headers); - request.end(); + const request = client.request(); request.resume(); + request.on('end', mustCall()); + request.on('close', mustCall(() => client.close())); })); server.once('request', mustCall((request, response) => { response.stream.session.on('close', mustCall(() => { - response.write('muahaha', 'utf8', mustCall(expectedError)); + expectsError( + () => response.write('muahaha', 'utf8'), + { + code: 'ERR_HTTP2_INVALID_STREAM' + } + ); server.close(); })); client.destroy(); diff --git a/test/parallel/test-http2-compat-serverresponse-writehead.js b/test/parallel/test-http2-compat-serverresponse-writehead.js index 704f199ca27e99..5fd787e100350c 100644 --- a/test/parallel/test-http2-compat-serverresponse-writehead.js +++ b/test/parallel/test-http2-compat-serverresponse-writehead.js @@ -23,7 +23,7 @@ server.listen(0, common.mustCall(function() { server.close(); process.nextTick(common.mustCall(() => { common.expectsError(() => { response.writeHead(300); }, { - code: 'ERR_HTTP2_STREAM_CLOSED' + code: 'ERR_HTTP2_INVALID_STREAM' }); })); })); @@ -44,7 +44,7 @@ server.listen(0, common.mustCall(function() { assert.strictEqual(headers[':status'], 418); }, 1)); request.on('end', common.mustCall(function() { - client.destroy(); + client.close(); })); request.end(); request.resume(); diff --git a/test/parallel/test-http2-compat-short-stream-client-server.js b/test/parallel/test-http2-compat-short-stream-client-server.js new file mode 100644 index 00000000000000..f7ef9412106f59 --- /dev/null +++ b/test/parallel/test-http2-compat-short-stream-client-server.js @@ -0,0 +1,50 @@ +'use strict'; + +const common = require('../common'); +if (!common.hasCrypto) + common.skip('missing crypto'); +const assert = require('assert'); +const http2 = require('http2'); +const { Readable } = require('stream'); + +const server = http2.createServer(common.mustCall((req, res) => { + res.setHeader('content-type', 'text/html'); + const input = new Readable({ + read() { + this.push('test'); + this.push(null); + } + }); 
+ input.pipe(res); +})); + +server.listen(0, common.mustCall(() => { + const port = server.address().port; + const client = http2.connect(`http://localhost:${port}`); + + const req = client.request(); + + req.on('response', common.mustCall((headers) => { + assert.strictEqual(headers[':status'], 200); + assert.strictEqual(headers['content-type'], 'text/html'); + })); + + let data = ''; + + const notCallClose = common.mustNotCall(); + + setTimeout(() => { + req.setEncoding('utf8'); + req.removeListener('close', notCallClose); + req.on('close', common.mustCall(() => { + server.close(); + client.close(); + })); + req.on('data', common.mustCallAtLeast((d) => data += d)); + req.on('end', common.mustCall(() => { + assert.strictEqual(data, 'test'); + })); + }, common.platformTimeout(100)); + + req.on('close', notCallClose); +})); diff --git a/test/parallel/test-http2-compat-socket-set.js b/test/parallel/test-http2-compat-socket-set.js index f62c782a45d8ea..c6107564319143 100644 --- a/test/parallel/test-http2-compat-socket-set.js +++ b/test/parallel/test-http2-compat-socket-set.js @@ -99,7 +99,7 @@ server.listen(0, common.mustCall(function() { }; const request = client.request(headers); request.on('end', common.mustCall(() => { - client.destroy(); + client.close(); server.close(); })); request.end(); diff --git a/test/parallel/test-http2-compat-socket.js b/test/parallel/test-http2-compat-socket.js index c6a09802981d16..9b98d328111633 100644 --- a/test/parallel/test-http2-compat-socket.js +++ b/test/parallel/test-http2-compat-socket.js @@ -82,7 +82,7 @@ server.listen(0, common.mustCall(function() { }; const request = client.request(headers); request.on('end', common.mustCall(() => { - client.destroy(); + client.close(); })); request.end(); request.resume(); diff --git a/test/parallel/test-http2-connect-method.js b/test/parallel/test-http2-connect-method.js index 78c9a345293c12..b425cafb1478d3 100644 --- a/test/parallel/test-http2-connect-method.js +++ b/test/parallel/test-http2-connect-method.js @@ -13,7 +13,8 @@ const { HTTP2_HEADER_AUTHORITY, HTTP2_HEADER_SCHEME, HTTP2_HEADER_PATH, - NGHTTP2_CONNECT_ERROR + NGHTTP2_CONNECT_ERROR, + NGHTTP2_REFUSED_STREAM } = http2.constants; const server = net.createServer(common.mustCall((socket) => { @@ -34,7 +35,7 @@ server.listen(0, common.mustCall(() => { const proxy = http2.createServer(); proxy.on('stream', common.mustCall((stream, headers) => { if (headers[HTTP2_HEADER_METHOD] !== 'CONNECT') { - stream.rstWithRefused(); + stream.close(NGHTTP2_REFUSED_STREAM); return; } const auth = new URL(`tcp://${headers[HTTP2_HEADER_AUTHORITY]}`); @@ -47,7 +48,7 @@ server.listen(0, common.mustCall(() => { }); socket.on('close', common.mustCall()); socket.on('error', (error) => { - stream.rstStream(NGHTTP2_CONNECT_ERROR); + stream.close(NGHTTP2_CONNECT_ERROR); }); })); @@ -99,7 +100,7 @@ server.listen(0, common.mustCall(() => { req.on('data', (chunk) => data += chunk); req.on('end', common.mustCall(() => { assert.strictEqual(data, 'hello'); - client.destroy(); + client.close(); proxy.close(); server.close(); })); diff --git a/test/parallel/test-http2-connect.js b/test/parallel/test-http2-connect.js index e5a4e429907090..325a420b7e6e49 100644 --- a/test/parallel/test-http2-connect.js +++ b/test/parallel/test-http2-connect.js @@ -5,6 +5,9 @@ if (!hasCrypto) skip('missing crypto'); const { doesNotThrow, throws } = require('assert'); const { createServer, connect } = require('http2'); +const { connect: netConnect } = require('net'); + +// check for session connect callback 
and event { const server = createServer(); server.listen(0, mustCall(() => { @@ -20,7 +23,7 @@ const { createServer, connect } = require('http2'); for (const client of clients) { client.once('connect', mustCall((headers) => { - client.destroy(); + client.close(); clients.delete(client); if (clients.size === 0) { server.close(); @@ -30,10 +33,36 @@ const { createServer, connect } = require('http2'); })); } +// check for session connect callback on already connected socket +{ + const server = createServer(); + server.listen(0, mustCall(() => { + const { port } = server.address(); + + const onSocketConnect = () => { + const authority = `http://localhost:${port}`; + const createConnection = mustCall(() => socket); + const options = { createConnection }; + connect(authority, options, mustCall(onSessionConnect)); + }; + + const onSessionConnect = (session) => { + session.close(); + server.close(); + }; + + const socket = netConnect(port, mustCall(onSocketConnect)); + })); +} + // check for https as protocol { const authority = 'https://localhost'; - doesNotThrow(() => connect(authority)); + doesNotThrow(() => { + // A socket error may or may not be reported, keep this as a non-op + // instead of a mustCall or mustNotCall + connect(authority).on('error', () => {}); + }); } // check for error for an invalid protocol (not http or https) diff --git a/test/parallel/test-http2-cookies.js b/test/parallel/test-http2-cookies.js index 48b08b6367b4b3..cf763915389287 100644 --- a/test/parallel/test-http2-cookies.js +++ b/test/parallel/test-http2-cookies.js @@ -54,7 +54,7 @@ server.on('listening', common.mustCall(() => { req.on('end', common.mustCall(() => { server.close(); - client.destroy(); + client.close(); })); req.end(); diff --git a/test/parallel/test-http2-create-client-connect.js b/test/parallel/test-http2-create-client-connect.js index 373c258c7dd286..02c6c70642acb0 100644 --- a/test/parallel/test-http2-create-client-connect.js +++ b/test/parallel/test-http2-create-client-connect.js @@ -30,7 +30,7 @@ const URL = url.URL; () => setImmediate(() => server.close())); const maybeClose = common.mustCall((client) => { - client.destroy(); + client.close(); serverClose.dec(); }, items.length); @@ -42,7 +42,7 @@ const URL = url.URL; // Will fail because protocol does not match the server. 
h2.connect({ port: port, protocol: 'https:' }) - .on('socketError', common.mustCall(() => serverClose.dec())); + .on('error', common.mustCall(() => serverClose.dec())); })); } @@ -55,10 +55,8 @@ const URL = url.URL; }; const server = h2.createSecureServer(options); - server.listen(0); - - server.on('listening', common.mustCall(function() { - const port = this.address().port; + server.listen(0, common.mustCall(() => { + const port = server.address().port; const opts = { rejectUnauthorized: false }; @@ -74,7 +72,7 @@ const URL = url.URL; () => setImmediate(() => server.close())); const maybeClose = common.mustCall((client) => { - client.destroy(); + client.close(); serverClose.dec(); }, items.length); diff --git a/test/parallel/test-http2-create-client-secure-session.js b/test/parallel/test-http2-create-client-secure-session.js index 811ef772d5903a..b0111e15b69c15 100644 --- a/test/parallel/test-http2-create-client-secure-session.js +++ b/test/parallel/test-http2-create-client-secure-session.js @@ -19,6 +19,14 @@ function loadKey(keyname) { function onStream(stream, headers) { const socket = stream.session[kSocket]; + + assert(stream.session.encrypted); + assert(stream.session.alpnProtocol, 'h2'); + const originSet = stream.session.originSet; + assert(Array.isArray(originSet)); + assert.strictEqual(originSet[0], + `https://${socket.servername}:${socket.remotePort}`); + assert(headers[':authority'].startsWith(socket.servername)); stream.respond({ 'content-type': 'application/json' }); stream.end(JSON.stringify({ @@ -30,6 +38,7 @@ function onStream(stream, headers) { function verifySecureSession(key, cert, ca, opts) { const server = h2.createSecureServer({ cert, key }); server.on('stream', common.mustCall(onStream)); + server.on('close', common.mustCall()); server.listen(0, common.mustCall(() => { opts = opts || { }; opts.secureContext = tls.createSecureContext({ ca }); @@ -39,6 +48,17 @@ function verifySecureSession(key, cert, ca, opts) { assert.strictEqual(client.socket.listenerCount('secureConnect'), 1); const req = client.request(); + client.on('connect', common.mustCall(() => { + assert(client.encrypted); + assert.strictEqual(client.alpnProtocol, 'h2'); + const originSet = client.originSet; + assert(Array.isArray(originSet)); + assert.strictEqual(originSet.length, 1); + assert.strictEqual( + originSet[0], + `https://${opts.servername || 'localhost'}:${server.address().port}`); + })); + req.on('response', common.mustCall((headers) => { assert.strictEqual(headers[':status'], 200); assert.strictEqual(headers['content-type'], 'application/json'); @@ -53,7 +73,7 @@ function verifySecureSession(key, cert, ca, opts) { assert.strictEqual(jsonData.servername, opts.servername || 'localhost'); assert.strictEqual(jsonData.alpnProtocol, 'h2'); - server.close(); + server.close(common.mustCall()); client[kSocket].destroy(); })); })); diff --git a/test/parallel/test-http2-create-client-session.js b/test/parallel/test-http2-create-client-session.js index 149b5164231a21..963db2faa173b7 100644 --- a/test/parallel/test-http2-create-client-session.js +++ b/test/parallel/test-http2-create-client-session.js @@ -5,6 +5,8 @@ if (!common.hasCrypto) common.skip('missing crypto'); const assert = require('assert'); const h2 = require('http2'); +const Countdown = require('../common/countdown'); + const body = '
<html><head></head><body><h1>this is some data</h1></body></html>
'; @@ -23,21 +25,34 @@ function onStream(stream, headers, flags) { 'content-type': 'text/html', ':status': 200 }); - stream.end(body); + stream.write(body.slice(0, 20)); + stream.end(body.slice(20)); } +server.on('close', common.mustCall()); + server.listen(0); -let expected = count; +server.on('listening', common.mustCall(() => { -server.on('listening', common.mustCall(function() { + const client = h2.connect(`http://localhost:${server.address().port}`); + client.setMaxListeners(101); - const client = h2.connect(`http://localhost:${this.address().port}`); + client.on('goaway', console.log); - const headers = { ':path': '/' }; + client.on('connect', common.mustCall(() => { + assert(!client.encrypted); + assert(!client.originSet); + assert.strictEqual(client.alpnProtocol, 'h2c'); + })); + + const countdown = new Countdown(count, () => { + client.close(); + server.close(common.mustCall()); + }); for (let n = 0; n < count; n++) { - const req = client.request(headers); + const req = client.request(); req.on('response', common.mustCall(function(headers) { assert.strictEqual(headers[':status'], 200, 'status code is set'); @@ -51,12 +66,7 @@ server.on('listening', common.mustCall(function() { req.on('data', (d) => data += d); req.on('end', common.mustCall(() => { assert.strictEqual(body, data); - if (--expected === 0) { - server.close(); - client.destroy(); - } })); - req.end(); + req.on('close', common.mustCall(() => countdown.dec())); } - })); diff --git a/test/parallel/test-http2-createsecureserver-nooptions.js b/test/parallel/test-http2-createsecureserver-nooptions.js index 05029cba2bb638..58e1600790041e 100644 --- a/test/parallel/test-http2-createsecureserver-nooptions.js +++ b/test/parallel/test-http2-createsecureserver-nooptions.js @@ -6,7 +6,7 @@ if (!common.hasCrypto) const http2 = require('http2'); -const invalidOptions = [() => {}, 1, 'test', null, undefined]; +const invalidOptions = [() => {}, 1, 'test', null]; const invalidArgTypeError = { type: TypeError, code: 'ERR_INVALID_ARG_TYPE', @@ -14,9 +14,9 @@ const invalidArgTypeError = { }; // Error if options are not passed to createSecureServer -invalidOptions.forEach((invalidOption) => +invalidOptions.forEach((invalidOption) => { common.expectsError( () => http2.createSecureServer(invalidOption), invalidArgTypeError - ) -); + ); +}); diff --git a/test/parallel/test-http2-createwritereq.js b/test/parallel/test-http2-createwritereq.js index ca394a5d425470..1575424d1609b4 100644 --- a/test/parallel/test-http2-createwritereq.js +++ b/test/parallel/test-http2-createwritereq.js @@ -1,5 +1,7 @@ 'use strict'; +// Flags: --expose-gc + const common = require('../common'); if (!common.hasCrypto) common.skip('missing crypto'); @@ -54,14 +56,23 @@ server.listen(0, common.mustCall(function() { req.resume(); req.on('end', common.mustCall(function() { - client.destroy(); + client.close(); testsFinished++; if (testsFinished === testsToRun) { - server.close(); + server.close(common.mustCall()); } })); + // Ref: https://github.com/nodejs/node/issues/17840 + const origDestroy = req.destroy; + req.destroy = function(...args) { + // Schedule a garbage collection event at the end of the current + // MakeCallback() run. 
+ process.nextTick(global.gc); + return origDestroy.call(this, ...args); + }; + req.end(); }); })); diff --git a/test/parallel/test-http2-date-header.js b/test/parallel/test-http2-date-header.js index ab0654e64cbcd7..2b63e1b7899a2e 100644 --- a/test/parallel/test-http2-date-header.js +++ b/test/parallel/test-http2-date-header.js @@ -24,6 +24,6 @@ server.listen(0, common.mustCall(() => { req.resume(); req.on('end', common.mustCall(() => { server.close(); - client.destroy(); + client.close(); })); })); diff --git a/test/parallel/test-http2-dont-lose-data.js b/test/parallel/test-http2-dont-lose-data.js new file mode 100644 index 00000000000000..eb85277b7b124c --- /dev/null +++ b/test/parallel/test-http2-dont-lose-data.js @@ -0,0 +1,58 @@ +'use strict'; + +const common = require('../common'); +if (!common.hasCrypto) + common.skip('missing crypto'); +const assert = require('assert'); +const http2 = require('http2'); + +const server = http2.createServer(); + +server.on('stream', (s) => { + assert(s.pushAllowed); + + s.pushStream({ ':path': '/file' }, common.mustCall((err, pushStream) => { + assert.ifError(err); + pushStream.respond(); + pushStream.end('a push stream'); + })); + + s.respond(); + s.end('hello world'); +}); + +server.listen(0, () => { + server.unref(); + + const url = `http://localhost:${server.address().port}`; + + const client = http2.connect(url); + const req = client.request(); + + let pushStream; + + client.on('stream', common.mustCall((s, headers) => { + assert.strictEqual(headers[':path'], '/file'); + pushStream = s; + })); + + req.on('response', common.mustCall((headers) => { + let pushData = ''; + pushStream.setEncoding('utf8'); + pushStream.on('data', (d) => pushData += d); + pushStream.on('end', common.mustCall(() => { + assert.strictEqual(pushData, 'a push stream'); + + // removing the setImmediate causes the test to pass + setImmediate(function() { + let data = ''; + req.setEncoding('utf8'); + req.on('data', (d) => data += d); + req.on('end', common.mustCall(() => { + assert.strictEqual(data, 'hello world'); + client.close(); + })); + }); + })); + })); +}); diff --git a/test/parallel/test-http2-dont-override.js b/test/parallel/test-http2-dont-override.js index cc60a2fe802a82..b45713deb3ca60 100644 --- a/test/parallel/test-http2-dont-override.js +++ b/test/parallel/test-http2-dont-override.js @@ -44,6 +44,6 @@ server.listen(0, common.mustCall(() => { req.resume(); req.on('end', common.mustCall(() => { server.close(); - client.destroy(); + client.close(); })); })); diff --git a/test/parallel/test-http2-generic-streams-sendfile.js b/test/parallel/test-http2-generic-streams-sendfile.js index 1054574a8b1ca2..b752b0fdcb815a 100644 --- a/test/parallel/test-http2-generic-streams-sendfile.js +++ b/test/parallel/test-http2-generic-streams-sendfile.js @@ -20,7 +20,7 @@ const makeDuplexPair = require('../common/duplexpair'); createConnection: common.mustCall(() => clientSide) }); - const req = client.request({ ':path': '/' }); + const req = client.request(); req.on('response', common.mustCall((headers) => { assert.strictEqual(headers[':status'], 200); @@ -28,9 +28,7 @@ const makeDuplexPair = require('../common/duplexpair'); req.setEncoding('utf8'); let data = ''; - req.on('data', (chunk) => { - data += chunk; - }); + req.on('data', (chunk) => data += chunk); req.on('end', common.mustCall(() => { assert.strictEqual(data, fs.readFileSync(__filename, 'utf8')); clientSide.destroy(); diff --git a/test/parallel/test-http2-goaway-opaquedata.js 
b/test/parallel/test-http2-goaway-opaquedata.js index d8895a82edf464..3f1fb4d7954414 100644 --- a/test/parallel/test-http2-goaway-opaquedata.js +++ b/test/parallel/test-http2-goaway-opaquedata.js @@ -10,32 +10,23 @@ const server = http2.createServer(); const data = Buffer.from([0x1, 0x2, 0x3, 0x4, 0x5]); server.on('stream', common.mustCall((stream) => { - stream.session.shutdown({ - errorCode: 1, - opaqueData: data - }); + stream.session.goaway(0, 0, data); + stream.respond(); stream.end(); - stream.on('error', common.mustCall(common.expectsError({ - code: 'ERR_HTTP2_STREAM_ERROR', - type: Error, - message: 'Stream closed with error code 7' - }))); })); server.listen(0, () => { const client = http2.connect(`http://localhost:${server.address().port}`); - client.on('goaway', common.mustCall((code, lastStreamID, buf) => { - assert.deepStrictEqual(code, 1); - assert.deepStrictEqual(lastStreamID, 0); + client.once('goaway', common.mustCall((code, lastStreamID, buf) => { + assert.deepStrictEqual(code, 0); + assert.deepStrictEqual(lastStreamID, 1); assert.deepStrictEqual(data, buf); - // Call shutdown() here so that emitGoaway calls destroy() - client.shutdown(); server.close(); })); - const req = client.request({ ':path': '/' }); + const req = client.request(); req.resume(); req.on('end', common.mustCall()); + req.on('close', common.mustCall()); req.end(); - }); diff --git a/test/parallel/test-http2-head-request.js b/test/parallel/test-http2-head-request.js index 8c91132b5fdeeb..f780394f3d6289 100644 --- a/test/parallel/test-http2-head-request.js +++ b/test/parallel/test-http2-head-request.js @@ -53,6 +53,6 @@ server.listen(0, () => { req.on('data', common.mustNotCall()); req.on('end', common.mustCall(() => { server.close(); - client.destroy(); + client.close(); })); }); diff --git a/test/parallel/test-http2-https-fallback.js b/test/parallel/test-http2-https-fallback.js index 04e9ca480c9099..a872d686d34f85 100644 --- a/test/parallel/test-http2-https-fallback.js +++ b/test/parallel/test-http2-https-fallback.js @@ -6,7 +6,7 @@ const fixtures = require('../common/fixtures'); if (!common.hasCrypto) common.skip('missing crypto'); -const { strictEqual } = require('assert'); +const { strictEqual, ok } = require('assert'); const { createSecureContext } = require('tls'); const { createSecureServer, connect } = require('http2'); const { get } = require('https'); @@ -31,7 +31,7 @@ function onRequest(request, response) { })); } -function onSession(session) { +function onSession(session, next) { const headers = { ':path': '/', ':method': 'GET', @@ -52,8 +52,12 @@ function onSession(session) { strictEqual(alpnProtocol, 'h2'); strictEqual(httpVersion, '2.0'); - session.destroy(); + session.close(); this.cleanup(); + + if (typeof next === 'function') { + next(); + } })); request.end(); } @@ -126,15 +130,31 @@ function onSession(session) { connect( origin, clientOptions, - common.mustCall(onSession.bind({ cleanup, server })) + common.mustCall(function(session) { + onSession.call({ cleanup, server }, + session, + common.mustCall(testNoTls)); + }) ); - // HTTP/1.1 client - get(Object.assign(parse(origin), clientOptions), common.mustNotCall()) - .on('error', common.mustCall(cleanup)); - - // Incompatible ALPN TLS client - tls(Object.assign({ port, ALPNProtocols: ['fake'] }, clientOptions)) - .on('error', common.mustCall(cleanup)); + function testNoTls() { + // HTTP/1.1 client + get(Object.assign(parse(origin), clientOptions), common.mustNotCall) + .on('error', common.mustCall(cleanup)) + .on('error', 
common.mustCall(testWrongALPN)) + .end(); + } + + function testWrongALPN() { + // Incompatible ALPN TLS client + let text = ''; + tls(Object.assign({ port, ALPNProtocols: ['fake'] }, clientOptions)) + .setEncoding('utf8') + .on('data', (chunk) => text += chunk) + .on('end', common.mustCall(() => { + ok(/Unknown ALPN Protocol, expected `h2` to be available/.test(text)); + cleanup(); + })); + } })); } diff --git a/test/parallel/test-http2-info-headers-errors.js b/test/parallel/test-http2-info-headers-errors.js index 83f85b279d5c6e..555b22242664ae 100644 --- a/test/parallel/test-http2-info-headers-errors.js +++ b/test/parallel/test-http2-info-headers-errors.js @@ -49,10 +49,6 @@ server.on('stream', common.mustCall((stream, headers) => { if (currentError.type === 'stream') { stream.session.on('error', errorMustNotCall); stream.on('error', errorMustCall); - stream.on('error', common.mustCall(() => { - stream.respond(); - stream.end(); - })); } else { stream.session.once('error', errorMustCall); stream.on('error', errorMustNotCall); @@ -64,24 +60,21 @@ server.on('stream', common.mustCall((stream, headers) => { server.listen(0, common.mustCall(() => runTest(tests.shift()))); function runTest(test) { - const port = server.address().port; - const url = `http://localhost:${port}`; - const headers = { - ':path': '/', - ':method': 'POST', - ':scheme': 'http', - ':authority': `localhost:${port}` - }; - - const client = http2.connect(url); - const req = client.request(headers); + const client = http2.connect(`http://localhost:${server.address().port}`); + const req = client.request({ ':method': 'POST' }); currentError = test; req.resume(); req.end(); - req.on('end', common.mustCall(() => { - client.destroy(); + req.on('error', common.expectsError({ + code: 'ERR_HTTP2_STREAM_ERROR', + type: Error, + message: 'Stream closed with error code 2' + })); + + req.on('close', common.mustCall(() => { + client.close(); if (!tests.length) { server.close(); diff --git a/test/parallel/test-http2-info-headers.js b/test/parallel/test-http2-info-headers.js index 609f56e8b8566c..a71a3121b53c26 100644 --- a/test/parallel/test-http2-info-headers.js +++ b/test/parallel/test-http2-info-headers.js @@ -88,7 +88,7 @@ server.on('listening', common.mustCall(() => { req.on('end', common.mustCall(() => { server.close(); - client.destroy(); + client.close(); })); req.end(); diff --git a/test/parallel/test-http2-invalidargtypes-errors.js b/test/parallel/test-http2-invalidargtypes-errors.js index 3471e46fdf4ca4..ff189a2977559f 100644 --- a/test/parallel/test-http2-invalidargtypes-errors.js +++ b/test/parallel/test-http2-invalidargtypes-errors.js @@ -7,29 +7,25 @@ const http2 = require('http2'); const server = http2.createServer(); -server.on( - 'stream', - common.mustCall((stream) => { - const invalidArgTypeError = (param, type) => ({ - type: TypeError, +server.on('stream', common.mustCall((stream) => { + common.expectsError( + () => stream.close('string'), + { code: 'ERR_INVALID_ARG_TYPE', - message: `The "${param}" argument must be of type ${type}` - }); - common.expectsError( - () => stream.rstStream('string'), - invalidArgTypeError('code', 'number') - ); - stream.session.destroy(); - }) -); + type: TypeError, + message: 'The "code" argument must be of type number' + } + ); + stream.respond(); + stream.end('ok'); +})); -server.listen( - 0, - common.mustCall(() => { - const client = http2.connect(`http://localhost:${server.address().port}`); - const req = client.request(); - req.resume(); - req.on('end', common.mustCall(() => 
server.close())); - req.end(); - }) -); +server.listen(0, common.mustCall(() => { + const client = http2.connect(`http://localhost:${server.address().port}`); + const req = client.request(); + req.resume(); + req.on('close', common.mustCall(() => { + server.close(); + client.close(); + })); +})); diff --git a/test/parallel/test-http2-max-concurrent-streams.js b/test/parallel/test-http2-max-concurrent-streams.js index a65ac90c535b03..ffc04e98f134b2 100644 --- a/test/parallel/test-http2-max-concurrent-streams.js +++ b/test/parallel/test-http2-max-concurrent-streams.js @@ -5,64 +5,52 @@ if (!common.hasCrypto) common.skip('missing crypto'); const assert = require('assert'); const h2 = require('http2'); - -const { - HTTP2_HEADER_METHOD, - HTTP2_HEADER_STATUS, - HTTP2_HEADER_PATH, - HTTP2_METHOD_POST -} = h2.constants; +const Countdown = require('../common/countdown'); // Only allow one stream to be open at a time const server = h2.createServer({ settings: { maxConcurrentStreams: 1 } }); // The stream handler must be called only once server.on('stream', common.mustCall((stream) => { - stream.respond({ [HTTP2_HEADER_STATUS]: 200 }); + stream.respond(); stream.end('hello world'); })); -server.listen(0); - -server.on('listening', common.mustCall(() => { +server.listen(0, common.mustCall(() => { const client = h2.connect(`http://localhost:${server.address().port}`); - let reqs = 2; - function onEnd() { - if (--reqs === 0) { - server.close(); - client.destroy(); - } - } + const countdown = new Countdown(2, () => { + server.close(); + client.close(); + }); client.on('remoteSettings', common.mustCall((settings) => { assert.strictEqual(settings.maxConcurrentStreams, 1); })); // This one should go through with no problems - const req1 = client.request({ - [HTTP2_HEADER_PATH]: '/', - [HTTP2_HEADER_METHOD]: HTTP2_METHOD_POST - }); - req1.on('aborted', common.mustNotCall()); - req1.on('response', common.mustCall()); - req1.resume(); - req1.on('end', onEnd); - req1.end(); - - // This one should be aborted - const req2 = client.request({ - [HTTP2_HEADER_PATH]: '/', - [HTTP2_HEADER_METHOD]: HTTP2_METHOD_POST - }); - req2.on('aborted', common.mustCall()); - req2.on('response', common.mustNotCall()); - req2.resume(); - req2.on('end', onEnd); - req2.on('error', common.mustCall(common.expectsError({ - code: 'ERR_HTTP2_STREAM_ERROR', - type: Error, - message: 'Stream closed with error code 7' - }))); + { + const req = client.request({ ':method': 'POST' }); + req.on('aborted', common.mustNotCall()); + req.on('response', common.mustCall()); + req.resume(); + req.on('end', common.mustCall()); + req.on('close', common.mustCall(() => countdown.dec())); + req.end(); + } + { + // This one should be aborted + const req = client.request({ ':method': 'POST' }); + req.on('aborted', common.mustCall()); + req.on('response', common.mustNotCall()); + req.resume(); + req.on('end', common.mustCall()); + req.on('close', common.mustCall(() => countdown.dec())); + req.on('error', common.expectsError({ + code: 'ERR_HTTP2_STREAM_ERROR', + type: Error, + message: 'Stream closed with error code 7' + })); + } })); diff --git a/test/parallel/test-http2-methods.js b/test/parallel/test-http2-methods.js index 36f64f13abcf86..a291bdf00800d5 100644 --- a/test/parallel/test-http2-methods.js +++ b/test/parallel/test-http2-methods.js @@ -41,7 +41,7 @@ server.on('listening', common.mustCall(() => { req.on('end', common.mustCall(() => { if (--expected === 0) { server.close(); - client.destroy(); + client.close(); } })); req.end(); diff --git 
a/test/parallel/test-http2-misbehaving-flow-control-paused.js b/test/parallel/test-http2-misbehaving-flow-control-paused.js index ee799b1d5a27d3..d69e0fd802979a 100644 --- a/test/parallel/test-http2-misbehaving-flow-control-paused.js +++ b/test/parallel/test-http2-misbehaving-flow-control-paused.js @@ -56,32 +56,27 @@ let client; const server = h2.createServer({ settings: { initialWindowSize: 36 } }); server.on('stream', (stream) => { - - // Not reading causes the flow control window to get backed up. + // Set the high water mark to zero, since otherwise we still accept + // reads from the source stream (if we can consume them). + stream._readableState.highWaterMark = 0; stream.pause(); - - stream.on('error', common.mustCall((err) => { - common.expectsError({ - code: 'ERR_HTTP2_STREAM_ERROR', - type: Error, - message: 'Stream closed with error code 3' - })(err); + stream.on('error', common.expectsError({ + code: 'ERR_HTTP2_STREAM_ERROR', + type: Error, + message: 'Stream closed with error code 3' + })); + stream.on('close', common.mustCall(() => { server.close(); client.destroy(); })); - stream.on('end', common.mustNotCall()); - stream.respond(); stream.end('ok'); }); server.listen(0, () => { client = net.connect(server.address().port, () => { - client.on('error', console.log); - client.write(preamble); - client.write(data); client.write(data); client.write(data); diff --git a/test/parallel/test-http2-misbehaving-flow-control.js b/test/parallel/test-http2-misbehaving-flow-control.js index 010e07741316b6..161a88ea1fb407 100644 --- a/test/parallel/test-http2-misbehaving-flow-control.js +++ b/test/parallel/test-http2-misbehaving-flow-control.js @@ -29,6 +29,21 @@ const preamble = Buffer.from([ ]); const data = Buffer.from([ + 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, + 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x0a, 0x68, 0x65, 0x6c, + 0x6c, 0x6f, 0x0a, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x0a, + 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, + 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x0a, 0x68, 0x65, 0x6c, + 0x6c, 0x6f, 0x0a, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x0a, + 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, + 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x0a, 0x68, 0x65, 0x6c, + 0x6c, 0x6f, 0x0a, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x0a, + 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, + 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x0a, 0x68, 0x65, 0x6c, + 0x6c, 0x6f, 0x0a, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x0a, + 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, + 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x0a, 0x68, 0x65, 0x6c, + 0x6c, 0x6f, 0x0a, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x0a, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x0a, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x0a, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x0a @@ -51,30 +66,25 @@ const data = Buffer.from([ let client; const server = h2.createServer({ settings: { initialWindowSize: 18 } }); server.on('stream', (stream) => { - - stream.resume(); - - stream.on('error', common.mustCall((err) => { - common.expectsError({ - code: 'ERR_HTTP2_STREAM_ERROR', - type: Error, - message: 'Stream closed with error code 3' - })(err); - server.close(); + stream.on('error', common.expectsError({ + code: 'ERR_HTTP2_STREAM_ERROR', + type: Error, + message: 'Stream closed with error code 3' + })); + stream.on('close', common.mustCall(() => { + server.close(common.mustCall()); client.destroy(); })); - + stream.resume(); stream.respond(); stream.end('ok'); }); +server.on('close', common.mustCall()); + server.listen(0, () => { client = net.connect(server.address().port, 
() => { - client.on('error', console.log); - client.write(preamble); - - client.write(data); client.write(data); client.write(data); }); diff --git a/test/parallel/test-http2-misbehaving-multiplex.js b/test/parallel/test-http2-misbehaving-multiplex.js new file mode 100644 index 00000000000000..7d5a7a2f552d49 --- /dev/null +++ b/test/parallel/test-http2-misbehaving-multiplex.js @@ -0,0 +1,59 @@ +'use strict'; + +const common = require('../common'); + +if (!common.hasCrypto) + common.skip('missing crypto'); + +const h2 = require('http2'); +const net = require('net'); +const h2test = require('../common/http2'); +let client; + +const server = h2.createServer(); +server.on('stream', common.mustCall((stream) => { + stream.respond(); + stream.end('ok'); +}, 2)); +server.on('session', common.mustCall((session) => { + session.on('error', common.expectsError({ + code: 'ERR_HTTP2_ERROR', + type: Error, + message: 'Stream was already closed or invalid' + })); +})); + +const settings = new h2test.SettingsFrame(); +const settingsAck = new h2test.SettingsFrame(true); +const head1 = new h2test.HeadersFrame(1, h2test.kFakeRequestHeaders, 0, true); +const head2 = new h2test.HeadersFrame(3, h2test.kFakeRequestHeaders, 0, true); +const head3 = new h2test.HeadersFrame(1, h2test.kFakeRequestHeaders, 0, true); +const head4 = new h2test.HeadersFrame(5, h2test.kFakeRequestHeaders, 0, true); + +server.listen(0, () => { + client = net.connect(server.address().port, () => { + client.write(h2test.kClientMagic, () => { + client.write(settings.data, () => { + client.write(settingsAck.data); + // This will make it ok. + client.write(head1.data, () => { + // This will make it ok. + client.write(head2.data, () => { + // This will cause an error to occur because the client is + // attempting to reuse an already closed stream. This must + // cause the server session to be torn down. + client.write(head3.data, () => { + // This won't ever make it to the server + client.write(head4.data); + }); + }); + }); + }); + }); + }); + + // An error may or may not be emitted on the client side, we don't care + // either way if it is, but we don't want to die if it is. 
+ client.on('error', () => {}); + client.on('close', common.mustCall(() => server.close())); +}); diff --git a/test/parallel/test-http2-misused-pseudoheaders.js b/test/parallel/test-http2-misused-pseudoheaders.js index 2ccb676b2199e5..fc53d01a2f6bb0 100644 --- a/test/parallel/test-http2-misused-pseudoheaders.js +++ b/test/parallel/test-http2-misused-pseudoheaders.js @@ -8,11 +8,7 @@ const h2 = require('http2'); const server = h2.createServer(); -// we use the lower-level API here -server.on('stream', common.mustCall(onStream)); - -function onStream(stream, headers, flags) { - +server.on('stream', common.mustCall((stream) => { [ ':path', ':authority', @@ -25,10 +21,7 @@ function onStream(stream, headers, flags) { })); }); - stream.respond({ - 'content-type': 'text/html', - ':status': 200 - }, { + stream.respond({}, { getTrailers: common.mustCall((trailers) => { trailers[':status'] = 'bar'; }) @@ -39,22 +32,24 @@ function onStream(stream, headers, flags) { })); stream.end('hello world'); -} - -server.listen(0); +})); -server.on('listening', common.mustCall(() => { +server.listen(0, common.mustCall(() => { const client = h2.connect(`http://localhost:${server.address().port}`); + const req = client.request(); - const req = client.request({ ':path': '/' }); + req.on('error', common.expectsError({ + code: 'ERR_HTTP2_STREAM_ERROR', + type: Error, + message: 'Stream closed with error code 2' + })); req.on('response', common.mustCall()); req.resume(); - req.on('end', common.mustCall(() => { + req.on('end', common.mustCall()); + req.on('close', common.mustCall(() => { server.close(); - client.destroy(); + client.close(); })); - req.end(); - })); diff --git a/test/parallel/test-http2-multi-content-length.js b/test/parallel/test-http2-multi-content-length.js index d0f0094d2408aa..4d18356f127da0 100644 --- a/test/parallel/test-http2-multi-content-length.js +++ b/test/parallel/test-http2-multi-content-length.js @@ -4,6 +4,7 @@ const common = require('../common'); if (!common.hasCrypto) common.skip('missing crypto'); const http2 = require('http2'); +const Countdown = require('../common/countdown'); const server = http2.createServer(); @@ -15,29 +16,25 @@ server.on('stream', common.mustCall((stream) => { server.listen(0, common.mustCall(() => { const client = http2.connect(`http://localhost:${server.address().port}`); - let remaining = 3; - function maybeClose() { - if (--remaining === 0) { - server.close(); - client.destroy(); - } - } + const countdown = new Countdown(2, () => { + server.close(); + client.close(); + }); - { - // Request 1 will fail because there are two content-length header values - const req = client.request({ - ':method': 'POST', - 'content-length': 1, - 'Content-Length': 2 - }); - req.on('error', common.expectsError({ + // Request 1 will fail because there are two content-length header values + common.expectsError( + () => { + client.request({ + ':method': 'POST', + 'content-length': 1, + 'Content-Length': 2 + }); + }, { code: 'ERR_HTTP2_HEADER_SINGLE_VALUE', type: Error, message: 'Header field "content-length" must have only a single value' - })); - req.on('error', common.mustCall(maybeClose)); - req.end('a'); - } + } + ); { // Request 2 will succeed @@ -46,7 +43,8 @@ server.listen(0, common.mustCall(() => { 'content-length': 1 }); req.resume(); - req.on('end', common.mustCall(maybeClose)); + req.on('end', common.mustCall()); + req.on('close', common.mustCall(() => countdown.dec())); req.end('a'); } @@ -55,7 +53,8 @@ server.listen(0, common.mustCall(() => { // header to be set for 
non-payload bearing requests... const req = client.request({ 'content-length': 1 }); req.resume(); - req.on('end', common.mustCall(maybeClose)); + req.on('end', common.mustCall()); + req.on('close', common.mustCall(() => countdown.dec())); req.on('error', common.expectsError({ code: 'ERR_HTTP2_STREAM_ERROR', type: Error, diff --git a/test/parallel/test-http2-multiheaders-raw.js b/test/parallel/test-http2-multiheaders-raw.js index c06bf23bff3071..50486450d5aeb7 100644 --- a/test/parallel/test-http2-multiheaders-raw.js +++ b/test/parallel/test-http2-multiheaders-raw.js @@ -44,6 +44,6 @@ server.listen(0, common.mustCall(() => { const req = client.request(src); req.on('close', common.mustCall(() => { server.close(); - client.destroy(); + client.close(); })); })); diff --git a/test/parallel/test-http2-multiheaders.js b/test/parallel/test-http2-multiheaders.js index 5e477104091cb1..9bf8f76d22e60e 100644 --- a/test/parallel/test-http2-multiheaders.js +++ b/test/parallel/test-http2-multiheaders.js @@ -56,6 +56,6 @@ server.listen(0, common.mustCall(() => { req.on('response', common.mustCall(checkHeaders)); req.on('close', common.mustCall(() => { server.close(); - client.destroy(); + client.close(); })); })); diff --git a/test/parallel/test-http2-multiplex.js b/test/parallel/test-http2-multiplex.js index c818a28572eca7..1778bced5f92f4 100644 --- a/test/parallel/test-http2-multiplex.js +++ b/test/parallel/test-http2-multiplex.js @@ -8,6 +8,7 @@ if (!common.hasCrypto) common.skip('missing crypto'); const assert = require('assert'); const http2 = require('http2'); +const Countdown = require('../common/countdown'); const server = http2.createServer(); @@ -20,15 +21,12 @@ server.on('stream', common.mustCall((stream) => { server.listen(0, common.mustCall(() => { const client = http2.connect(`http://localhost:${server.address().port}`); + client.setMaxListeners(100); - let remaining = count; - - function maybeClose() { - if (--remaining === 0) { - server.close(); - client.destroy(); - } - } + const countdown = new Countdown(count, () => { + server.close(); + client.close(); + }); function doRequest() { const req = client.request({ ':method': 'POST ' }); @@ -38,8 +36,8 @@ server.listen(0, common.mustCall(() => { req.on('data', (chunk) => data += chunk); req.on('end', common.mustCall(() => { assert.strictEqual(data, 'abcdefghij'); - maybeClose(); })); + req.on('close', common.mustCall(() => countdown.dec())); let n = 0; function writeChunk() { diff --git a/test/parallel/test-http2-no-more-streams.js b/test/parallel/test-http2-no-more-streams.js index 6f4169756c0b4a..dd06a709f23023 100644 --- a/test/parallel/test-http2-no-more-streams.js +++ b/test/parallel/test-http2-no-more-streams.js @@ -25,7 +25,7 @@ server.listen(0, common.mustCall(() => { const countdown = new Countdown(2, common.mustCall(() => { server.close(); - client.destroy(); + client.close(); })); { diff --git a/test/parallel/test-http2-options-max-headers-block-length.js b/test/parallel/test-http2-options-max-headers-block-length.js index 402803dd33a73a..a728c28c6576d4 100644 --- a/test/parallel/test-http2-options-max-headers-block-length.js +++ b/test/parallel/test-http2-options-max-headers-block-length.js @@ -10,9 +10,7 @@ const server = h2.createServer(); // we use the lower-level API here server.on('stream', common.mustNotCall()); -server.listen(0); - -server.on('listening', common.mustCall(() => { +server.listen(0, common.mustCall(() => { // Setting the maxSendHeaderBlockLength, then attempting to send a // headers block that is too big 
should cause a 'frameError' to @@ -24,13 +22,13 @@ server.on('listening', common.mustCall(() => { const client = h2.connect(`http://localhost:${server.address().port}`, options); - const req = client.request({ ':path': '/' }); - + const req = client.request(); req.on('response', common.mustNotCall()); req.resume(); - req.on('end', common.mustCall(() => { - client.destroy(); + req.on('close', common.mustCall(() => { + client.close(); + server.close(); })); req.on('frameError', common.mustCall((type, code) => { @@ -42,33 +40,4 @@ server.on('listening', common.mustCall(() => { type: Error, message: 'Stream closed with error code 7' })); - - req.end(); - - // if no frameError listener, should emit 'error' with - // code ERR_HTTP2_FRAME_ERROR - const req2 = client.request({ ':path': '/' }); - - req2.on('response', common.mustNotCall()); - - req2.resume(); - req2.on('end', common.mustCall(() => { - server.close(); - client.destroy(); - })); - - req2.once('error', common.mustCall((err) => { - common.expectsError({ - code: 'ERR_HTTP2_FRAME_ERROR', - type: Error - })(err); - req2.on('error', common.expectsError({ - code: 'ERR_HTTP2_STREAM_ERROR', - type: Error, - message: 'Stream closed with error code 7' - })); - })); - - req2.end(); - })); diff --git a/test/parallel/test-http2-options-max-reserved-streams.js b/test/parallel/test-http2-options-max-reserved-streams.js index d54ca6a7886b3c..994a8817451686 100644 --- a/test/parallel/test-http2-options-max-reserved-streams.js +++ b/test/parallel/test-http2-options-max-reserved-streams.js @@ -5,20 +5,24 @@ if (!common.hasCrypto) common.skip('missing crypto'); const assert = require('assert'); const h2 = require('http2'); +const Countdown = require('../common/countdown'); const server = h2.createServer(); +let client; + +const countdown = new Countdown(3, () => { + server.close(); + client.close(); +}); // we use the lower-level API here server.on('stream', common.mustCall((stream) => { - stream.respond({ ':status': 200 }); - // The first pushStream will complete as normal stream.pushStream({ - ':scheme': 'http', ':path': '/foobar', - ':authority': `localhost:${server.address().port}`, - }, common.mustCall((pushedStream) => { - pushedStream.respond({ ':status': 200 }); + }, common.mustCall((err, pushedStream) => { + assert.ifError(err); + pushedStream.respond(); pushedStream.end(); pushedStream.on('aborted', common.mustNotCall()); })); @@ -27,52 +31,41 @@ server.on('stream', common.mustCall((stream) => { // will reject it due to the maxReservedRemoteStreams option // being set to only 1 stream.pushStream({ - ':scheme': 'http', ':path': '/foobar', - ':authority': `localhost:${server.address().port}`, - }, common.mustCall((pushedStream) => { - pushedStream.respond({ ':status': 200 }); + }, common.mustCall((err, pushedStream) => { + assert.ifError(err); + pushedStream.respond(); pushedStream.on('aborted', common.mustCall()); pushedStream.on('error', common.mustNotCall()); - pushedStream.on('close', - common.mustCall((code) => assert.strictEqual(code, 8))); + pushedStream.on('close', common.mustCall((code) => { + assert.strictEqual(code, 8); + countdown.dec(); + })); })); + stream.respond(); stream.end('hello world'); })); server.listen(0); server.on('listening', common.mustCall(() => { + client = h2.connect(`http://localhost:${server.address().port}`, + { maxReservedRemoteStreams: 1 }); - const options = { - maxReservedRemoteStreams: 1 - }; - - const client = h2.connect(`http://localhost:${server.address().port}`, - options); - - let remaining = 2; - 
function maybeClose() { - if (--remaining === 0) { - server.close(); - client.destroy(); - } - } - - const req = client.request({ ':path': '/' }); + const req = client.request(); // Because maxReservedRemoteStream is 1, the stream event // must only be emitted once, even tho the server sends // two push streams. client.on('stream', common.mustCall((stream) => { stream.resume(); + stream.on('push', common.mustCall()); stream.on('end', common.mustCall()); - stream.on('close', common.mustCall(maybeClose)); + stream.on('close', common.mustCall(() => countdown.dec())); })); req.on('response', common.mustCall()); - req.resume(); req.on('end', common.mustCall()); - req.on('close', common.mustCall(maybeClose)); + req.on('close', common.mustCall(() => countdown.dec())); })); diff --git a/test/parallel/test-http2-padding-aligned.js b/test/parallel/test-http2-padding-aligned.js new file mode 100644 index 00000000000000..183eaef7389360 --- /dev/null +++ b/test/parallel/test-http2-padding-aligned.js @@ -0,0 +1,68 @@ +'use strict'; + +const common = require('../common'); +if (!common.hasCrypto) + common.skip('missing crypto'); +const assert = require('assert'); +const http2 = require('http2'); +const { PADDING_STRATEGY_ALIGNED } = http2.constants; +const makeDuplexPair = require('../common/duplexpair'); + +{ + const testData = '
<h1>Hello World.</h1>
'; + const server = http2.createServer({ + paddingStrategy: PADDING_STRATEGY_ALIGNED + }); + server.on('stream', common.mustCall((stream, headers) => { + stream.respond({ + 'content-type': 'text/html', + ':status': 200 + }); + stream.end(testData); + })); + + const { clientSide, serverSide } = makeDuplexPair(); + + // The lengths of the expected writes... note that this is highly + // sensitive to how the internals are implemented. + const serverLengths = [24, 9, 9, 32]; + const clientLengths = [9, 9, 48, 9, 1, 21, 1, 16]; + + // Adjust for the 24-byte preamble and two 9-byte settings frames, and + // the result must be equally divisible by 8 + assert.strictEqual( + (serverLengths.reduce((i, n) => i + n) - 24 - 9 - 9) % 8, 0); + + // Adjust for two 9-byte settings frames, and the result must be equally + // divisible by 8 + assert.strictEqual( + (clientLengths.reduce((i, n) => i + n) - 9 - 9) % 8, 0); + + serverSide.on('data', common.mustCall((chunk) => { + assert.strictEqual(chunk.length, serverLengths.shift()); + }, serverLengths.length)); + clientSide.on('data', common.mustCall((chunk) => { + assert.strictEqual(chunk.length, clientLengths.shift()); + }, clientLengths.length)); + + server.emit('connection', serverSide); + + const client = http2.connect('http://localhost:80', { + paddingStrategy: PADDING_STRATEGY_ALIGNED, + createConnection: common.mustCall(() => clientSide) + }); + + const req = client.request({ ':path': '/a' }); + + req.on('response', common.mustCall()); + + req.setEncoding('utf8'); + req.on('data', common.mustCall((data) => { + assert.strictEqual(data, testData); + })); + req.on('close', common.mustCall(() => { + clientSide.destroy(); + clientSide.end(); + })); + req.end(); +} diff --git a/test/parallel/test-http2-padding-callback.js b/test/parallel/test-http2-padding-callback.js index af547ad498da1b..6d6a6b27221b07 100644 --- a/test/parallel/test-http2-padding-callback.js +++ b/test/parallel/test-http2-padding-callback.js @@ -45,7 +45,7 @@ server.on('listening', common.mustCall(() => { req.resume(); req.on('end', common.mustCall(() => { server.close(); - client.destroy(); + client.close(); })); req.end(); })); diff --git a/test/parallel/test-http2-perf_hooks.js b/test/parallel/test-http2-perf_hooks.js new file mode 100644 index 00000000000000..e30d0ac83e0d1f --- /dev/null +++ b/test/parallel/test-http2-perf_hooks.js @@ -0,0 +1,110 @@ +'use strict'; + +const common = require('../common'); +if (!common.hasCrypto) + common.skip('missing crypto'); +const assert = require('assert'); +const h2 = require('http2'); + +const { PerformanceObserver, performance } = require('perf_hooks'); + +const obs = new PerformanceObserver(common.mustCall((items) => { + const entry = items.getEntries()[0]; + assert.strictEqual(entry.entryType, 'http2'); + assert.strictEqual(typeof entry.startTime, 'number'); + assert.strictEqual(typeof entry.duration, 'number'); + switch (entry.name) { + case 'Http2Session': + assert.strictEqual(typeof entry.pingRTT, 'number'); + assert.strictEqual(typeof entry.streamAverageDuration, 'number'); + assert.strictEqual(typeof entry.streamCount, 'number'); + assert.strictEqual(typeof entry.framesReceived, 'number'); + assert.strictEqual(typeof entry.framesSent, 'number'); + assert.strictEqual(typeof entry.bytesWritten, 'number'); + assert.strictEqual(typeof entry.bytesRead, 'number'); + assert.strictEqual(typeof entry.maxConcurrentStreams, 'number'); + switch (entry.type) { + case 'server': + assert.strictEqual(entry.streamCount, 1); + 
assert.strictEqual(entry.framesReceived, 5); + break; + case 'client': + assert.strictEqual(entry.streamCount, 1); + assert.strictEqual(entry.framesReceived, 8); + break; + default: + assert.fail('invalid Http2Session type'); + } + break; + case 'Http2Stream': + assert.strictEqual(typeof entry.timeToFirstByte, 'number'); + assert.strictEqual(typeof entry.timeToFirstByteSent, 'number'); + assert.strictEqual(typeof entry.timeToFirstHeader, 'number'); + assert.strictEqual(typeof entry.bytesWritten, 'number'); + assert.strictEqual(typeof entry.bytesRead, 'number'); + break; + default: + assert.fail('invalid entry name'); + } + performance.clearEntries('http2'); +}, 4)); +obs.observe({ entryTypes: ['http2'] }); + +const body = + '
this is some data
'; + +const server = h2.createServer(); + +// we use the lower-level API here +server.on('stream', common.mustCall(onStream)); + +function onStream(stream, headers, flags) { + assert.strictEqual(headers[':scheme'], 'http'); + assert.ok(headers[':authority']); + assert.strictEqual(headers[':method'], 'GET'); + assert.strictEqual(flags, 5); + stream.respond({ + 'content-type': 'text/html', + ':status': 200 + }); + stream.write(body.slice(0, 20)); + stream.end(body.slice(20)); +} + +server.on('session', common.mustCall((session) => { + session.ping(common.mustCall()); +})); + +server.listen(0); + +server.on('listening', common.mustCall(() => { + + const client = h2.connect(`http://localhost:${server.address().port}`); + + client.on('connect', common.mustCall(() => { + client.ping(common.mustCall()); + })); + + const req = client.request(); + + req.on('response', common.mustCall()); + + let data = ''; + req.setEncoding('utf8'); + req.on('data', (d) => data += d); + req.on('end', common.mustCall(() => { + assert.strictEqual(body, data); + })); + req.on('close', common.mustCall(() => { + client.close(); + server.close(); + })); + +})); + +process.on('exit', () => { + const entries = performance.getEntries(); + // There shouldn't be any http2 entries left over. + assert.strictEqual(entries.length, 1); + assert.strictEqual(entries[0], performance.nodeTiming); +}); diff --git a/test/parallel/test-http2-ping-unsolicited-ack.js b/test/parallel/test-http2-ping-unsolicited-ack.js new file mode 100644 index 00000000000000..5a3a261cb098b1 --- /dev/null +++ b/test/parallel/test-http2-ping-unsolicited-ack.js @@ -0,0 +1,43 @@ +'use strict'; + +const common = require('../common'); +if (!common.hasCrypto) + common.skip('missing crypto'); + +const http2 = require('http2'); +const net = require('net'); +const http2util = require('../common/http2'); + +// Test that ping flooding causes the session to be torn down + +const kSettings = new http2util.SettingsFrame(); +const kPingAck = new http2util.PingFrame(true); + +const server = http2.createServer(); + +server.on('stream', common.mustNotCall()); +server.on('session', common.mustCall((session) => { + session.on('error', common.expectsError({ + code: 'ERR_HTTP2_ERROR', + message: 'Protocol error' + })); + session.on('close', common.mustCall(() => server.close())); +})); + +server.listen(0, common.mustCall(() => { + const client = net.connect(server.address().port); + + client.on('connect', common.mustCall(() => { + client.write(http2util.kClientMagic, () => { + client.write(kSettings.data); + // Send an unsolicited ping ack + client.write(kPingAck.data); + }); + })); + + // An error event may or may not be emitted, depending on operating system + // and timing. We do not really care if one is emitted here or not, as the + // error on the server side is what we are testing for. Do not make this + // a common.mustCall() and there's no need to check the error details. 
+ client.on('error', () => {}); +})); diff --git a/test/parallel/test-http2-ping.js b/test/parallel/test-http2-ping.js index 4892d67b4d738d..32fb8926e4716c 100644 --- a/test/parallel/test-http2-ping.js +++ b/test/parallel/test-http2-ping.js @@ -80,7 +80,7 @@ server.listen(0, common.mustCall(() => { const req = client.request(); req.resume(); req.on('end', common.mustCall(() => { - client.destroy(); + client.close(); server.close(); })); })); diff --git a/test/parallel/test-http2-pipe.js b/test/parallel/test-http2-pipe.js index 8b446f4f88118b..2a759f9848721b 100644 --- a/test/parallel/test-http2-pipe.js +++ b/test/parallel/test-http2-pipe.js @@ -8,40 +8,40 @@ const assert = require('assert'); const http2 = require('http2'); const fs = require('fs'); const path = require('path'); -const Countdown = require('../common/countdown'); // piping should work as expected with createWriteStream -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); const loc = fixtures.path('url-tests.js'); -const fn = path.join(common.tmpDir, 'http2-url-tests.js'); +const fn = path.join(tmpdir.path, 'http2-url-tests.js'); const server = http2.createServer(); server.on('stream', common.mustCall((stream) => { const dest = stream.pipe(fs.createWriteStream(fn)); - dest.on('finish', common.mustCall(() => { - assert.strictEqual(fs.readFileSync(loc).length, fs.readFileSync(fn).length); - fs.unlinkSync(fn); - stream.respond(); - stream.end(); - })); + + dest.on('finish', () => { + assert.strictEqual(fs.readFileSync(loc).length, + fs.readFileSync(fn).length); + }); + stream.respond(); + stream.end(); })); server.listen(0, common.mustCall(() => { - const port = server.address().port; - const client = http2.connect(`http://localhost:${port}`); - - const countdown = new Countdown(2, common.mustCall(() => { - server.close(); - client.destroy(); - })); + const client = http2.connect(`http://localhost:${server.address().port}`); const req = client.request({ ':method': 'POST' }); req.on('response', common.mustCall()); req.resume(); - req.on('end', common.mustCall(() => countdown.dec())); + + req.on('close', common.mustCall(() => { + server.close(); + client.close(); + })); + const str = fs.createReadStream(loc); - str.on('end', common.mustCall(() => countdown.dec())); + str.on('end', common.mustCall()); str.pipe(req); })); diff --git a/test/parallel/test-http2-priority-cycle-.js b/test/parallel/test-http2-priority-cycle-.js new file mode 100644 index 00000000000000..af0d66d8343cbf --- /dev/null +++ b/test/parallel/test-http2-priority-cycle-.js @@ -0,0 +1,69 @@ +'use strict'; + +const common = require('../common'); +if (!common.hasCrypto) + common.skip('missing crypto'); +const assert = require('assert'); +const http2 = require('http2'); +const Countdown = require('../common/countdown'); + +const server = http2.createServer(); +const largeBuffer = Buffer.alloc(1e4); + +// Verify that a dependency cycle may exist, but that it doesn't crash anything + +server.on('stream', common.mustCall((stream) => { + stream.respond(); + setImmediate(() => { + stream.end(largeBuffer); + }); +}, 3)); +server.on('session', common.mustCall((session) => { + session.on('priority', (id, parent, weight, exclusive) => { + assert.strictEqual(weight, 16); + assert.strictEqual(exclusive, false); + switch (id) { + case 1: + assert.strictEqual(parent, 5); + break; + case 3: + assert.strictEqual(parent, 1); + break; + case 5: + assert.strictEqual(parent, 3); + break; + default: + assert.fail('should not happen'); + } + }); +})); 
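Editor's note: many hunks in this patch replace hand-rolled `let remaining = N` / `maybeClose()` counters with the shared `Countdown` helper from `test/common/countdown`, and the test below constructs `new Countdown(3, ...)`. As a rough, hedged sketch of what that helper is assumed to do (the real module may differ in detail):

// Hypothetical sketch of test/common/countdown; illustrative only.
const assert = require('assert');

class Countdown {
  constructor(limit, done) {
    this.limit = limit;  // number of expected dec() calls
    this.done = done;    // invoked once the count reaches zero
  }

  dec() {
    assert(this.limit > 0, 'dec() called after the countdown completed');
    if (--this.limit === 0)
      this.done();
    return this.limit;
  }

  get remaining() {
    return this.limit;
  }
}

module.exports = Countdown;

// Typical usage, mirroring the converted tests: tear down shared resources
// only after every request has emitted 'close'.
// const countdown = new Countdown(2, () => { client.close(); server.close(); });
// req.on('close', () => countdown.dec());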
+ +server.listen(0, common.mustCall(() => { + const client = http2.connect(`http://localhost:${server.address().port}`); + + const countdown = new Countdown(3, () => { + client.close(); + server.close(); + }); + + { + const req = client.request(); + req.priority({ parent: 5 }); + req.resume(); + req.on('close', () => countdown.dec()); + } + + { + const req = client.request(); + req.priority({ parent: 1 }); + req.resume(); + req.on('close', () => countdown.dec()); + } + + { + const req = client.request(); + req.priority({ parent: 3 }); + req.resume(); + req.on('close', () => countdown.dec()); + } +})); diff --git a/test/parallel/test-http2-priority-event.js b/test/parallel/test-http2-priority-event.js index b0704902d31101..fe04ffb342d70d 100644 --- a/test/parallel/test-http2-priority-event.js +++ b/test/parallel/test-http2-priority-event.js @@ -54,7 +54,7 @@ server.on('listening', common.mustCall(() => { req.resume(); req.on('end', common.mustCall(() => { server.close(); - client.destroy(); + client.close(); })); req.end(); diff --git a/test/parallel/test-http2-respond-errors.js b/test/parallel/test-http2-respond-errors.js index 45dbe8530e9018..5854c4fb8d02e4 100644 --- a/test/parallel/test-http2-respond-errors.js +++ b/test/parallel/test-http2-respond-errors.js @@ -5,88 +5,81 @@ const common = require('../common'); if (!common.hasCrypto) common.skip('missing crypto'); const http2 = require('http2'); -const { - constants, - Http2Stream, - nghttp2ErrorString -} = process.binding('http2'); +const { Http2Stream } = process.binding('http2'); + +const types = { + boolean: true, + function: () => {}, + number: 1, + object: {}, + array: [], + null: null, + symbol: Symbol('test') +}; -// tests error handling within respond -// - every other NGHTTP2 error from binding (should emit stream error) +const server = http2.createServer(); -const specificTestKeys = []; +Http2Stream.prototype.respond = () => 1; +server.on('stream', common.mustCall((stream) => { -const specificTests = []; + // Check for all possible TypeError triggers on options.getTrailers + Object.entries(types).forEach(([type, value]) => { + if (type === 'function') { + return; + } -const genericTests = Object.getOwnPropertyNames(constants) - .filter((key) => ( - key.indexOf('NGHTTP2_ERR') === 0 && specificTestKeys.indexOf(key) < 0 - )) - .map((key) => ({ - ngError: constants[key], - error: { - code: 'ERR_HTTP2_ERROR', + common.expectsError( + () => stream.respond({ + 'content-type': 'text/plain' + }, { + ['getTrailers']: value + }), + { + type: TypeError, + code: 'ERR_INVALID_OPT_VALUE', + message: `The value "${String(value)}" is invalid ` + + 'for option "getTrailers"' + } + ); + }); + + // Send headers + stream.respond({ + 'content-type': 'text/plain' + }, { + ['getTrailers']: () => common.mustCall() + }); + + // Should throw if headers already sent + common.expectsError( + () => stream.respond(), + { type: Error, - message: nghttp2ErrorString(constants[key]) - }, - type: 'stream' - })); - - -const tests = specificTests.concat(genericTests); - -let currentError; - -// mock submitResponse because we only care about testing error handling -Http2Stream.prototype.respond = () => currentError.ngError; - -const server = http2.createServer(); -server.on('stream', common.mustCall((stream, headers) => { - const errorMustCall = common.expectsError(currentError.error); - const errorMustNotCall = common.mustNotCall( - `${currentError.error.code} should emit on ${currentError.type}` + code: 'ERR_HTTP2_HEADERS_SENT', + message: 'Response has already 
been initiated.' + } ); - if (currentError.type === 'stream') { - stream.session.on('error', errorMustNotCall); - stream.on('error', errorMustCall); - stream.on('error', common.mustCall(() => { - stream.destroy(); - })); - } else { - stream.session.once('error', errorMustCall); - stream.on('error', errorMustNotCall); - } - - stream.respond(); -}, tests.length)); - -server.listen(0, common.mustCall(() => runTest(tests.shift()))); - -function runTest(test) { - const port = server.address().port; - const url = `http://localhost:${port}`; - const headers = { - ':path': '/', - ':method': 'POST', - ':scheme': 'http', - ':authority': `localhost:${port}` - }; + // Should throw if stream already destroyed + stream.destroy(); + common.expectsError( + () => stream.respond(), + { + type: Error, + code: 'ERR_HTTP2_INVALID_STREAM', + message: 'The stream has been destroyed' + } + ); +})); - const client = http2.connect(url); - const req = client.request(headers); - - currentError = test; - req.resume(); - req.end(); +server.listen(0, common.mustCall(() => { + const client = http2.connect(`http://localhost:${server.address().port}`); + const req = client.request(); req.on('end', common.mustCall(() => { - client.destroy(); - - if (!tests.length) { - server.close(); - } else { - runTest(tests.shift()); - } + client.close(); + server.close(); })); -} + req.resume(); + req.end(); +})); diff --git a/test/parallel/test-http2-respond-file-204.js b/test/parallel/test-http2-respond-file-204.js index 8181dbb317dab2..1171866e9373ab 100644 --- a/test/parallel/test-http2-respond-file-204.js +++ b/test/parallel/test-http2-respond-file-204.js @@ -35,7 +35,7 @@ server.listen(0, () => { req.on('response', common.mustCall()); req.on('data', common.mustNotCall()); req.on('end', common.mustCall(() => { - client.destroy(); + client.close(); server.close(); })); req.end(); diff --git a/test/parallel/test-http2-respond-file-304.js b/test/parallel/test-http2-respond-file-304.js index e6e0842c7f9448..536c48c624e73c 100644 --- a/test/parallel/test-http2-respond-file-304.js +++ b/test/parallel/test-http2-respond-file-304.js @@ -38,7 +38,7 @@ server.listen(0, () => { req.on('data', common.mustNotCall()); req.on('end', common.mustCall(() => { - client.destroy(); + client.close(); server.close(); })); req.end(); diff --git a/test/parallel/test-http2-respond-file-404.js b/test/parallel/test-http2-respond-file-404.js index ba62f384485bc0..60bc21f185dd5c 100644 --- a/test/parallel/test-http2-respond-file-404.js +++ b/test/parallel/test-http2-respond-file-404.js @@ -40,7 +40,7 @@ server.listen(0, () => { })); req.on('data', common.mustNotCall()); req.on('end', common.mustCall(() => { - client.destroy(); + client.close(); server.close(); })); req.end(); diff --git a/test/parallel/test-http2-respond-file-compat.js b/test/parallel/test-http2-respond-file-compat.js index 0f6e3199d68ab2..0205f2d0d85aaf 100644 --- a/test/parallel/test-http2-respond-file-compat.js +++ b/test/parallel/test-http2-respond-file-compat.js @@ -16,7 +16,7 @@ server.listen(0, () => { const req = client.request(); req.on('response', common.mustCall()); req.on('end', common.mustCall(() => { - client.destroy(); + client.close(); server.close(); })); req.end(); diff --git a/test/parallel/test-http2-respond-file-error-dir.js b/test/parallel/test-http2-respond-file-error-dir.js index 18a9540451f865..6818616227df89 100644 --- a/test/parallel/test-http2-respond-file-error-dir.js +++ b/test/parallel/test-http2-respond-file-error-dir.js @@ -6,14 +6,10 @@ if (!common.hasCrypto) 
const http2 = require('http2'); const assert = require('assert'); -const { - HTTP2_HEADER_CONTENT_TYPE -} = http2.constants; - const server = http2.createServer(); server.on('stream', (stream) => { stream.respondWithFile(process.cwd(), { - [HTTP2_HEADER_CONTENT_TYPE]: 'text/plain' + 'content-type': 'text/plain' }, { onError(err) { common.expectsError({ @@ -38,7 +34,7 @@ server.listen(0, () => { })); req.on('data', common.mustNotCall()); req.on('end', common.mustCall(() => { - client.destroy(); + client.close(); server.close(); })); req.end(); diff --git a/test/parallel/test-http2-respond-file-errors.js b/test/parallel/test-http2-respond-file-errors.js index c2c749873c82ac..83d3900bc5c288 100644 --- a/test/parallel/test-http2-respond-file-errors.js +++ b/test/parallel/test-http2-respond-file-errors.js @@ -6,11 +6,6 @@ if (!common.hasCrypto) const fixtures = require('../common/fixtures'); const http2 = require('http2'); -const { - HTTP2_HEADER_CONTENT_TYPE, - HTTP2_HEADER_METHOD -} = http2.constants; - const optionsWithTypeError = { offset: 'number', length: 'number', @@ -33,6 +28,7 @@ const fname = fixtures.path('elipses.txt'); const server = http2.createServer(); server.on('stream', common.mustCall((stream) => { + // Check for all possible TypeError triggers on options Object.keys(optionsWithTypeError).forEach((option) => { Object.keys(types).forEach((type) => { @@ -42,7 +38,7 @@ server.on('stream', common.mustCall((stream) => { common.expectsError( () => stream.respondWithFile(fname, { - [http2.constants.HTTP2_HEADER_CONTENT_TYPE]: 'text/plain' + 'content-type': 'text/plain' }, { [option]: types[type] }), @@ -59,7 +55,7 @@ server.on('stream', common.mustCall((stream) => { // Should throw if :status 204, 205 or 304 [204, 205, 304].forEach((status) => common.expectsError( () => stream.respondWithFile(fname, { - [HTTP2_HEADER_CONTENT_TYPE]: 'text/plain', + 'content-type': 'text/plain', ':status': status, }), { @@ -68,31 +64,11 @@ server.on('stream', common.mustCall((stream) => { } )); - // should emit an error on the stream if headers aren't valid - stream.respondWithFile(fname, { - [HTTP2_HEADER_METHOD]: 'POST' - }, { - statCheck: common.mustCall(() => { - // give time to the current test case to finish - process.nextTick(continueTest, stream); - return true; - }) - }); - stream.once('error', common.expectsError({ - code: 'ERR_HTTP2_INVALID_PSEUDOHEADER', - type: Error, - message: '":method" is an invalid pseudoheader or is used incorrectly' - })); -})); - -function continueTest(stream) { // Should throw if headers already sent - stream.respond({ - ':status': 200, - }); + stream.respond({ ':status': 200 }); common.expectsError( () => stream.respondWithFile(fname, { - [HTTP2_HEADER_CONTENT_TYPE]: 'text/plain' + 'content-type': 'text/plain' }), { code: 'ERR_HTTP2_HEADERS_SENT', @@ -104,21 +80,21 @@ function continueTest(stream) { stream.destroy(); common.expectsError( () => stream.respondWithFile(fname, { - [HTTP2_HEADER_CONTENT_TYPE]: 'text/plain' + 'content-type': 'text/plain' }), { code: 'ERR_HTTP2_INVALID_STREAM', message: 'The stream has been destroyed' } ); -} +})); server.listen(0, common.mustCall(() => { const client = http2.connect(`http://localhost:${server.address().port}`); const req = client.request(); req.on('close', common.mustCall(() => { - client.destroy(); + client.close(); server.close(); })); req.end(); diff --git a/test/parallel/test-http2-respond-file-fd-errors.js b/test/parallel/test-http2-respond-file-fd-errors.js index 9458b2f49af087..44876b60e1c4cb 100644 --- 
a/test/parallel/test-http2-respond-file-fd-errors.js +++ b/test/parallel/test-http2-respond-file-fd-errors.js @@ -7,11 +7,6 @@ const fixtures = require('../common/fixtures'); const http2 = require('http2'); const fs = require('fs'); -const { - HTTP2_HEADER_CONTENT_TYPE, - HTTP2_HEADER_METHOD -} = http2.constants; - const optionsWithTypeError = { offset: 'number', length: 'number', @@ -43,7 +38,7 @@ server.on('stream', common.mustCall((stream) => { common.expectsError( () => stream.respondWithFD(types[type], { - [HTTP2_HEADER_CONTENT_TYPE]: 'text/plain' + 'content-type': 'text/plain' }), { type: TypeError, @@ -62,7 +57,7 @@ server.on('stream', common.mustCall((stream) => { common.expectsError( () => stream.respondWithFD(fd, { - [HTTP2_HEADER_CONTENT_TYPE]: 'text/plain' + 'content-type': 'text/plain' }, { [option]: types[type] }), @@ -79,7 +74,7 @@ server.on('stream', common.mustCall((stream) => { // Should throw if :status 204, 205 or 304 [204, 205, 304].forEach((status) => common.expectsError( () => stream.respondWithFD(fd, { - [HTTP2_HEADER_CONTENT_TYPE]: 'text/plain', + 'content-type': 'text/plain', ':status': status, }), { @@ -89,35 +84,11 @@ server.on('stream', common.mustCall((stream) => { } )); - // should emit an error on the stream if headers aren't valid - stream.respondWithFD(fd, { - [HTTP2_HEADER_METHOD]: 'POST' - }, { - statCheck() { - return true; - } - }); - stream.once('error', common.expectsError({ - code: 'ERR_HTTP2_INVALID_PSEUDOHEADER', - type: Error, - message: '":method" is an invalid pseudoheader or is used incorrectly' - })); - stream.respondWithFD(fd, { - [HTTP2_HEADER_METHOD]: 'POST' - }); - stream.once('error', common.expectsError({ - code: 'ERR_HTTP2_INVALID_PSEUDOHEADER', - type: Error, - message: '":method" is an invalid pseudoheader or is used incorrectly' - })); - // Should throw if headers already sent - stream.respond({ - ':status': 200, - }); + stream.respond(); common.expectsError( () => stream.respondWithFD(fd, { - [HTTP2_HEADER_CONTENT_TYPE]: 'text/plain' + 'content-type': 'text/plain' }), { code: 'ERR_HTTP2_HEADERS_SENT', @@ -130,7 +101,7 @@ server.on('stream', common.mustCall((stream) => { stream.destroy(); common.expectsError( () => stream.respondWithFD(fd, { - [HTTP2_HEADER_CONTENT_TYPE]: 'text/plain' + 'content-type': 'text/plain' }), { code: 'ERR_HTTP2_INVALID_STREAM', @@ -145,7 +116,7 @@ server.listen(0, common.mustCall(() => { const req = client.request(); req.on('close', common.mustCall(() => { - client.destroy(); + client.close(); server.close(); })); req.end(); diff --git a/test/parallel/test-http2-respond-file-fd-invalid.js b/test/parallel/test-http2-respond-file-fd-invalid.js index f3bcab8904bee4..77a4d3df00d0d6 100644 --- a/test/parallel/test-http2-respond-file-fd-invalid.js +++ b/test/parallel/test-http2-respond-file-fd-invalid.js @@ -31,7 +31,7 @@ server.listen(0, () => { req.on('data', common.mustNotCall()); req.on('end', common.mustCall(() => { assert.strictEqual(req.rstCode, NGHTTP2_INTERNAL_ERROR); - client.destroy(); + client.close(); server.close(); })); req.end(); diff --git a/test/parallel/test-http2-respond-file-fd-range.js b/test/parallel/test-http2-respond-file-fd-range.js index 8479dca518558e..2dd73e0001544c 100644 --- a/test/parallel/test-http2-respond-file-fd-range.js +++ b/test/parallel/test-http2-respond-file-fd-range.js @@ -9,6 +9,7 @@ const fixtures = require('../common/fixtures'); const http2 = require('http2'); const assert = require('assert'); const fs = require('fs'); +const Countdown = 
require('../common/countdown'); const { HTTP2_HEADER_CONTENT_TYPE, @@ -39,7 +40,7 @@ server.on('stream', (stream, headers) => { statCheck: common.mustCall((stat, headers, options) => { assert.strictEqual(options.length, length); assert.strictEqual(options.offset, offset); - headers[HTTP2_HEADER_CONTENT_LENGTH] = + headers['content-length'] = Math.min(options.length, stat.size - offset); }), offset: offset, @@ -47,23 +48,21 @@ server.on('stream', (stream, headers) => { }); }); server.on('close', common.mustCall(() => fs.closeSync(fd))); + server.listen(0, () => { const client = http2.connect(`http://localhost:${server.address().port}`); - let remaining = 2; - function maybeClose() { - if (--remaining === 0) { - client.destroy(); - server.close(); - } - } + const countdown = new Countdown(2, () => { + client.close(); + server.close(); + }); { const req = client.request({ range: 'bytes=8-11' }); req.on('response', common.mustCall((headers) => { - assert.strictEqual(headers[HTTP2_HEADER_CONTENT_TYPE], 'text/plain'); - assert.strictEqual(+headers[HTTP2_HEADER_CONTENT_LENGTH], 3); + assert.strictEqual(headers['content-type'], 'text/plain'); + assert.strictEqual(+headers['content-length'], 3); })); req.setEncoding('utf8'); let check = ''; @@ -71,7 +70,7 @@ server.listen(0, () => { req.on('end', common.mustCall(() => { assert.strictEqual(check, data.toString('utf8', 8, 11)); })); - req.on('close', common.mustCall(maybeClose)); + req.on('close', common.mustCall(() => countdown.dec())); req.end(); } @@ -88,7 +87,7 @@ server.listen(0, () => { req.on('end', common.mustCall(() => { assert.strictEqual(check, data.toString('utf8', 8, 28)); })); - req.on('close', common.mustCall(maybeClose)); + req.on('close', common.mustCall(() => countdown.dec())); req.end(); } diff --git a/test/parallel/test-http2-respond-file-fd.js b/test/parallel/test-http2-respond-file-fd.js index 303d25be3f2b66..7d4395bbc360aa 100644 --- a/test/parallel/test-http2-respond-file-fd.js +++ b/test/parallel/test-http2-respond-file-fd.js @@ -40,7 +40,7 @@ server.listen(0, () => { req.on('data', (chunk) => check += chunk); req.on('end', common.mustCall(() => { assert.strictEqual(check, data.toString('utf8')); - client.destroy(); + client.close(); server.close(); })); req.end(); diff --git a/test/parallel/test-http2-respond-file-push.js b/test/parallel/test-http2-respond-file-push.js index 4f7b179faf81a8..a5229beb07d1a7 100644 --- a/test/parallel/test-http2-respond-file-push.js +++ b/test/parallel/test-http2-respond-file-push.js @@ -29,7 +29,8 @@ server.on('stream', (stream) => { stream.pushStream({ ':path': '/file.txt', ':method': 'GET' - }, (stream) => { + }, (err, stream) => { + assert.ifError(err); stream.respondWithFD(fd, { [HTTP2_HEADER_CONTENT_TYPE]: 'text/plain', [HTTP2_HEADER_CONTENT_LENGTH]: stat.size, @@ -50,7 +51,7 @@ server.listen(0, () => { function maybeClose() { if (--expected === 0) { server.close(); - client.destroy(); + client.close(); } } diff --git a/test/parallel/test-http2-respond-file-range.js b/test/parallel/test-http2-respond-file-range.js index a5995cbba77c1c..4e6a6074514f14 100644 --- a/test/parallel/test-http2-respond-file-range.js +++ b/test/parallel/test-http2-respond-file-range.js @@ -46,7 +46,7 @@ server.listen(0, () => { req.on('data', (chunk) => check += chunk); req.on('end', common.mustCall(() => { assert.strictEqual(check, data.toString('utf8', 8, 11)); - client.destroy(); + client.close(); server.close(); })); req.end(); diff --git a/test/parallel/test-http2-respond-file.js 
b/test/parallel/test-http2-respond-file.js index c2f513b7cae2b7..9ad8e7a69648dc 100644 --- a/test/parallel/test-http2-respond-file.js +++ b/test/parallel/test-http2-respond-file.js @@ -45,7 +45,7 @@ server.listen(0, () => { req.on('data', (chunk) => check += chunk); req.on('end', common.mustCall(() => { assert.strictEqual(check, data.toString('utf8')); - client.destroy(); + client.close(); server.close(); })); req.end(); diff --git a/test/parallel/test-http2-rststream-errors.js b/test/parallel/test-http2-respond-nghttperrors.js similarity index 78% rename from test/parallel/test-http2-rststream-errors.js rename to test/parallel/test-http2-respond-nghttperrors.js index eacf7855117503..5ec953c5442360 100644 --- a/test/parallel/test-http2-rststream-errors.js +++ b/test/parallel/test-http2-respond-nghttperrors.js @@ -1,5 +1,5 @@ -// Flags: --expose-http2 'use strict'; +// Flags: --expose-internals const common = require('../common'); if (!common.hasCrypto) @@ -10,11 +10,13 @@ const { Http2Stream, nghttp2ErrorString } = process.binding('http2'); +const { NghttpError } = require('internal/http2/util'); -// tests error handling within rstStream +// tests error handling within respond // - every other NGHTTP2 error from binding (should emit stream error) const specificTestKeys = []; + const specificTests = []; const genericTests = Object.getOwnPropertyNames(constants) @@ -25,7 +27,8 @@ const genericTests = Object.getOwnPropertyNames(constants) ngError: constants[key], error: { code: 'ERR_HTTP2_ERROR', - type: Error, + type: NghttpError, + name: 'Error [ERR_HTTP2_ERROR]', message: nghttp2ErrorString(constants[key]) }, type: 'stream' @@ -36,8 +39,8 @@ const tests = specificTests.concat(genericTests); let currentError; -// mock submitRstStream because we only care about testing error handling -Http2Stream.prototype.rstStream = () => currentError.ngError; +// mock submitResponse because we only care about testing error handling +Http2Stream.prototype.respond = () => currentError.ngError; const server = http2.createServer(); server.on('stream', common.mustCall((stream, headers) => { @@ -50,14 +53,14 @@ server.on('stream', common.mustCall((stream, headers) => { stream.session.on('error', errorMustNotCall); stream.on('error', errorMustCall); stream.on('error', common.mustCall(() => { - stream.session.destroy(); + stream.destroy(); })); } else { stream.session.once('error', errorMustCall); stream.on('error', errorMustNotCall); } - stream.rstStream(); + stream.respond(); }, tests.length)); server.listen(0, common.mustCall(() => runTest(tests.shift()))); @@ -74,17 +77,18 @@ function runTest(test) { const client = http2.connect(url); const req = client.request(headers); + req.on('error', common.expectsError({ + code: 'ERR_HTTP2_STREAM_ERROR', + type: Error, + message: 'Stream closed with error code 2' + })); currentError = test; req.resume(); req.end(); - if (currentError.type === 'stream') { - req.on('error', common.mustCall()); - } - req.on('end', common.mustCall(() => { - client.destroy(); + client.close(); if (!tests.length) { server.close(); diff --git a/test/parallel/test-http2-respond-no-data.js b/test/parallel/test-http2-respond-no-data.js index d891fe4e8ddd2b..9572bdffe54927 100644 --- a/test/parallel/test-http2-respond-no-data.js +++ b/test/parallel/test-http2-respond-no-data.js @@ -27,7 +27,7 @@ function makeRequest() { req.resume(); req.on('end', common.mustCall(() => { - client.destroy(); + client.close(); if (!status.length) { server.close(); diff --git 
a/test/parallel/test-http2-respond-with-fd-errors.js b/test/parallel/test-http2-respond-with-fd-errors.js index 1d32a2f45c28bc..b7ff09225b6202 100644 --- a/test/parallel/test-http2-respond-with-fd-errors.js +++ b/test/parallel/test-http2-respond-with-fd-errors.js @@ -83,12 +83,18 @@ function runTest(test) { const client = http2.connect(url); const req = client.request(headers); + req.on('error', common.expectsError({ + code: 'ERR_HTTP2_STREAM_ERROR', + type: Error, + message: 'Stream closed with error code 2' + })); + currentError = test; req.resume(); req.end(); req.on('end', common.mustCall(() => { - client.destroy(); + client.close(); if (!tests.length) { server.close(); diff --git a/test/parallel/test-http2-response-splitting.js b/test/parallel/test-http2-response-splitting.js index 1d9b616105f450..9613eca9636ae4 100644 --- a/test/parallel/test-http2-response-splitting.js +++ b/test/parallel/test-http2-response-splitting.js @@ -55,7 +55,7 @@ server.listen(0, common.mustCall(() => { function maybeClose() { if (remaining === 0) { server.close(); - client.destroy(); + client.close(); } } diff --git a/test/parallel/test-http2-sent-headers.js b/test/parallel/test-http2-sent-headers.js new file mode 100644 index 00000000000000..bffa4d71c6d5f3 --- /dev/null +++ b/test/parallel/test-http2-sent-headers.js @@ -0,0 +1,47 @@ +'use strict'; + +const common = require('../common'); +if (!common.hasCrypto) + common.skip('missing crypto'); +const assert = require('assert'); +const h2 = require('http2'); + +const server = h2.createServer(); + +server.on('stream', common.mustCall((stream) => { + stream.additionalHeaders({ ':status': 102 }); + assert.strictEqual(stream.sentInfoHeaders[0][':status'], 102); + + stream.respond({ abc: 'xyz' }, { + getTrailers(headers) { + headers.xyz = 'abc'; + } + }); + assert.strictEqual(stream.sentHeaders.abc, 'xyz'); + assert.strictEqual(stream.sentHeaders[':status'], 200); + assert.notStrictEqual(stream.sentHeaders.date, undefined); + stream.end(); + stream.on('close', () => { + assert.strictEqual(stream.sentTrailers.xyz, 'abc'); + }); +})); + +server.listen(0, common.mustCall(() => { + const client = h2.connect(`http://localhost:${server.address().port}`); + const req = client.request(); + + req.on('headers', common.mustCall((headers) => { + assert.strictEqual(headers[':status'], 102); + })); + + assert.strictEqual(req.sentHeaders[':method'], 'GET'); + assert.strictEqual(req.sentHeaders[':authority'], + `localhost:${server.address().port}`); + assert.strictEqual(req.sentHeaders[':scheme'], 'http'); + assert.strictEqual(req.sentHeaders[':path'], '/'); + req.resume(); + req.on('close', () => { + server.close(); + client.close(); + }); +})); diff --git a/test/parallel/test-http2-serve-file.js b/test/parallel/test-http2-serve-file.js index af82360e464b31..7b73fe639e0cc5 100644 --- a/test/parallel/test-http2-serve-file.js +++ b/test/parallel/test-http2-serve-file.js @@ -48,7 +48,7 @@ server.listen(0, () => { let remaining = 2; function maybeClose() { if (--remaining === 0) { - client.destroy(); + client.close(); server.close(); } } diff --git a/test/parallel/test-http2-server-errors.js b/test/parallel/test-http2-server-errors.js index 7d7db6a24538fd..a3586bd64d46e7 100644 --- a/test/parallel/test-http2-server-errors.js +++ b/test/parallel/test-http2-server-errors.js @@ -6,7 +6,6 @@ if (!common.hasCrypto) common.skip('missing crypto'); const assert = require('assert'); const h2 = require('http2'); -const { Http2Stream } = require('internal/http2/core'); // Errors should not 
be reported both in Http2ServerRequest // and Http2ServerResponse @@ -29,11 +28,6 @@ const { Http2Stream } = require('internal/http2/core'); server.close(); })); - server.on('streamError', common.mustCall(function(err, stream) { - assert.strictEqual(err, expected); - assert.strictEqual(stream instanceof Http2Stream, true); - })); - server.listen(0, common.mustCall(function() { const port = server.address().port; @@ -70,11 +64,6 @@ const { Http2Stream } = require('internal/http2/core'); server.close(); })); - server.on('streamError', common.mustCall(function(err, stream) { - assert.strictEqual(err, expected); - assert.strictEqual(stream instanceof Http2Stream, true); - })); - server.listen(0, common.mustCall(function() { const port = server.address().port; diff --git a/test/parallel/test-http2-server-http1-client.js b/test/parallel/test-http2-server-http1-client.js index ef3a79c0fd143a..34a8f48b5e130d 100644 --- a/test/parallel/test-http2-server-http1-client.js +++ b/test/parallel/test-http2-server-http1-client.js @@ -12,11 +12,14 @@ const server = http2.createServer(); server.on('stream', common.mustNotCall()); server.on('session', common.mustCall((session) => { session.on('close', common.mustCall()); + session.on('error', common.expectsError({ + code: 'ERR_HTTP2_ERROR', + type: Error, + message: 'Received bad client magic byte string' + })); })); server.listen(0, common.mustCall(() => { const req = http.get(`http://localhost:${server.address().port}`); - req.on('error', (error) => { - server.close(); - }); + req.on('error', (error) => server.close()); })); diff --git a/test/parallel/test-http2-server-push-disabled.js b/test/parallel/test-http2-server-push-disabled.js index c0148fe63b672e..eef8194c57e806 100644 --- a/test/parallel/test-http2-server-push-disabled.js +++ b/test/parallel/test-http2-server-push-disabled.js @@ -42,13 +42,13 @@ server.listen(0, common.mustCall(() => { options); const req = client.request({ ':path': '/' }); - // Because push stream sre disabled, this must not be called. + // Because push streams are disabled, this must not be called. 
client.on('stream', common.mustNotCall()); req.resume(); req.on('end', common.mustCall(() => { server.close(); - client.destroy(); + client.close(); })); req.end(); })); diff --git a/test/parallel/test-http2-server-push-stream-errors-args.js b/test/parallel/test-http2-server-push-stream-errors-args.js index f8bd28137c3368..bea187baf31cdc 100644 --- a/test/parallel/test-http2-server-push-stream-errors-args.js +++ b/test/parallel/test-http2-server-push-stream-errors-args.js @@ -50,7 +50,7 @@ server.listen(0, common.mustCall(() => { req.on('end', common.mustCall(() => { assert.strictEqual(data, 'test'); server.close(); - client.destroy(); + client.close(); })); req.end(); })); diff --git a/test/parallel/test-http2-server-push-stream-errors.js b/test/parallel/test-http2-server-push-stream-errors.js index 56e329dcff1cd2..7eaf4dc94d15e2 100644 --- a/test/parallel/test-http2-server-push-stream-errors.js +++ b/test/parallel/test-http2-server-push-stream-errors.js @@ -34,9 +34,8 @@ const specificTests = [ { ngError: constants.NGHTTP2_ERR_STREAM_CLOSED, error: { - code: 'ERR_HTTP2_STREAM_CLOSED', - type: Error, - message: 'The stream is already closed' + code: 'ERR_HTTP2_INVALID_STREAM', + type: Error }, type: 'stream' }, @@ -66,47 +65,25 @@ Http2Stream.prototype.pushPromise = () => currentError.ngError; const server = http2.createServer(); server.on('stream', common.mustCall((stream, headers) => { - const errorMustCall = common.expectsError(currentError.error); - const errorMustNotCall = common.mustNotCall( - `${currentError.error.code} should emit on ${currentError.type}` - ); - - if (currentError.type === 'stream') { - stream.session.on('error', errorMustNotCall); - stream.on('error', errorMustCall); - stream.on('error', common.mustCall(() => { - stream.respond(); - stream.end(); - })); - } else { - stream.session.once('error', errorMustCall); - stream.on('error', errorMustNotCall); - } - - stream.pushStream({}, () => {}); + stream.pushStream({}, common.expectsError(currentError.error)); + stream.respond(); + stream.end(); }, tests.length)); server.listen(0, common.mustCall(() => runTest(tests.shift()))); function runTest(test) { - const port = server.address().port; - const url = `http://localhost:${port}`; - const headers = { - ':path': '/', - ':method': 'POST', - ':scheme': 'http', - ':authority': `localhost:${port}` - }; + const url = `http://localhost:${server.address().port}`; const client = http2.connect(url); - const req = client.request(headers); + const req = client.request(); currentError = test; req.resume(); req.end(); - req.on('end', common.mustCall(() => { - client.destroy(); + req.on('close', common.mustCall(() => { + client.close(); if (!tests.length) { server.close(); diff --git a/test/parallel/test-http2-server-push-stream-head.js b/test/parallel/test-http2-server-push-stream-head.js index c2fc8db4a92eba..cd2276746f4bdd 100644 --- a/test/parallel/test-http2-server-push-stream-head.js +++ b/test/parallel/test-http2-server-push-stream-head.js @@ -5,6 +5,7 @@ if (!common.hasCrypto) common.skip('missing crypto'); const assert = require('assert'); const http2 = require('http2'); +const Countdown = require('../common/countdown'); // Check that pushStream handles method HEAD correctly // - stream should end immediately (no body) @@ -17,8 +18,10 @@ server.on('stream', common.mustCall((stream, headers) => { ':scheme': 'http', ':method': 'HEAD', ':authority': `localhost:${port}`, - }, common.mustCall((push, headers) => { + }, common.mustCall((err, push, headers) => { 
assert.strictEqual(push._writableState.ended, true); + push.respond(); + assert(!push.write('test')); stream.end('test'); })); } @@ -30,15 +33,26 @@ server.on('stream', common.mustCall((stream, headers) => { server.listen(0, common.mustCall(() => { const port = server.address().port; - const headers = { ':path': '/' }; const client = http2.connect(`http://localhost:${port}`); - const req = client.request(headers); + + const countdown = new Countdown(2, () => { + server.close(); + client.close(); + }); + + const req = client.request(); req.setEncoding('utf8'); client.on('stream', common.mustCall((stream, headers) => { + assert.strictEqual(headers[':method'], 'HEAD'); assert.strictEqual(headers[':scheme'], 'http'); assert.strictEqual(headers[':path'], '/'); assert.strictEqual(headers[':authority'], `localhost:${port}`); + stream.on('push', common.mustCall(() => { + stream.on('data', common.mustNotCall()); + stream.on('end', common.mustCall()); + })); + stream.on('close', common.mustCall(() => countdown.dec())); })); let data = ''; @@ -46,8 +60,7 @@ server.listen(0, common.mustCall(() => { req.on('data', common.mustCall((d) => data += d)); req.on('end', common.mustCall(() => { assert.strictEqual(data, 'test'); - server.close(); - client.destroy(); })); + req.on('close', common.mustCall(() => countdown.dec())); req.end(); })); diff --git a/test/parallel/test-http2-server-push-stream.js b/test/parallel/test-http2-server-push-stream.js index 395743869198ca..6ac10cae77f951 100644 --- a/test/parallel/test-http2-server-push-stream.js +++ b/test/parallel/test-http2-server-push-stream.js @@ -14,7 +14,8 @@ server.on('stream', common.mustCall((stream, headers) => { ':scheme': 'http', ':path': '/foobar', ':authority': `localhost:${port}`, - }, common.mustCall((push, headers) => { + }, common.mustCall((err, push, headers) => { + assert.ifError(err); push.respond({ 'content-type': 'text/html', ':status': 200, @@ -53,7 +54,7 @@ server.listen(0, common.mustCall(() => { req.on('end', common.mustCall(() => { assert.strictEqual(data, 'test'); server.close(); - client.destroy(); + client.close(); })); req.end(); })); diff --git a/test/parallel/test-http2-server-rst-before-respond.js b/test/parallel/test-http2-server-rst-before-respond.js index 47ba68bd29ed81..2cdea07a168194 100644 --- a/test/parallel/test-http2-server-rst-before-respond.js +++ b/test/parallel/test-http2-server-rst-before-respond.js @@ -12,7 +12,7 @@ const server = h2.createServer(); server.on('stream', common.mustCall(onStream)); function onStream(stream, headers, flags) { - stream.rstStream(); + stream.close(); assert.throws(() => { stream.additionalHeaders({ @@ -28,19 +28,13 @@ function onStream(stream, headers, flags) { server.listen(0); server.on('listening', common.mustCall(() => { - const client = h2.connect(`http://localhost:${server.address().port}`); - - const req = client.request({ ':path': '/' }); - + const req = client.request(); req.on('headers', common.mustNotCall()); - req.on('close', common.mustCall((code) => { assert.strictEqual(h2.constants.NGHTTP2_NO_ERROR, code); server.close(); - client.destroy(); + client.close(); })); - req.on('response', common.mustNotCall()); - })); diff --git a/test/parallel/test-http2-server-rst-stream.js b/test/parallel/test-http2-server-rst-stream.js index 4b04f29c8ec7c0..c2d938c22f4483 100644 --- a/test/parallel/test-http2-server-rst-stream.js +++ b/test/parallel/test-http2-server-rst-stream.js @@ -16,39 +16,38 @@ const { } = http2.constants; const tests = [ - ['rstStream', NGHTTP2_NO_ERROR, 
false], - ['rstWithNoError', NGHTTP2_NO_ERROR, false], - ['rstWithProtocolError', NGHTTP2_PROTOCOL_ERROR, true], - ['rstWithCancel', NGHTTP2_CANCEL, false], - ['rstWithRefuse', NGHTTP2_REFUSED_STREAM, true], - ['rstWithInternalError', NGHTTP2_INTERNAL_ERROR, true] + [NGHTTP2_NO_ERROR, false], + [NGHTTP2_NO_ERROR, false], + [NGHTTP2_PROTOCOL_ERROR, true], + [NGHTTP2_CANCEL, false], + [NGHTTP2_REFUSED_STREAM, true], + [NGHTTP2_INTERNAL_ERROR, true] ]; const server = http2.createServer(); server.on('stream', (stream, headers) => { - const method = headers['rstmethod']; - stream[method](); + stream.close(headers['rstcode'] | 0); }); server.listen(0, common.mustCall(() => { const client = http2.connect(`http://localhost:${server.address().port}`); const countdown = new Countdown(tests.length, common.mustCall(() => { - client.destroy(); + client.close(); server.close(); })); tests.forEach((test) => { const req = client.request({ ':method': 'POST', - rstmethod: test[0] + rstcode: test[0] }); req.on('close', common.mustCall((code) => { - assert.strictEqual(code, test[1]); + assert.strictEqual(code, test[0]); countdown.dec(); })); req.on('aborted', common.mustCall()); - if (test[2]) + if (test[1]) req.on('error', common.mustCall()); else req.on('error', common.mustNotCall()); diff --git a/test/parallel/test-http2-server-sessionerror.js b/test/parallel/test-http2-server-sessionerror.js new file mode 100644 index 00000000000000..525eb2e6efd11a --- /dev/null +++ b/test/parallel/test-http2-server-sessionerror.js @@ -0,0 +1,48 @@ +// Flags: --expose-internals + +'use strict'; + +const common = require('../common'); +if (!common.hasCrypto) + common.skip('missing crypto'); +const http2 = require('http2'); +const { kSocket } = require('internal/http2/util'); + +const server = http2.createServer(); +server.on('stream', common.mustNotCall()); + +let test = 0; + +server.on('session', common.mustCall((session) => { + switch (++test) { + case 1: + server.on('error', common.mustNotCall()); + session.on('error', common.expectsError({ + type: Error, + message: 'test' + })); + session[kSocket].emit('error', new Error('test')); + break; + case 2: + // If the server does not have a socketError listener, + // error will be silent on the server but will close + // the session + session[kSocket].emit('error', new Error('test')); + break; + } +}, 2)); + +server.listen(0, common.mustCall(() => { + const url = `http://localhost:${server.address().port}`; + http2.connect(url) + // An ECONNRESET error may occur depending on the platform (due largely + // to differences in the timing of socket closing). Do not wrap this in + // a common must call. 
+ .on('error', () => {}) + .on('close', () => { + server.removeAllListeners('error'); + http2.connect(url) + .on('error', () => {}) + .on('close', () => server.close()); + }); +})); diff --git a/test/parallel/test-http2-server-set-header.js b/test/parallel/test-http2-server-set-header.js index ed27638f6849f4..4b6228053f8ece 100644 --- a/test/parallel/test-http2-server-set-header.js +++ b/test/parallel/test-http2-server-set-header.js @@ -29,7 +29,7 @@ server.listen(0, common.mustCall(() => { req.on('end', () => { assert.strictEqual(body, data); server.close(); - client.destroy(); + client.close(); }); req.end(); })); diff --git a/test/parallel/test-http2-server-shutdown-before-respond.js b/test/parallel/test-http2-server-shutdown-before-respond.js index c3ad9714b5f39b..33f224fc69a9d5 100644 --- a/test/parallel/test-http2-server-shutdown-before-respond.js +++ b/test/parallel/test-http2-server-shutdown-before-respond.js @@ -11,24 +11,26 @@ const server = h2.createServer(); server.on('stream', common.mustCall(onStream)); function onStream(stream, headers, flags) { - const session = stream.session; - stream.session.shutdown({ graceful: true }, common.mustCall(() => { - session.destroy(); - })); - stream.respond({}); + stream.session.goaway(1); + stream.respond(); stream.end('data'); } server.listen(0); server.on('listening', common.mustCall(() => { - const client = h2.connect(`http://localhost:${server.address().port}`); client.on('goaway', common.mustCall()); + client.on('error', common.expectsError({ + code: 'ERR_HTTP2_SESSION_ERROR' + })); const req = client.request(); - + req.on('error', common.expectsError({ + code: 'ERR_HTTP2_SESSION_ERROR' + })); req.resume(); + req.on('data', common.mustNotCall()); req.on('end', common.mustCall(() => server.close())); })); diff --git a/test/parallel/test-http2-server-shutdown-options-errors.js b/test/parallel/test-http2-server-shutdown-options-errors.js index 673723e961c87d..2aedec1140701a 100644 --- a/test/parallel/test-http2-server-shutdown-options-errors.js +++ b/test/parallel/test-http2-server-shutdown-options-errors.js @@ -8,55 +8,63 @@ const http2 = require('http2'); const server = http2.createServer(); -const optionsToTest = { - opaqueData: 'Uint8Array', - graceful: 'boolean', - errorCode: 'number', - lastStreamID: 'number' -}; +const types = [ + true, + {}, + [], + null, + new Date() +]; -const types = { - boolean: true, - number: 1, - object: {}, - array: [], - null: null, - Uint8Array: Buffer.from([0x1, 0x2, 0x3, 0x4, 0x5]) -}; +server.on('stream', common.mustCall((stream) => { + const session = stream.session; -server.on( - 'stream', - common.mustCall((stream) => { - Object.keys(optionsToTest).forEach((option) => { - Object.keys(types).forEach((type) => { - if (type === optionsToTest[option]) { - return; - } - common.expectsError( - () => - stream.session.shutdown( - { [option]: types[type] }, - common.mustNotCall() - ), - { - type: TypeError, - code: 'ERR_INVALID_OPT_VALUE', - message: `The value "${String(types[type])}" is invalid ` + - `for option "${option}"` - } - ); - }); - }); - stream.session.destroy(); - }) -); + types.forEach((i) => { + common.expectsError( + () => session.goaway(i), + { + code: 'ERR_INVALID_ARG_TYPE', + type: TypeError, + message: 'The "code" argument must be of type number' + } + ); + common.expectsError( + () => session.goaway(0, i), + { + code: 'ERR_INVALID_ARG_TYPE', + type: TypeError, + message: 'The "lastStreamID" argument must be of type number' + } + ); + common.expectsError( + () => session.goaway(0, 0, 
i), + { + code: 'ERR_INVALID_ARG_TYPE', + type: TypeError, + message: 'The "opaqueData" argument must be one of type Buffer, ' + + 'TypedArray, or DataView' + } + ); + }); + + stream.session.destroy(); +})); server.listen( 0, common.mustCall(() => { const client = http2.connect(`http://localhost:${server.address().port}`); + // On certain operating systems, an ECONNRESET may occur. We do not need + // to test for it here. Do not make this a mustCall + client.on('error', () => {}); const req = client.request(); + // On certain operating systems, an ECONNRESET may occur. We do not need + // to test for it here. Do not make this a mustCall + req.on('error', () => {}); req.resume(); - req.on('end', common.mustCall(() => server.close())); + req.on('close', common.mustCall(() => { + client.close(); + server.close(); + })); }) ); diff --git a/test/parallel/test-http2-server-shutdown-redundant.js b/test/parallel/test-http2-server-shutdown-redundant.js index 6740728a06343d..ac0893cd46eab6 100644 --- a/test/parallel/test-http2-server-shutdown-redundant.js +++ b/test/parallel/test-http2-server-shutdown-redundant.js @@ -4,27 +4,38 @@ const common = require('../common'); if (!common.hasCrypto) common.skip('missing crypto'); -const assert = require('assert'); const http2 = require('http2'); const server = http2.createServer(); -// Test blank return when a stream.session.shutdown is called twice -// Also tests stream.session.shutdown with just a callback function (no options) server.on('stream', common.mustCall((stream) => { - stream.session.shutdown(common.mustCall(() => { - assert.strictEqual( - stream.session.shutdown(common.mustNotCall()), - undefined + const session = stream.session; + session.goaway(1); + session.goaway(2); + stream.session.on('close', common.mustCall(() => { + common.expectsError( + () => session.goaway(3), + { + code: 'ERR_HTTP2_INVALID_SESSION', + type: Error + } ); })); - stream.session.shutdown(common.mustNotCall()); })); server.listen(0, common.mustCall(() => { const client = http2.connect(`http://localhost:${server.address().port}`); + client.on('error', common.expectsError({ + code: 'ERR_HTTP2_SESSION_ERROR' + })); const req = client.request(); + req.on('error', common.expectsError({ + code: 'ERR_HTTP2_SESSION_ERROR' + })); req.resume(); - req.on('end', common.mustCall(() => server.close())); + req.on('close', common.mustCall(() => { + server.close(); + client.close(); + })); })); diff --git a/test/parallel/test-http2-server-socket-destroy.js b/test/parallel/test-http2-server-socket-destroy.js index 8291c415284571..03afc1957b8af4 100644 --- a/test/parallel/test-http2-server-socket-destroy.js +++ b/test/parallel/test-http2-server-socket-destroy.js @@ -9,22 +9,13 @@ const assert = require('assert'); const h2 = require('http2'); const { kSocket } = require('internal/http2/util'); -const { - HTTP2_HEADER_METHOD, - HTTP2_HEADER_PATH, - HTTP2_METHOD_POST -} = h2.constants; - const server = h2.createServer(); // we use the lower-level API here server.on('stream', common.mustCall(onStream)); function onStream(stream) { - stream.respond({ - 'content-type': 'text/html', - ':status': 200 - }); + stream.respond(); stream.write('test'); const socket = stream.session[kSocket]; @@ -32,6 +23,7 @@ function onStream(stream) { // When the socket is destroyed, the close events must be triggered // on the socket, server and session. 
socket.on('close', common.mustCall()); + stream.on('close', common.mustCall()); server.on('close', common.mustCall()); stream.session.on('close', common.mustCall(() => server.close())); @@ -40,23 +32,25 @@ function onStream(stream) { assert.notStrictEqual(stream.session, undefined); socket.destroy(); - stream.on('destroy', common.mustCall(() => { - assert.strictEqual(stream.session, undefined); - })); } server.listen(0); server.on('listening', common.mustCall(() => { const client = h2.connect(`http://localhost:${server.address().port}`); + // The client may have an ECONNRESET error here depending on the operating + // system, due mainly to differences in the timing of socket closing. Do + // not wrap this in a common mustCall. + client.on('error', () => {}); + client.on('close', common.mustCall()); - const req = client.request({ - [HTTP2_HEADER_PATH]: '/', - [HTTP2_HEADER_METHOD]: HTTP2_METHOD_POST }); + const req = client.request({ ':method': 'POST' }); + // The client may have an ECONNRESET error here depending on the operating + // system, due mainly to differences in the timing of socket closing. Do + // not wrap this in a common mustCall. + req.on('error', () => {}); req.on('aborted', common.mustCall()); req.resume(); req.on('end', common.mustCall()); - - client.on('close', common.mustCall()); })); diff --git a/test/parallel/test-http2-server-socketerror.js b/test/parallel/test-http2-server-socketerror.js deleted file mode 100644 index 9f52b9280d2779..00000000000000 --- a/test/parallel/test-http2-server-socketerror.js +++ /dev/null @@ -1,56 +0,0 @@ -// Flags: --expose-internals - -'use strict'; - -const common = require('../common'); -if (!common.hasCrypto) - common.skip('missing crypto'); -const assert = require('assert'); -const http2 = require('http2'); -const { kSocket } = require('internal/http2/util'); - -const server = http2.createServer(); -server.on('stream', common.mustCall((stream) => { - stream.respond(); - stream.end('ok'); -})); -server.on('session', common.mustCall((session) => { - // First, test that the socketError event is forwarded to the session object - // and not the server object. 
- const handler = common.mustCall((error, socket) => { - common.expectsError({ - type: Error, - message: 'test' - })(error); - assert.strictEqual(socket, session[kSocket]); - }); - const isNotCalled = common.mustNotCall(); - session.on('socketError', handler); - server.on('socketError', isNotCalled); - session[kSocket].emit('error', new Error('test')); - session.removeListener('socketError', handler); - server.removeListener('socketError', isNotCalled); - - // Second, test that the socketError is forwarded to the server object when - // no socketError listener is registered for the session - server.on('socketError', common.mustCall((error, socket, session) => { - common.expectsError({ - type: Error, - message: 'test' - })(error); - assert.strictEqual(socket, session[kSocket]); - assert.strictEqual(session, session); - })); - session[kSocket].emit('error', new Error('test')); -})); - -server.listen(0, common.mustCall(() => { - const client = http2.connect(`http://localhost:${server.address().port}`); - const req = client.request(); - req.resume(); - req.on('end', common.mustCall()); - req.on('close', common.mustCall(() => { - client.destroy(); - server.close(); - })); -})); diff --git a/test/parallel/test-http2-server-stream-session-destroy.js b/test/parallel/test-http2-server-stream-session-destroy.js index 24d064a448f87d..5eb04a8d376635 100644 --- a/test/parallel/test-http2-server-stream-session-destroy.js +++ b/test/parallel/test-http2-server-stream-session-destroy.js @@ -8,56 +8,41 @@ const h2 = require('http2'); const server = h2.createServer(); -server.on( - 'stream', - common.mustCall((stream) => { - stream.session.destroy(); - - // Test that stream.state getter returns an empty object - // when the stream session has been destroyed - assert.deepStrictEqual({}, stream.state); - - // Test that ERR_HTTP2_INVALID_STREAM is thrown while calling - // stream operations after the stream session has been destroyed - const invalidStreamError = { - type: Error, - code: 'ERR_HTTP2_INVALID_STREAM', - message: 'The stream has been destroyed' - }; - common.expectsError(() => stream.additionalHeaders(), invalidStreamError); - common.expectsError(() => stream.priority(), invalidStreamError); - common.expectsError( - () => stream.pushStream({}, common.mustNotCall()), - invalidStreamError - ); - common.expectsError(() => stream.respond(), invalidStreamError); - common.expectsError(() => stream.write('data'), invalidStreamError); - - // Test that ERR_HTTP2_INVALID_SESSION is thrown while calling - // session operations after the stream session has been destroyed - const invalidSessionError = { - type: Error, - code: 'ERR_HTTP2_INVALID_SESSION', - message: 'The session has been destroyed' - }; - common.expectsError(() => stream.session.settings(), invalidSessionError); - common.expectsError(() => stream.session.shutdown(), invalidSessionError); - - // Wait for setImmediate call from destroy() to complete - // so that state.destroyed is set to true - setImmediate((session) => { - common.expectsError(() => session.settings(), invalidSessionError); - common.expectsError(() => session.shutdown(), invalidSessionError); - }, stream.session); - }) -); - -server.listen( - 0, - common.mustCall(() => { - const client = h2.connect(`http://localhost:${server.address().port}`); - const req = client.request(); - req.resume(); - req.on('end', common.mustCall(() => server.close())); - }) -); +server.on('stream', common.mustCall((stream) => { + assert(stream.session); + stream.session.destroy(); + 
assert.strictEqual(stream.session, undefined); + + // Test that stream.state getter returns an empty object + // when the stream session has been destroyed + assert.deepStrictEqual({}, stream.state); + + // Test that ERR_HTTP2_INVALID_STREAM is thrown while calling + // stream operations after the stream session has been destroyed + const invalidStreamError = { + type: Error, + code: 'ERR_HTTP2_INVALID_STREAM', + message: 'The stream has been destroyed' + }; + common.expectsError(() => stream.additionalHeaders(), invalidStreamError); + common.expectsError(() => stream.priority(), invalidStreamError); + common.expectsError(() => stream.respond(), invalidStreamError); + common.expectsError( + () => stream.pushStream({}, common.mustNotCall()), + { + code: 'ERR_HTTP2_PUSH_DISABLED', + type: Error + } + ); + assert.strictEqual(stream.write('data'), false); +})); + +server.listen(0, common.mustCall(() => { + const client = h2.connect(`http://localhost:${server.address().port}`); + client.on('error', () => {}); + const req = client.request(); + req.resume(); + req.on('end', common.mustCall()); + req.on('close', common.mustCall(() => server.close())); + req.on('error', () => {}); +})); diff --git a/test/parallel/test-http2-server-timeout.js b/test/parallel/test-http2-server-timeout.js index 28ab6efb87f6c1..581a409ce9171d 100755 --- a/test/parallel/test-http2-server-timeout.js +++ b/test/parallel/test-http2-server-timeout.js @@ -9,7 +9,7 @@ const server = http2.createServer(); server.setTimeout(common.platformTimeout(1)); const onServerTimeout = common.mustCall((session) => { - session.destroy(); + session.close(() => session.destroy()); }); server.on('stream', common.mustNotCall()); @@ -18,10 +18,14 @@ server.once('timeout', onServerTimeout); server.listen(0, common.mustCall(() => { const url = `http://localhost:${server.address().port}`; const client = http2.connect(url); + // Because of the timeout, an ECONRESET error may or may not happen here. + // Keep this as a non-op and do not use common.mustCall() + client.on('error', () => {}); client.on('close', common.mustCall(() => { - const client2 = http2.connect(url); + // Because of the timeout, an ECONRESET error may or may not happen here. + // Keep this as a non-op and do not use common.mustCall() + client2.on('error', () => {}); client2.on('close', common.mustCall(() => server.close())); - })); })); diff --git a/test/parallel/test-http2-session-gc-while-write-scheduled.js b/test/parallel/test-http2-session-gc-while-write-scheduled.js new file mode 100644 index 00000000000000..bb23760cebf967 --- /dev/null +++ b/test/parallel/test-http2-session-gc-while-write-scheduled.js @@ -0,0 +1,32 @@ +// Flags: --expose-gc + +'use strict'; +const common = require('../common'); +if (!common.hasCrypto) + common.skip('missing crypto'); +const http2 = require('http2'); +const makeDuplexPair = require('../common/duplexpair'); + +// This tests that running garbage collection while an Http2Session has +// a write *scheduled*, it will survive that garbage collection. + +{ + // This creates a session and schedules a write (for the settings frame). + let client = http2.connect('http://localhost:80', { + createConnection: common.mustCall(() => makeDuplexPair().clientSide) + }); + + // First, wait for any nextTicks() and their responses + // from the `connect()` call to run. + tick(10, () => { + // This schedules a write. 
+ client.settings(http2.getDefaultSettings()); + client = null; + global.gc(); + }); +} + +function tick(n, cb) { + if (n--) setImmediate(tick, n, cb); + else cb(); +} diff --git a/test/parallel/test-http2-session-settings.js b/test/parallel/test-http2-session-settings.js index 53ff44dd9ec2fe..75fcc1942104ac 100644 --- a/test/parallel/test-http2-session-settings.js +++ b/test/parallel/test-http2-session-settings.js @@ -68,71 +68,60 @@ server.listen( const req = client.request(headers); - req.on( - 'connect', - common.mustCall(() => { - // pendingSettingsAck will be true if a SETTINGS frame - // has been sent but we are still waiting for an acknowledgement - assert(client.pendingSettingsAck); - }) - ); + req.on('ready', common.mustCall(() => { + // pendingSettingsAck will be true if a SETTINGS frame + // has been sent but we are still waiting for an acknowledgement + assert(client.pendingSettingsAck); + })); // State will only be valid after connect event is emitted - req.on( - 'ready', - common.mustCall(() => { - assert.doesNotThrow(() => { - client.settings({ - maxHeaderListSize: 1 - }); - }); + req.on('ready', common.mustCall(() => { + assert.doesNotThrow(() => { + client.settings({ maxHeaderListSize: 1 }, common.mustCall()); + }); - // Verify valid error ranges - [ - ['headerTableSize', -1], - ['headerTableSize', 2 ** 32], - ['initialWindowSize', -1], - ['initialWindowSize', 2 ** 32], - ['maxFrameSize', 16383], - ['maxFrameSize', 2 ** 24], - ['maxHeaderListSize', -1], - ['maxHeaderListSize', 2 ** 32] - ].forEach((i) => { - const settings = {}; - settings[i[0]] = i[1]; - common.expectsError( - () => client.settings(settings), - { - type: RangeError, - code: 'ERR_HTTP2_INVALID_SETTING_VALUE', - message: `Invalid value for setting "${i[0]}": ${i[1]}` - } - ); - }); + // Verify valid error ranges + [ + ['headerTableSize', -1], + ['headerTableSize', 2 ** 32], + ['initialWindowSize', -1], + ['initialWindowSize', 2 ** 32], + ['maxFrameSize', 16383], + ['maxFrameSize', 2 ** 24], + ['maxHeaderListSize', -1], + ['maxHeaderListSize', 2 ** 32] + ].forEach((i) => { + const settings = {}; + settings[i[0]] = i[1]; + common.expectsError( + () => client.settings(settings), + { + type: RangeError, + code: 'ERR_HTTP2_INVALID_SETTING_VALUE', + message: `Invalid value for setting "${i[0]}": ${i[1]}` + } + ); + }); - // error checks for enablePush - [1, {}, 'test', [], null, Infinity, NaN].forEach((i) => { - common.expectsError( - () => client.settings({ enablePush: i }), - { - type: TypeError, - code: 'ERR_HTTP2_INVALID_SETTING_VALUE', - message: `Invalid value for setting "enablePush": ${i}` - } - ); - }); - }) - ); + // error checks for enablePush + [1, {}, 'test', [], null, Infinity, NaN].forEach((i) => { + common.expectsError( + () => client.settings({ enablePush: i }), + { + type: TypeError, + code: 'ERR_HTTP2_INVALID_SETTING_VALUE', + message: `Invalid value for setting "enablePush": ${i}` + } + ); + }); + })); req.on('response', common.mustCall()); req.resume(); - req.on( - 'end', - common.mustCall(() => { - server.close(); - client.destroy(); - }) - ); + req.on('end', common.mustCall(() => { + server.close(); + client.close(); + })); req.end(); }) ); diff --git a/test/parallel/test-http2-session-stream-state.js b/test/parallel/test-http2-session-stream-state.js index 9bbac3f482cbcf..612feb8cf1e2ca 100644 --- a/test/parallel/test-http2-session-stream-state.js +++ b/test/parallel/test-http2-session-stream-state.js @@ -57,7 +57,7 @@ server.on('listening', common.mustCall(() => { const req = 
client.request(headers); // State will only be valid after connect event is emitted - req.on('connect', common.mustCall(() => { + req.on('ready', common.mustCall(() => { // Test Stream State. { @@ -91,7 +91,7 @@ server.on('listening', common.mustCall(() => { req.resume(); req.on('end', common.mustCall(() => { server.close(); - client.destroy(); + client.close(); })); req.end(); diff --git a/test/parallel/test-http2-session-unref.js b/test/parallel/test-http2-session-unref.js new file mode 100644 index 00000000000000..e765352cdc615d --- /dev/null +++ b/test/parallel/test-http2-session-unref.js @@ -0,0 +1,53 @@ +'use strict'; +// Flags: --expose-internals + +// Tests that calling unref() on Http2Session: +// (1) Prevents it from keeping the process alive +// (2) Doesn't crash + +const common = require('../common'); +if (!common.hasCrypto) + common.skip('missing crypto'); +const http2 = require('http2'); +const makeDuplexPair = require('../common/duplexpair'); + +const server = http2.createServer(); +const { clientSide, serverSide } = makeDuplexPair(); + +// 'session' event should be emitted 3 times: +// - the vanilla client +// - the destroyed client +// - manual 'connection' event emission with generic Duplex stream +server.on('session', common.mustCallAtLeast((session) => { + session.unref(); +}, 3)); + +server.listen(0, common.mustCall(() => { + const port = server.address().port; + + // unref new client + { + const client = http2.connect(`http://localhost:${port}`); + client.unref(); + } + + // unref destroyed client + { + const client = http2.connect(`http://localhost:${port}`); + client.destroy(); + client.unref(); + } + + // unref destroyed client + { + const client = http2.connect(`http://localhost:${port}`, { + createConnection: common.mustCall(() => clientSide) + }); + client.destroy(); + client.unref(); + } +})); +server.emit('connection', serverSide); +server.unref(); + +setTimeout(common.mustNotCall(() => {}), 1000).unref(); diff --git a/test/parallel/test-http2-settings-unsolicited-ack.js b/test/parallel/test-http2-settings-unsolicited-ack.js new file mode 100644 index 00000000000000..fa63e9ee3f6425 --- /dev/null +++ b/test/parallel/test-http2-settings-unsolicited-ack.js @@ -0,0 +1,50 @@ +'use strict'; + +const common = require('../common'); +if (!common.hasCrypto) + common.skip('missing crypto'); + +const assert = require('assert'); +const http2 = require('http2'); +const net = require('net'); +const http2util = require('../common/http2'); +const Countdown = require('../common/countdown'); + +// Test that an unsolicited settings ack is ignored. + +const kSettings = new http2util.SettingsFrame(); +const kSettingsAck = new http2util.SettingsFrame(true); + +const server = http2.createServer(); +let client; + +const countdown = new Countdown(3, () => { + client.destroy(); + server.close(); +}); + +server.on('stream', common.mustNotCall()); +server.on('session', common.mustCall((session) => { + session.on('remoteSettings', common.mustCall(() => countdown.dec())); +})); + +server.listen(0, common.mustCall(() => { + client = net.connect(server.address().port); + + // Ensures that the clients settings frames are not sent until the + // servers are received, so that the first ack is actually expected. + client.once('data', (chunk) => { + // The very first chunk of data we get from the server should + // be a settings frame. + assert.deepStrictEqual(chunk.slice(0, 9), kSettings.data); + // The first ack is expected. 
+ client.write(kSettingsAck.data, () => countdown.dec()); + // The second one is not and will be ignored. + client.write(kSettingsAck.data, () => countdown.dec()); + }); + + client.on('connect', common.mustCall(() => { + client.write(http2util.kClientMagic); + client.write(kSettings.data); + })); +})); diff --git a/test/parallel/test-http2-short-stream-client-server.js b/test/parallel/test-http2-short-stream-client-server.js new file mode 100644 index 00000000000000..e632b8d96b9ea9 --- /dev/null +++ b/test/parallel/test-http2-short-stream-client-server.js @@ -0,0 +1,55 @@ +'use strict'; + +const common = require('../common'); +if (!common.hasCrypto) + common.skip('missing crypto'); +const assert = require('assert'); +const http2 = require('http2'); +const { Readable } = require('stream'); + +const server = http2.createServer(); +server.on('stream', common.mustCall((stream) => { + stream.respond({ + ':status': 200, + 'content-type': 'text/html' + }); + const input = new Readable({ + read() { + this.push('test'); + this.push(null); + } + }); + input.pipe(stream); +})); + + +server.listen(0, common.mustCall(() => { + const port = server.address().port; + const client = http2.connect(`http://localhost:${port}`); + + const req = client.request(); + + req.on('response', common.mustCall((headers) => { + assert.strictEqual(headers[':status'], 200); + assert.strictEqual(headers['content-type'], 'text/html'); + })); + + let data = ''; + + const notCallClose = common.mustNotCall(); + + setTimeout(() => { + req.setEncoding('utf8'); + req.removeListener('close', notCallClose); + req.on('close', common.mustCall(() => { + server.close(); + client.close(); + })); + req.on('data', common.mustCallAtLeast((d) => data += d)); + req.on('end', common.mustCall(() => { + assert.strictEqual(data, 'test'); + })); + }, common.platformTimeout(100)); + + req.on('close', notCallClose); +})); diff --git a/test/parallel/test-http2-shutdown-errors.js b/test/parallel/test-http2-shutdown-errors.js deleted file mode 100644 index 638f9a60f2c395..00000000000000 --- a/test/parallel/test-http2-shutdown-errors.js +++ /dev/null @@ -1,76 +0,0 @@ -// Flags: --expose-http2 -'use strict'; - -const common = require('../common'); -if (!common.hasCrypto) - common.skip('missing crypto'); -const http2 = require('http2'); -const { - constants, - Http2Session, - nghttp2ErrorString -} = process.binding('http2'); - -// tests error handling within shutdown -// - should emit ERR_HTTP2_ERROR on session for all errors - -const tests = Object.getOwnPropertyNames(constants) - .filter((key) => ( - key.indexOf('NGHTTP2_ERR') === 0 - )) - .map((key) => ({ - ngError: constants[key], - error: { - code: 'ERR_HTTP2_ERROR', - type: Error, - message: nghttp2ErrorString(constants[key]) - } - })); - -let currentError; - -// mock submitGoaway because we only care about testing error handling -Http2Session.prototype.goaway = () => currentError.ngError; - -const server = http2.createServer(); -server.on('stream', common.mustCall((stream, headers) => { - const errorMustCall = common.expectsError(currentError.error); - const errorMustNotCall = common.mustNotCall( - `${currentError.error.code} should emit on session` - ); - - stream.session.once('error', errorMustCall); - stream.on('error', errorMustNotCall); - - stream.session.shutdown(); -}, tests.length)); - -server.listen(0, common.mustCall(() => runTest(tests.shift()))); - -function runTest(test) { - const port = server.address().port; - const url = `http://localhost:${port}`; - const headers = { - ':path': 
'/', - ':method': 'POST', - ':scheme': 'http', - ':authority': `localhost:${port}` - }; - - const client = http2.connect(url); - const req = client.request(headers); - - currentError = test; - req.resume(); - req.end(); - - req.on('end', common.mustCall(() => { - client.destroy(); - - if (!tests.length) { - server.close(); - } else { - runTest(tests.shift()); - } - })); -} diff --git a/test/parallel/test-http2-single-headers.js b/test/parallel/test-http2-single-headers.js index bb2f57cba1a939..c545b065015050 100644 --- a/test/parallel/test-http2-single-headers.js +++ b/test/parallel/test-http2-single-headers.js @@ -26,35 +26,26 @@ server.on('stream', common.mustNotCall()); server.listen(0, common.mustCall(() => { const client = http2.connect(`http://localhost:${server.address().port}`); - let remaining = singles.length * 2; - function maybeClose() { - if (--remaining === 0) { - server.close(); - client.destroy(); - } - } - singles.forEach((i) => { - const req = client.request({ - [i]: 'abc', - [i.toUpperCase()]: 'xyz' - }); - req.on('error', common.expectsError({ - code: 'ERR_HTTP2_HEADER_SINGLE_VALUE', - type: Error, - message: `Header field "${i}" must have only a single value` - })); - req.on('error', common.mustCall(maybeClose)); - - const req2 = client.request({ - [i]: ['abc', 'xyz'] - }); - req2.on('error', common.expectsError({ - code: 'ERR_HTTP2_HEADER_SINGLE_VALUE', - type: Error, - message: `Header field "${i}" must have only a single value` - })); - req2.on('error', common.mustCall(maybeClose)); + common.expectsError( + () => client.request({ [i]: 'abc', [i.toUpperCase()]: 'xyz' }), + { + code: 'ERR_HTTP2_HEADER_SINGLE_VALUE', + type: Error, + message: `Header field "${i}" must have only a single value` + } + ); + + common.expectsError( + () => client.request({ [i]: ['abc', 'xyz'] }), + { + code: 'ERR_HTTP2_HEADER_SINGLE_VALUE', + type: Error, + message: `Header field "${i}" must have only a single value` + } + ); }); + server.close(); + client.close(); })); diff --git a/test/parallel/test-http2-socket-proxy.js b/test/parallel/test-http2-socket-proxy.js index 60f31837790d51..17830495addc63 100644 --- a/test/parallel/test-http2-socket-proxy.js +++ b/test/parallel/test-http2-socket-proxy.js @@ -57,7 +57,7 @@ server.on('stream', common.mustCall(function(stream, headers) { assert.strictEqual(socket.writable, 0); assert.strictEqual(socket.readable, 0); - stream.session.destroy(); + stream.end(); socket.setTimeout = undefined; assert.strictEqual(session.setTimeout, undefined); @@ -71,18 +71,11 @@ server.listen(0, common.mustCall(function() { const port = server.address().port; const url = `http://localhost:${port}`; const client = h2.connect(url, common.mustCall(() => { - const headers = { - ':path': '/', - ':method': 'GET', - ':scheme': 'http', - ':authority': `localhost:${port}` - }; - const request = client.request(headers); + const request = client.request(); request.on('end', common.mustCall(() => { - client.destroy(); + client.close(); server.close(); })); - request.end(); request.resume(); })); })); diff --git a/test/parallel/test-http2-status-code-invalid.js b/test/parallel/test-http2-status-code-invalid.js index 3a0d882dea19da..3337aad32d7f70 100644 --- a/test/parallel/test-http2-status-code-invalid.js +++ b/test/parallel/test-http2-status-code-invalid.js @@ -36,6 +36,6 @@ server.listen(0, common.mustCall(() => { req.resume(); req.on('end', common.mustCall(() => { server.close(); - client.destroy(); + client.close(); })); })); diff --git 
a/test/parallel/test-http2-status-code.js b/test/parallel/test-http2-status-code.js index e8f64da368a5f5..d3642b4ff0217f 100644 --- a/test/parallel/test-http2-status-code.js +++ b/test/parallel/test-http2-status-code.js @@ -22,7 +22,7 @@ server.listen(0, common.mustCall(() => { let remaining = codes.length; function maybeClose() { if (--remaining === 0) { - client.destroy(); + client.close(); server.close(); } } diff --git a/test/parallel/test-http2-stream-client.js b/test/parallel/test-http2-stream-client.js index aa722c5ff2b6d9..3e6c6b2a8a1b5e 100644 --- a/test/parallel/test-http2-stream-client.js +++ b/test/parallel/test-http2-stream-client.js @@ -22,7 +22,7 @@ server.listen(0, common.mustCall(() => { const req = client.request(); req.resume(); req.on('close', common.mustCall(() => { - client.destroy(); + client.close(); server.close(); })); })); diff --git a/test/parallel/test-http2-stream-destroy-event-order.js b/test/parallel/test-http2-stream-destroy-event-order.js index 6db511f7d11c59..7d4bcb102f0d0a 100644 --- a/test/parallel/test-http2-stream-destroy-event-order.js +++ b/test/parallel/test-http2-stream-destroy-event-order.js @@ -4,26 +4,27 @@ const common = require('../common'); if (!common.hasCrypto) common.skip('missing crypto'); -const assert = require('assert'); const http2 = require('http2'); let client; let req; const server = http2.createServer(); server.on('stream', common.mustCall((stream) => { - stream.on('error', common.mustCall(() => { - stream.on('close', common.mustCall((code) => { - assert.strictEqual(code, 2); - client.destroy(); + stream.on('close', common.mustCall(() => { + stream.on('error', common.mustCall(() => { server.close(); })); })); - req.rstStream(2); + req.close(2); })); server.listen(0, common.mustCall(() => { client = http2.connect(`http://localhost:${server.address().port}`); req = client.request(); req.resume(); - req.on('error', common.mustCall()); + req.on('close', common.mustCall(() => { + req.on('error', common.mustCall(() => { + client.close(); + })); + })); })); diff --git a/test/parallel/test-http2-timeouts.js b/test/parallel/test-http2-timeouts.js index 88ddfcf54c3fa9..c6a7676a472655 100755 --- a/test/parallel/test-http2-timeouts.js +++ b/test/parallel/test-http2-timeouts.js @@ -51,7 +51,7 @@ server.on('listening', common.mustCall(() => { req.resume(); req.on('end', common.mustCall(() => { server.close(); - client.destroy(); + client.close(); })); req.end(); })); diff --git a/test/parallel/test-http2-too-large-headers.js b/test/parallel/test-http2-too-large-headers.js index f7ac25170b846f..7a7082736160f8 100644 --- a/test/parallel/test-http2-too-large-headers.js +++ b/test/parallel/test-http2-too-large-headers.js @@ -25,7 +25,7 @@ server.listen(0, common.mustCall(() => { req.on('close', common.mustCall((code) => { assert.strictEqual(code, NGHTTP2_ENHANCE_YOUR_CALM); server.close(); - client.destroy(); + client.close(); })); }); diff --git a/test/parallel/test-http2-too-many-headers.js b/test/parallel/test-http2-too-many-headers.js index eff0fa9c351c32..f05511cff657e0 100644 --- a/test/parallel/test-http2-too-many-headers.js +++ b/test/parallel/test-http2-too-many-headers.js @@ -28,7 +28,7 @@ server.listen(0, common.mustCall(() => { req.on('close', common.mustCall((code) => { assert.strictEqual(code, NGHTTP2_ENHANCE_YOUR_CALM); server.close(); - client.destroy(); + client.close(); })); })); diff --git a/test/parallel/test-http2-too-many-settings.js b/test/parallel/test-http2-too-many-settings.js index 4feda98c05d522..0302fe623da07c 100644 
--- a/test/parallel/test-http2-too-many-settings.js +++ b/test/parallel/test-http2-too-many-settings.js @@ -1,7 +1,7 @@ 'use strict'; // Tests that attempting to send too many non-acknowledged -// settings frames will result in a throw. +// settings frames will result in an error const common = require('../common'); if (!common.hasCrypto) @@ -9,53 +9,41 @@ if (!common.hasCrypto) const assert = require('assert'); const h2 = require('http2'); -const maxPendingAck = 2; -const server = h2.createServer({ maxPendingAck: maxPendingAck + 1 }); - -let clients = 2; +const maxOutstandingSettings = 2; function doTest(session) { - for (let n = 0; n < maxPendingAck; n++) - assert.doesNotThrow(() => session.settings({ enablePush: false })); - assert.throws(() => session.settings({ enablePush: false }), - common.expectsError({ - code: 'ERR_HTTP2_MAX_PENDING_SETTINGS_ACK', - type: Error - })); + session.on('error', common.expectsError({ + code: 'ERR_HTTP2_MAX_PENDING_SETTINGS_ACK', + type: Error + })); + for (let n = 0; n < maxOutstandingSettings; n++) { + session.settings({ enablePush: false }); + assert.strictEqual(session.pendingSettingsAck, true); + } } -server.on('stream', common.mustNotCall()); - -server.once('session', common.mustCall((session) => doTest(session))); - -server.listen(0); - -const closeServer = common.mustCall(() => { - if (--clients === 0) - server.close(); -}, clients); - -server.on('listening', common.mustCall(() => { - const client = h2.connect(`http://localhost:${server.address().port}`, - { maxPendingAck: maxPendingAck + 1 }); - let remaining = maxPendingAck + 1; - - client.on('close', closeServer); - client.on('localSettings', common.mustCall(() => { - if (--remaining <= 0) { - client.destroy(); - } - }, maxPendingAck + 1)); - client.on('connect', common.mustCall(() => doTest(client))); -})); +{ + const server = h2.createServer({ maxOutstandingSettings }); + server.on('stream', common.mustNotCall()); + server.once('session', common.mustCall((session) => doTest(session))); + + server.listen(0, common.mustCall(() => { + const client = h2.connect(`http://localhost:${server.address().port}`); + // On some operating systems, an ECONNRESET error may be emitted. + // On others it won't be. 
Do not make this a mustCall + client.on('error', () => {}); + client.on('close', common.mustCall(() => server.close())); + })); +} -// Setting maxPendingAck to 0, defaults it to 1 -server.on('listening', common.mustCall(() => { - const client = h2.connect(`http://localhost:${server.address().port}`, - { maxPendingAck: 0 }); +{ + const server = h2.createServer(); + server.on('stream', common.mustNotCall()); - client.on('close', closeServer); - client.on('localSettings', common.mustCall(() => { - client.destroy(); + server.listen(0, common.mustCall(() => { + const client = h2.connect(`http://localhost:${server.address().port}`, + { maxOutstandingSettings }); + client.on('connect', () => doTest(client)); + client.on('close', () => server.close()); })); -})); +} diff --git a/test/parallel/test-http2-too-many-streams.js b/test/parallel/test-http2-too-many-streams.js new file mode 100644 index 00000000000000..a4a67befa0f50a --- /dev/null +++ b/test/parallel/test-http2-too-many-streams.js @@ -0,0 +1,60 @@ +'use strict'; + +const common = require('../common'); +if (!common.hasCrypto) + common.skip('missing crypto'); + +const Countdown = require('../common/countdown'); +const http2 = require('http2'); +const assert = require('assert'); + +// Test that the maxConcurrentStreams setting is strictly enforced + +const server = http2.createServer({ settings: { maxConcurrentStreams: 1 } }); + +let c = 0; + +server.on('stream', common.mustCall((stream) => { + // Because we only allow one open stream at a time, + // c should never be greater than 1. + assert.strictEqual(++c, 1); + stream.respond(); + // Force some asynchronos stuff. + setImmediate(() => { + stream.end('ok'); + assert.strictEqual(--c, 0); + }); +}, 3)); + +server.listen(0, common.mustCall(() => { + const client = http2.connect(`http://localhost:${server.address().port}`); + + const countdown = new Countdown(3, common.mustCall(() => { + server.close(); + client.destroy(); + })); + + client.on('remoteSettings', common.mustCall(() => { + assert.strictEqual(client.remoteSettings.maxConcurrentStreams, 1); + + { + const req = client.request(); + req.resume(); + req.on('close', () => { + countdown.dec(); + + setImmediate(() => { + const req = client.request(); + req.resume(); + req.on('close', () => countdown.dec()); + }); + }); + } + + { + const req = client.request(); + req.resume(); + req.on('close', () => countdown.dec()); + } + })); +})); diff --git a/test/parallel/test-http2-trailers.js b/test/parallel/test-http2-trailers.js index 5e0db6a30c3db0..1ca5bdf70d05b0 100644 --- a/test/parallel/test-http2-trailers.js +++ b/test/parallel/test-http2-trailers.js @@ -45,7 +45,7 @@ server.on('listening', common.mustCall(function() { })); req.on('end', common.mustCall(() => { server.close(); - client.destroy(); + client.close(); })); req.end('data'); diff --git a/test/parallel/test-http2-util-headers-list.js b/test/parallel/test-http2-util-headers-list.js index 0bbe1972d07981..0ff6b558d9a51b 100644 --- a/test/parallel/test-http2-util-headers-list.js +++ b/test/parallel/test-http2-util-headers-list.js @@ -5,6 +5,8 @@ // to pass to the internal binding layer. 
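
// A condensed sketch of the behaviour the test-http2-too-many-settings rewrite
// above exercises: with the new `maxOutstandingSettings` option, exceeding the
// limit of unacknowledged SETTINGS frames is reported via an 'error' event with
// code ERR_HTTP2_MAX_PENDING_SETTINGS_ACK, rather than a synchronous throw from
// settings(). This mirrors the test's client-side block; the server, port and
// cleanup here are illustrative only.
const http2 = require('http2');

const maxOutstandingSettings = 2;            // same limit the test uses
const server = http2.createServer();
server.listen(0, () => {
  const client = http2.connect(`http://localhost:${server.address().port}`,
                               { maxOutstandingSettings });
  client.on('error', (err) => {
    // What the rewritten test asserts: the limit surfaces as an 'error' event.
    console.log(err.code);                   // 'ERR_HTTP2_MAX_PENDING_SETTINGS_ACK'
  });
  client.on('close', () => server.close());
  client.on('connect', () => {
    // Queue SETTINGS frames faster than the peer can acknowledge them.
    for (let n = 0; n < maxOutstandingSettings; n++)
      client.settings({ enablePush: false });
  });
});
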
const common = require('../common'); +if (!common.hasCrypto) + common.skip('missing crypto'); const assert = require('assert'); const { mapToHeaders } = require('internal/http2/util'); diff --git a/test/parallel/test-http2-util-update-options-buffer.js b/test/parallel/test-http2-util-update-options-buffer.js index 4388d55682a54b..6ab8bcff02866e 100644 --- a/test/parallel/test-http2-util-update-options-buffer.js +++ b/test/parallel/test-http2-util-update-options-buffer.js @@ -1,7 +1,9 @@ // Flags: --expose-internals 'use strict'; -require('../common'); +const common = require('../common'); +if (!common.hasCrypto) + common.skip('missing crypto'); // Test coverage for the updateOptionsBuffer method used internally // by the http2 implementation. @@ -17,7 +19,9 @@ const IDX_OPTIONS_PEER_MAX_CONCURRENT_STREAMS = 3; const IDX_OPTIONS_PADDING_STRATEGY = 4; const IDX_OPTIONS_MAX_HEADER_LIST_PAIRS = 5; const IDX_OPTIONS_MAX_OUTSTANDING_PINGS = 6; -const IDX_OPTIONS_FLAGS = 7; +const IDX_OPTIONS_MAX_OUTSTANDING_SETTINGS = 7; +const IDX_OPTIONS_MAX_SESSION_MEMORY = 8; +const IDX_OPTIONS_FLAGS = 9; { updateOptionsBuffer({ @@ -27,7 +31,9 @@ const IDX_OPTIONS_FLAGS = 7; peerMaxConcurrentStreams: 4, paddingStrategy: 5, maxHeaderListPairs: 6, - maxOutstandingPings: 7 + maxOutstandingPings: 7, + maxOutstandingSettings: 8, + maxSessionMemory: 9 }); strictEqual(optionsBuffer[IDX_OPTIONS_MAX_DEFLATE_DYNAMIC_TABLE_SIZE], 1); @@ -37,6 +43,8 @@ const IDX_OPTIONS_FLAGS = 7; strictEqual(optionsBuffer[IDX_OPTIONS_PADDING_STRATEGY], 5); strictEqual(optionsBuffer[IDX_OPTIONS_MAX_HEADER_LIST_PAIRS], 6); strictEqual(optionsBuffer[IDX_OPTIONS_MAX_OUTSTANDING_PINGS], 7); + strictEqual(optionsBuffer[IDX_OPTIONS_MAX_OUTSTANDING_SETTINGS], 8); + strictEqual(optionsBuffer[IDX_OPTIONS_MAX_SESSION_MEMORY], 9); const flags = optionsBuffer[IDX_OPTIONS_FLAGS]; @@ -47,6 +55,7 @@ const IDX_OPTIONS_FLAGS = 7; ok(flags & (1 << IDX_OPTIONS_PADDING_STRATEGY)); ok(flags & (1 << IDX_OPTIONS_MAX_HEADER_LIST_PAIRS)); ok(flags & (1 << IDX_OPTIONS_MAX_OUTSTANDING_PINGS)); + ok(flags & (1 << IDX_OPTIONS_MAX_OUTSTANDING_SETTINGS)); } { diff --git a/test/parallel/test-http2-window-size.js b/test/parallel/test-http2-window-size.js index 381416c0d23cc6..3d1c14de847e48 100644 --- a/test/parallel/test-http2-window-size.js +++ b/test/parallel/test-http2-window-size.js @@ -67,7 +67,7 @@ function run(buffers, initialWindowSize) { const actualBuffer = Buffer.concat(responses); assert.strictEqual(Buffer.compare(actualBuffer, expectedBuffer), 0); // shut down - client.destroy(); + client.close(); server.close(() => { resolve(); }); diff --git a/test/parallel/test-http2-write-callbacks.js b/test/parallel/test-http2-write-callbacks.js index 44e33573a680b6..eca7f00ea7e292 100644 --- a/test/parallel/test-http2-write-callbacks.js +++ b/test/parallel/test-http2-write-callbacks.js @@ -31,7 +31,7 @@ server.listen(0, common.mustCall(() => { req.on('data', (chunk) => actual += chunk); req.on('end', common.mustCall(() => assert.strictEqual(actual, 'abcxyz'))); req.on('close', common.mustCall(() => { - client.destroy(); + client.close(); server.close(); })); })); diff --git a/test/parallel/test-http2-write-empty-string.js b/test/parallel/test-http2-write-empty-string.js index fea261917187da..6e6ce5254ddcfc 100644 --- a/test/parallel/test-http2-write-empty-string.js +++ b/test/parallel/test-http2-write-empty-string.js @@ -34,7 +34,7 @@ server.listen(0, common.mustCall(function() { req.on('end', common.mustCall(function() { assert.strictEqual('1\n2\n3\n', res); - 
client.destroy(); + client.close(); })); req.end(); diff --git a/test/parallel/test-http2-write-finishes-after-stream-destroy.js b/test/parallel/test-http2-write-finishes-after-stream-destroy.js new file mode 100644 index 00000000000000..3b2dd4bcd4e548 --- /dev/null +++ b/test/parallel/test-http2-write-finishes-after-stream-destroy.js @@ -0,0 +1,62 @@ +// Flags: --expose-gc +'use strict'; +const common = require('../common'); +if (!common.hasCrypto) + common.skip('missing crypto'); +const assert = require('assert'); +const http2 = require('http2'); +const makeDuplexPair = require('../common/duplexpair'); + +// Make sure the Http2Stream destructor works, since we don't clean the +// stream up like we would otherwise do. +process.on('exit', global.gc); + +{ + const { clientSide, serverSide } = makeDuplexPair(); + + let serverSideHttp2Stream; + let serverSideHttp2StreamDestroyed = false; + const server = http2.createServer(); + server.on('stream', common.mustCall((stream, headers) => { + serverSideHttp2Stream = stream; + stream.respond({ + 'content-type': 'text/html', + ':status': 200 + }); + + const originalWrite = serverSide._write; + serverSide._write = (buf, enc, cb) => { + if (serverSideHttp2StreamDestroyed) { + serverSide.destroy(); + serverSide.write = () => {}; + } else { + setImmediate(() => { + originalWrite.call(serverSide, buf, enc, () => setImmediate(cb)); + }); + } + }; + + // Enough data to fit into a single *session* window, + // not enough data to fit into a single *stream* window. + stream.write(Buffer.alloc(40000)); + })); + + server.emit('connection', serverSide); + + const client = http2.connect('http://localhost:80', { + createConnection: common.mustCall(() => clientSide) + }); + + const req = client.request({ ':path': '/' }); + + req.on('response', common.mustCall((headers) => { + assert.strictEqual(headers[':status'], 200); + })); + + req.on('data', common.mustCallAtLeast(() => { + if (!serverSideHttp2StreamDestroyed) { + serverSideHttp2Stream.destroy(); + serverSideHttp2StreamDestroyed = true; + } + })); +} diff --git a/test/parallel/test-http2-zero-length-write.js b/test/parallel/test-http2-zero-length-write.js index 899c28bace6f53..0b50715330a1c4 100644 --- a/test/parallel/test-http2-zero-length-write.js +++ b/test/parallel/test-http2-zero-length-write.js @@ -41,11 +41,12 @@ server.listen(0, common.mustCall(() => { let actual = ''; const req = client.request({ ':method': 'POST' }); req.on('response', common.mustCall()); + req.setEncoding('utf8'); req.on('data', (chunk) => actual += chunk); req.on('end', common.mustCall(() => { assert.strictEqual(actual, expect); server.close(); - client.destroy(); + client.close(); })); getSrc().pipe(req); })); diff --git a/test/parallel/test-https-agent-secure-protocol.js b/test/parallel/test-https-agent-secure-protocol.js index 4963b55febe345..82554952e8446b 100644 --- a/test/parallel/test-https-agent-secure-protocol.js +++ b/test/parallel/test-https-agent-secure-protocol.js @@ -42,7 +42,7 @@ server.listen(0, common.mustCall(function() { }, common.mustCall(function(res) { res.resume(); globalAgent.once('free', common.mustCall(function() { - // Verify that two keep-alived connections are created + // Verify that two keep-alive connections are created // due to the different secureProtocol settings: const keys = Object.keys(globalAgent.freeSockets); assert.strictEqual(keys.length, 2); diff --git a/test/parallel/test-https-socket-options.js b/test/parallel/test-https-socket-options.js index 2ba1b883684205..b41054d5aa0824 100644 --- 
a/test/parallel/test-https-socket-options.js +++ b/test/parallel/test-https-socket-options.js @@ -61,7 +61,7 @@ server_http.listen(0, function() { }); // Then try https server (requires functions to be -// mirroed in tls.js's CryptoStream) +// mirrored in tls.js's CryptoStream) const server_https = https.createServer(options, function(req, res) { console.log('got HTTPS request'); diff --git a/test/parallel/test-https-unix-socket-self-signed.js b/test/parallel/test-https-unix-socket-self-signed.js index 6e7cf827472a88..48207a7a22f52a 100644 --- a/test/parallel/test-https-unix-socket-self-signed.js +++ b/test/parallel/test-https-unix-socket-self-signed.js @@ -4,7 +4,8 @@ const common = require('../common'); if (!common.hasCrypto) common.skip('missing crypto'); -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); const fixtures = require('../common/fixtures'); const https = require('https'); diff --git a/test/parallel/test-internal-fs-syncwritestream.js b/test/parallel/test-internal-fs-syncwritestream.js index 166692f4e6236e..c474d21cb43826 100644 --- a/test/parallel/test-internal-fs-syncwritestream.js +++ b/test/parallel/test-internal-fs-syncwritestream.js @@ -7,9 +7,10 @@ const fs = require('fs'); const path = require('path'); const SyncWriteStream = require('internal/fs').SyncWriteStream; -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); -const filename = path.join(common.tmpDir, 'sync-write-stream.txt'); +const filename = path.join(tmpdir.path, 'sync-write-stream.txt'); // Verify constructing the instance with default options. { @@ -31,7 +32,7 @@ const filename = path.join(common.tmpDir, 'sync-write-stream.txt'); assert.strictEqual(stream.listenerCount('end'), 1); } -// Verfiy that the file will be written synchronously. +// Verify that the file will be written synchronously. { const fd = fs.openSync(filename, 'w'); const stream = new SyncWriteStream(fd); @@ -54,7 +55,7 @@ const filename = path.join(common.tmpDir, 'sync-write-stream.txt'); assert.strictEqual(stream.destroySoon(), true); } -// Verfit that the 'end' event listener will also destroy the stream. +// Verify that the 'end' event listener will also destroy the stream. { const fd = fs.openSync(filename, 'w'); const stream = new SyncWriteStream(fd); diff --git a/test/parallel/test-module-circular-symlinks.js b/test/parallel/test-module-circular-symlinks.js index b5e04a9c622da8..e8d80640df0b17 100644 --- a/test/parallel/test-module-circular-symlinks.js +++ b/test/parallel/test-module-circular-symlinks.js @@ -29,8 +29,9 @@ const fs = require('fs'); // └── node_modules // └── moduleA -> {tmpDir}/node_modules/moduleA -common.refreshTmpDir(); -const tmpDir = common.tmpDir; +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); +const tmpDir = tmpdir.path; const node_modules = path.join(tmpDir, 'node_modules'); const moduleA = path.join(node_modules, 'moduleA'); diff --git a/test/parallel/test-module-loading-globalpaths.js b/test/parallel/test-module-loading-globalpaths.js index cd3144f8cd41a4..e3c36cb21c202e 100644 --- a/test/parallel/test-module-loading-globalpaths.js +++ b/test/parallel/test-module-loading-globalpaths.js @@ -10,10 +10,11 @@ const pkgName = 'foo'; if (process.argv[2] === 'child') { console.log(require(pkgName).string); } else { - common.refreshTmpDir(); + const tmpdir = require('../common/tmpdir'); + tmpdir.refresh(); // Copy node binary into a test $PREFIX directory. 
- const prefixPath = path.join(common.tmpDir, 'install'); + const prefixPath = path.join(tmpdir.path, 'install'); fs.mkdirSync(prefixPath); let testExecPath; if (common.isWindows) { @@ -43,7 +44,7 @@ if (process.argv[2] === 'child') { delete env['NODE_PATH']; // Test empty global path. - const noPkgHomeDir = path.join(common.tmpDir, 'home-no-pkg'); + const noPkgHomeDir = path.join(tmpdir.path, 'home-no-pkg'); fs.mkdirSync(noPkgHomeDir); env['HOME'] = env['USERPROFILE'] = noPkgHomeDir; assert.throws( diff --git a/test/parallel/test-module-symlinked-peer-modules.js b/test/parallel/test-module-symlinked-peer-modules.js index e3d538c42b0bf6..f93dea720f9a12 100644 --- a/test/parallel/test-module-symlinked-peer-modules.js +++ b/test/parallel/test-module-symlinked-peer-modules.js @@ -13,9 +13,10 @@ const fs = require('fs'); const path = require('path'); const assert = require('assert'); -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); -const tmpDir = common.tmpDir; +const tmpDir = tmpdir.path; // Creates the following structure // {tmpDir} diff --git a/test/parallel/test-net-better-error-messages-port-hostname.js b/test/parallel/test-net-better-error-messages-port-hostname.js index 818ea4bfff41f6..1a8aa770b44a22 100644 --- a/test/parallel/test-net-better-error-messages-port-hostname.js +++ b/test/parallel/test-net-better-error-messages-port-hostname.js @@ -1,21 +1,30 @@ 'use strict'; + +// This tests that the error thrown from net.createConnection +// comes with host and port properties. +// See https://github.com/nodejs/node-v0.x-archive/issues/7005 + const common = require('../common'); const net = require('net'); const assert = require('assert'); +const { addresses } = require('../common/internet'); +const { + errorLookupMock, + mockedErrorCode +} = require('../common/dns'); + // Using port 0 as hostname used is already invalid. -const c = net.createConnection(0, 'this.hostname.is.invalid'); +const c = net.createConnection({ + port: 0, + host: addresses.INVALID_HOST, + lookup: common.mustCall(errorLookupMock()) +}); c.on('connect', common.mustNotCall()); c.on('error', common.mustCall(function(e) { - // If Name Service Switch is available on the operating system then it - // might be configured differently (/etc/nsswitch.conf). - // If the system is configured with no dns the error code will be EAI_AGAIN, - // but if there are more services after the dns entry, for example some - // linux distributions ship a myhostname service by default which would - // still produce the ENOTFOUND error. - assert.ok(e.code === 'ENOTFOUND' || e.code === 'EAI_AGAIN'); + assert.strictEqual(e.code, mockedErrorCode); assert.strictEqual(e.port, 0); - assert.strictEqual(e.hostname, 'this.hostname.is.invalid'); + assert.strictEqual(e.hostname, addresses.INVALID_HOST); })); diff --git a/test/parallel/test-net-connect-immediate-finish.js b/test/parallel/test-net-connect-immediate-finish.js index e2e5e1c6715b9a..27d988ab5af45f 100644 --- a/test/parallel/test-net-connect-immediate-finish.js +++ b/test/parallel/test-net-connect-immediate-finish.js @@ -20,28 +20,35 @@ // USE OR OTHER DEALINGS IN THE SOFTWARE. 'use strict'; + +// This tests that if the socket is still in the 'connecting' state +// when the user calls socket.end() ('finish'), the socket would emit +// 'connect' and defer the handling until the 'connect' event is handled. 
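
// The net DNS-error tests in the surrounding hunks replace a real invalid
// hostname with a mocked lookup, so the observed error code no longer depends
// on the machine's resolver configuration (nsswitch, myhostname, EAI_AGAIN vs
// ENOTFOUND). A minimal standalone sketch of the same idea -- the mock
// function, host name and error code below are illustrative stand-ins for the
// ../common/dns and ../common/internet helpers, not the helpers themselves:
const net = require('net');

// Any dns.lookup()-compatible function can be supplied via options.lookup.
function failingLookup(hostname, options, callback) {
  const err = new Error(`getaddrinfo ENOTFOUND ${hostname}`);
  err.code = 'ENOTFOUND';
  err.syscall = 'getaddrinfo';
  err.hostname = hostname;
  process.nextTick(callback, err);
}

const socket = net.connect({
  host: 'something.invalid',
  port: 80,
  lookup: failingLookup
});
socket.on('error', (err) => {
  // Deterministic regardless of the local DNS setup.
  console.log(err.code, err.port, err.hostname);
});
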
+ const common = require('../common'); const assert = require('assert'); const net = require('net'); +const { addresses } = require('../common/internet'); +const { + errorLookupMock, + mockedErrorCode, + mockedSysCall +} = require('../common/dns'); + const client = net.connect({ - host: 'this.hostname.is.invalid', - port: common.PORT -}); + host: addresses.INVALID_HOST, + port: 80, // port number doesn't matter because host name is invalid + lookup: common.mustCall(errorLookupMock()) +}, common.mustNotCall()); client.once('error', common.mustCall((err) => { assert(err); assert.strictEqual(err.code, err.errno); - // If Name Service Switch is available on the operating system then it - // might be configured differently (/etc/nsswitch.conf). - // If the system is configured with no dns the error code will be EAI_AGAIN, - // but if there are more services after the dns entry, for example some - // linux distributions ship a myhostname service by default which would - // still produce the ENOTFOUND error. - assert.ok(err.code === 'ENOTFOUND' || err.code === 'EAI_AGAIN'); + assert.strictEqual(err.code, mockedErrorCode); assert.strictEqual(err.host, err.hostname); - assert.strictEqual(err.host, 'this.hostname.is.invalid'); - assert.strictEqual(err.syscall, 'getaddrinfo'); + assert.strictEqual(err.host, addresses.INVALID_HOST); + assert.strictEqual(err.syscall, mockedSysCall); })); client.end(); diff --git a/test/parallel/test-net-connect-options-fd.js b/test/parallel/test-net-connect-options-fd.js index 50c2a08efeb194..76a5e30755b15c 100644 --- a/test/parallel/test-net-connect-options-fd.js +++ b/test/parallel/test-net-connect-options-fd.js @@ -8,7 +8,8 @@ const net = require('net'); const path = require('path'); const { Pipe, constants: PipeConstants } = process.binding('pipe_wrap'); -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); function testClients(getSocketOpt, getConnectOpt, getConnectCb) { const cloneOptions = (index) => diff --git a/test/parallel/test-net-connect-options-path.js b/test/parallel/test-net-connect-options-path.js index 3868b85a78a6d2..9a2737c371bbf5 100644 --- a/test/parallel/test-net-connect-options-path.js +++ b/test/parallel/test-net-connect-options-path.js @@ -5,7 +5,8 @@ const net = require('net'); // This file tests the option handling of net.connect, // net.createConnect, and new Socket().connect -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); const CLIENT_VARIANTS = 12; diff --git a/test/parallel/test-net-dns-error.js b/test/parallel/test-net-dns-error.js index beebcd8cb9cf44..a5ae415592fed4 100644 --- a/test/parallel/test-net-dns-error.js +++ b/test/parallel/test-net-dns-error.js @@ -27,17 +27,21 @@ const net = require('net'); const host = '*'.repeat(256); +let errCode = 'ENOTFOUND'; +if (common.isOpenBSD) + errCode = 'EAI_FAIL'; + function do_not_call() { throw new Error('This function should not have been called.'); } const socket = net.connect(42, host, do_not_call); socket.on('error', common.mustCall(function(err) { - assert.strictEqual(err.code, 'ENOTFOUND'); + assert.strictEqual(err.code, errCode); })); socket.on('lookup', function(err, ip, type) { assert(err instanceof Error); - assert.strictEqual(err.code, 'ENOTFOUND'); + assert.strictEqual(err.code, errCode); assert.strictEqual(ip, undefined); assert.strictEqual(type, undefined); }); diff --git a/test/parallel/test-net-listen-error.js b/test/parallel/test-net-listen-error.js index 26a74a72c38b85..05ca799d3e7351 100644 --- 
a/test/parallel/test-net-listen-error.js +++ b/test/parallel/test-net-listen-error.js @@ -25,5 +25,5 @@ const net = require('net'); const server = net.createServer(function(socket) { }); -server.listen(1, '1.1.1.1', common.mustNotCall()); // EACCESS or EADDRNOTAVAIL +server.listen(1, '1.1.1.1', common.mustNotCall()); // EACCES or EADDRNOTAVAIL server.on('error', common.mustCall()); diff --git a/test/parallel/test-net-pingpong.js b/test/parallel/test-net-pingpong.js index c83cfaf94349df..9fc59db4e2ff2f 100644 --- a/test/parallel/test-net-pingpong.js +++ b/test/parallel/test-net-pingpong.js @@ -128,7 +128,8 @@ function pingPongTest(port, host) { } /* All are run at once, so run on different ports */ -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); pingPongTest(common.PIPE); pingPongTest(0); pingPongTest(0, 'localhost'); diff --git a/test/parallel/test-net-pipe-connect-errors.js b/test/parallel/test-net-pipe-connect-errors.js index 91c4f7efd07966..8db452669991f0 100644 --- a/test/parallel/test-net-pipe-connect-errors.js +++ b/test/parallel/test-net-pipe-connect-errors.js @@ -36,12 +36,13 @@ if (common.isWindows) { // file instead emptyTxt = fixtures.path('empty.txt'); } else { - common.refreshTmpDir(); - // Keep the file name very short so tht we don't exceed the 108 char limit + const tmpdir = require('../common/tmpdir'); + tmpdir.refresh(); + // Keep the file name very short so that we don't exceed the 108 char limit // on CI for a POSIX socket. Even though this isn't actually a socket file, // the error will be different from the one we are expecting if we exceed the // limit. - emptyTxt = `${common.tmpDir}0.txt`; + emptyTxt = `${tmpdir.path}0.txt`; function cleanup() { try { diff --git a/test/parallel/test-net-server-listen-handle.js b/test/parallel/test-net-server-listen-handle.js index 2b56817d2c61d7..532674bc7a2819 100644 --- a/test/parallel/test-net-server-listen-handle.js +++ b/test/parallel/test-net-server-listen-handle.js @@ -8,7 +8,8 @@ const uv = process.binding('uv'); const { TCP, constants: TCPConstants } = process.binding('tcp_wrap'); const { Pipe, constants: PipeConstants } = process.binding('pipe_wrap'); -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); function closeServer() { return common.mustCall(function() { diff --git a/test/parallel/test-net-server-listen-path.js b/test/parallel/test-net-server-listen-path.js index 53173fa66d120a..b16b7c7ba81236 100644 --- a/test/parallel/test-net-server-listen-path.js +++ b/test/parallel/test-net-server-listen-path.js @@ -3,7 +3,8 @@ const common = require('../common'); const net = require('net'); -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); function closeServer() { return common.mustCall(function() { diff --git a/test/parallel/test-net-write-after-close.js b/test/parallel/test-net-write-after-close.js index 05669e476c7670..f17273f9417a2a 100644 --- a/test/parallel/test-net-write-after-close.js +++ b/test/parallel/test-net-write-after-close.js @@ -39,7 +39,7 @@ const server = net.createServer(common.mustCall(function(socket) { server.listen(0, function() { const client = net.connect(this.address().port, function() { - // cliend.end() will close both the readable and writable side + // client.end() will close both the readable and writable side // of the duplex because allowHalfOpen defaults to false. // Then 'end' will be emitted when it receives a FIN packet from // the other side. 
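
// Recurring pattern in the hunks above and below: the temporary-directory
// helpers moved from the common module (common.refreshTmpDir(), common.tmpDir)
// into a dedicated ../common/tmpdir module. A condensed sketch of the new
// usage as these tests apply it (the output file name is illustrative):
const path = require('path');
const fs = require('fs');
const tmpdir = require('../common/tmpdir');

tmpdir.refresh();                                  // replaces common.refreshTmpDir()
const file = path.join(tmpdir.path, 'output.txt'); // tmpdir.path replaces common.tmpDir
fs.writeFileSync(file, 'data');
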
diff --git a/test/parallel/test-npm-install.js b/test/parallel/test-npm-install.js index d826eb09ed4756..dc9f60b799e0f6 100644 --- a/test/parallel/test-npm-install.js +++ b/test/parallel/test-npm-install.js @@ -9,10 +9,11 @@ const assert = require('assert'); const fs = require('fs'); const fixtures = require('../common/fixtures'); -common.refreshTmpDir(); -const npmSandbox = path.join(common.tmpDir, 'npm-sandbox'); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); +const npmSandbox = path.join(tmpdir.path, 'npm-sandbox'); fs.mkdirSync(npmSandbox); -const installDir = path.join(common.tmpDir, 'install-dir'); +const installDir = path.join(tmpdir.path, 'install-dir'); fs.mkdirSync(installDir); const npmPath = path.join( diff --git a/test/parallel/test-os.js b/test/parallel/test-os.js index f8e383e497a154..47d4209c36a20b 100644 --- a/test/parallel/test-os.js +++ b/test/parallel/test-os.js @@ -113,7 +113,7 @@ is.string(arch); assert.ok(arch.length > 0); if (!common.isSunOS) { - // not implemeneted yet + // not implemented yet assert.ok(os.loadavg().length > 0); assert.ok(os.freemem() > 0); assert.ok(os.totalmem() > 0); diff --git a/test/parallel/test-pipe-address.js b/test/parallel/test-pipe-address.js index 10552abee7b60f..3550434932e934 100644 --- a/test/parallel/test-pipe-address.js +++ b/test/parallel/test-pipe-address.js @@ -4,7 +4,8 @@ const assert = require('assert'); const net = require('net'); const server = net.createServer(common.mustNotCall()); -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); server.listen(common.PIPE, common.mustCall(function() { assert.strictEqual(server.address(), common.PIPE); diff --git a/test/parallel/test-pipe-file-to-http.js b/test/parallel/test-pipe-file-to-http.js index 244dcd1a990fbf..cfe289c30caa9d 100644 --- a/test/parallel/test-pipe-file-to-http.js +++ b/test/parallel/test-pipe-file-to-http.js @@ -27,9 +27,10 @@ const http = require('http'); const path = require('path'); const cp = require('child_process'); -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); -const filename = path.join(common.tmpDir || '/tmp', 'big'); +const filename = path.join(tmpdir.path || '/tmp', 'big'); let count = 0; const server = http.createServer(function(req, res) { diff --git a/test/parallel/test-pipe-stream.js b/test/parallel/test-pipe-stream.js index 8fd9d31d499089..c7d9a0a626559e 100644 --- a/test/parallel/test-pipe-stream.js +++ b/test/parallel/test-pipe-stream.js @@ -3,7 +3,8 @@ const common = require('../common'); const assert = require('assert'); const net = require('net'); -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); function test(clazz, cb) { let have_ping = false; diff --git a/test/parallel/test-pipe-unref.js b/test/parallel/test-pipe-unref.js index cfe7a97ca59fd3..1e0245b5444f62 100644 --- a/test/parallel/test-pipe-unref.js +++ b/test/parallel/test-pipe-unref.js @@ -4,7 +4,8 @@ const net = require('net'); // This test should end immediately after `unref` is called -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); const s = net.Server(); s.listen(common.PIPE); diff --git a/test/parallel/test-pipe-writev.js b/test/parallel/test-pipe-writev.js index db95a4b181849f..5e5b42e6a78d88 100644 --- a/test/parallel/test-pipe-writev.js +++ b/test/parallel/test-pipe-writev.js @@ -7,7 +7,8 @@ if (common.isWindows) const assert = require('assert'); const net = require('net'); -common.refreshTmpDir(); +const 
tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); const server = net.createServer((connection) => { connection.on('error', (err) => { diff --git a/test/parallel/test-process-chdir.js b/test/parallel/test-process-chdir.js index 61707706a322bc..c0a245ffd3483b 100644 --- a/test/parallel/test-process-chdir.js +++ b/test/parallel/test-process-chdir.js @@ -1,10 +1,12 @@ 'use strict'; -const common = require('../common'); +require('../common'); const assert = require('assert'); const fs = require('fs'); const path = require('path'); +const tmpdir = require('../common/tmpdir'); + process.chdir('..'); assert.notStrictEqual(process.cwd(), __dirname); process.chdir(__dirname); @@ -18,10 +20,10 @@ if (process.versions.icu) { // ICU is unavailable, use characters that can't be decomposed dirName = 'weird \ud83d\udc04 characters \ud83d\udc05'; } -const dir = path.resolve(common.tmpDir, dirName); +const dir = path.resolve(tmpdir.path, dirName); // Make sure that the tmp directory is clean -common.refreshTmpDir(); +tmpdir.refresh(); fs.mkdirSync(dir); process.chdir(dir); @@ -29,7 +31,7 @@ assert.strictEqual(process.cwd().normalize(), dir.normalize()); process.chdir('..'); assert.strictEqual(process.cwd().normalize(), - path.resolve(common.tmpDir).normalize()); + path.resolve(tmpdir.path).normalize()); const errMessage = /^TypeError: Bad argument\.$/; assert.throws(function() { process.chdir({}); }, diff --git a/test/parallel/test-process-execpath.js b/test/parallel/test-process-execpath.js index d70d1dfd389875..68aef90b303449 100644 --- a/test/parallel/test-process-execpath.js +++ b/test/parallel/test-process-execpath.js @@ -14,9 +14,10 @@ if (process.argv[2] === 'child') { // The console.log() output is part of the test here. console.log(process.execPath); } else { - common.refreshTmpDir(); + const tmpdir = require('../common/tmpdir'); + tmpdir.refresh(); - const symlinkedNode = path.join(common.tmpDir, 'symlinked-node'); + const symlinkedNode = path.join(tmpdir.path, 'symlinked-node'); fs.symlinkSync(process.execPath, symlinkedNode); const proc = child_process.spawnSync(symlinkedNode, [__filename, 'child']); diff --git a/test/parallel/test-process-redirect-warnings-env.js b/test/parallel/test-process-redirect-warnings-env.js index 59e236ab89f8e3..5031152a48baf0 100644 --- a/test/parallel/test-process-redirect-warnings-env.js +++ b/test/parallel/test-process-redirect-warnings-env.js @@ -12,10 +12,11 @@ const fork = require('child_process').fork; const path = require('path'); const assert = require('assert'); -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); const warnmod = require.resolve(fixtures.path('warnings.js')); -const warnpath = path.join(common.tmpDir, 'warnings.txt'); +const warnpath = path.join(tmpdir.path, 'warnings.txt'); fork(warnmod, { env: Object.assign({}, process.env, { NODE_REDIRECT_WARNINGS: warnpath }) }) diff --git a/test/parallel/test-process-redirect-warnings.js b/test/parallel/test-process-redirect-warnings.js index 76f376240ba9b9..b4f55fa8345409 100644 --- a/test/parallel/test-process-redirect-warnings.js +++ b/test/parallel/test-process-redirect-warnings.js @@ -12,10 +12,11 @@ const fork = require('child_process').fork; const path = require('path'); const assert = require('assert'); -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); const warnmod = fixtures.path('warnings.js'); -const warnpath = path.join(common.tmpDir, 'warnings.txt'); +const warnpath = path.join(tmpdir.path, 'warnings.txt'); 
fork(warnmod, { execArgv: [`--redirect-warnings=${warnpath}`] }) .on('exit', common.mustCall(() => { diff --git a/test/parallel/test-process-versions.js b/test/parallel/test-process-versions.js index 65634ece6ce8e7..29325d531cff27 100644 --- a/test/parallel/test-process-versions.js +++ b/test/parallel/test-process-versions.js @@ -3,7 +3,7 @@ const common = require('../common'); const assert = require('assert'); const expected_keys = ['ares', 'http_parser', 'modules', 'node', - 'uv', 'v8', 'zlib', 'nghttp2']; + 'uv', 'v8', 'zlib', 'nghttp2', 'napi']; if (common.hasCrypto) { expected_keys.push('openssl'); diff --git a/test/parallel/test-promises-unhandled-rejections.js b/test/parallel/test-promises-unhandled-rejections.js index 0d49ee796b7f70..099d762b671fed 100644 --- a/test/parallel/test-promises-unhandled-rejections.js +++ b/test/parallel/test-promises-unhandled-rejections.js @@ -293,7 +293,7 @@ asyncTest('While inside setImmediate, catching a rejected promise derived ' + }); }); -// State adapation tests +// State adaptation tests asyncTest('catching a promise which is asynchronously rejected (via ' + 'resolution to an asynchronously-rejected promise) prevents' + ' unhandledRejection', function(done) { diff --git a/test/parallel/test-readline-interface.js b/test/parallel/test-readline-interface.js index 03ea05dd2a44a5..e58b7917bcb563 100644 --- a/test/parallel/test-readline-interface.js +++ b/test/parallel/test-readline-interface.js @@ -812,7 +812,7 @@ function isWarned(emitter) { assert.strictEqual(isWarned(process.stdout._events), false); } - // can create a new readline Interface with a null output arugument + // can create a new readline Interface with a null output argument { const fi = new FakeInput(); const rli = new readline.Interface( @@ -873,3 +873,80 @@ function isWarned(emitter) { assert.strictEqual(rl._prompt, '$ '); } }); + +// For the purposes of the following tests, we do not care about the exact +// value of crlfDelay, only that the behaviour conforms to what's expected. +// Setting it to Infinity allows the test to succeed even under extreme +// CPU stress. +const crlfDelay = Infinity; + +[ true, false ].forEach(function(terminal) { + // sending multiple newlines at once that does not end with a new line + // and a `end` event(last line is) + + // \r\n should emit one line event, not two + { + const fi = new FakeInput(); + const rli = new readline.Interface( + { + input: fi, + output: fi, + terminal: terminal, + crlfDelay + } + ); + const expectedLines = ['foo', 'bar', 'baz', 'bat']; + let callCount = 0; + rli.on('line', function(line) { + assert.strictEqual(line, expectedLines[callCount]); + callCount++; + }); + fi.emit('data', expectedLines.join('\r\n')); + assert.strictEqual(callCount, expectedLines.length - 1); + rli.close(); + } + + // \r\n should emit one line event when split across multiple writes. + { + const fi = new FakeInput(); + const rli = new readline.Interface({ + input: fi, + output: fi, + terminal: terminal, + crlfDelay + }); + const expectedLines = ['foo', 'bar', 'baz', 'bat']; + let callCount = 0; + rli.on('line', function(line) { + assert.strictEqual(line, expectedLines[callCount]); + callCount++; + }); + expectedLines.forEach(function(line) { + fi.emit('data', `${line}\r`); + fi.emit('data', '\n'); + }); + assert.strictEqual(callCount, expectedLines.length); + rli.close(); + } + + // Emit one line event when the delay between \r and \n is + // over the default crlfDelay but within the setting value. 
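
// A standalone sketch of the crlfDelay behaviour the readline tests in the
// hunk above rely on (the PassThrough input and the 200 ms delay are
// illustrative): a '\n' arriving within crlfDelay milliseconds of a '\r' is
// folded into the same line break, so 'foo\r' followed later by '\n' still
// produces a single 'line' event. With crlfDelay: Infinity the two are always
// folded, which is why the tests can pass even under heavy CPU load.
const readline = require('readline');
const { PassThrough } = require('stream');

const input = new PassThrough();
const rl = readline.createInterface({ input, crlfDelay: Infinity });
rl.on('line', (line) => console.log(`line event: ${JSON.stringify(line)}`));

input.write('foo\r');                 // emits the 'line' event for 'foo'
setTimeout(() => {
  input.write('\n');                  // folded into the previous '\r'; no extra event
  rl.close();
}, 200);
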
+ { + const fi = new FakeInput(); + const delay = 125; + const rli = new readline.Interface({ + input: fi, + output: fi, + terminal: terminal, + crlfDelay + }); + let callCount = 0; + rli.on('line', () => callCount++); + fi.emit('data', '\r'); + setTimeout(common.mustCall(() => { + fi.emit('data', '\n'); + assert.strictEqual(callCount, 1); + rli.close(); + }), delay); + } +}); diff --git a/test/parallel/test-regress-GH-3739.js b/test/parallel/test-regress-GH-3739.js index d41accc2e6ec0a..dbf77ad785cac9 100644 --- a/test/parallel/test-regress-GH-3739.js +++ b/test/parallel/test-regress-GH-3739.js @@ -5,10 +5,12 @@ const assert = require('assert'); const fs = require('fs'); const path = require('path'); -let dir = path.resolve(common.tmpDir); +const tmpdir = require('../common/tmpdir'); + +let dir = path.resolve(tmpdir.path); // Make sure that the tmp directory is clean -common.refreshTmpDir(); +tmpdir.refresh(); // Make a long path. for (let i = 0; i < 50; i++) { diff --git a/test/parallel/test-repl-history-perm.js b/test/parallel/test-repl-history-perm.js index 9f14ece568dc80..b125fa551dc858 100644 --- a/test/parallel/test-repl-history-perm.js +++ b/test/parallel/test-repl-history-perm.js @@ -31,8 +31,9 @@ stream._write = function(c, e, cb) { }; stream.readable = stream.writable = true; -common.refreshTmpDir(); -const replHistoryPath = path.join(common.tmpDir, '.node_repl_history'); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); +const replHistoryPath = path.join(tmpdir.path, '.node_repl_history'); const checkResults = common.mustCall(function(err, r) { assert.ifError(err); diff --git a/test/parallel/test-repl-persistent-history.js b/test/parallel/test-repl-persistent-history.js index 3ba71f1f175413..396203d949f4ba 100644 --- a/test/parallel/test-repl-persistent-history.js +++ b/test/parallel/test-repl-persistent-history.js @@ -11,11 +11,12 @@ const fs = require('fs'); const path = require('path'); const os = require('os'); -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); // Mock os.homedir() os.homedir = function() { - return common.tmpDir; + return tmpdir.path; }; // Create an input stream specialized for testing an array of actions @@ -55,16 +56,16 @@ const CLEAR = { ctrl: true, name: 'u' }; // File paths const historyFixturePath = fixtures.path('.node_repl_history'); -const historyPath = path.join(common.tmpDir, '.fixture_copy_repl_history'); -const historyPathFail = path.join(common.tmpDir, '.node_repl\u0000_history'); +const historyPath = path.join(tmpdir.path, '.fixture_copy_repl_history'); +const historyPathFail = path.join(tmpdir.path, '.node_repl\u0000_history'); const oldHistoryPathObj = fixtures.path('old-repl-history-file-obj.json'); const oldHistoryPathFaulty = fixtures.path('old-repl-history-file-faulty.json'); const oldHistoryPath = fixtures.path('old-repl-history-file.json'); const enoentHistoryPath = fixtures.path('enoent-repl-history-file.json'); const emptyHistoryPath = fixtures.path('.empty-repl-history-file'); -const defaultHistoryPath = path.join(common.tmpDir, '.node_repl_history'); +const defaultHistoryPath = path.join(tmpdir.path, '.node_repl_history'); const emptyHiddenHistoryPath = fixtures.path('.empty-hidden-repl-history-file'); -const devNullHistoryPath = path.join(common.tmpDir, +const devNullHistoryPath = path.join(tmpdir.path, '.dev-null-repl-history-file'); // Common message bits const prompt = '> '; diff --git a/test/parallel/test-repl-save-load.js b/test/parallel/test-repl-save-load.js index 
2be272473b652a..3778ffac3ec379 100644 --- a/test/parallel/test-repl-save-load.js +++ b/test/parallel/test-repl-save-load.js @@ -25,7 +25,8 @@ const assert = require('assert'); const join = require('path').join; const fs = require('fs'); -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); const repl = require('repl'); @@ -39,7 +40,7 @@ const testFile = [ 'var top = function() {', 'var inner = {one:1};' ]; -const saveFileName = join(common.tmpDir, 'test.save.js'); +const saveFileName = join(tmpdir.path, 'test.save.js'); // input some data putIn.run(testFile); @@ -91,7 +92,7 @@ testMe.complete('inner.o', function(error, data) { // clear the REPL putIn.run(['.clear']); -let loadFile = join(common.tmpDir, 'file.does.not.exist'); +let loadFile = join(tmpdir.path, 'file.does.not.exist'); // should not break putIn.write = function(data) { @@ -103,7 +104,7 @@ putIn.write = function(data) { putIn.run([`.load ${loadFile}`]); // throw error on loading directory -loadFile = common.tmpDir; +loadFile = tmpdir.path; putIn.write = function(data) { assert.strictEqual(data, `Failed to load:${loadFile} is not a valid file\n`); putIn.write = () => {}; @@ -115,7 +116,7 @@ putIn.run(['.clear']); // NUL (\0) is disallowed in filenames in UNIX-like operating systems and // Windows so we can use that to test failed saves -const invalidFileName = join(common.tmpDir, '\0\0\0\0\0'); +const invalidFileName = join(tmpdir.path, '\0\0\0\0\0'); // should not break putIn.write = function(data) { diff --git a/test/parallel/test-repl.js b/test/parallel/test-repl.js index 6d4f0d8c7a5862..74db2ba0cd6d7f 100644 --- a/test/parallel/test-repl.js +++ b/test/parallel/test-repl.js @@ -22,10 +22,11 @@ 'use strict'; const common = require('../common'); const fixtures = require('../common/fixtures'); +const tmpdir = require('../common/tmpdir'); const assert = require('assert'); common.globalCheck = false; -common.refreshTmpDir(); +tmpdir.refresh(); const net = require('net'); const repl = require('repl'); @@ -304,7 +305,7 @@ function error_test() { { client: client_unix, send: '/(.)(.)(.)(.)(.)(.)(.)(.)(.)/.test(\'123456789\')\n', expect: `true\n${prompt_unix}` }, - // the following test's result depends on the RegEx's match from the above + // the following test's result depends on the RegExp's match from the above { client: client_unix, send: 'RegExp.$1\nRegExp.$2\nRegExp.$3\nRegExp.$4\nRegExp.$5\n' + 'RegExp.$6\nRegExp.$7\nRegExp.$8\nRegExp.$9\n', diff --git a/test/parallel/test-require-long-path.js b/test/parallel/test-require-long-path.js index aaaf07d48ae897..548a0b5425df39 100644 --- a/test/parallel/test-require-long-path.js +++ b/test/parallel/test-require-long-path.js @@ -6,15 +6,17 @@ if (!common.isWindows) const fs = require('fs'); const path = require('path'); +const tmpdir = require('../common/tmpdir'); + // make a path that is more than 260 chars long. 
-const dirNameLen = Math.max(260 - common.tmpDir.length, 1); -const dirName = path.join(common.tmpDir, 'x'.repeat(dirNameLen)); +const dirNameLen = Math.max(260 - tmpdir.path.length, 1); +const dirName = path.join(tmpdir.path, 'x'.repeat(dirNameLen)); const fullDirPath = path.resolve(dirName); const indexFile = path.join(fullDirPath, 'index.js'); const otherFile = path.join(fullDirPath, 'other.js'); -common.refreshTmpDir(); +tmpdir.refresh(); fs.mkdirSync(fullDirPath); fs.writeFileSync(indexFile, 'require("./other");'); @@ -23,4 +25,4 @@ fs.writeFileSync(otherFile, ''); require(indexFile); require(otherFile); -common.refreshTmpDir(); +tmpdir.refresh(); diff --git a/test/parallel/test-require-symlink.js b/test/parallel/test-require-symlink.js index 7dde2a1a974087..d245c21dd1fdb4 100644 --- a/test/parallel/test-require-symlink.js +++ b/test/parallel/test-require-symlink.js @@ -14,12 +14,13 @@ const process = require('process'); // Setup: Copy fixtures to tmp directory. const fixtures = require('../common/fixtures'); +const tmpdir = require('../common/tmpdir'); const dirName = 'module-require-symlink'; const fixtureSource = fixtures.path(dirName); -const tmpDirTarget = path.join(common.tmpDir, dirName); +const tmpDirTarget = path.join(tmpdir.path, dirName); // Copy fixtureSource to linkTarget recursively. -common.refreshTmpDir(); +tmpdir.refresh(); function copyDir(source, target) { fs.mkdirSync(target); @@ -38,8 +39,9 @@ function copyDir(source, target) { copyDir(fixtureSource, tmpDirTarget); // Move to tmp dir and do everything with relative paths there so that the test -// doesn't incorrectly fail due to a symlink somewhere else in the absolte path. -process.chdir(common.tmpDir); +// doesn't incorrectly fail due to a symlink somewhere else in the absolute +// path. 
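The two hunks above follow the same migration this patch applies throughout the test suite: `common.refreshTmpDir()` and `common.tmpDir` give way to the `test/common/tmpdir` helper. A minimal sketch of the resulting pattern, assuming only the `refresh()` and `path` members these hunks use (the file name below is illustrative):

```js
'use strict';
require('../common');
const fs = require('fs');
const path = require('path');
const tmpdir = require('../common/tmpdir');

// Wipe and recreate the shared temporary directory before touching it.
tmpdir.refresh();

// Build paths against tmpdir.path instead of the old common.tmpDir.
const scratchFile = path.join(tmpdir.path, 'scratch.txt'); // hypothetical name
fs.writeFileSync(scratchFile, 'data');
```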
+process.chdir(tmpdir.path); const linkDir = path.join(dirName, 'node_modules', diff --git a/test/parallel/test-require-unicode.js b/test/parallel/test-require-unicode.js index 93a8787cdfe7c8..530ff3bb56c3d0 100644 --- a/test/parallel/test-require-unicode.js +++ b/test/parallel/test-require-unicode.js @@ -1,13 +1,14 @@ 'use strict'; -const common = require('../common'); +require('../common'); const assert = require('assert'); const fs = require('fs'); const path = require('path'); -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); -const dirname = path.join(common.tmpDir, '\u4e2d\u6587\u76ee\u5f55'); +const dirname = path.join(tmpdir.path, '\u4e2d\u6587\u76ee\u5f55'); fs.mkdirSync(dirname); fs.writeFileSync(path.join(dirname, 'file.js'), 'module.exports = 42;'); fs.writeFileSync(path.join(dirname, 'package.json'), diff --git a/test/parallel/test-setproctitle.js b/test/parallel/test-setproctitle.js index 4bb88c4ba06922..1ab6bff6a30848 100644 --- a/test/parallel/test-setproctitle.js +++ b/test/parallel/test-setproctitle.js @@ -34,7 +34,7 @@ exec(cmd, common.mustCall((error, stdout, stderr) => { assert.strictEqual(stderr, ''); // freebsd always add ' (procname)' to the process title - if (common.isFreeBSD) + if (common.isFreeBSD || common.isOpenBSD) title += ` (${path.basename(process.execPath)})`; // omitting trailing whitespace and \n diff --git a/test/parallel/test-stdin-from-file.js b/test/parallel/test-stdin-from-file.js index 148464e51a8c1a..eda8e068fe37fe 100644 --- a/test/parallel/test-stdin-from-file.js +++ b/test/parallel/test-stdin-from-file.js @@ -1,13 +1,14 @@ 'use strict'; const common = require('../common'); const fixtures = require('../common/fixtures'); +const tmpdir = require('../common/tmpdir'); const assert = require('assert'); const { join } = require('path'); const childProcess = require('child_process'); const fs = require('fs'); const stdoutScript = fixtures.path('echo-close-check.js'); -const tmpFile = join(common.tmpDir, 'stdin.txt'); +const tmpFile = join(tmpdir.path, 'stdin.txt'); const cmd = `"${process.argv[0]}" "${stdoutScript}" < "${tmpFile}"`; @@ -24,7 +25,7 @@ const string = 'abc\nümlaut.\nsomething else\n' + '有效的改善了岭南地区落后的政治、##济现状。\n'; -common.refreshTmpDir(); +tmpdir.refresh(); console.log(`${cmd}\n\n`); diff --git a/test/parallel/test-stdout-to-file.js b/test/parallel/test-stdout-to-file.js index 6869fafa1cfaf9..a02531ca41fbdb 100644 --- a/test/parallel/test-stdout-to-file.js +++ b/test/parallel/test-stdout-to-file.js @@ -5,12 +5,13 @@ const path = require('path'); const childProcess = require('child_process'); const fs = require('fs'); const fixtures = require('../common/fixtures'); +const tmpdir = require('../common/tmpdir'); const scriptString = fixtures.path('print-chars.js'); const scriptBuffer = fixtures.path('print-chars-from-buffer.js'); -const tmpFile = path.join(common.tmpDir, 'stdout.txt'); +const tmpFile = path.join(tmpdir.path, 'stdout.txt'); -common.refreshTmpDir(); +tmpdir.refresh(); function test(size, useBuffer, cb) { const cmd = `"${process.argv[0]}" "${ diff --git a/test/parallel/test-stream-base-prototype-accessors.js b/test/parallel/test-stream-base-prototype-accessors.js deleted file mode 100644 index f9e12582a098d8..00000000000000 --- a/test/parallel/test-stream-base-prototype-accessors.js +++ /dev/null @@ -1,27 +0,0 @@ -'use strict'; - -require('../common'); - -// This tests that the prototype accessors added by StreamBase::AddMethods -// do not raise assersions when called with incompatible 
receivers. - -const assert = require('assert'); - -// Or anything that calls StreamBase::AddMethods when setting up its prototype -const TTY = process.binding('tty_wrap').TTY; - -// Should throw instead of raise assertions -{ - const msg = /TypeError: Method \w+ called on incompatible receiver/; - assert.throws(() => { - TTY.prototype.bytesRead; - }, msg); - - assert.throws(() => { - TTY.prototype.fd; - }, msg); - - assert.throws(() => { - TTY.prototype._externalStream; - }, msg); -} diff --git a/test/parallel/test-stream-buffer-list.js b/test/parallel/test-stream-buffer-list.js index ddbff452de4be9..6ea359b458f61b 100644 --- a/test/parallel/test-stream-buffer-list.js +++ b/test/parallel/test-stream-buffer-list.js @@ -14,14 +14,19 @@ assert.strictEqual(emptyList.join(','), ''); assert.deepStrictEqual(emptyList.concat(0), Buffer.alloc(0)); +const buf = Buffer.from('foo'); + // Test buffer list with one element. const list = new BufferList(); -list.push('foo'); +list.push(buf); + +const copy = list.concat(3); -assert.strictEqual(list.concat(1), 'foo'); +assert.notStrictEqual(copy, buf); +assert.deepStrictEqual(copy, buf); assert.strictEqual(list.join(','), 'foo'); const shifted = list.shift(); -assert.strictEqual(shifted, 'foo'); +assert.strictEqual(shifted, buf); assert.deepStrictEqual(list, new BufferList()); diff --git a/test/parallel/test-stream-pipe-unpipe-streams.js b/test/parallel/test-stream-pipe-unpipe-streams.js index cd29b66365ae0f..49e02bea9cb695 100644 --- a/test/parallel/test-stream-pipe-unpipe-streams.js +++ b/test/parallel/test-stream-pipe-unpipe-streams.js @@ -31,3 +31,57 @@ source.unpipe(dest2); source.unpipe(dest1); assert.strictEqual(source._readableState.pipes, null); + +{ + // test `cleanup()` if we unpipe all streams. + const source = Readable({ read: () => {} }); + const dest1 = Writable({ write: () => {} }); + const dest2 = Writable({ write: () => {} }); + + let destCount = 0; + const srcCheckEventNames = ['end', 'data']; + const destCheckEventNames = ['close', 'finish', 'drain', 'error', 'unpipe']; + + const checkSrcCleanup = common.mustCall(() => { + assert.strictEqual(source._readableState.pipes, null); + assert.strictEqual(source._readableState.pipesCount, 0); + assert.strictEqual(source._readableState.flowing, false); + + srcCheckEventNames.forEach((eventName) => { + assert.strictEqual( + source.listenerCount(eventName), 0, + `source's '${eventName}' event listeners not removed` + ); + }); + }); + + function checkDestCleanup(dest) { + const currentDestId = ++destCount; + source.pipe(dest); + + const unpipeChecker = common.mustCall(() => { + assert.deepStrictEqual( + dest.listeners('unpipe'), [unpipeChecker], + `destination{${currentDestId}} should have a 'unpipe' event ` + + 'listener which is `unpipeChecker`' + ); + dest.removeListener('unpipe', unpipeChecker); + destCheckEventNames.forEach((eventName) => { + assert.strictEqual( + dest.listenerCount(eventName), 0, + `destination{${currentDestId}}'s '${eventName}' event ` + + 'listeners not removed' + ); + }); + + if (--destCount === 0) + checkSrcCleanup(); + }); + + dest.on('unpipe', unpipeChecker); + } + + checkDestCleanup(dest1); + checkDestCleanup(dest2); + source.unpipe(); +} diff --git a/test/parallel/test-stream-transform-final-sync.js b/test/parallel/test-stream-transform-final-sync.js index de3f0904885bb9..7dbd06d60c3625 100644 --- a/test/parallel/test-stream-transform-final-sync.js +++ b/test/parallel/test-stream-transform-final-sync.js @@ -7,7 +7,7 @@ let state = 0; /* What you do -var stream = new 
tream.Transform({ +var stream = new stream.Transform({ transform: function transformCallback(chunk, _, next) { // part 1 this.push(chunk); diff --git a/test/parallel/test-stream-transform-final.js b/test/parallel/test-stream-transform-final.js index 56566152e69165..22128b4d9a9f7b 100644 --- a/test/parallel/test-stream-transform-final.js +++ b/test/parallel/test-stream-transform-final.js @@ -7,7 +7,7 @@ let state = 0; /* What you do -var stream = new tream.Transform({ +var stream = new stream.Transform({ transform: function transformCallback(chunk, _, next) { // part 1 this.push(chunk); diff --git a/test/parallel/test-stream2-transform.js b/test/parallel/test-stream2-transform.js index 819b088e2757f2..769684e3f4f907 100644 --- a/test/parallel/test-stream2-transform.js +++ b/test/parallel/test-stream2-transform.js @@ -175,7 +175,7 @@ const Transform = require('_stream_transform'); } { - // Verify assymetric transform (expand) + // Verify asymmetric transform (expand) const pt = new Transform(); // emit each chunk 2 times. @@ -207,7 +207,7 @@ const Transform = require('_stream_transform'); } { - // Verify assymetric trasform (compress) + // Verify asymmetric transform (compress) const pt = new Transform(); // each output is the first char of 3 consecutive chunks, @@ -262,7 +262,7 @@ const Transform = require('_stream_transform'); // this tests for a stall when data is written to a full stream // that has empty transforms. { - // Verify compex transform behavior + // Verify complex transform behavior let count = 0; let saved = null; const pt = new Transform({ highWaterMark: 3 }); diff --git a/test/parallel/test-stream3-cork-uncork.js b/test/parallel/test-stream3-cork-uncork.js index 2e8e86be1ef058..f8b411c84eded6 100644 --- a/test/parallel/test-stream3-cork-uncork.js +++ b/test/parallel/test-stream3-cork-uncork.js @@ -65,7 +65,7 @@ writeChunks(inputChunks, () => { // trigger writing out the buffer w.uncork(); - // buffered bytes shoud be seen in current tick + // buffered bytes should be seen in current tick assert.strictEqual(seenChunks.length, 4); // did the chunks match diff --git a/test/parallel/test-string-decoder-end.js b/test/parallel/test-string-decoder-end.js index 0284ee9f6c48c7..2762ef096289db 100644 --- a/test/parallel/test-string-decoder-end.js +++ b/test/parallel/test-string-decoder-end.js @@ -39,6 +39,46 @@ for (let i = 1; i <= 16; i++) { encodings.forEach(testEncoding); +testEnd('utf8', Buffer.of(0xE2), Buffer.of(0x61), '\uFFFDa'); +testEnd('utf8', Buffer.of(0xE2), Buffer.of(0x82), '\uFFFD\uFFFD'); +testEnd('utf8', Buffer.of(0xE2), Buffer.of(0xE2), '\uFFFD\uFFFD'); +testEnd('utf8', Buffer.of(0xE2, 0x82), Buffer.of(0x61), '\uFFFDa'); +testEnd('utf8', Buffer.of(0xE2, 0x82), Buffer.of(0xAC), '\uFFFD\uFFFD'); +testEnd('utf8', Buffer.of(0xE2, 0x82), Buffer.of(0xE2), '\uFFFD\uFFFD'); +testEnd('utf8', Buffer.of(0xE2, 0x82, 0xAC), Buffer.of(0x61), '€a'); + +testEnd('utf16le', Buffer.of(0x3D), Buffer.of(0x61, 0x00), 'a'); +testEnd('utf16le', Buffer.of(0x3D), Buffer.of(0xD8, 0x4D, 0xDC), '\u4DD8'); +testEnd('utf16le', Buffer.of(0x3D, 0xD8), Buffer.of(), '\uD83D'); +testEnd('utf16le', Buffer.of(0x3D, 0xD8), Buffer.of(0x61, 0x00), '\uD83Da'); +testEnd( + 'utf16le', + Buffer.of(0x3D, 0xD8), + Buffer.of(0x4D, 0xDC), + '\uD83D\uDC4D' +); +testEnd('utf16le', Buffer.of(0x3D, 0xD8, 0x4D), Buffer.of(), '\uD83D'); +testEnd( + 'utf16le', + Buffer.of(0x3D, 0xD8, 0x4D), + Buffer.of(0x61, 0x00), + '\uD83Da' +); +testEnd('utf16le', Buffer.of(0x3D, 0xD8, 0x4D), Buffer.of(0xDC), '\uD83D'); +testEnd( + 
'utf16le', + Buffer.of(0x3D, 0xD8, 0x4D, 0xDC), + Buffer.of(0x61, 0x00), + '👍a' +); + +testEnd('base64', Buffer.of(0x61), Buffer.of(), 'YQ=='); +testEnd('base64', Buffer.of(0x61), Buffer.of(0x61), 'YQ==YQ=='); +testEnd('base64', Buffer.of(0x61, 0x61), Buffer.of(), 'YWE='); +testEnd('base64', Buffer.of(0x61, 0x61), Buffer.of(0x61), 'YWE=YQ=='); +testEnd('base64', Buffer.of(0x61, 0x61, 0x61), Buffer.of(), 'YWFh'); +testEnd('base64', Buffer.of(0x61, 0x61, 0x61), Buffer.of(0x61), 'YWFhYQ=='); + function testEncoding(encoding) { bufs.forEach((buf) => { testBuf(encoding, buf); @@ -66,3 +106,14 @@ function testBuf(encoding, buf) { assert.strictEqual(res1, res3, 'one byte at a time should match toString'); assert.strictEqual(res2, res3, 'all bytes at once should match toString'); } + +function testEnd(encoding, incomplete, next, expected) { + let res = ''; + const s = new SD(encoding); + res += s.write(incomplete); + res += s.end(); + res += s.write(next); + res += s.end(); + + assert.strictEqual(res, expected); +} diff --git a/test/parallel/test-stringbytes-external.js b/test/parallel/test-stringbytes-external.js index 35d773da26f4d4..2a04159ce9db3c 100644 --- a/test/parallel/test-stringbytes-external.js +++ b/test/parallel/test-stringbytes-external.js @@ -82,7 +82,7 @@ assert.strictEqual(c_bin.toString('latin1'), ucs2_control); assert.strictEqual(c_ucs.toString('latin1'), ucs2_control); -// now let's test BASE64 and HEX ecoding/decoding +// now let's test BASE64 and HEX encoding/decoding const RADIOS = 2; const PRE_HALF_APEX = Math.ceil(EXTERN_APEX / 2) - RADIOS; const PRE_3OF4_APEX = Math.ceil((EXTERN_APEX / 4) * 3) - RADIOS; diff --git a/test/parallel/test-timers-throw-reschedule.js b/test/parallel/test-timers-throw-reschedule.js new file mode 100644 index 00000000000000..b804b6effa9c69 --- /dev/null +++ b/test/parallel/test-timers-throw-reschedule.js @@ -0,0 +1,27 @@ +'use strict'; + +const common = require('../common'); +const assert = require('assert'); + +// This test checks that throwing inside a setTimeout where that Timeout +// instance is the only one within its list of timeouts, doesn't cause +// an issue with an unref timeout scheduled in the error handler. +// Refs: https://github.com/nodejs/node/issues/19970 + +const timeout = common.platformTimeout(50); + +const timer = setTimeout(common.mustNotCall(), 2 ** 31 - 1); + +process.once('uncaughtException', common.mustCall((err) => { + assert.strictEqual(err.message, 'setTimeout Error'); + + const now = Date.now(); + setTimeout(common.mustCall(() => { + assert(Date.now() - now >= timeout); + clearTimeout(timer); + }), timeout).unref(); +})); + +setTimeout(() => { + throw new Error('setTimeout Error'); +}, timeout); diff --git a/test/parallel/test-timers-unref.js b/test/parallel/test-timers-unref.js index 0078d2dae352d5..9e5a4228ba92ed 100644 --- a/test/parallel/test-timers-unref.js +++ b/test/parallel/test-timers-unref.js @@ -71,7 +71,8 @@ const check_unref = setInterval(() => { setInterval(() => timeout.unref(), SHORT_TIME); } -// Should not assert on args.Holder()->InternalFieldCount() > 0. See #4261. +// Should not assert on args.Holder()->InternalFieldCount() > 0. +// See https://github.com/nodejs/node-v0.x-archive/issues/4261. 
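The `testEnd()` cases added to test-string-decoder-end.js above encode a specific contract: `write()` buffers an incomplete multi-byte sequence, `end()` flushes it as U+FFFD, and the decoder remains usable afterwards. A standalone sketch of the first utf8 case, asserting only the concatenated result that the test itself checks:

```js
'use strict';
require('../common');
const assert = require('assert');
const { StringDecoder } = require('string_decoder');

const sd = new StringDecoder('utf8');
let res = '';
res += sd.write(Buffer.of(0xE2)); // lone lead byte: buffered, not emitted yet
res += sd.end();                  // incomplete sequence flushed as U+FFFD
res += sd.write(Buffer.of(0x61)); // decoder keeps working after end()
res += sd.end();
assert.strictEqual(res, '\uFFFDa');
```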
{ const t = setInterval(() => {}, 1); process.nextTick(t.unref.bind({})); diff --git a/test/parallel/test-tls-cnnic-whitelist.js b/test/parallel/test-tls-cnnic-whitelist.js index 4887526435fa2d..80f188f36670a1 100644 --- a/test/parallel/test-tls-cnnic-whitelist.js +++ b/test/parallel/test-tls-cnnic-whitelist.js @@ -31,7 +31,7 @@ const testCases = [ errorCode: 'UNABLE_TO_VERIFY_LEAF_SIGNATURE' }, // Test 1: for the fix of node#2061 - // agent6-cert.pem is signed by intermidate cert of ca3. + // agent6-cert.pem is signed by intermediate cert of ca3. // The server has a cert chain of agent6->ca3->ca1(root) but // tls.connect should be failed with an error of // UNABLE_TO_GET_ISSUER_CERT_LOCALLY since the root CA of ca1 is not diff --git a/test/parallel/test-tls-connect-pipe.js b/test/parallel/test-tls-connect-pipe.js index f609659d195499..88e78b7a2b2e59 100644 --- a/test/parallel/test-tls-connect-pipe.js +++ b/test/parallel/test-tls-connect-pipe.js @@ -33,7 +33,8 @@ const options = { cert: fixtures.readKey('agent1-cert.pem') }; -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); const server = tls.Server(options, common.mustCall(function(socket) { server.close(); diff --git a/test/parallel/test-tls-ecdh-disable.js b/test/parallel/test-tls-ecdh-disable.js index 72b51771c87280..835f6a7fa8cb4a 100644 --- a/test/parallel/test-tls-ecdh-disable.js +++ b/test/parallel/test-tls-ecdh-disable.js @@ -19,7 +19,7 @@ // OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE // USE OR OTHER DEALINGS IN THE SOFTWARE. -// Test that the usage of eliptic curves are not permitted if disabled during +// Test that the usage of elliptic curves are not permitted if disabled during // server initialization. 'use strict'; diff --git a/test/parallel/test-tls-external-accessor.js b/test/parallel/test-tls-external-accessor.js index 2d7b1f62b98977..33d371923a600c 100644 --- a/test/parallel/test-tls-external-accessor.js +++ b/test/parallel/test-tls-external-accessor.js @@ -11,12 +11,12 @@ const tls = require('tls'); { const pctx = tls.createSecureContext().context; const cctx = Object.create(pctx); - assert.throws(() => cctx._external, /incompatible receiver/); + assert.throws(() => cctx._external, TypeError); pctx._external; } { const pctx = tls.createSecurePair().credentials.context; const cctx = Object.create(pctx); - assert.throws(() => cctx._external, /incompatible receiver/); + assert.throws(() => cctx._external, TypeError); pctx._external; } diff --git a/test/parallel/test-tls-net-connect-prefer-path.js b/test/parallel/test-tls-net-connect-prefer-path.js index 19a3ba4b37b383..263501ae0330ac 100644 --- a/test/parallel/test-tls-net-connect-prefer-path.js +++ b/test/parallel/test-tls-net-connect-prefer-path.js @@ -8,7 +8,8 @@ const fixtures = require('../common/fixtures'); if (!common.hasCrypto) common.skip('missing crypto'); -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); const tls = require('tls'); const net = require('net'); diff --git a/test/parallel/test-tls-server-verify.js b/test/parallel/test-tls-server-verify.js index 92c4752a186317..eeea8b030da719 100644 --- a/test/parallel/test-tls-server-verify.js +++ b/test/parallel/test-tls-server-verify.js @@ -286,8 +286,8 @@ function runTest(port, testIndex) { let renegotiated = false; const server = tls.Server(serverOptions, function handleConnection(c) { c.on('error', function(e) { - // child.kill() leads ECONNRESET errro in the TLS connection of - // openssl s_client via 
spawn(). A Test result is already + // child.kill() leads ECONNRESET error in the TLS connection of + // openssl s_client via spawn(). A test result is already // checked by the data of client.stdout before child.kill() so // these tls errors can be ignored. }); diff --git a/test/parallel/test-tls-session-cache.js b/test/parallel/test-tls-session-cache.js index 2bbf3b642da1b8..7778dd03100857 100644 --- a/test/parallel/test-tls-session-cache.js +++ b/test/parallel/test-tls-session-cache.js @@ -69,11 +69,11 @@ function doTest(testOptions, callback) { server.on('newSession', function(id, data, cb) { ++newSessionCount; // Emulate asynchronous store - setTimeout(function() { + setImmediate(() => { assert.ok(!session); session = { id, data }; cb(); - }, 1000); + }); }); server.on('resumeSession', function(id, callback) { ++resumeCount; @@ -89,9 +89,9 @@ function doTest(testOptions, callback) { } // Just to check that async really works there - setTimeout(function() { + setImmediate(() => { callback(null, data); - }, 100); + }); }); server.listen(0, function() { @@ -132,7 +132,7 @@ function doTest(testOptions, callback) { } assert.strictEqual(code, 0); server.close(common.mustCall(function() { - setTimeout(callback, 100); + setImmediate(callback); })); })); } diff --git a/test/parallel/test-tls-set-encoding.js b/test/parallel/test-tls-set-encoding.js index cf621420774973..b3aa52a22a73f3 100644 --- a/test/parallel/test-tls-set-encoding.js +++ b/test/parallel/test-tls-set-encoding.js @@ -63,7 +63,7 @@ server.listen(0, function() { client.on('close', function() { // readyState is deprecated but we want to make // sure this isn't triggering an assert in lib/net.js - // See issue #1069. + // See https://github.com/nodejs/node-v0.x-archive/issues/1069. assert.strictEqual('closed', client.readyState); // Confirming the buffer string is encoded in ASCII diff --git a/test/parallel/test-tls-wrap-econnreset-pipe.js b/test/parallel/test-tls-wrap-econnreset-pipe.js index ef6efaedc34aa7..b400e35d412392 100644 --- a/test/parallel/test-tls-wrap-econnreset-pipe.js +++ b/test/parallel/test-tls-wrap-econnreset-pipe.js @@ -8,7 +8,8 @@ const assert = require('assert'); const tls = require('tls'); const net = require('net'); -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); const server = net.createServer((c) => { c.end(); diff --git a/test/parallel/test-trace-events-all.js b/test/parallel/test-trace-events-all.js index 329f99f591244d..07c53236597220 100644 --- a/test/parallel/test-trace-events-all.js +++ b/test/parallel/test-trace-events-all.js @@ -8,8 +8,9 @@ const CODE = 'setTimeout(() => { for (var i = 0; i < 100000; i++) { "test" + i } }, 1)'; const FILE_NAME = 'node_trace.1.log'; -common.refreshTmpDir(); -process.chdir(common.tmpDir); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); +process.chdir(tmpdir.path); const proc = cp.spawn(process.execPath, [ '--trace-events-enabled', '-e', CODE ]); diff --git a/test/parallel/test-trace-events-async-hooks.js b/test/parallel/test-trace-events-async-hooks.js index e1f78f791a636d..b15d83b07a5799 100644 --- a/test/parallel/test-trace-events-async-hooks.js +++ b/test/parallel/test-trace-events-async-hooks.js @@ -8,8 +8,9 @@ const CODE = 'setTimeout(() => { for (var i = 0; i < 100000; i++) { "test" + i } }, 1)'; const FILE_NAME = 'node_trace.1.log'; -common.refreshTmpDir(); -process.chdir(common.tmpDir); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); +process.chdir(tmpdir.path); const proc = 
cp.spawn(process.execPath, [ '--trace-events-enabled', diff --git a/test/parallel/test-trace-events-binding.js b/test/parallel/test-trace-events-binding.js index 9a182821bac18e..fc4e7f99f87c1b 100644 --- a/test/parallel/test-trace-events-binding.js +++ b/test/parallel/test-trace-events-binding.js @@ -20,8 +20,9 @@ const CODE = ` `; const FILE_NAME = 'node_trace.1.log'; -common.refreshTmpDir(); -process.chdir(common.tmpDir); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); +process.chdir(tmpdir.path); const proc = cp.spawn(process.execPath, [ '--trace-events-enabled', diff --git a/test/parallel/test-trace-events-category-used.js b/test/parallel/test-trace-events-category-used.js index 39d09ad862d787..aa0662b7493568 100644 --- a/test/parallel/test-trace-events-category-used.js +++ b/test/parallel/test-trace-events-category-used.js @@ -7,8 +7,9 @@ const CODE = `console.log( process.binding("trace_events").categoryGroupEnabled("custom") );`; -common.refreshTmpDir(); -process.chdir(common.tmpDir); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); +process.chdir(tmpdir.path); const procEnabled = cp.spawn( process.execPath, diff --git a/test/parallel/test-trace-events-none.js b/test/parallel/test-trace-events-none.js index 9a4d587f2db0e1..7a87fc5cbd3a8d 100644 --- a/test/parallel/test-trace-events-none.js +++ b/test/parallel/test-trace-events-none.js @@ -7,8 +7,9 @@ const CODE = 'setTimeout(() => { for (var i = 0; i < 100000; i++) { "test" + i } }, 1)'; const FILE_NAME = 'node_trace.1.log'; -common.refreshTmpDir(); -process.chdir(common.tmpDir); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); +process.chdir(tmpdir.path); const proc_no_categories = cp.spawn( process.execPath, diff --git a/test/parallel/test-trace-events-process-exit.js b/test/parallel/test-trace-events-process-exit.js index be45cb1d3e0f22..9f164ee6279720 100644 --- a/test/parallel/test-trace-events-process-exit.js +++ b/test/parallel/test-trace-events-process-exit.js @@ -4,10 +4,12 @@ const assert = require('assert'); const cp = require('child_process'); const fs = require('fs'); +const tmpdir = require('../common/tmpdir'); + const FILE_NAME = 'node_trace.1.log'; -common.refreshTmpDir(); -process.chdir(common.tmpDir); +tmpdir.refresh(); +process.chdir(tmpdir.path); const proc = cp.spawn(process.execPath, [ '--trace-events-enabled', diff --git a/test/parallel/test-trace-events-v8.js b/test/parallel/test-trace-events-v8.js index b17b1473ecaf0c..49c34b8f17bbb2 100644 --- a/test/parallel/test-trace-events-v8.js +++ b/test/parallel/test-trace-events-v8.js @@ -8,8 +8,9 @@ const CODE = 'setTimeout(() => { for (var i = 0; i < 100000; i++) { "test" + i } }, 1)'; const FILE_NAME = 'node_trace.1.log'; -common.refreshTmpDir(); -process.chdir(common.tmpDir); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); +process.chdir(tmpdir.path); const proc = cp.spawn(process.execPath, [ '--trace-events-enabled', diff --git a/test/parallel/test-util-inspect.js b/test/parallel/test-util-inspect.js index 81c669fa126d8e..d9f8e30616657d 100644 --- a/test/parallel/test-util-inspect.js +++ b/test/parallel/test-util-inspect.js @@ -558,25 +558,31 @@ assert.doesNotThrow(() => { assert.strictEqual(util.inspect(x).includes('inspect'), true); } -// util.inspect should not display the escaped value of a key. +// util.inspect should display the escaped value of a key. 
{ const w = { '\\': 1, '\\\\': 2, '\\\\\\': 3, '\\\\\\\\': 4, + '\n': 5, + '\r': 6 }; const y = ['a', 'b', 'c']; - y['\\\\\\'] = 'd'; + y['\\\\'] = 'd'; + y['\n'] = 'e'; + y['\r'] = 'f'; assert.strictEqual( util.inspect(w), - '{ \'\\\': 1, \'\\\\\': 2, \'\\\\\\\': 3, \'\\\\\\\\\': 4 }' + '{ \'\\\\\': 1, \'\\\\\\\\\': 2, \'\\\\\\\\\\\\\': 3, ' + + '\'\\\\\\\\\\\\\\\\\': 4, \'\\n\': 5, \'\\r\': 6 }' ); assert.strictEqual( util.inspect(y), - '[ \'a\', \'b\', \'c\', \'\\\\\\\': \'d\' ]' + '[ \'a\', \'b\', \'c\', \'\\\\\\\\\': \'d\', ' + + '\'\\n\': \'e\', \'\\r\': \'f\' ]' ); } diff --git a/test/parallel/test-util.js b/test/parallel/test-util.js index 3b2729c107b4b1..a4aec080a9a0ad 100644 --- a/test/parallel/test-util.js +++ b/test/parallel/test-util.js @@ -43,9 +43,10 @@ assert.strictEqual(false, util.isArray(Object.create(Array.prototype))); // isRegExp assert.strictEqual(true, util.isRegExp(/regexp/)); -assert.strictEqual(true, util.isRegExp(RegExp())); +assert.strictEqual(true, util.isRegExp(RegExp(), 'foo')); assert.strictEqual(true, util.isRegExp(new RegExp())); assert.strictEqual(true, util.isRegExp(context('RegExp')())); +assert.strictEqual(false, util.isRegExp()); assert.strictEqual(false, util.isRegExp({})); assert.strictEqual(false, util.isRegExp([])); assert.strictEqual(false, util.isRegExp(new Date())); @@ -53,7 +54,7 @@ assert.strictEqual(false, util.isRegExp(Object.create(RegExp.prototype))); // isDate assert.strictEqual(true, util.isDate(new Date())); -assert.strictEqual(true, util.isDate(new Date(0))); +assert.strictEqual(true, util.isDate(new Date(0), 'foo')); assert.strictEqual(true, util.isDate(new (context('Date'))())); assert.strictEqual(false, util.isDate(Date())); assert.strictEqual(false, util.isDate({})); diff --git a/test/parallel/test-whatwg-encoding-textdecoder.js b/test/parallel/test-whatwg-encoding-textdecoder.js index 55c601364d0add..8011f285fcb2b7 100644 --- a/test/parallel/test-whatwg-encoding-textdecoder.js +++ b/test/parallel/test-whatwg-encoding-textdecoder.js @@ -90,18 +90,35 @@ if (common.hasIntl) { } { - const fn = TextDecoder.prototype[inspect]; - assert.doesNotThrow(() => { - fn.call(new TextDecoder(), Infinity, {}); - }); - - [{}, [], true, 1, '', new TextEncoder()].forEach((i) => { - assert.throws(() => fn.call(i, Infinity, {}), - common.expectsError({ - code: 'ERR_INVALID_THIS', - type: TypeError, - message: 'Value of "this" must be of type TextDecoder' - })); + const inspectFn = TextDecoder.prototype[inspect]; + const decodeFn = TextDecoder.prototype.decode; + const { + encoding: { get: encodingGetter }, + fatal: { get: fatalGetter }, + ignoreBOM: { get: ignoreBOMGetter }, + } = Object.getOwnPropertyDescriptors(TextDecoder.prototype); + + const instance = new TextDecoder(); + + const expectedError = { + code: 'ERR_INVALID_THIS', + type: TypeError, + message: 'Value of "this" must be of type TextDecoder' + }; + + assert.doesNotThrow(() => inspectFn.call(instance, Infinity, {})); + assert.doesNotThrow(() => decodeFn.call(instance)); + assert.doesNotThrow(() => encodingGetter.call(instance)); + assert.doesNotThrow(() => fatalGetter.call(instance)); + assert.doesNotThrow(() => ignoreBOMGetter.call(instance)); + + const invalidThisArgs = [{}, [], true, 1, '', new TextEncoder()]; + invalidThisArgs.forEach((i) => { + common.expectsError(() => inspectFn.call(i, Infinity, {}), expectedError); + common.expectsError(() => decodeFn.call(i), expectedError); + common.expectsError(() => encodingGetter.call(i), expectedError); + common.expectsError(() => 
fatalGetter.call(i), expectedError); + common.expectsError(() => ignoreBOMGetter.call(i), expectedError); }); } diff --git a/test/parallel/test-whatwg-encoding-textencoder.js b/test/parallel/test-whatwg-encoding-textencoder.js index 2e8ca9e9abafd1..4096a02432e900 100644 --- a/test/parallel/test-whatwg-encoding-textencoder.js +++ b/test/parallel/test-whatwg-encoding-textencoder.js @@ -35,17 +35,27 @@ assert(TextEncoder); } { - const fn = TextEncoder.prototype[inspect]; - assert.doesNotThrow(() => { - fn.call(new TextEncoder(), Infinity, {}); - }); - - [{}, [], true, 1, '', new TextDecoder()].forEach((i) => { - assert.throws(() => fn.call(i, Infinity, {}), - common.expectsError({ - code: 'ERR_INVALID_THIS', - type: TypeError, - message: 'Value of "this" must be of type TextEncoder' - })); + const inspectFn = TextEncoder.prototype[inspect]; + const encodeFn = TextEncoder.prototype.encode; + const encodingGetter = + Object.getOwnPropertyDescriptor(TextEncoder.prototype, 'encoding').get; + + const instance = new TextEncoder(); + + const expectedError = { + code: 'ERR_INVALID_THIS', + type: TypeError, + message: 'Value of "this" must be of type TextEncoder' + }; + + assert.doesNotThrow(() => inspectFn.call(instance, Infinity, {})); + assert.doesNotThrow(() => encodeFn.call(instance)); + assert.doesNotThrow(() => encodingGetter.call(instance)); + + const invalidThisArgs = [{}, [], true, 1, '', new TextDecoder()]; + invalidThisArgs.forEach((i) => { + common.expectsError(() => inspectFn.call(i, Infinity, {}), expectedError); + common.expectsError(() => encodeFn.call(i), expectedError); + common.expectsError(() => encodingGetter.call(i), expectedError); }); } diff --git a/test/parallel/test-zlib-from-gzip.js b/test/parallel/test-zlib-from-gzip.js index f62dd10f323adc..99c3f1757e05d8 100644 --- a/test/parallel/test-zlib-from-gzip.js +++ b/test/parallel/test-zlib-from-gzip.js @@ -29,7 +29,8 @@ const zlib = require('zlib'); const path = require('path'); const fixtures = require('../common/fixtures'); -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); const gunzip = zlib.createGunzip(); @@ -37,7 +38,7 @@ const fs = require('fs'); const fixture = fixtures.path('person.jpg.gz'); const unzippedFixture = fixtures.path('person.jpg'); -const outputFile = path.resolve(common.tmpDir, 'person.jpg'); +const outputFile = path.resolve(tmpdir.path, 'person.jpg'); const expect = fs.readFileSync(unzippedFixture); const inp = fs.createReadStream(fixture); const out = fs.createWriteStream(outputFile); diff --git a/test/pummel/test-fs-largefile.js b/test/pummel/test-fs-largefile.js index b0cb24a60fe4d6..786e325ce3333d 100644 --- a/test/pummel/test-fs-largefile.js +++ b/test/pummel/test-fs-largefile.js @@ -20,15 +20,16 @@ // USE OR OTHER DEALINGS IN THE SOFTWARE. 
'use strict'; -const common = require('../common'); +require('../common'); const assert = require('assert'); const fs = require('fs'); const path = require('path'); -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); -const filepath = path.join(common.tmpDir, 'large.txt'); +const filepath = path.join(tmpdir.path, 'large.txt'); const fd = fs.openSync(filepath, 'w+'); const offset = 5 * 1024 * 1024 * 1024; // 5GB const message = 'Large File'; diff --git a/test/pummel/test-fs-watch-file-slow.js b/test/pummel/test-fs-watch-file-slow.js index 9ae9922ec8b2d6..7b7065cffbfe64 100644 --- a/test/pummel/test-fs-watch-file-slow.js +++ b/test/pummel/test-fs-watch-file-slow.js @@ -25,7 +25,9 @@ const assert = require('assert'); const path = require('path'); const fs = require('fs'); -const FILENAME = path.join(common.tmpDir, 'watch-me'); +const tmpdir = require('../common/tmpdir'); + +const FILENAME = path.join(tmpdir.path, 'watch-me'); const TIMEOUT = 1300; let nevents = 0; diff --git a/test/pummel/test-fs-watch-file.js b/test/pummel/test-fs-watch-file.js index 3b036257b3b492..c893c9dfa6e268 100644 --- a/test/pummel/test-fs-watch-file.js +++ b/test/pummel/test-fs-watch-file.js @@ -25,12 +25,14 @@ const assert = require('assert'); const path = require('path'); const fs = require('fs'); +const tmpdir = require('../common/tmpdir'); + let watchSeenOne = 0; let watchSeenTwo = 0; let watchSeenThree = 0; let watchSeenFour = 0; -const testDir = common.tmpDir; +const testDir = tmpdir.path; const filenameOne = 'watch.txt'; const filepathOne = path.join(testDir, filenameOne); diff --git a/test/pummel/test-fs-watch-non-recursive.js b/test/pummel/test-fs-watch-non-recursive.js index 02447cf5215e5a..2b10f9b24da746 100644 --- a/test/pummel/test-fs-watch-non-recursive.js +++ b/test/pummel/test-fs-watch-non-recursive.js @@ -24,7 +24,9 @@ const common = require('../common'); const path = require('path'); const fs = require('fs'); -const testDir = common.tmpDir; +const tmpdir = require('../common/tmpdir'); + +const testDir = tmpdir.path; const testsubdir = path.join(testDir, 'testsubdir'); const filepath = path.join(testsubdir, 'watch.txt'); diff --git a/test/pummel/test-regress-GH-814.js b/test/pummel/test-regress-GH-814.js index a43a67fe77cc2e..a62df944863062 100644 --- a/test/pummel/test-regress-GH-814.js +++ b/test/pummel/test-regress-GH-814.js @@ -22,9 +22,11 @@ 'use strict'; // Flags: --expose_gc -const common = require('../common'); +require('../common'); const assert = require('assert'); +const tmpdir = require('../common/tmpdir'); + function newBuffer(size, value) { const buffer = Buffer.allocUnsafe(size); while (size--) { @@ -36,7 +38,7 @@ } const fs = require('fs'); -const testFileName = require('path').join(common.tmpDir, 'GH-814_testFile.txt'); +const testFileName = require('path').join(tmpdir.path, 'GH-814_testFile.txt'); const testFileFD = fs.openSync(testFileName, 'w'); console.log(testFileName); diff --git a/test/pummel/test-regress-GH-814_2.js b/test/pummel/test-regress-GH-814_2.js index 516a8727c65012..a183e082f86aa6 100644 --- a/test/pummel/test-regress-GH-814_2.js +++ b/test/pummel/test-regress-GH-814_2.js @@ -22,11 +22,12 @@ 'use strict'; // Flags: --expose_gc -const common = require('../common'); +require('../common'); const assert = require('assert'); const fs = require('fs'); -const testFileName = require('path').join(common.tmpDir, 'GH-814_test.txt'); +const tmpdir = require('../common/tmpdir'); +const testFileName =
require('path').join(tmpdir.path, 'GH-814_test.txt'); const testFD = fs.openSync(testFileName, 'w'); console.error(`${testFileName}\n`); diff --git a/test/pummel/test-tls-session-timeout.js b/test/pummel/test-tls-session-timeout.js index 56fdfa16ea7eb5..49c38102fc5c5c 100644 --- a/test/pummel/test-tls-session-timeout.js +++ b/test/pummel/test-tls-session-timeout.js @@ -28,6 +28,8 @@ if (!common.opensslCli) if (!common.hasCrypto) common.skip('missing crypto'); +const tmpdir = require('../common/tmpdir'); + doTest(); // This test consists of three TLS requests -- @@ -65,7 +67,7 @@ function doTest() { const sessionFileName = (function() { const ticketFileName = 'tls-session-ticket.txt'; - const tmpPath = join(common.tmpDir, ticketFileName); + const tmpPath = join(tmpdir.path, ticketFileName); fs.writeFileSync(tmpPath, fixtures.readSync(ticketFileName)); return tmpPath; }()); diff --git a/test/sequential/sequential.status b/test/sequential/sequential.status index 656eb80c4db4ee..5e39392e658a79 100644 --- a/test/sequential/sequential.status +++ b/test/sequential/sequential.status @@ -12,6 +12,8 @@ test-inspector-bindings : PASS, FLAKY test-inspector-debug-end : PASS, FLAKY test-inspector-async-hook-setup-at-signal: PASS, FLAKY test-inspector-stop-profile-after-done: PASS, FLAKY +test-http2-ping-flood : PASS, FLAKY +test-http2-settings-flood : PASS, FLAKY [$system==linux] diff --git a/test/sequential/test-async-wrap-getasyncid.js b/test/sequential/test-async-wrap-getasyncid.js index 1857f4e245b99f..c8c25725957824 100644 --- a/test/sequential/test-async-wrap-getasyncid.js +++ b/test/sequential/test-async-wrap-getasyncid.js @@ -6,6 +6,7 @@ const fs = require('fs'); const net = require('net'); const providers = Object.assign({}, process.binding('async_wrap').Providers); const fixtures = require('../common/fixtures'); +const tmpdir = require('../common/tmpdir'); // Make sure that all Providers are tested. { @@ -25,6 +26,7 @@ const fixtures = require('../common/fixtures'); delete providers.HTTP2SESSION; delete providers.HTTP2STREAM; delete providers.HTTP2PING; + delete providers.HTTP2SETTINGS; const obj_keys = Object.keys(providers); if (obj_keys.length > 0) @@ -145,7 +147,7 @@ if (common.hasCrypto) { // eslint-disable-line crypto-check } { - common.refreshTmpDir(); + tmpdir.refresh(); const server = net.createServer(common.mustCall((socket) => { server.close(); diff --git a/test/sequential/test-benchmark-tls.js b/test/sequential/test-benchmark-tls.js new file mode 100644 index 00000000000000..7c87aa3cbcd89e --- /dev/null +++ b/test/sequential/test-benchmark-tls.js @@ -0,0 +1,25 @@ +'use strict'; + +const common = require('../common'); + +if (!common.enoughTestMem) + common.skip('Insufficient memory for TLS benchmark test'); + +// Because the TLS benchmarks use hardcoded ports, this should be in sequential +// rather than parallel to make sure it does not conflict with tests that choose +// random available ports. 
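The comment at the top of test-benchmark-tls.js above turns on port selection: the TLS benchmarks bind hardcoded ports, while parallel tests avoid clashes by letting the OS hand out an ephemeral port, as the http2 flood tests later in this patch do. A small sketch of that `listen(0)` pattern:

```js
'use strict';
const common = require('../common');
const net = require('net');

// Port 0 asks the kernel for any free ephemeral port, so concurrently running
// tests cannot collide the way a hardcoded benchmark port can.
const server = net.createServer((socket) => socket.end());
server.listen(0, common.mustCall(() => {
  const { port } = server.address();
  const client = net.connect(port, common.mustCall(() => {
    client.end();
    server.close();
  }));
}));
```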
+ +const runBenchmark = require('../common/benchmark'); + +runBenchmark('tls', + [ + 'concurrency=1', + 'dur=0.1', + 'n=1', + 'size=2', + 'type=asc' + ], + { + NODEJS_BENCHMARK_ZERO_ALLOWED: 1, + duration: 0 + }); diff --git a/test/sequential/test-child-process-execsync.js b/test/sequential/test-child-process-execsync.js index 94810e890cd119..39ddcd1ccde7f9 100644 --- a/test/sequential/test-child-process-execsync.js +++ b/test/sequential/test-child-process-execsync.js @@ -29,6 +29,7 @@ const TIMER = 200; const SLEEP = 2000; const start = Date.now(); +const execOpts = { encoding: 'utf8', shell: true }; let err; let caught = false; @@ -93,7 +94,8 @@ ret = execFileSync(process.execPath, args, { encoding: 'utf8' }); assert.strictEqual(ret, `${msg}\n`); -// Verify that the cwd option works - GH #7824 +// Verify that the cwd option works. +// See https://github.com/nodejs/node-v0.x-archive/issues/7824. { const cwd = common.rootDir; const cmd = common.isWindows ? 'echo %cd%' : 'pwd'; @@ -102,7 +104,8 @@ assert.strictEqual(ret, `${msg}\n`); assert.strictEqual(response.toString().trim(), cwd); } -// Verify that stderr is not accessed when stdio = 'ignore' - GH #7966 +// Verify that stderr is not accessed when stdio = 'ignore'. +// See https://github.com/nodejs/node-v0.x-archive/issues/7966. { assert.throws(function() { execSync('exit -1', { stdio: 'ignore' }); @@ -124,3 +127,8 @@ assert.strictEqual(ret, `${msg}\n`); return true; }); } + +// Verify the shell option works properly +assert.doesNotThrow(() => { + execFileSync(process.execPath, [], execOpts); +}); diff --git a/test/sequential/test-fs-readfile-tostring-fail.js b/test/sequential/test-fs-readfile-tostring-fail.js index c79e13daebac30..88cf7347efbfdf 100644 --- a/test/sequential/test-fs-readfile-tostring-fail.js +++ b/test/sequential/test-fs-readfile-tostring-fail.js @@ -13,9 +13,10 @@ const kStringMaxLength = process.binding('buffer').kStringMaxLength; if (common.isAIX && (Number(cp.execSync('ulimit -f')) * 512) < kStringMaxLength) common.skip('intensive toString tests due to file size confinements'); -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); -const file = path.join(common.tmpDir, 'toobig.txt'); +const file = path.join(tmpdir.path, 'toobig.txt'); const stream = fs.createWriteStream(file, { flags: 'a' }); diff --git a/test/sequential/test-fs-watch.js b/test/sequential/test-fs-watch.js index 9f1e95e8c0fbe3..31708ee6144c7d 100644 --- a/test/sequential/test-fs-watch.js +++ b/test/sequential/test-fs-watch.js @@ -26,14 +26,16 @@ const assert = require('assert'); const fs = require('fs'); const path = require('path'); +const tmpdir = require('../common/tmpdir'); + const expectFilePath = common.isWindows || common.isLinux || common.isOSX || common.isAIX; -const testDir = common.tmpDir; +const testDir = tmpdir.path; -common.refreshTmpDir(); +tmpdir.refresh(); { const filepath = path.join(testDir, 'watch.txt'); diff --git a/test/sequential/test-http2-max-session-memory.js b/test/sequential/test-http2-max-session-memory.js new file mode 100644 index 00000000000000..e16000d1261ab0 --- /dev/null +++ b/test/sequential/test-http2-max-session-memory.js @@ -0,0 +1,44 @@ +'use strict'; + +const common = require('../common'); +if (!common.hasCrypto) + common.skip('missing crypto'); + +const http2 = require('http2'); + +// Test that maxSessionMemory Caps work + +const largeBuffer = Buffer.alloc(1e6); + +const server = http2.createServer({ maxSessionMemory: 1 }); + +server.on('stream', common.mustCall((stream) => 
{ + stream.respond(); + stream.end(largeBuffer); +})); + +server.listen(0, common.mustCall(() => { + const client = http2.connect(`http://localhost:${server.address().port}`); + + { + const req = client.request(); + + req.on('response', () => { + // This one should be rejected because the server is over budget + // on the current memory allocation + const req = client.request(); + req.on('error', common.expectsError({ + code: 'ERR_HTTP2_STREAM_ERROR', + type: Error, + message: 'Stream closed with error code 11' + })); + req.on('close', common.mustCall(() => { + server.close(); + client.destroy(); + })); + }); + + req.resume(); + req.on('close', common.mustCall()); + } +})); diff --git a/test/sequential/test-http2-ping-flood.js b/test/sequential/test-http2-ping-flood.js new file mode 100644 index 00000000000000..5b47d51be9c5a8 --- /dev/null +++ b/test/sequential/test-http2-ping-flood.js @@ -0,0 +1,56 @@ +'use strict'; + +const common = require('../common'); +if (!common.hasCrypto) + common.skip('missing crypto'); + +const http2 = require('http2'); +const net = require('net'); +const http2util = require('../common/http2'); + +// Test that ping flooding causes the session to be torn down + +const kSettings = new http2util.SettingsFrame(); +const kPing = new http2util.PingFrame(); + +const server = http2.createServer(); + +server.on('stream', common.mustNotCall()); +server.on('session', common.mustCall((session) => { + session.on('error', common.expectsError({ + code: 'ERR_HTTP2_ERROR', + message: + 'Flooding was detected in this HTTP/2 session, and it must be closed' + })); + session.on('close', common.mustCall(() => { + server.close(); + })); +})); + +server.listen(0, common.mustCall(() => { + const client = net.connect(server.address().port); + + // nghttp2 uses a limit of 10000 items in it's outbound queue. + // If this number is exceeded, a flooding error is raised. Set + // this lim higher to account for the ones that nghttp2 is + // successfully able to respond to. + // TODO(jasnell): Unfortunately, this test is inherently flaky because + // it is entirely dependent on how quickly the server is able to handle + // the inbound frames and whether those just happen to overflow nghttp2's + // outbound queue. The threshold at which the flood error occurs can vary + // from one system to another, and from one test run to another. + client.on('connect', common.mustCall(() => { + client.write(http2util.kClientMagic, () => { + client.write(kSettings.data, () => { + for (let n = 0; n < 35000; n++) + client.write(kPing.data); + }); + }); + })); + + // An error event may or may not be emitted, depending on operating system + // and timing. We do not really care if one is emitted here or not, as the + // error on the server side is what we are testing for. Do not make this + // a common.mustCall() and there's no need to check the error details. 
+ client.on('error', () => {}); +})); diff --git a/test/sequential/test-http2-session-timeout.js b/test/sequential/test-http2-session-timeout.js index 7a401e90ea4bbc..fce4570563c584 100644 --- a/test/sequential/test-http2-session-timeout.js +++ b/test/sequential/test-http2-session-timeout.js @@ -38,7 +38,7 @@ server.listen(0, common.mustCall(() => { setTimeout(() => makeReq(attempts - 1), callTimeout); } else { server.removeListener('timeout', mustNotCall); - client.destroy(); + client.close(); server.close(); } }); diff --git a/test/sequential/test-http2-settings-flood.js b/test/sequential/test-http2-settings-flood.js new file mode 100644 index 00000000000000..bad4cec9a8d509 --- /dev/null +++ b/test/sequential/test-http2-settings-flood.js @@ -0,0 +1,53 @@ +'use strict'; + +const common = require('../common'); +if (!common.hasCrypto) + common.skip('missing crypto'); + +const http2 = require('http2'); +const net = require('net'); +const http2util = require('../common/http2'); + +// Test that settings flooding causes the session to be torn down + +const kSettings = new http2util.SettingsFrame(); + +const server = http2.createServer(); + +server.on('stream', common.mustNotCall()); +server.on('session', common.mustCall((session) => { + session.on('error', common.expectsError({ + code: 'ERR_HTTP2_ERROR', + message: + 'Flooding was detected in this HTTP/2 session, and it must be closed' + })); + session.on('close', common.mustCall(() => { + server.close(); + })); +})); + +server.listen(0, common.mustCall(() => { + const client = net.connect(server.address().port); + + // nghttp2 uses a limit of 10000 items in it's outbound queue. + // If this number is exceeded, a flooding error is raised. Set + // this lim higher to account for the ones that nghttp2 is + // successfully able to respond to. + // TODO(jasnell): Unfortunately, this test is inherently flaky because + // it is entirely dependent on how quickly the server is able to handle + // the inbound frames and whether those just happen to overflow nghttp2's + // outbound queue. The threshold at which the flood error occurs can vary + // from one system to another, and from one test run to another. + client.on('connect', common.mustCall(() => { + client.write(http2util.kClientMagic, () => { + for (let n = 0; n < 35000; n++) + client.write(kSettings.data); + }); + })); + + // An error event may or may not be emitted, depending on operating system + // and timing. We do not really care if one is emitted here or not, as the + // error on the server side is what we are testing for. Do not make this + // a common.mustCall() and there's no need to check the error details. 
+ client.on('error', () => {}); +})); diff --git a/test/sequential/test-http2-timeout-large-write-file.js b/test/sequential/test-http2-timeout-large-write-file.js index 922e5f99668da2..910e7a0fc497bd 100644 --- a/test/sequential/test-http2-timeout-large-write-file.js +++ b/test/sequential/test-http2-timeout-large-write-file.js @@ -8,7 +8,8 @@ const fs = require('fs'); const http2 = require('http2'); const path = require('path'); -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); // This test assesses whether long-running writes can complete // or timeout because the session or stream are not aware that the @@ -29,7 +30,7 @@ let offsetTimeout = common.platformTimeout(100); let didReceiveData = false; const content = Buffer.alloc(writeSize, 0x44); -const filepath = path.join(common.tmpDir, 'http2-large-write.tmp'); +const filepath = path.join(tmpdir.path, 'http2-large-write.tmp'); fs.writeFileSync(filepath, content, 'binary'); const fd = fs.openSync(filepath, 'r'); @@ -79,7 +80,7 @@ server.listen(0, common.mustCall(() => { } }, 1)); req.on('end', common.mustCall(() => { - client.destroy(); + client.close(); server.close(); })); })); diff --git a/test/sequential/test-http2-timeout-large-write.js b/test/sequential/test-http2-timeout-large-write.js index f0a11b2e44469e..a15fb46af6d28a 100644 --- a/test/sequential/test-http2-timeout-large-write.js +++ b/test/sequential/test-http2-timeout-large-write.js @@ -78,7 +78,7 @@ server.listen(0, common.mustCall(() => { } }, 1)); req.on('end', common.mustCall(() => { - client.destroy(); + client.close(); server.close(); })); })); diff --git a/test/sequential/test-inspector-async-hook-setup-at-signal.js b/test/sequential/test-inspector-async-hook-setup-at-signal.js index 96e8b28a7a250e..5ff7dec9473ac2 100644 --- a/test/sequential/test-inspector-async-hook-setup-at-signal.js +++ b/test/sequential/test-inspector-async-hook-setup-at-signal.js @@ -17,7 +17,7 @@ function waitUntilDebugged() { // call stack depth is 0. We need a chance to call // Debugger.setAsyncCallStackDepth *before* activating the actual timer for // async stack traces to work. Directly using a debugger statement would be - // too brittle, and using a longer timeout would unnecesarily slow down the + // too brittle, and using a longer timeout would unnecessarily slow down the // test on most machines. Triggering a debugger break through an interval is // a faster and more reliable way. process._rawDebug('Signal received, waiting for debugger setup'); diff --git a/test/sequential/test-inspector-contexts.js b/test/sequential/test-inspector-contexts.js index c7db962f2af006..79516b6447db97 100644 --- a/test/sequential/test-inspector-contexts.js +++ b/test/sequential/test-inspector-contexts.js @@ -27,9 +27,10 @@ async function testContextCreatedAndDestroyed() { const { name } = contextCreated.params.context; if (common.isSunOS || common.isWindows) { // uv_get_process_title() is unimplemented on Solaris-likes, it returns - // an empy string. On the Windows CI buildbots it returns "Administrator: - // Windows PowerShell[42]" because of a GetConsoleTitle() quirk. Not much - // we can do about either, just verify that it contains the PID. + // an empty string. On the Windows CI buildbots it returns + // "Administrator: Windows PowerShell[42]" because of a GetConsoleTitle() + // quirk. Not much we can do about either, just verify that it contains + // the PID. 
strictEqual(name.includes(`[${process.pid}]`), true); } else { strictEqual(`${process.argv0}[${process.pid}]`, name); diff --git a/test/sequential/test-inspector-port-cluster.js b/test/sequential/test-inspector-port-cluster.js index 84ec408ebfb178..87469aa7ff77c5 100644 --- a/test/sequential/test-inspector-port-cluster.js +++ b/test/sequential/test-inspector-port-cluster.js @@ -24,6 +24,16 @@ function testRunnerMain() { workers: [{ expectedPort: 9230 }] }); + spawnMaster({ + execArgv: ['--inspect=65534'], + workers: [ + { expectedPort: 65535 }, + { expectedPort: 1024 }, + { expectedPort: 1025 }, + { expectedPort: 1026 } + ] + }); + let port = debuggerPort + offset++ * 5; spawnMaster({ diff --git a/test/sequential/test-inspector-port-zero-cluster.js b/test/sequential/test-inspector-port-zero-cluster.js index f64e05f314c0c6..e522056571d1c2 100644 --- a/test/sequential/test-inspector-port-zero-cluster.js +++ b/test/sequential/test-inspector-port-zero-cluster.js @@ -30,16 +30,14 @@ function serialFork() { if (cluster.isMaster) { Promise.all([serialFork(), serialFork(), serialFork()]) .then(common.mustCall((ports) => { - ports.push(process.debugPort); - ports.sort(); + ports.splice(0, 0, process.debugPort); // 4 = [master, worker1, worker2, worker3].length() assert.strictEqual(ports.length, 4); assert(ports.every((port) => port > 0)); assert(ports.every((port) => port < 65536)); - // Ports should be consecutive. - assert.strictEqual(ports[0] + 1, ports[1]); - assert.strictEqual(ports[1] + 1, ports[2]); - assert.strictEqual(ports[2] + 1, ports[3]); + assert.strictEqual(ports[0] === 65535 ? 1024 : ports[0] + 1, ports[1]); + assert.strictEqual(ports[1] === 65535 ? 1024 : ports[1] + 1, ports[2]); + assert.strictEqual(ports[2] === 65535 ? 1024 : ports[2] + 1, ports[3]); })) .catch( (err) => { diff --git a/test/sequential/test-module-loading.js b/test/sequential/test-module-loading.js index 47916d352d729a..3d0de954c58e8a 100644 --- a/test/sequential/test-module-loading.js +++ b/test/sequential/test-module-loading.js @@ -20,7 +20,7 @@ // USE OR OTHER DEALINGS IN THE SOFTWARE. 'use strict'; -require('../common'); +const common = require('../common'); const assert = require('assert'); const path = require('path'); const fs = require('fs'); @@ -197,7 +197,10 @@ try { require(`${loadOrder}file3`); } catch (e) { // Not a real .node module, but we know we require'd the right thing. - assert.ok(/file3\.node/.test(e.message.replace(backslash, '/'))); + if (common.isOpenBSD) // OpenBSD errors with non-ELF object error + assert.ok(/File not an ELF object/.test(e.message.replace(backslash, '/'))); + else + assert.ok(/file3\.node/.test(e.message.replace(backslash, '/'))); } assert.strictEqual(require(`${loadOrder}file4`).file4, 'file4.reg', msg); assert.strictEqual(require(`${loadOrder}file5`).file5, 'file5.reg2', msg); @@ -205,7 +208,10 @@ try { try { require(`${loadOrder}file7`); } catch (e) { - assert.ok(/file7\/index\.node/.test(e.message.replace(backslash, '/'))); + if (common.isOpenBSD) + assert.ok(/File not an ELF object/.test(e.message.replace(backslash, '/'))); + else + assert.ok(/file7\/index\.node/.test(e.message.replace(backslash, '/'))); } assert.strictEqual(require(`${loadOrder}file8`).file8, 'file8/index.reg', msg); @@ -222,7 +228,8 @@ try { } { - // #1357 Loading JSON files with require() + // Loading JSON files with require() + // See https://github.com/nodejs/node-v0.x-archive/issues/1357. 
const json = require('../fixtures/packages/main/package.json'); assert.deepStrictEqual(json, { name: 'package-name', @@ -249,7 +256,8 @@ try { assert.deepStrictEqual(children, { 'common/index.js': { - 'common/fixtures.js': {} + 'common/fixtures.js': {}, + 'common/tmpdir.js': {} }, 'fixtures/not-main-module.js': {}, 'fixtures/a.js': { @@ -337,7 +345,8 @@ process.on('exit', function() { }); -// #1440 Loading files with a byte order marker. +// Loading files with a byte order marker. +// See https://github.com/nodejs/node-v0.x-archive/issues/1440. assert.strictEqual(require('../fixtures/utf8-bom.js'), 42); assert.strictEqual(require('../fixtures/utf8-bom.json'), 42); diff --git a/test/sequential/test-readline-interface.js b/test/sequential/test-readline-interface.js deleted file mode 100644 index 5c1a0e08a13ae7..00000000000000 --- a/test/sequential/test-readline-interface.js +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright Joyent, Inc. and other Node contributors. -// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to permit -// persons to whom the Software is furnished to do so, subject to the -// following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN -// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, -// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR -// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE -// USE OR OTHER DEALINGS IN THE SOFTWARE. - -// Flags: --expose_internals -'use strict'; -const common = require('../common'); - -// These test cases are in `sequential` rather than the analogous test file in -// `parallel` because they become unrelaible under load. 
The unreliability under -// load was determined empirically when the test cases were in `parallel` by -// running: -// tools/test.py -j 96 --repeat 192 test/parallel/test-readline-interface.js - -const assert = require('assert'); -const readline = require('readline'); -const EventEmitter = require('events').EventEmitter; -const inherits = require('util').inherits; - -function FakeInput() { - EventEmitter.call(this); -} -inherits(FakeInput, EventEmitter); -FakeInput.prototype.resume = () => {}; -FakeInput.prototype.pause = () => {}; -FakeInput.prototype.write = () => {}; -FakeInput.prototype.end = () => {}; - -[ true, false ].forEach(function(terminal) { - // sending multiple newlines at once that does not end with a new line - // and a `end` event(last line is) - - // \r\n should emit one line event, not two - { - const fi = new FakeInput(); - const rli = new readline.Interface( - { input: fi, output: fi, terminal: terminal } - ); - const expectedLines = ['foo', 'bar', 'baz', 'bat']; - let callCount = 0; - rli.on('line', function(line) { - assert.strictEqual(line, expectedLines[callCount]); - callCount++; - }); - fi.emit('data', expectedLines.join('\r\n')); - assert.strictEqual(callCount, expectedLines.length - 1); - rli.close(); - } - - // \r\n should emit one line event when split across multiple writes. - { - const fi = new FakeInput(); - const rli = new readline.Interface( - { input: fi, output: fi, terminal: terminal } - ); - const expectedLines = ['foo', 'bar', 'baz', 'bat']; - let callCount = 0; - rli.on('line', function(line) { - assert.strictEqual(line, expectedLines[callCount]); - callCount++; - }); - expectedLines.forEach(function(line) { - fi.emit('data', `${line}\r`); - fi.emit('data', '\n'); - }); - assert.strictEqual(callCount, expectedLines.length); - rli.close(); - } - - // Emit one line event when the delay between \r and \n is - // over the default crlfDelay but within the setting value. 
- { - const fi = new FakeInput(); - const delay = 125; - const crlfDelay = common.platformTimeout(1000); - const rli = new readline.Interface({ - input: fi, - output: fi, - terminal: terminal, - crlfDelay - }); - let callCount = 0; - rli.on('line', () => callCount++); - fi.emit('data', '\r'); - setTimeout(common.mustCall(() => { - fi.emit('data', '\n'); - assert.strictEqual(callCount, 1); - rli.close(); - }), delay); - } -}); diff --git a/test/sequential/test-regress-GH-4027.js b/test/sequential/test-regress-GH-4027.js index 6ab6afcfd6bc2e..8936537323581e 100644 --- a/test/sequential/test-regress-GH-4027.js +++ b/test/sequential/test-regress-GH-4027.js @@ -25,9 +25,10 @@ const assert = require('assert'); const path = require('path'); const fs = require('fs'); -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); -const filename = path.join(common.tmpDir, 'watched'); +const filename = path.join(tmpdir.path, 'watched'); fs.writeFileSync(filename, 'quis custodiet ipsos custodes'); fs.watchFile(filename, { interval: 50 }, common.mustCall(function(curr, prev) { diff --git a/test/sequential/test-timers-block-eventloop.js b/test/sequential/test-timers-block-eventloop.js index f6426e454e0882..811216fcb29e7d 100644 --- a/test/sequential/test-timers-block-eventloop.js +++ b/test/sequential/test-timers-block-eventloop.js @@ -1,24 +1,18 @@ 'use strict'; const common = require('../common'); -const fs = require('fs'); -const platformTimeout = common.platformTimeout; +const assert = require('assert'); +let called = false; const t1 = setInterval(() => { - common.busyLoop(platformTimeout(12)); -}, platformTimeout(10)); - -const t2 = setInterval(() => { - common.busyLoop(platformTimeout(15)); -}, platformTimeout(10)); - -const t3 = - setTimeout(common.mustNotCall('eventloop blocked!'), platformTimeout(200)); - -setTimeout(function() { - fs.stat('/dev/nonexistent', () => { + assert(!called); + called = true; + setImmediate(common.mustCall(() => { clearInterval(t1); clearInterval(t2); - clearTimeout(t3); - }); -}, platformTimeout(50)); + })); +}, 10); + +const t2 = setInterval(() => { + common.busyLoop(20); +}, 10); diff --git a/test/parallel/test-tls-connect.js b/test/sequential/test-tls-connect.js similarity index 100% rename from test/parallel/test-tls-connect.js rename to test/sequential/test-tls-connect.js diff --git a/test/parallel/test-tls-lookup.js b/test/sequential/test-tls-lookup.js similarity index 100% rename from test/parallel/test-tls-lookup.js rename to test/sequential/test-tls-lookup.js diff --git a/test/tick-processor/tick-processor-base.js b/test/tick-processor/tick-processor-base.js index 3017dc6bb47250..33944655258bef 100644 --- a/test/tick-processor/tick-processor-base.js +++ b/test/tick-processor/tick-processor-base.js @@ -1,12 +1,13 @@ 'use strict'; -const common = require('../common'); +require('../common'); const fs = require('fs'); const cp = require('child_process'); const path = require('path'); -common.refreshTmpDir(); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); -const LOG_FILE = path.join(common.tmpDir, 'tick-processor.log'); +const LOG_FILE = path.join(tmpdir.path, 'tick-processor.log'); const RETRY_TIMEOUT = 150; function runTest(test) { diff --git a/tools/doc/type-parser.js b/tools/doc/type-parser.js index 3bcdd817a2f134..7999d55d740719 100644 --- a/tools/doc/type-parser.js +++ b/tools/doc/type-parser.js @@ -13,13 +13,13 @@ const jsPrimitives = { 'undefined': 'Undefined' }; const jsGlobalTypes = [ - 'Error', 'Object', 
'Function', 'Array', 'TypedArray', 'Uint8Array', - 'Uint16Array', 'Uint32Array', 'Int8Array', 'Int16Array', 'Int32Array', - 'Uint8ClampedArray', 'Float32Array', 'Float64Array', 'Date', 'RegExp', - 'ArrayBuffer', 'DataView', 'Promise', 'EvalError', 'RangeError', - 'ReferenceError', 'SyntaxError', 'TypeError', 'URIError', 'Proxy', 'Map', - 'Set', 'WeakMap', 'WeakSet', 'Generator', 'GeneratorFunction', - 'AsyncFunction', 'SharedArrayBuffer' + 'Array', 'ArrayBuffer', 'AsyncFunction', 'DataView', 'Date', 'Error', + 'EvalError', 'Float32Array', 'Float64Array', 'Function', 'Generator', + 'GeneratorFunction', 'Int16Array', 'Int32Array', 'Int8Array', 'Map', 'Object', + 'Promise', 'Proxy', 'RangeError', 'ReferenceError', 'RegExp', 'Set', + 'SharedArrayBuffer', 'SyntaxError', 'TypeError', 'TypedArray', 'URIError', + 'Uint16Array', 'Uint32Array', 'Uint8Array', 'Uint8ClampedArray', 'WeakMap', + 'WeakSet' ]; const typeMap = { 'Iterable': @@ -27,34 +27,68 @@ const typeMap = { 'Iterator': `${jsDocPrefix}Reference/Iteration_protocols#The_iterator_protocol`, + 'this': `${jsDocPrefix}Reference/Operators/this`, + + 'AsyncHook': 'async_hooks.html#async_hooks_async_hooks_createhook_callbacks', + 'Buffer': 'buffer.html#buffer_class_buffer', 'ChildProcess': 'child_process.html#child_process_class_childprocess', 'cluster.Worker': 'cluster.html#cluster_class_worker', + 'crypto.constants': 'crypto.html#crypto_crypto_constants_1', + 'dgram.Socket': 'dgram.html#dgram_class_dgram_socket', + 'Domain': 'domain.html#domain_class_domain', + 'EventEmitter': 'events.html#events_class_eventemitter', + 'fs.Stats': 'fs.html#fs_class_fs_stats', + 'http.Agent': 'http.html#http_class_http_agent', 'http.ClientRequest': 'http.html#http_class_http_clientrequest', 'http.IncomingMessage': 'http.html#http_class_http_incomingmessage', 'http.Server': 'http.html#http_class_http_server', 'http.ServerResponse': 'http.html#http_class_http_serverresponse', + 'ClientHttp2Stream': 'http2.html#http2_class_clienthttp2stream', + 'HTTP/2 Headers Object': 'http2.html#http2_headers_object', + 'HTTP/2 Settings Object': 'http2.html#http2_settings_object', + 'http2.Http2ServerRequest': 'http2.html#http2_class_http2_http2serverrequest', + 'http2.Http2ServerResponse': + 'http2.html#http2_class_http2_http2serverresponse', + 'Http2Server': 'http2.html#http2_class_http2server', + 'Http2Session': 'http2.html#http2_class_http2session', + 'Http2Stream': 'http2.html#http2_class_http2stream', + 'ServerHttp2Stream': 'http2.html#http2_class_serverhttp2stream', + 'Handle': 'net.html#net_server_listen_handle_backlog_callback', + 'net.Server': 'net.html#net_class_net_server', 'net.Socket': 'net.html#net_class_net_socket', + 'os.constants.dlopen': 'os.html#os_dlopen_constants', + + 'PerformanceObserver': + 'perf_hooks.html#perf_hooks_class_performanceobserver_callback', + 'PerformanceObserverEntryList': + 'perf_hooks.html#perf_hooks_class_performanceobserverentrylist', + + 'readline.Interface': 'readline.html#readline_class_interface', + 'Stream': 'stream.html#stream_stream', + 'stream.Duplex': 'stream.html#stream_class_stream_duplex', 'stream.Readable': 'stream.html#stream_class_stream_readable', 'stream.Writable': 'stream.html#stream_class_stream_writable', - 'stream.Duplex': 'stream.html#stream_class_stream_duplex', - - 'tls.TLSSocket': 'tls.html#tls_class_tls_tlssocket', + 'Immediate': 'timers.html#timers_class_immediate', + 'Timeout': 'timers.html#timers_class_timeout', 'Timer': 'timers.html#timers_timers', + 'tls.Server': 'tls.html#tls_class_tls_server', + 
'tls.TLSSocket': 'tls.html#tls_class_tls_tlssocket', + 'URL': 'url.html#url_the_whatwg_url_api', 'URLSearchParams': 'url.html#url_class_urlsearchparams' }; diff --git a/tools/eslint-rules/crypto-check.js b/tools/eslint-rules/crypto-check.js index 9d24d3355dce7f..9570c24c030ef4 100644 --- a/tools/eslint-rules/crypto-check.js +++ b/tools/eslint-rules/crypto-check.js @@ -16,13 +16,18 @@ const utils = require('./rules-utils.js'); const msg = 'Please add a hasCrypto check to allow this test to be skipped ' + 'when Node is built "--without-ssl".'; +const cryptoModules = ['crypto', 'http2']; +const requireModules = cryptoModules.concat(['tls', 'https']); +const bindingModules = cryptoModules.concat(['tls_wrap']); + module.exports = function(context) { const missingCheckNodes = []; const requireNodes = []; var hasSkipCall = false; function testCryptoUsage(node) { - if (utils.isRequired(node, ['crypto', 'tls', 'https', 'http2'])) { + if (utils.isRequired(node, requireModules) || + utils.isBinding(node, bindingModules)) { requireNodes.push(node); } } diff --git a/tools/eslint-rules/non-ascii-character.js b/tools/eslint-rules/non-ascii-character.js new file mode 100644 index 00000000000000..e67aac7cd91e82 --- /dev/null +++ b/tools/eslint-rules/non-ascii-character.js @@ -0,0 +1,61 @@ +/** + * @fileOverview Any non-ASCII characters in lib/ will increase the size + * of the compiled node binary. This linter rule ensures that + * any such character is reported. + * @author Sarat Addepalli + */ + +'use strict'; + +//------------------------------------------------------------------------------ +// Rule Definition +//------------------------------------------------------------------------------ + +const nonAsciiRegexPattern = /[^\r\n\x20-\x7e]/; +const suggestions = { + '’': '\'', + '‛': '\'', + '‘': '\'', + '“': '"', + '‟': '"', + '”': '"', + '«': '"', + '»': '"', + '—': '-' +}; + +module.exports = (context) => { + + const reportIfError = (node, sourceCode) => { + + const matches = sourceCode.text.match(nonAsciiRegexPattern); + + if (!matches) return; + + const offendingCharacter = matches[0]; + const offendingCharacterPosition = matches.index; + const suggestion = suggestions[offendingCharacter]; + + let message = `Non-ASCII character '${offendingCharacter}' detected.`; + + message = suggestion ? + `${message} Consider replacing with: ${suggestion}` : + message; + + context.report({ + node, + message, + loc: sourceCode.getLocFromIndex(offendingCharacterPosition), + fix: (fixer) => { + return fixer.replaceText( + node, + suggestion ? `${suggestion}` : '' + ); + } + }); + }; + + return { + Program: (node) => reportIfError(node, context.getSourceCode()) + }; +}; diff --git a/tools/eslint-rules/prefer-assert-iferror.js b/tools/eslint-rules/prefer-assert-iferror.js index e15287417693e0..399ee7403a6c88 100644 --- a/tools/eslint-rules/prefer-assert-iferror.js +++ b/tools/eslint-rules/prefer-assert-iferror.js @@ -5,9 +5,12 @@ 'use strict'; +const utils = require('./rules-utils.js'); + module.exports = { create(context) { const sourceCode = context.getSourceCode(); + var assertImported = false; function hasSameTokens(nodeA, nodeB) { const aTokens = sourceCode.getTokens(nodeA); @@ -20,8 +23,15 @@ module.exports = { }); } + function checkAssertNode(node) { + if (utils.isRequired(node, ['assert'])) { + assertImported = true; + } + } + return { - IfStatement(node) { + 'CallExpression': (node) => checkAssertNode(node), + 'IfStatement': (node) => { const firstStatement = node.consequent.type === 'BlockStatement' ? 
node.consequent.body[0] : node.consequent; @@ -30,10 +40,19 @@ module.exports = { firstStatement.type === 'ThrowStatement' && hasSameTokens(node.test, firstStatement.argument) ) { + const argument = sourceCode.getText(node.test); context.report({ node: firstStatement, message: 'Use assert.ifError({{argument}}) instead.', - data: { argument: sourceCode.getText(node.test) } + data: { argument }, + fix: (fixer) => { + if (assertImported) { + return fixer.replaceText( + node, + `assert.ifError(${argument});` + ); + } + } }); } } diff --git a/tools/eslint-rules/prefer-assert-methods.js b/tools/eslint-rules/prefer-assert-methods.js index 0604fd3ed99046..2917d40de40810 100644 --- a/tools/eslint-rules/prefer-assert-methods.js +++ b/tools/eslint-rules/prefer-assert-methods.js @@ -1,3 +1,7 @@ +/** + * @fileoverview Prohibit the use of assert operators ( ===, !==, ==, != ) + */ + 'use strict'; const astSelector = 'ExpressionStatement[expression.type="CallExpression"]' + @@ -21,7 +25,19 @@ module.exports = function(context) { const arg = node.expression.arguments[0]; const assertMethod = preferedAssertMethod[arg.operator]; if (assertMethod) { - context.report(node, parseError(assertMethod, arg.operator)); + context.report({ + node, + message: parseError(assertMethod, arg.operator), + fix: (fixer) => { + const sourceCode = context.getSourceCode(); + const left = sourceCode.getText(arg.left); + const right = sourceCode.getText(arg.right); + return fixer.replaceText( + node, + `assert.${assertMethod}(${left}, ${right});` + ); + } + }); } } }; diff --git a/tools/eslint-rules/rules-utils.js b/tools/eslint-rules/rules-utils.js index f2f5428ed1cbc1..2bfab1c6399ee8 100644 --- a/tools/eslint-rules/rules-utils.js +++ b/tools/eslint-rules/rules-utils.js @@ -12,6 +12,18 @@ module.exports.isRequired = function(node, modules) { modules.includes(node.arguments[0].value); }; +/** + * Returns true if any of the passed in modules are used in + * binding calls. + */ +module.exports.isBinding = function(node, modules) { + if (node.callee.object) { + return node.callee.object.name === 'process' && + node.callee.property.name === 'binding' && + modules.includes(node.arguments[0].value); + } +}; + /** * Returns true is the node accesses any property in the properties * array on the 'common' object. 
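
For reference, a minimal sketch (not part of the patch; file name and placement are hypothetical, assuming the usual test/common layout) of the test shape the extended crypto-check rule now recognizes: a test that reaches an SSL-dependent feature through process.binding() rather than require() still needs the hasCrypto guard, and the new rules-utils isBinding() helper is what lets the rule see that call.

    'use strict';
    // sketch-crypto-binding-test.js (hypothetical name)
    const common = require('../common');

    // Without this guard, crypto-check reports the process.binding() call below,
    // because 'tls_wrap' is in bindingModules just as 'tls' is in requireModules.
    if (!common.hasCrypto)
      common.skip('missing crypto');

    // Matched by the new isBinding() helper added to rules-utils.js.
    const binding = process.binding('tls_wrap');
    console.log(typeof binding);
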
diff --git a/vcbuild.bat b/vcbuild.bat index a918757332fdb2..51509b9cf4bfb5 100644 --- a/vcbuild.bat +++ b/vcbuild.bat @@ -105,7 +105,7 @@ if /i "%1"=="build-release" set build_release=1&set sign=1&goto arg-ok if /i "%1"=="upload" set upload=1&goto arg-ok if /i "%1"=="small-icu" set i18n_arg=%1&goto arg-ok if /i "%1"=="full-icu" set i18n_arg=%1&goto arg-ok -if /i "%1"=="intl-none" set i18n_arg=%1&goto arg-ok +if /i "%1"=="intl-none" set i18n_arg=none&goto arg-ok if /i "%1"=="without-intl" set i18n_arg=none&goto arg-ok if /i "%1"=="download-all" set download_arg="--download=all"&goto arg-ok if /i "%1"=="ignore-flaky" set test_args=%test_args% --flaky-tests=dontcare&goto arg-ok @@ -161,7 +161,6 @@ if defined link_module set configure_flags=%configure_flags% %link_module% if defined i18n_arg set configure_flags=%configure_flags% --with-intl=%i18n_arg% if defined config_flags set configure_flags=%configure_flags% %config_flags% if defined target_arch set configure_flags=%configure_flags% --dest-cpu=%target_arch% -if defined TAG set configure_flags=%configure_flags% --tag=%TAG% if not exist "%~dp0deps\icu" goto no-depsicu if "%target%"=="Clean" echo deleting %~dp0deps\icu @@ -170,6 +169,8 @@ if "%target%"=="Clean" rmdir /S /Q %~dp0deps\icu call :getnodeversion || exit /b 1 +if defined TAG set configure_flags=%configure_flags% --tag=%TAG% + if "%target%"=="Clean" rmdir /Q /S "%~dp0%config%\node-v%FULLVERSION%-win-%target_arch%" > nul 2> nul if defined noprojgen if defined nobuild if not defined sign if not defined msi goto licensertf @@ -536,7 +537,7 @@ if defined lint_js_ci goto lint-js-ci if not defined lint_js goto exit if not exist tools\eslint goto no-lint echo running lint-js -%config%\node tools\eslint\bin\eslint.js --cache --rule "linebreak-style: 0" --rulesdir=tools\eslint-rules --ext=.js,.md benchmark doc lib test tools +%config%\node tools\eslint\bin\eslint.js --cache --rule "linebreak-style: 0" --rulesdir=tools\eslint-rules --ext=.js,.mjs,.md benchmark doc lib test tools goto exit :lint-js-ci
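
As a closing illustration (not part of the patch), a hedged before/after of what the new autofixers in prefer-assert-methods and prefer-assert-iferror are meant to produce when ESLint runs with --fix. The variable names here are made up, the '===' to strictEqual mapping is the conventional one implied by the rule, and prefer-assert-iferror only rewrites once it has seen require('assert') in the file.

    'use strict';
    const assert = require('assert');
    const actual = 1, expected = 1, err = null;   // placeholder values

    // Before: the patterns the two rules report.
    //   assert(actual === expected);   // prefer-assert-methods
    //   if (err) throw err;            // prefer-assert-iferror

    // After running ESLint with --fix:
    assert.strictEqual(actual, expected);
    assert.ifError(err);
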