diff --git a/.editorconfig b/.editorconfig
index 1c1b4418547481..9eda3f95b66ab0 100644
--- a/.editorconfig
+++ b/.editorconfig
@@ -36,5 +36,5 @@ end_of_line = ignore
trim_trailing_whitespace = ignore
charset = ignore
-[{test/fixtures,deps,tools/eslint,tools/gyp,tools/icu,tools/msvs}/**]
+[{test/fixtures,deps,tools/node_modules,tools/gyp,tools/icu,tools/msvs}/**]
insert_final_newline = false
diff --git a/.eslintignore b/.eslintignore
index b9b743fb536cf5..27d1707084861f 100644
--- a/.eslintignore
+++ b/.eslintignore
@@ -3,7 +3,7 @@ lib/punycode.js
test/addons/??_*
test/fixtures
test/message/esm_display_syntax_error.mjs
-tools/eslint
+tools/node_modules
tools/icu
tools/remark-*
node_modules
diff --git a/.eslintrc.yaml b/.eslintrc.yaml
index 4735778b7bbff8..0839e352cd399d 100644
--- a/.eslintrc.yaml
+++ b/.eslintrc.yaml
@@ -7,8 +7,10 @@ env:
node: true
es6: true
+parser: babel-eslint
+
parserOptions:
- ecmaVersion: 2017
+ sourceType: script
overrides:
- files: ["doc/api/esm.md", "*.mjs", "test/es-module/test-esm-example-loader.js"]
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index c0ac8800df2431..63e76a27fab182 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -13,7 +13,7 @@ Contributors guide: https://github.com/nodejs/node/blob/master/CONTRIBUTING.md
- [ ] `make -j4 test` (UNIX), or `vcbuild test` (Windows) passes
- [ ] tests and/or benchmarks are included
- [ ] documentation is changed or added
-- [ ] commit message follows [commit guidelines](https://github.com/nodejs/node/blob/master/CONTRIBUTING.md#commit-message-guidelines)
+- [ ] commit message follows [commit guidelines](https://github.com/nodejs/node/blob/master/doc/guides/contributing/pull-requests.md#commit-message-guidelines)
##### Affected core subsystem(s)
diff --git a/.gitignore b/.gitignore
index 40838bcc7fa8d7..0ff301ace3824d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,7 +2,7 @@
.*
!deps/**/.*
!test/fixtures/**/.*
-!tools/eslint/**/.*
+!tools/node_modules/**/.*
!tools/doc/node_modules/**/.*
!.editorconfig
!.eslintignore
diff --git a/BUILDING.md b/BUILDING.md
index c459a867ba6096..74b5903ed22cd0 100644
--- a/BUILDING.md
+++ b/BUILDING.md
@@ -10,7 +10,7 @@ file a new issue.
## Supported platforms
-This list of supported platforms is current as of the branch / release to
+This list of supported platforms is current as of the branch/release to
which it is attached.
### Input
@@ -33,15 +33,15 @@ Support is divided into three tiers:
### Supported platforms
-The community does not build or test against end of life distributions (EoL).
-Thus we do not recommend that you use Node on end of life or unsupported platforms
+The community does not build or test against end-of-life distributions (EoL).
+Thus we do not recommend that you use Node on end-of-life or unsupported platforms
in production.
| System | Support type | Version | Architectures | Notes |
|--------------|--------------|----------------------------------|----------------------|------------------|
| GNU/Linux | Tier 1 | kernel >= 2.6.32, glibc >= 2.12 | x64, arm, arm64 | |
| macOS | Tier 1 | >= 10.10 | x64 | |
-| Windows | Tier 1 | >= Windows 7 / 2008 R2 | x86, x64 | vs2017 |
+| Windows | Tier 1 | >= Windows 7/2008 R2 | x86, x64 | vs2017 |
| SmartOS | Tier 2 | >= 15 < 16.4 | x86, x64 | see note1 |
| FreeBSD | Tier 2 | >= 10 | x64 | |
| GNU/Linux | Tier 2 | kernel >= 3.13.0, glibc >= 2.19 | ppc64le >=power8 | |
@@ -92,16 +92,16 @@ Depending on host platform, the selection of toolchains may vary.
*Note:* All prerequisites can be easily installed by following
[this bootstrapping guide](https://github.com/nodejs/node/blob/master/tools/bootstrap/README.md).
-### Unix / macOS
+### Unix/macOS
-Prerequisites:
+#### Prerequisites
* `gcc` and `g++` 4.9.4 or newer, or
* `clang` and `clang++` 3.4.2 or newer (macOS: latest Xcode Command Line Tools)
* Python 2.6 or 2.7
* GNU Make 3.81 or newer
-On macOS you will need to install the `Xcode Command Line Tools` by running
+On macOS, you will need to install the `Xcode Command Line Tools` by running
`xcode-select --install`. Alternatively, if you already have the full Xcode
installed, you can find them under the menu `Xcode -> Open Developer Tool ->
More Developer Tools...`. This step will install `clang`, `clang++`, and
@@ -114,12 +114,14 @@ If the path to your build directory contains a space, the build will likely fail
```console
$ sudo ./tools/macosx-firewall.sh
```
-Running this script will add rules for the executable `node` in the out
+Running this script will add rules for the executable `node` in the `out`
directory and the symbolic `node` link in the project's root directory.
On FreeBSD and OpenBSD, you may also need:
* libexecinfo
+#### Building Node.js
+
To build Node.js:
```console
@@ -138,13 +140,26 @@ for more information.
Note that the above requires that `python` resolve to Python 2.6 or 2.7
and not a newer version.
-To run the tests:
+#### Running Tests
+
+To verify the build:
+
+```console
+$ make test-only
+```
+
+At this point, you are ready to make code changes and re-run the tests.
+
+If you are running tests prior to submitting a Pull Request, the recommended
+command is:
```console
$ make test
```
-At this point you are ready to make code changes and re-run the tests!
+`make test` does a full check on the codebase, including running linters and
+documentation tests.
+
Optionally, continue below.
To run the tests and generate code coverage reports:
@@ -166,6 +181,8 @@ reports:
$ make coverage-clean
```
+#### Building the documentation
+
To build the documentation:
This will build Node.js first (if necessary) and then use it to build the docs:
@@ -215,7 +232,8 @@ Prerequisites:
* **Optional** (to build the MSI): the [WiX Toolset v3.11](http://wixtoolset.org/releases/)
and the [Wix Toolset Visual Studio 2017 Extension](https://marketplace.visualstudio.com/items?itemName=RobMensching.WixToolsetVisualStudio2017Extension).
-If the path to your build directory contains a space, the build will likely fail.
+If the path to your build directory contains a space or a non-ASCII character, the
+build will likely fail.
```console
> .\vcbuild
@@ -233,7 +251,7 @@ To test if Node.js was built correctly:
> Release\node -e "console.log('Hello from Node.js', process.version)"
```
-### Android / Android-based devices (e.g. Firefox OS)
+### Android/Android-based devices (e.g. Firefox OS)
Although these instructions for building on Android are provided, please note
that Android is not an officially supported platform at this time. Patches to
@@ -273,7 +291,7 @@ With the `--download=all`, this may download ICU if you don't have an
ICU in `deps/icu`. (The embedded `small-icu` included in the default
Node.js source does not include all locales.)
-##### Unix / macOS:
+##### Unix/macOS:
```console
$ ./configure --with-intl=full-icu --download=all
@@ -290,7 +308,7 @@ $ ./configure --with-intl=full-icu --download=all
The `Intl` object will not be available, nor some other APIs such as
`String.normalize`.
-##### Unix / macOS:
+##### Unix/macOS:
```console
$ ./configure --without-intl
@@ -302,7 +320,7 @@ $ ./configure --without-intl
> .\vcbuild without-intl
```
-#### Use existing installed ICU (Unix / macOS only):
+#### Use existing installed ICU (Unix/macOS only):
```console
$ pkg-config --modversion icu-i18n && ./configure --with-intl=system-icu
@@ -318,7 +336,7 @@ You can find other ICU releases at
Download the file named something like `icu4c-**##.#**-src.tgz` (or
`.zip`).
-##### Unix / macOS
+##### Unix/macOS
From an already-unpacked ICU:
```console
@@ -366,7 +384,7 @@ and [user guide](https://openssl.org/docs/fips/UserGuide-2.0.pdf).
through which you get the file complies with the requirements
for a "secure installation" as described in section 6.6 in
the [user guide](https://openssl.org/docs/fips/UserGuide-2.0.pdf).
- For evaluation/experimentation you can simply download and verify
+ For evaluation/experimentation, you can simply download and verify
`openssl-fips-x.x.x.tar.gz` from https://www.openssl.org/source/
2. Extract source to `openssl-fips` folder and `cd openssl-fips`
3. `./config`
@@ -385,3 +403,26 @@ and [user guide](https://openssl.org/docs/fips/UserGuide-2.0.pdf).
`/usr/local/ssl/fips-2.0`
8. Build Node.js with `make -j`
9. Verify with `node -p "process.versions.openssl"` (for example `1.0.2a-fips`)
+
+## Building Node.js with external core modules
+
+It is possible to specify one or more JavaScript text files to be bundled in
+the binary as builtin modules when building Node.js.
+
+### Unix/macOS
+
+This command will make `/root/myModule.js` available via
+`require('/root/myModule')` and `./myModule2.js` available via
+`require('myModule2')`.
+
+```console
+$ ./configure --link-module '/root/myModule.js' --link-module './myModule2.js'
+```
+
+### Windows
+
+This command will make `./myCustomModule.js` available via
+`require('myCustomModule')`.
+
+```console
+> .\vcbuild link-module './myCustomModule.js'
+```
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 6ba104e7f77801..c6c8277ec86836 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -29,7 +29,8 @@ release.
-9.4.0
+9.5.0
+9.4.0
9.3.0
9.2.1
9.2.0
diff --git a/COLLABORATOR_GUIDE.md b/COLLABORATOR_GUIDE.md
index 2569570b8d03bd..5a1d51dafe375c 100644
--- a/COLLABORATOR_GUIDE.md
+++ b/COLLABORATOR_GUIDE.md
@@ -127,12 +127,14 @@ Before landing pull requests, sufficient time should be left for input
from other Collaborators. In general, leave at least 48 hours during the
week and 72 hours over weekends to account for international time
differences and work schedules. However, certain types of pull requests
-can be fast-tracked and may be landed after a shorter delay:
+can be fast-tracked and may be landed after a shorter delay. For example:
-* Focused changes that affect only documentation and/or the test suite.
- `code-and-learn` and `good-first-issue` pull requests typically fall
- into this category.
-* Changes that fix regressions.
+* Focused changes that affect only documentation and/or the test suite:
+ * `code-and-learn` tasks typically fall into this category.
+ * `good-first-issue` pull requests may also be suitable.
+* Changes that fix regressions:
+ * Regressions that break the workflow (red CI or broken compilation).
+  * Regressions that happen right before a release, or that are reported soon
+    after.
When a pull request is deemed suitable to be fast-tracked, label it with
`fast-track`. The pull request can be landed once 2 or more Collaborators
@@ -435,7 +437,7 @@ The TSC should serve as the final arbiter where required.
author when squashing.
Review the commit message to ensure that it adheres to the guidelines outlined
-in the [contributing](./CONTRIBUTING.md#commit-message-guidelines) guide.
+in the [contributing](./doc/guides/contributing/pull-requests.md#commit-message-guidelines) guide.
Add all necessary [metadata](#metadata) to commit messages before landing.
@@ -465,7 +467,7 @@ $ git checkout master
```
Update the tree (assumes your repo is set up as detailed in
-[CONTRIBUTING.md](CONTRIBUTING.md#step-1-fork)):
+[CONTRIBUTING.md](./doc/guides/contributing/pull-requests.md#step-1-fork)):
```text
$ git fetch upstream
@@ -560,7 +562,7 @@ commit logs, ensure that they are properly formatted, and add
`Reviewed-By` lines.
* The commit message text must conform to the
-[commit message guidelines](./CONTRIBUTING.md#commit-message-guidelines).
+[commit message guidelines](./doc/guides/contributing/pull-requests.md#commit-message-guidelines).
* Modify the original commit message to include additional metadata regarding
@@ -621,7 +623,7 @@ error: failed to push some refs to 'https://github.com/nodejs/node'
hint: Updates were rejected because the remote contains work that you do
hint: not have locally. This is usually caused by another repository pushing
hint: to the same ref. You may want to first integrate the remote changes
-hint: (e.g., 'git pull ...') before pushing again.
+hint: (e.g. 'git pull ...') before pushing again.
hint: See the 'Note about fast-forwards' in 'git push --help' for details.
```
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index b1cc67ada553ff..cb8d20e90d005b 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -11,821 +11,37 @@ small and all contributions are valued.
This guide explains the process for contributing to the Node.js project's core
`nodejs/node` GitHub Repository and describes what to expect at each step.
-* [Code of Conduct](#code-of-conduct)
- * [Bad Actors](#bad-actors)
-* [Issues](#issues)
- * [Asking for General Help](#asking-for-general-help)
- * [Discussing non-technical topics](#discussing-non-technical-topics)
- * [Submitting a Bug Report](#submitting-a-bug-report)
- * [Triaging a Bug Report](#triaging-a-bug-report)
- * [Resolving a Bug Report](#resolving-a-bug-report)
-* [Pull Requests](#pull-requests)
- * [Dependencies](#dependencies)
- * [Setting up your local environment](#setting-up-your-local-environment)
- * [Step 1: Fork](#step-1-fork)
- * [Step 2: Branch](#step-2-branch)
- * [The Process of Making Changes](#the-process-of-making-changes)
- * [Step 3: Code](#step-3-code)
- * [Step 4: Commit](#step-4-commit)
- * [Commit message guidelines](#commit-message-guidelines)
- * [Step 5: Rebase](#step-5-rebase)
- * [Step 6: Test](#step-6-test)
- * [Test Coverage](#test-coverage)
- * [Step 7: Push](#step-7-push)
- * [Step 8: Opening the Pull Request](#step-8-opening-the-pull-request)
- * [Step 9: Discuss and Update](#step-9-discuss-and-update)
- * [Approval and Request Changes Workflow](#approval-and-request-changes-workflow)
- * [Step 10: Landing](#step-10-landing)
- * [Reviewing Pull Requests](#reviewing-pull-requests)
- * [Review a bit at a time](#review-a-bit-at-a-time)
- * [Be aware of the person behind the code](#be-aware-of-the-person-behind-the-code)
- * [Respect the minimum wait time for comments](#respect-the-minimum-wait-time-for-comments)
- * [Abandoned or Stalled Pull Requests](#abandoned-or-stalled-pull-requests)
- * [Approving a change](#approving-a-change)
- * [Accept that there are different opinions about what belongs in Node.js](#accept-that-there-are-different-opinions-about-what-belongs-in-nodejs)
- * [Performance is not everything](#performance-is-not-everything)
- * [Continuous Integration Testing](#continuous-integration-testing)
-* [Additional Notes](#additional-notes)
- * [Commit Squashing](#commit-squashing)
- * [Getting Approvals for your Pull Request](#getting-approvals-for-your-pull-request)
- * [CI Testing](#ci-testing)
- * [Waiting Until the Pull Request Gets Landed](#waiting-until-the-pull-request-gets-landed)
- * [Check Out the Collaborator's Guide](#check-out-the-collaborators-guide)
- * [Helpful Resources](#helpful-resources)
-* [Developer's Certificate of Origin 1.1](#developers-certificate-of-origin-11)
+## [Code of Conduct](./doc/guides/contributing/coc.md)
-## Code of Conduct
+The Node.js project has a
+[Code of Conduct](https://github.com/nodejs/admin/blob/master/CODE_OF_CONDUCT.md)
+that *all* contributors are expected to follow. This code describes the
+*minimum* behavior expectations for all contributors.
-The Node.js project has a [Code of Conduct][] that *all* contributors are
-expected to follow. This code describes the *minimum* behavior expectations
-for all contributors.
+See [details on our policy on Code of Conduct](./doc/guides/contributing/coc.md).
-As a contributor to Node.js, how you choose to act and interact towards your
-fellow contributors, as well as to the community, will reflect back not only
-on yourself but on the project as a whole. The Code of Conduct is designed and
-intended, above all else, to help establish a culture within the project that
-allows anyone and everyone who wants to contribute to feel safe doing so.
-
-Should any individual act in any way that is considered in violation of the
-[Code of Conduct][], corrective actions will be taken. It is possible, however,
-for any individual to *act* in such a manner that is not in violation of the
-strict letter of the Code of Conduct guidelines while still going completely
-against the spirit of what that Code is intended to accomplish.
-
-Open, diverse, and inclusive communities live and die on the basis of trust.
-Contributors can disagree with one another so long as they trust that those
-disagreements are in good faith and everyone is working towards a common goal.
-
-### Bad actors
-
-All contributors to Node.js tacitly agree to abide by both the letter and
-spirit of the [Code of Conduct][]. Failure, or unwillingness, to do so will
-result in contributions being respectfully declined.
-
-A *bad actor* is someone who repeatedly violates the *spirit* of the Code of
-Conduct through consistent failure to self-regulate the way in which they
-interact with other contributors in the project. In doing so, bad actors
-alienate other contributors, discourage collaboration, and generally reflect
-poorly on the project as a whole.
-
-Being a bad actor may be intentional or unintentional. Typically, unintentional
-bad behavior can be easily corrected by being quick to apologize and correct
-course *even if you are not entirely convinced you need to*. Giving other
-contributors the benefit of the doubt and having a sincere willingness to admit
-that you *might* be wrong is critical for any successful open collaboration.
-
-Don't be a bad actor.
-
-## Issues
+## [Issues](./doc/guides/contributing/issues.md)
Issues in `nodejs/node` are the primary means by which bug reports and
-general discussions are made. For any issue, there are fundamentally three
-ways an individual can contribute:
-
-1. By opening the issue for discussion: For instance, if you believe that you
- have uncovered a bug in Node.js, creating a new issue in the `nodejs/node`
- issue tracker is the way to report it.
-2. By helping to triage the issue: This can be done either by providing
- supporting details (a test case that demonstrates a bug), or providing
- suggestions on how to address the issue.
-3. By helping to resolve the issue: Typically this is done either in the form
- of demonstrating that the issue reported is not a problem after all, or more
- often, by opening a Pull Request that changes some bit of something in
- `nodejs/node` in a concrete and reviewable manner.
-
-### Asking for General Help
-
-Because the level of activity in the `nodejs/node` repository is so high,
-questions or requests for general help using Node.js should be directed at
-the [Node.js help repository][].
-
-### Discussing non-technical topics
-
-Discussion of non-technical topics (such as intellectual property and trademark)
-should be directed to the [Technical Steering Committee (TSC) repository][].
-
-### Submitting a Bug Report
-
-When opening a new issue in the `nodejs/node` issue tracker, users will be
-presented with a basic template that should be filled in.
-
-```markdown
-
-
-* **Version**:
-* **Platform**:
-* **Subsystem**:
-
-
-```
-
-If you believe that you have uncovered a bug in Node.js, please fill out this
-form, following the template to the best of your ability. Do not worry if you
-cannot answer every detail, just fill in what you can.
-
-The two most important pieces of information we need in order to properly
-evaluate the report is a description of the behavior you are seeing and a simple
-test case we can use to recreate the problem on our own. If we cannot recreate
-the issue, it becomes impossible for us to fix.
+general discussions are made.
-In order to rule out the possibility of bugs introduced by userland code, test
-cases should be limited, as much as possible, to using *only* Node.js APIs.
-If the bug occurs only when you're using a specific userland module, there is
-a very good chance that either (a) the module has a bug or (b) something in
-Node.js changed that broke the module.
+* [How to Contribute in Issues](./doc/guides/contributing/issues.md#how-to-contribute-in-issues)
+* [Asking for General Help](./doc/guides/contributing/issues.md#asking-for-general-help)
+* [Discussing non-technical topics](./doc/guides/contributing/issues.md#discussing-non-technical-topics)
+* [Submitting a Bug Report](./doc/guides/contributing/issues.md#submitting-a-bug-report)
+* [Triaging a Bug Report](./doc/guides/contributing/issues.md#triaging-a-bug-report)
+* [Resolving a Bug Report](./doc/guides/contributing/issues.md#resolving-a-bug-report)
-### Triaging a Bug Report
-
-Once an issue has been opened, it is not uncommon for there to be discussion
-around it. Some contributors may have differing opinions about the issue,
-including whether the behavior being seen is a bug or a feature. This discussion
-is part of the process and should be kept focused, helpful, and professional.
-
-Short, clipped responses—that provide neither additional context nor supporting
-detail—are not helpful or professional. To many, such responses are simply
-annoying and unfriendly.
-
-Contributors are encouraged to help one another make forward progress as much
-as possible, empowering one another to solve issues collaboratively. If you
-choose to comment on an issue that you feel either is not a problem that needs
-to be fixed, or if you encounter information in an issue that you feel is
-incorrect, explain *why* you feel that way with additional supporting context,
-and be willing to be convinced that you may be wrong. By doing so, we can often
-reach the correct outcome much faster.
-
-### Resolving a Bug Report
-
-In the vast majority of cases, issues are resolved by opening a Pull Request.
-The process for opening and reviewing a Pull Request is similar to that of
-opening and triaging issues, but carries with it a necessary review and approval
-workflow that ensures that the proposed changes meet the minimal quality and
-functional guidelines of the Node.js project.
-
-## Pull Requests
+## [Pull Requests](./doc/guides/contributing/pull-requests.md)
Pull Requests are the way concrete changes are made to the code, documentation,
dependencies, and tools contained in the `nodejs/node` repository.
-There are two fundamental components of the Pull Request process: one concrete
-and technical, and one more process oriented. The concrete and technical
-component involves the specific details of setting up your local environment
-so that you can make the actual changes. This is where we will start.
-
-### Dependencies
-
-Node.js has several bundled dependencies in the *deps/* and the *tools/*
-directories that are not part of the project proper. Changes to files in those
-directories should be sent to their respective projects. Do not send a patch to
-Node.js. We cannot accept such patches.
-
-In case of doubt, open an issue in the
-[issue tracker](https://github.com/nodejs/node/issues/) or contact one of the
-[project Collaborators](https://github.com/nodejs/node/#current-project-team-members).
-Node.js has two IRC channels:
-[#Node.js](https://webchat.freenode.net/?channels=node.js) for general help and
-questions, and
-[#Node-dev](https://webchat.freenode.net/?channels=node-dev) for development of
-Node.js core specifically.
-
-### Setting up your local environment
-
-To get started, you will need to have `git` installed locally. Depending on
-your operating system, there are also a number of other dependencies required.
-These are detailed in the [Building guide][].
-
-Once you have `git` and are sure you have all of the necessary dependencies,
-it's time to create a fork.
-
-Before getting started, it is recommended to configure `git` so that it knows
-who you are:
-
-```text
-$ git config --global user.name "J. Random User"
-$ git config --global user.email "j.random.user@example.com"
-```
-Please make sure this local email is also added to your
-[GitHub email list](https://github.com/settings/emails) so that your commits
-will be properly associated with your account and you will be promoted
-to Contributor once your first commit is landed.
-
-#### Step 1: Fork
-
-Fork the project [on GitHub](https://github.com/nodejs/node) and clone your fork
-locally.
-
-```text
-$ git clone git@github.com:username/node.git
-$ cd node
-$ git remote add upstream https://github.com/nodejs/node.git
-$ git fetch upstream
-```
-
-#### Step 2: Branch
-
-As a best practice to keep your development environment as organized as
-possible, create local branches to work within. These should also be created
-directly off of the `master` branch.
-
-```text
-$ git checkout -b my-branch -t upstream/master
-```
-
-### The Process of Making Changes
-
-#### Step 3: Code
-
-The vast majority of Pull Requests opened against the `nodejs/node`
-repository includes changes to either the C/C++ code contained in the `src`
-directory, the JavaScript code contained in the `lib` directory, the
-documentation in `docs/api` or tests within the `test` directory.
-
-If you are modifying code, please be sure to run `make lint` from time to
-time to ensure that the changes follow the Node.js code style guide.
-
-Any documentation you write (including code comments and API documentation)
-should follow the [Style Guide](doc/STYLE_GUIDE.md). Code samples included
-in the API docs will also be checked when running `make lint` (or
-`vcbuild.bat lint` on Windows).
-
-For contributing C++ code, you may want to look at the
-[C++ Style Guide](CPP_STYLE_GUIDE.md).
-
-#### Step 4: Commit
-
-It is a recommended best practice to keep your changes as logically grouped
-as possible within individual commits. There is no limit to the number of
-commits any single Pull Request may have, and many contributors find it easier
-to review changes that are split across multiple commits.
-
-```text
-$ git add my/changed/files
-$ git commit
-```
-
-Note that multiple commits often get squashed when they are landed (see the
-notes about [commit squashing](#commit-squashing)).
-
-##### Commit message guidelines
-
-A good commit message should describe what changed and why.
-
-1. The first line should:
- - contain a short description of the change (preferably 50 characters or less,
- and no more than 72 characters)
- - be entirely in lowercase with the exception of proper nouns, acronyms, and
- the words that refer to code, like function/variable names
- - be prefixed with the name of the changed subsystem and start with an
- imperative verb. Check the output of `git log --oneline files/you/changed` to
- find out what subsystems your changes touch.
-
- Examples:
- - `net: add localAddress and localPort to Socket`
- - `src: fix typos in node_lttng_provider.h`
-
-
-2. Keep the second line blank.
-3. Wrap all other lines at 72 columns.
-
-4. If your patch fixes an open issue, you can add a reference to it at the end
-of the log. Use the `Fixes:` prefix and the full issue URL. For other references
-use `Refs:`.
-
- Examples:
- - `Fixes: https://github.com/nodejs/node/issues/1337`
- - `Refs: http://eslint.org/docs/rules/space-in-parens.html`
- - `Refs: https://github.com/nodejs/node/pull/3615`
-
-5. If your commit introduces a breaking change (`semver-major`), it should
-contain an explanation about the reason of the breaking change, which
-situation would trigger the breaking change and what is the exact change.
-
-Breaking changes will be listed in the wiki with the aim to make upgrading
-easier. Please have a look at [Breaking Changes](https://github.com/nodejs/node/wiki/Breaking-changes-between-v4-LTS-and-v6-LTS)
-for the level of detail that's suitable.
-
-Sample complete commit message:
-
-```txt
-subsystem: explain the commit in one line
-
-Body of commit message is a few lines of text, explaining things
-in more detail, possibly giving some background about the issue
-being fixed, etc.
-
-The body of the commit message can be several paragraphs, and
-please do proper word-wrap and keep columns shorter than about
-72 characters or so. That way, `git log` will show things
-nicely even when it is indented.
-
-Fixes: https://github.com/nodejs/node/issues/1337
-Refs: http://eslint.org/docs/rules/space-in-parens.html
-```
-
-If you are new to contributing to Node.js, please try to do your best at
-conforming to these guidelines, but do not worry if you get something wrong.
-One of the existing contributors will help get things situated and the
-contributor landing the Pull Request will ensure that everything follows
-the project guidelines.
-
-#### Step 5: Rebase
-
-As a best practice, once you have committed your changes, it is a good idea
-to use `git rebase` (not `git merge`) to synchronize your work with the main
-repository.
-
-```text
-$ git fetch upstream
-$ git rebase upstream/master
-```
-
-This ensures that your working branch has the latest changes from `nodejs/node`
-master.
-
-#### Step 6: Test
-
-Bug fixes and features should always come with tests. A
-[guide for writing tests in Node.js](./doc/guides/writing-tests.md) has been
-provided to make the process easier. Looking at other tests to see how they
-should be structured can also help.
-
-The `test` directory within the `nodejs/node` repository is complex and it is
-often not clear where a new test file should go. When in doubt, add new tests
-to the `test/parallel/` directory and the right location will be sorted out
-later.
-
-Before submitting your changes in a Pull Request, always run the full Node.js
-test suite. To run the tests (including code linting) on Unix / macOS:
-
-```text
-$ ./configure && make -j4 test
-```
-
-And on Windows:
-
-```text
-> vcbuild test
-```
-
-(See the [BUILDING.md](./BUILDING.md) for more details.)
-
-Make sure the linter does not report any issues and that all tests pass. Please
-do not submit patches that fail either check.
-
-If you want to run the linter without running tests, use
-`make lint`/`vcbuild lint`. It will run both JavaScript linting and
-C++ linting.
-
-If you are updating tests and just want to run a single test to check it:
-
-```text
-$ python tools/test.py -J --mode=release parallel/test-stream2-transform
-```
-
-You can execute the entire suite of tests for a given subsystem
-by providing the name of a subsystem:
-
-```text
-$ python tools/test.py -J --mode=release child-process
-```
-
-If you want to check the other options, please refer to the help by using
-the `--help` option
-
-```text
-$ python tools/test.py --help
-```
-
-You can usually run tests directly with node:
-
-```text
-$ ./node ./test/parallel/test-stream2-transform.js
-```
-
-Remember to recompile with `make -j4` in between test runs if you change code in
-the `lib` or `src` directories.
-
-##### Test Coverage
-
-It's good practice to ensure any code you add or change is covered by tests.
-You can do so by running the test suite with coverage enabled:
-
-```text
-$ ./configure --coverage && make coverage
-```
-
-A detailed coverage report will be written to `coverage/index.html` for
-JavaScript coverage and to `coverage/cxxcoverage.html` for C++ coverage.
-
-_Note that generating a test coverage report can take several minutes._
-
-To collect coverage for a subset of tests you can set the `CI_JS_SUITES` and
-`CI_NATIVE_SUITES` variables:
-
-```text
-$ CI_JS_SUITES=child-process CI_NATIVE_SUITES= make coverage
-```
-
-The above command executes tests for the `child-process` subsystem and
-outputs the resulting coverage report.
-
-Running tests with coverage will create and modify several directories
-and files. To clean up afterwards, run:
-
-```text
-make coverage-clean
-./configure && make -j4.
-```
-
-#### Step 7: Push
-
-Once you are sure your commits are ready to go, with passing tests and linting,
-begin the process of opening a Pull Request by pushing your working branch to
-your fork on GitHub.
-
-```text
-$ git push origin my-branch
-```
-
-#### Step 8: Opening the Pull Request
-
-From within GitHub, opening a new Pull Request will present you with a template
-that should be filled out:
-
-```markdown
-
-
-##### Checklist
-
-
-- [ ] `make -j4 test` (UNIX), or `vcbuild test` (Windows) passes
-- [ ] tests and/or benchmarks are included
-- [ ] documentation is changed or added
-- [ ] commit message follows [commit guidelines](https://github.com/nodejs/node/blob/master/CONTRIBUTING.md#commit-message-guidelines)
-
-##### Affected core subsystem(s)
-
-```
-
-Please try to do your best at filling out the details, but feel free to skip
-parts if you're not sure what to put.
-
-Once opened, Pull Requests are usually reviewed within a few days.
-
-#### Step 9: Discuss and update
-
-You will probably get feedback or requests for changes to your Pull Request.
-This is a big part of the submission process so don't be discouraged! Some
-contributors may sign off on the Pull Request right away, others may have
-more detailed comments or feedback. This is a necessary part of the process
-in order to evaluate whether the changes are correct and necessary.
-
-To make changes to an existing Pull Request, make the changes to your local
-branch, add a new commit with those changes, and push those to your fork.
-GitHub will automatically update the Pull Request.
-
-```text
-$ git add my/changed/files
-$ git commit
-$ git push origin my-branch
-```
-
-It is also frequently necessary to synchronize your Pull Request with other
-changes that have landed in `master` by using `git rebase`:
-
-```text
-$ git fetch --all
-$ git rebase origin/master
-$ git push --force-with-lease origin my-branch
-```
-
-**Important:** The `git push --force-with-lease` command is one of the few ways
-to delete history in `git`. Before you use it, make sure you understand the
-risks. If in doubt, you can always ask for guidance in the Pull Request or on
-[IRC in the #node-dev channel][].
-
-If you happen to make a mistake in any of your commits, do not worry. You can
-amend the last commit (for example if you want to change the commit log).
-
-```text
-$ git add any/changed/files
-$ git commit --amend
-$ git push --force-with-lease origin my-branch
-```
-
-There are a number of more advanced mechanisms for managing commits using
-`git rebase` that can be used, but are beyond the scope of this guide.
-
-Feel free to post a comment in the Pull Request to ping reviewers if you are
-awaiting an answer on something. If you encounter words or acronyms that
-seem unfamiliar, refer to this
-[glossary](https://sites.google.com/a/chromium.org/dev/glossary).
-
-##### Approval and Request Changes Workflow
-
-All Pull Requests require "sign off" in order to land. Whenever a contributor
-reviews a Pull Request they may find specific details that they would like to
-see changed or fixed. These may be as simple as fixing a typo, or may involve
-substantive changes to the code you have written. In general, such requests
-are intended to be helpful, but at times may come across as abrupt or unhelpful,
-especially requests to change things that do not include concrete suggestions
-on *how* to change them.
-
-Try not to be discouraged. If you feel that a particular review is unfair,
-say so, or contact one of the other contributors in the project and seek their
-input. Often such comments are the result of the reviewer having only taken a
-short amount of time to review and are not ill-intended. Such issues can often
-be resolved with a bit of patience. That said, reviewers should be expected to
-be helpful in their feedback, and feedback that is simply vague, dismissive and
-unhelpful is likely safe to ignore.
-
-#### Step 10: Landing
-
-In order to land, a Pull Request needs to be reviewed and [approved][] by
-at least one Node.js Collaborator and pass a
-[CI (Continuous Integration) test run][]. After that, as long as there are no
-objections from other contributors, the Pull Request can be merged. If you find
-your Pull Request waiting longer than you expect, see the
-[notes about the waiting time](#waiting-until-the-pull-request-gets-landed).
-
-When a collaborator lands your Pull Request, they will post
-a comment to the Pull Request page mentioning the commit(s) it
-landed as. GitHub often shows the Pull Request as `Closed` at this
-point, but don't worry. If you look at the branch you raised your
-Pull Request against (probably `master`), you should see a commit with
-your name on it. Congratulations and thanks for your contribution!
-
-### Reviewing Pull Requests
-
-All Node.js contributors who choose to review and provide feedback on Pull
-Requests have a responsibility to both the project and the individual making the
-contribution. Reviews and feedback must be helpful, insightful, and geared
-towards improving the contribution as opposed to simply blocking it. If there
-are reasons why you feel the PR should not land, explain what those are. Do not
-expect to be able to block a Pull Request from advancing simply because you say
-"No" without giving an explanation. Be open to having your mind changed. Be open
-to working with the contributor to make the Pull Request better.
-
-Reviews that are dismissive or disrespectful of the contributor or any other
-reviewers are strictly counter to the [Code of Conduct][].
-
-When reviewing a Pull Request, the primary goals are for the codebase to improve
-and for the person submitting the request to succeed. Even if a Pull Request
-does not land, the submitters should come away from the experience feeling like
-their effort was not wasted or unappreciated. Every Pull Request from a new
-contributor is an opportunity to grow the community.
-
-#### Review a bit at a time.
-
-Do not overwhelm new contributors.
-
-It is tempting to micro-optimize and make everything about relative performance,
-perfect grammar, or exact style matches. Do not succumb to that temptation.
-
-Focus first on the most significant aspects of the change:
-
-1. Does this change make sense for Node.js?
-2. Does this change make Node.js better, even if only incrementally?
-3. Are there clear bugs or larger scale issues that need attending to?
-4. Is the commit message readable and correct? If it contains a breaking change is it clear enough?
-
-When changes are necessary, *request* them, do not *demand* them, and do not
-assume that the submitter already knows how to add a test or run a benchmark.
-
-Specific performance optimization techniques, coding styles and conventions
-change over time. The first impression you give to a new contributor never does.
-
-Nits (requests for small changes that are not essential) are fine, but try to
-avoid stalling the Pull Request. Most nits can typically be fixed by the
-Node.js Collaborator landing the Pull Request but they can also be an
-opportunity for the contributor to learn a bit more about the project.
-
-It is always good to clearly indicate nits when you comment: e.g.
-`Nit: change foo() to bar(). But this is not blocking.`
-
-#### Be aware of the person behind the code
-
-Be aware that *how* you communicate requests and reviews in your feedback can
-have a significant impact on the success of the Pull Request. Yes, we may land
-a particular change that makes Node.js better, but the individual might just
-not want to have anything to do with Node.js ever again. The goal is not just
-having good code.
-
-#### Respect the minimum wait time for comments
-
-There is a minimum waiting time which we try to respect for non-trivial
-changes, so that people who may have important input in such a distributed
-project are able to respond.
-
-For non-trivial changes, Pull Requests must be left open for *at least* 48
-hours during the week, and 72 hours on a weekend. In most cases, when the
-PR is relatively small and focused on a narrow set of changes, these periods
-provide more than enough time to adequately review. Sometimes changes take far
-longer to review, or need more specialized review from subject matter experts.
-When in doubt, do not rush.
-
-Trivial changes, typically limited to small formatting changes or fixes to
-documentation, may be landed within the minimum 48 hour window.
-
-#### Abandoned or Stalled Pull Requests
-
-If a Pull Request appears to be abandoned or stalled, it is polite to first
-check with the contributor to see if they intend to continue the work before
-checking if they would mind if you took it over (especially if it just has
-nits left). When doing so, it is courteous to give the original contributor
-credit for the work they started (either by preserving their name and email
-address in the commit log, or by using an `Author: ` meta-data tag in the
-commit.
-
-#### Approving a change
-
-Any Node.js core Collaborator (any GitHub user with commit rights in the
-`nodejs/node` repository) is authorized to approve any other contributor's
-work. Collaborators are not permitted to approve their own Pull Requests.
-
-Collaborators indicate that they have reviewed and approve of the changes in
-a Pull Request either by using GitHub's Approval Workflow, which is preferred,
-or by leaving an `LGTM` ("Looks Good To Me") comment.
-
-When explicitly using the "Changes requested" component of the GitHub Approval
-Workflow, show empathy. That is, do not be rude or abrupt with your feedback
-and offer concrete suggestions for improvement, if possible. If you're not
-sure *how* a particular change can be improved, say so.
-
-Most importantly, after leaving such requests, it is courteous to make yourself
-available later to check whether your comments have been addressed.
-
-If you see that requested changes have been made, you can clear another
-collaborator's `Changes requested` review.
-
-Change requests that are vague, dismissive, or unconstructive may also be
-dismissed if requests for greater clarification go unanswered within a
-reasonable period of time.
-
-If you do not believe that the Pull Request should land at all, use
-`Changes requested` to indicate that you are considering some of your comments
-to block the PR from landing. When doing so, explain *why* you believe the
-Pull Request should not land along with an explanation of what may be an
-acceptable alternative course, if any.
-
-#### Accept that there are different opinions about what belongs in Node.js
-
-Opinions on this vary, even among the members of the Technical Steering
-Committee.
-
-One general rule of thumb is that if Node.js itself needs it (due to historic
-or functional reasons), then it belongs in Node.js. For instance, `url`
-parsing is in Node.js because of HTTP protocol support.
-
-Also, functionality that either cannot be implemented outside of core in any
-reasonable way, or only with significant pain.
-
-It is not uncommon for contributors to suggest new features they feel would
-make Node.js better. These may or may not make sense to add, but as with all
-changes, be courteous in how you communicate your stance on these. Comments
-that make the contributor feel like they should have "known better" or
-ridiculed for even trying run counter to the [Code of Conduct][].
-
-#### Performance is not everything
-
-Node.js has always optimized for speed of execution. If a particular change
-can be shown to make some part of Node.js faster, it's quite likely to be
-accepted. Claims that a particular Pull Request will make things faster will
-almost always be met by requests for performance [benchmark results][] that
-demonstrate the improvement.
-
-That said, performance is not the only factor to consider. Node.js also
-optimizes in favor of not breaking existing code in the ecosystem, and not
-changing working functional code just for the sake of changing.
-
-If a particular Pull Request introduces a performance or functional
-regression, rather than simply rejecting the Pull Request, take the time to
-work *with* the contributor on improving the change. Offer feedback and
-advice on what would make the Pull Request acceptable, and do not assume that
-the contributor should already know how to do that. Be explicit in your
-feedback.
-
-#### Continuous Integration Testing
-
-All Pull Requests that contain changes to code must be run through
-continuous integration (CI) testing at [https://ci.nodejs.org/][].
-
-Only Node.js core Collaborators with commit rights to the `nodejs/node`
-repository may start a CI testing run. The specific details of how to do
-this are included in the new Collaborator [Onboarding guide][].
-
-Ideally, the code change will pass ("be green") on all platform configurations
-supported by Node.js (there are over 30 platform configurations currently).
-This means that all tests pass and there are no linting errors. In reality,
-however, it is not uncommon for the CI infrastructure itself to fail on
-specific platforms or for so-called "flaky" tests to fail ("be red"). It is
-vital to visually inspect the results of all failed ("red") tests to determine
-whether the failure was caused by the changes in the Pull Request.
-
-## Additional Notes
-
-### Commit Squashing
-
-When the commits in your Pull Request land, they may be squashed
-into one commit per logical change. Metadata will be added to the commit
-message (including links to the Pull Request, links to relevant issues,
-and the names of the reviewers). The commit history of your Pull Request,
-however, will stay intact on the Pull Request page.
-
-For the size of "one logical change",
-[0b5191f](https://github.com/nodejs/node/commit/0b5191f15d0f311c804d542b67e2e922d98834f8)
-can be a good example. It touches the implementation, the documentation,
-and the tests, but is still one logical change. In general, the tests should
-always pass when each individual commit lands on the master branch.
-
-### Getting Approvals for Your Pull Request
-
-A Pull Request is approved either by saying LGTM, which stands for
-"Looks Good To Me", or by using GitHub's Approve button.
-GitHub's Pull Request review feature can be used during the process.
-For more information, check out
-[the video tutorial](https://www.youtube.com/watch?v=HW0RPaJqm4g)
-or [the official documentation](https://help.github.com/articles/reviewing-changes-in-pull-requests/).
-
-After you push new changes to your branch, you need to get
-approval for these new changes again, even if GitHub shows "Approved"
-because the reviewers have hit the buttons before.
-
-### CI Testing
-
-Every Pull Request needs to be tested
-to make sure that it works on the platforms that Node.js
-supports. This is done by running the code through the CI system.
-
-Only a Collaborator can start a CI run. Usually one of them will do it
-for you as approvals for the Pull Request come in.
-If not, you can ask a Collaborator to start a CI run.
-
-### Waiting Until the Pull Request Gets Landed
-
-A Pull Request needs to stay open for at least 48 hours (72 hours on a
-weekend) from when it is submitted, even after it gets approved and
-passes the CI. This is to make sure that everyone has a chance to
-weigh in. If the changes are trivial, collaborators may decide it
-doesn't need to wait. A Pull Request may well take longer to be
-merged in. All these precautions are important because Node.js is
-widely used, so don't be discouraged!
-
-### Check Out the Collaborator's Guide
-
-If you want to know more about the code review and the landing process,
-you can take a look at the
-[collaborator's guide](https://github.com/nodejs/node/blob/master/COLLABORATOR_GUIDE.md).
-
-### Helpful Resources
-
-The following additional resources may be of assistance:
-
-* [How to create a Minimal, Complete, and Verifiable example](https://stackoverflow.com/help/mcve)
-* [core-validate-commit](https://github.com/evanlucas/core-validate-commit) -
- A utility that ensures commits follow the commit formatting guidelines.
+* [Dependencies](./doc/guides/contributing/pull-requests.md#dependencies)
+* [Setting up your local environment](./doc/guides/contributing/pull-requests.md#setting-up-your-local-environment)
+* [The Process of Making Changes](./doc/guides/contributing/pull-requests.md#the-process-of-making-changes)
+* [Reviewing Pull Requests](./doc/guides/contributing/pull-requests.md#reviewing-pull-requests)
+* [Additional Notes](./doc/guides/contributing/pull-requests.md#additional-notes)
## Developer's Certificate of Origin 1.1
@@ -853,14 +69,3 @@ By making a contribution to this project, I certify that:
personal information I submit with it, including my sign-off) is
maintained indefinitely and may be redistributed consistent with
this project or the open source license(s) involved.
-
-[approved]: #getting-approvals-for-your-pull-request
-[benchmark results]: ./doc/guides/writing-and-running-benchmarks.md
-[Building guide]: ./BUILDING.md
-[CI (Continuous Integration) test run]: #ci-testing
-[Code of Conduct]: https://github.com/nodejs/admin/blob/master/CODE_OF_CONDUCT.md
-[https://ci.nodejs.org/]: https://ci.nodejs.org/
-[IRC in the #node-dev channel]: https://webchat.freenode.net?channels=node-dev&uio=d4
-[Node.js help repository]: https://github.com/nodejs/help/issues
-[Onboarding guide]: ./doc/onboarding.md
-[Technical Steering Committee (TSC) repository]: https://github.com/nodejs/TSC/issues
diff --git a/CPP_STYLE_GUIDE.md b/CPP_STYLE_GUIDE.md
index 6266ee03b7c538..5a275094ad16fa 100644
--- a/CPP_STYLE_GUIDE.md
+++ b/CPP_STYLE_GUIDE.md
@@ -20,7 +20,8 @@
* [Others](#others)
* [Type casting](#type-casting)
* [Do not include `*.h` if `*-inl.h` has already been included](#do-not-include-h-if--inlh-has-already-been-included)
- * [Avoid throwing JavaScript errors in nested C++ methods](#avoid-throwing-javascript-errors-in-nested-c-methods)
+ * [Avoid throwing JavaScript errors in C++ methods](#avoid-throwing-javascript-errors-in-c)
+ * [Avoid throwing JavaScript errors in nested C++ methods](#avoid-throwing-javascript-errors-in-nested-c-methods)
Unfortunately, the C++ linter (based on
[Google’s `cpplint`](https://github.com/google/styleguide)), which can be run
@@ -213,12 +214,65 @@ instead of
#include "util-inl.h"
```
-## Avoid throwing JavaScript errors in nested C++ methods
+## Avoid throwing JavaScript errors in C++
-If you need to throw JavaScript errors from a C++ binding method, try to do it
-at the top level and not inside of nested calls.
+When there is a need to throw errors from a C++ binding method, try to
+return the data necessary for constructing the errors to JavaScript,
+then construct and throw the errors [using `lib/internal/errors.js`][errors].
-A lot of code inside Node.js is written so that typechecking etc. is performed
-in JavaScript.
+Note that in general, type-checks on arguments should be done in JavaScript
+before the arguments are passed into C++. Then in the C++ binding, simply using
+`CHECK` assertions to guard against invalid arguments should be enough.
+
+If the return value of the binding cannot be used to signal failures or return
+the necessary data for constructing errors in JavaScript, pass a context object
+to the binding and put the necessary data inside in C++. For example:
+
+```cpp
+void Foo(const FunctionCallbackInfo<Value>& args) {
+ Environment* env = Environment::GetCurrent(args);
+ // Let the JavaScript handle the actual type-checking,
+ // only assertions are placed in C++
+ CHECK_EQ(args.Length(), 2);
+ CHECK(args[0]->IsString());
+ CHECK(args[1]->IsObject());
+
+  int err = DoSomethingWith(args[0].As<String>());
+ if (err) {
+ // Put the data inside the error context
+    Local<Object> ctx = args[1].As<Object>();
+    Local<String> key = FIXED_ONE_BYTE_STRING(env->isolate(), "code");
+ ctx->Set(env->context(), key, err).FromJust();
+ } else {
+ args.GetReturnValue().Set(something_to_return);
+ }
+}
+
+// In the initialize function
+env->SetMethod(target, "foo", Foo);
+```
+
+```js
+exports.foo = function(str) {
+ // Prefer doing the type-checks in JavaScript
+ if (typeof str !== 'string') {
+ throw new errors.TypeError('ERR_INVALID_ARG_TYPE', 'str', 'string');
+ }
+
+ const ctx = {};
+ const result = binding.foo(str, ctx);
+ if (ctx.code !== undefined) {
+ throw new errors.Error('ERR_ERROR_NAME', ctx.code);
+ }
+ return result;
+};
+```
+
+### Avoid throwing JavaScript errors in nested C++ methods
+
+When you have to throw the errors from C++, try to do it at the top level and
+not inside of nested calls.
Using C++ `throw` is not allowed.
+
+[errors]: https://github.com/nodejs/node/blob/master/doc/guides/using-internal-errors.md
diff --git a/GOVERNANCE.md b/GOVERNANCE.md
index 7b8903fa5c8bae..4f1ff30d83411c 100644
--- a/GOVERNANCE.md
+++ b/GOVERNANCE.md
@@ -4,27 +4,30 @@ The Node.js project is governed by its Collaborators, including a Technical
Steering Committee (TSC) which is responsible for high-level guidance of the
project.
+
+
+- [Collaborators](#collaborators)
+ - [Collaborator Activities](#collaborator-activities)
+- [Technical Steering Committee](#technical-steering-committee)
+ - [TSC Meetings](#tsc-meetings)
+- [Collaborator Nominations](#collaborator-nominations)
+ - [Onboarding](#onboarding)
+- [Consensus Seeking Process](#consensus-seeking-process)
+
+
+
## Collaborators
-The [nodejs/node](https://github.com/nodejs/node) GitHub repository is
-maintained by Collaborators who are added by the TSC on an ongoing basis.
+The [nodejs/node][] GitHub repository is maintained by Node.js Core
+Collaborators. Upon becoming Collaborators, they:
-Individuals identified by the TSC as making significant and valuable
-contributions across any Node.js repository may be made Collaborators and given
-commit access to the project. Activities taken into consideration include (but
-are not limited to) the quality of:
+* Become members of the @nodejs/collaborators team
+* Gain individual membership of the Node.js foundation
-* code commits and pull requests
-* documentation commits and pull requests
-* comments on issues and pull requests
-* contributions to the Node.js website
-* assistance provided to end users and novice contributors
-* participation in Working Groups
-* other participation in the wider Node.js community
+Their privileges include but are not limited to:
-If individuals making valuable contributions do not believe they have been
-considered for commit access, they may log an issue or contact a TSC member
-directly.
+* Commit access to the [nodejs/node][] repository
+* Access to the Node.js continuous integration (CI) jobs
Modifications of the contents of the nodejs/node repository are made on
a collaborative basis. Anybody with a GitHub account may propose a
@@ -49,6 +52,8 @@ Collaborators may opt to elevate significant or controversial modifications to
the TSC by assigning the `tsc-review` label to a pull request or issue. The
TSC should serve as the final arbiter where required.
+See:
+
* [Current list of Collaborators](./README.md#current-project-team-members)
* [A guide for Collaborators](./COLLABORATOR_GUIDE.md)
@@ -56,11 +61,11 @@ TSC should serve as the final arbiter where required.
Typical activities of a Collaborator include:
-* helping users and novice contributors
-* contributing code and documentation changes that improve the project
-* reviewing and commenting on issues and pull requests
-* participation in working groups
-* merging pull requests
+* Helping users and novice contributors
+* Contributing code and documentation changes that improve the project
+* Reviewing and commenting on issues and pull requests
+* Participation in working groups
+* Merging pull requests
The TSC periodically reviews the Collaborator list to identify inactive
Collaborators. Past Collaborators are typically given _Emeritus_ status. Emeriti
@@ -68,8 +73,8 @@ may request that the TSC restore them to active status.
## Technical Steering Committee
-The Technical Steering Committee (TSC) has final authority over this project
-including:
+A subset of the Collaborators form the Technical Steering Committee (TSC).
+The TSC has final authority over this project, including:
* Technical direction
* Project governance and process (including this policy)
@@ -78,7 +83,8 @@ including:
* Conduct guidelines
* Maintaining the list of additional Collaborators
-* [Current list of TSC members](./README.md#current-project-team-members)
+The current list of TSC members can be found in
+[the project README](./README.md#current-project-team-members).
The operations of the TSC are governed by the [TSC Charter][] as approved by
the Node.js Foundation Board of Directors.
@@ -128,10 +134,70 @@ the issue tracker is:
either the proposal is dropped or the objecting members are persuaded. If
there is an extended impasse, a motion for a vote may be made.
+## Collaborator Nominations
+
+Any existing Collaborator can nominate an individual making significant
+and valuable contributions across the Node.js organization to become a new
+Collaborator.
+
+To nominate a new Collaborator, open an issue in the [nodejs/node][]
+repository, with a summary of the nominee's contributions, for example:
+
+* Commits in the [nodejs/node][] repository.
+ * Can be shown using the link
+ `https://github.com/nodejs/node/commits?author=${GITHUB_ID}`
+ (replace `${GITHUB_ID}` with the nominee's GitHub ID).
+* Pull requests and issues opened in the [nodejs/node][] repository.
+ * Can be shown using the link
+ `https://github.com/nodejs/node/pulls?q=author%3A${GITHUB_ID}+`
+* Comments and reviews on issues and pull requests in the
+ [nodejs/node][] repository
+ * Can be shown using the links
+ `https://github.com/nodejs/node/pulls?q=reviewed-by%3A${GITHUB_ID}+`
+ and `https://github.com/nodejs/node/pulls?q=commenter%3A${GITHUB_ID}+`
+* Assistance provided to end users and novice contributors
+* Participation in other projects, teams, and working groups of the
+ Node.js organization
+ * Can be shown using the links
+ `https://github.com/search?q=author%3A${GITHUB_ID}++org%3Anodejs&type=Issues`
+ and
+ `https://github.com/search?q=commenter%3A${GITHUB_ID}++org%3Anodejs&type=Issues`
+* Other participation in the wider Node.js community
+
+Mention @nodejs/collaborators in the issue to notify other Collaborators about
+the nomination.
+
+If there are no objections raised by any Collaborators one week after
+the issue is opened, the nomination will be considered as accepted.
+Should there be any objections against the nomination, the TSC is responsible
+for working with the individuals involved and finding a resolution.
+The nomination must be approved by the TSC, which is assumed when there are no
+objections from any TSC members.
+
+Prior to the public nomination, the Collaborator initiating it can seek
+feedback from other Collaborators in private using
+[the GitHub discussion page][collaborators-discussions] of the
+Collaborators team, and work with the nominee to improve the nominee's
+contribution profile, in order to make the nomination as frictionless
+as possible.
+
+If individuals making valuable contributions do not believe they have been
+considered for a nomination, they may log an issue or contact a Collaborator
+directly.
+
+### Onboarding
+
+When the nomination is accepted, the new Collaborator will be onboarded
+by a TSC member. See [the onboarding guide](./doc/onboarding.md) for
+details of the onboarding process. In general, the onboarding should be
+completed within a month after the nomination is accepted.
+
## Consensus Seeking Process
The TSC follows a [Consensus Seeking][] decision making model as described by
the [TSC Charter][].
-[TSC Charter]: https://github.com/nodejs/TSC/blob/master/TSC-Charter.md
+[collaborators-discussions]: https://github.com/orgs/nodejs/teams/collaborators/discussions
[Consensus Seeking]: https://en.wikipedia.org/wiki/Consensus-seeking_decision-making
+[TSC Charter]: https://github.com/nodejs/TSC/blob/master/TSC-Charter.md
+[nodejs/node]: https://github.com/nodejs/node
diff --git a/LICENSE b/LICENSE
index 5cb6d3a6f68d73..4d4fd710946b24 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1038,7 +1038,7 @@ The externally maintained libraries used by Node.js are:
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
-- ESLint, located at tools/eslint, is licensed as follows:
+- ESLint, located at tools/node_modules/eslint, is licensed as follows:
"""
Copyright JS Foundation and other contributors, https://js.foundation
@@ -1061,6 +1061,32 @@ The externally maintained libraries used by Node.js are:
THE SOFTWARE.
"""
+- babel-eslint, located at tools/node_modules/babel-eslint, is licensed as follows:
+ """
+ Copyright (c) 2014-2016 Sebastian McKenzie
+
+ MIT License
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ """
+
- gtest, located at deps/gtest, is licensed as follows:
"""
Copyright 2008, Google Inc.
diff --git a/Makefile b/Makefile
index 213284aa70cd15..eaebe4f2b55a4b 100644
--- a/Makefile
+++ b/Makefile
@@ -14,6 +14,7 @@ COVTESTS ?= test-cov
GTEST_FILTER ?= "*"
GNUMAKEFLAGS += --no-print-directory
GCOV ?= gcov
+PWD = $(CURDIR)
ifdef JOBS
PARALLEL_ARGS = -j $(JOBS)
@@ -144,12 +145,14 @@ check: test
coverage-clean:
if [ -d lib_ ]; then $(RM) -r lib; mv lib_ lib; fi
$(RM) -r node_modules
- $(RM) -r gcovr testing
+ $(RM) -r gcovr build
$(RM) -r out/$(BUILDTYPE)/.coverage
$(RM) -r .cov_tmp
- $(RM) out/$(BUILDTYPE)/obj.target/node/{src,gen}/*.gcda
+ $(RM) out/$(BUILDTYPE)/obj.target/node/gen/*.gcda
+ $(RM) out/$(BUILDTYPE)/obj.target/node/src/*.gcda
$(RM) out/$(BUILDTYPE)/obj.target/node/src/tracing/*.gcda
- $(RM) out/$(BUILDTYPE)/obj.target/node/{src,gen}/*.gcno
+ $(RM) out/$(BUILDTYPE)/obj.target/node/gen/*.gcno
+ $(RM) out/$(BUILDTYPE)/obj.target/node/src/*.gcno
$(RM) out/$(BUILDTYPE)/obj.target/node/src/tracing/*.gcno
$(RM) out/$(BUILDTYPE)/obj.target/cctest/src/*.gcno
$(RM) out/$(BUILDTYPE)/obj.target/cctest/test/cctest/*.gcno
@@ -171,7 +174,7 @@ coverage-build: all
$(NODE) ./deps/npm install nyc --no-save --no-package-lock; fi
if [ ! -d gcovr ]; then git clone --depth=1 \
--single-branch git://github.com/gcovr/gcovr.git; fi
- if [ ! -d testing ]; then git clone --depth=1 \
+ if [ ! -d build ]; then git clone --depth=1 \
--single-branch https://github.com/nodejs/build.git; fi
if [ ! -f gcovr/scripts/gcovr.orig ]; then \
(cd gcovr && patch -N -p1 < \
@@ -185,7 +188,8 @@ coverage-build: all
coverage-test: coverage-build
$(RM) -r out/$(BUILDTYPE)/.coverage
$(RM) -r .cov_tmp
- $(RM) out/$(BUILDTYPE)/obj.target/node/{src,gen}/*.gcda
+ $(RM) out/$(BUILDTYPE)/obj.target/node/gen/*.gcda
+ $(RM) out/$(BUILDTYPE)/obj.target/node/src/*.gcda
$(RM) out/$(BUILDTYPE)/obj.target/node/src/tracing/*.gcda
-$(MAKE) $(COVTESTS)
mv lib lib__
@@ -640,7 +644,7 @@ out/doc/api/assets/%: doc/api_assets/% out/doc/api/assets
available-node = \
if [ -x $(PWD)/$(NODE) ] && [ -e $(PWD)/$(NODE) ]; then \
$(PWD)/$(NODE) $(1); \
- elif [ -x `which node` ] && [ -e `which node` ]; then \
+ elif [ -x `which node` ] && [ -e `which node` ] && [ `which node` ]; then \
`which node` $(1); \
else \
echo "No available node, cannot run \"node $(1)\""; \
@@ -907,15 +911,31 @@ $(TARBALL): release-only $(NODE_EXE) doc
mkdir -p $(TARNAME)/doc/api
cp doc/node.1 $(TARNAME)/doc/node.1
cp -r out/doc/api/* $(TARNAME)/doc/api/
- $(RM) -r $(TARNAME)/deps/v8/{test,samples,tools/profviz,tools/run-tests.py}
- $(RM) -r $(TARNAME)/doc/images # too big
- $(RM) -r $(TARNAME)/deps/uv/{docs,samples,test}
- $(RM) -r $(TARNAME)/deps/openssl/openssl/{doc,demos,test}
+ $(RM) -r $(TARNAME)/.editorconfig
+ $(RM) -r $(TARNAME)/.git*
+ $(RM) -r $(TARNAME)/.mailmap
+ $(RM) -r $(TARNAME)/deps/openssl/openssl/demos
+ $(RM) -r $(TARNAME)/deps/openssl/openssl/doc
+ $(RM) -r $(TARNAME)/deps/openssl/openssl/test
+ $(RM) -r $(TARNAME)/deps/uv/docs
+ $(RM) -r $(TARNAME)/deps/uv/samples
+ $(RM) -r $(TARNAME)/deps/uv/test
+ $(RM) -r $(TARNAME)/deps/v8/samples
+ $(RM) -r $(TARNAME)/deps/v8/test
+ $(RM) -r $(TARNAME)/deps/v8/tools/profviz
+ $(RM) -r $(TARNAME)/deps/v8/tools/run-tests.py
$(RM) -r $(TARNAME)/deps/zlib/contrib # too big, unused
- $(RM) -r $(TARNAME)/.{editorconfig,git*,mailmap}
- $(RM) -r $(TARNAME)/tools/{eslint,eslint-rules,osx-pkg.pmdoc,pkgsrc,remark-cli,remark-preset-lint-node}
- $(RM) -r $(TARNAME)/tools/{osx-*,license-builder.sh,cpplint.py}
+ $(RM) -r $(TARNAME)/doc/images # too big
$(RM) -r $(TARNAME)/test*.tap
+ $(RM) -r $(TARNAME)/tools/cpplint.py
+ $(RM) -r $(TARNAME)/tools/eslint-rules
+ $(RM) -r $(TARNAME)/tools/license-builder.sh
+ $(RM) -r $(TARNAME)/tools/node_modules
+ $(RM) -r $(TARNAME)/tools/osx-*
+ $(RM) -r $(TARNAME)/tools/osx-pkg.pmdoc
+ $(RM) -r $(TARNAME)/tools/pkgsrc
+ $(RM) -r $(TARNAME)/tools/remark-cli
+ $(RM) -r $(TARNAME)/tools/remark-preset-lint-node
find $(TARNAME)/ -name ".eslint*" -maxdepth 2 | xargs $(RM)
find $(TARNAME)/ -type l | xargs $(RM) # annoying on windows
tar -cf $(TARNAME).tar $(TARNAME)
@@ -1019,57 +1039,13 @@ ifeq ($(XZ), 0)
ssh $(STAGINGSERVER) "touch nodejs/$(DISTTYPEDIR)/$(FULLVERSION)/$(TARNAME)-$(OSTYPE)-$(ARCH).tar.xz.done"
endif
-.PHONY: bench-net
-bench-net: all
- @$(NODE) benchmark/run.js net
-
-bench-crypto: all
- @$(NODE) benchmark/run.js crypto
-
-.PHONY: bench-tls
-bench-tls: all
- @$(NODE) benchmark/run.js tls
-
-.PHONY: bench-http
-bench-http: all
- @$(NODE) benchmark/run.js http
-
-.PHONY: bench-fs
-bench-fs: all
- @$(NODE) benchmark/run.js fs
-
-.PHONY: bench-misc
-bench-misc: benchmark/misc/function_call/build/Release/binding.node
- @$(NODE) benchmark/run.js misc
-
-.PHONY: bench-array
-bench-array: all
- @$(NODE) benchmark/run.js arrays
-
-.PHONY: bench-buffer
-bench-buffer: all
- @$(NODE) benchmark/run.js buffers
-
-bench-url: all
- @$(NODE) benchmark/run.js url
-
-bench-events: all
- @$(NODE) benchmark/run.js events
-
-bench-util: all
- @$(NODE) benchmark/run.js util
-
-bench-dgram: all
- @$(NODE) benchmark/run.js dgram
-
.PHONY: bench-all
-bench-all: bench bench-misc bench-array bench-buffer bench-url bench-events bench-dgram bench-util
+bench-all:
+ @echo "Please use benchmark/run.js or benchmark/compare.js to run the benchmarks."
.PHONY: bench
-bench: bench-net bench-http bench-fs bench-tls
-
-.PHONY: bench-ci
-bench-ci: bench
+bench:
+ @echo "Please use benchmark/run.js or benchmark/compare.js to run the benchmarks."
.PHONY: lint-md-clean
lint-md-clean:
@@ -1081,27 +1057,32 @@ lint-md-clean:
lint-md-build:
@if [ ! -d tools/remark-cli/node_modules ]; then \
echo "Markdown linter: installing remark-cli into tools/"; \
- cd tools/remark-cli && ../../$(NODE) ../../$(NPM) install; fi
+ cd tools/remark-cli && $(call available-node,$(run-npm-install)) fi
@if [ ! -d tools/remark-preset-lint-node/node_modules ]; then \
echo "Markdown linter: installing remark-preset-lint-node into tools/"; \
- cd tools/remark-preset-lint-node && ../../$(NODE) ../../$(NPM) install; fi
+ cd tools/remark-preset-lint-node && $(call available-node,$(run-npm-install)) fi
+
.PHONY: lint-md
ifneq ("","$(wildcard tools/remark-cli/node_modules/)")
-LINT_MD_TARGETS = src lib benchmark tools/doc tools/icu
-LINT_MD_ROOT_DOCS := $(wildcard *.md)
-LINT_MD_FILES := $(shell find $(LINT_MD_TARGETS) -type f \
- -not -path '*node_modules*' -name '*.md') $(LINT_MD_ROOT_DOCS)
-LINT_DOC_MD_FILES = $(shell ls doc/**/*.md)
-tools/.docmdlintstamp: $(LINT_DOC_MD_FILES)
+LINT_MD_DOC_FILES = $(shell ls doc/**/*.md)
+run-lint-doc-md = tools/remark-cli/cli.js -q -f $(LINT_MD_DOC_FILES)
+# Lint all changed markdown files under doc/
+tools/.docmdlintstamp: $(LINT_MD_DOC_FILES)
@echo "Running Markdown linter on docs..."
- @$(NODE) tools/remark-cli/cli.js -q -f $(LINT_DOC_MD_FILES)
+ @$(call available-node,$(run-lint-doc-md))
@touch $@
-tools/.miscmdlintstamp: $(LINT_MD_FILES)
+LINT_MD_TARGETS = src lib benchmark tools/doc tools/icu
+LINT_MD_ROOT_DOCS := $(wildcard *.md)
+LINT_MD_MISC_FILES := $(shell find $(LINT_MD_TARGETS) -type f \
+ -not -path '*node_modules*' -name '*.md') $(LINT_MD_ROOT_DOCS)
+run-lint-misc-md = tools/remark-cli/cli.js -q -f $(LINT_MD_MISC_FILES)
+# Lint other changed markdown files maintained by us
+tools/.miscmdlintstamp: $(LINT_MD_MISC_FILES)
@echo "Running Markdown linter on misc docs..."
- @$(NODE) tools/remark-cli/cli.js -q -f $(LINT_MD_FILES)
+ @$(call available-node,$(run-lint-misc-md))
@touch $@
tools/.mdlintstamp: tools/.miscmdlintstamp tools/.docmdlintstamp
@@ -1115,43 +1096,33 @@ lint-md:
endif
LINT_JS_TARGETS = benchmark doc lib test tools
-LINT_JS_CMD = tools/eslint/bin/eslint.js --cache \
- --rulesdir=tools/eslint-rules --ext=.js,.mjs,.md \
- $(LINT_JS_TARGETS)
+
+run-lint-js = tools/node_modules/eslint/bin/eslint.js --cache \
+ --rulesdir=tools/eslint-rules --ext=.js,.mjs,.md $(LINT_JS_TARGETS)
+run-lint-js-fix = $(run-lint-js) --fix
.PHONY: lint-js-fix
lint-js-fix:
- @if [ -x $(NODE) ]; then \
- $(NODE) $(LINT_JS_CMD) --fix; \
- else \
- node $(LINT_JS_CMD) --fix; \
- fi
+ @$(call available-node,$(run-lint-js-fix))
.PHONY: lint-js
# Note that on the CI `lint-js-ci` is run instead.
# Lints the JavaScript code with eslint.
lint-js:
@echo "Running JS linter..."
- @if [ -x $(NODE) ]; then \
- $(NODE) $(LINT_JS_CMD); \
- else \
- node $(LINT_JS_CMD); \
- fi
+ @$(call available-node,$(run-lint-js))
jslint: lint-js
@echo "Please use lint-js instead of jslint"
+run-lint-js-ci = tools/lint-js.js $(PARALLEL_ARGS) -f tap -o test-eslint.tap \
+ $(LINT_JS_TARGETS)
+
.PHONY: lint-js-ci
# On the CI the output is emitted in the TAP format.
lint-js-ci:
@echo "Running JS linter..."
- @if [ -x $(NODE) ]; then \
- $(NODE) tools/lint-js.js $(PARALLEL_ARGS) -f tap -o test-eslint.tap \
- $(LINT_JS_TARGETS); \
- else \
- node tools/lint-js.js $(PARALLEL_ARGS) -f tap -o test-eslint.tap \
- $(LINT_JS_TARGETS); \
- fi
+ @$(call available-node,$(run-lint-js-ci))
jslint-ci: lint-js-ci
@echo "Please use lint-js-ci instead of jslint-ci"
@@ -1206,7 +1177,7 @@ cpplint: lint-cpp
.PHONY: lint
.PHONY: lint-ci
-ifneq ("","$(wildcard tools/eslint/)")
+ifneq ("","$(wildcard tools/node_modules/eslint/)")
lint: ## Run JS, C++, MD and doc linters.
@EXIT_STATUS=0 ; \
$(MAKE) lint-js || EXIT_STATUS=$$? ; \
diff --git a/README.md b/README.md
index 85a27f549a5150..47b8694797637b 100644
--- a/README.md
+++ b/README.md
@@ -405,6 +405,8 @@ For more information about the governance of the Node.js project, see
**Kunal Pathak** <kunal.pathak@microsoft.com>
* [lance](https://github.com/lance) -
**Lance Ball** <lball@redhat.com>
+* [Leko](https://github.com/Leko) -
+**Shingo Inoue** <leko.noor@gmail.com> (he/him)
* [lpinca](https://github.com/lpinca) -
**Luigi Pinca** <luigipinca@gmail.com> (he/him)
* [lucamaraschi](https://github.com/lucamaraschi) -
@@ -495,6 +497,8 @@ For more information about the governance of the Node.js project, see
**Rich Trott** <rtrott@gmail.com> (he/him)
* [tunniclm](https://github.com/tunniclm) -
**Mike Tunnicliffe** <m.j.tunnicliffe@gmail.com>
+* [vdeturckheim](https://github.com/vdeturckheim) -
+**Vladimir de Turckheim** <vlad2t@hotmail.com> (he/him)
* [vkurchatkin](https://github.com/vkurchatkin) -
**Vladimir Kurchatkin** <vladimir.kurchatkin@gmail.com>
* [vsemozhetbyt](https://github.com/vsemozhetbyt) -
diff --git a/benchmark/_http-benchmarkers.js b/benchmark/_http-benchmarkers.js
index 54b7481afaa817..55ebcc96ba21ed 100644
--- a/benchmark/_http-benchmarkers.js
+++ b/benchmark/_http-benchmarkers.js
@@ -89,11 +89,14 @@ class TestDoubleBenchmarker {
}
create(options) {
+ const env = Object.assign({
+ duration: options.duration,
+ test_url: `http://127.0.0.1:${options.port}${options.path}`,
+ }, process.env);
+
const child = child_process.fork(this.executable, {
silent: true,
- env: Object.assign({}, process.env, {
- test_url: `http://127.0.0.1:${options.port}${options.path}`
- })
+ env
});
return child;
}
diff --git a/benchmark/_test-double-benchmarker.js b/benchmark/_test-double-benchmarker.js
index 8c2f744fbf6e9f..e2a0eb13126ed5 100644
--- a/benchmark/_test-double-benchmarker.js
+++ b/benchmark/_test-double-benchmarker.js
@@ -2,6 +2,28 @@
const http = require('http');
-http.get(process.env.test_url, function() {
- console.log(JSON.stringify({ throughput: 1 }));
-});
+const duration = process.env.duration || 0;
+const url = process.env.test_url;
+
+const start = process.hrtime();
+let throughput = 0;
+
+function request(res) {
+ res.on('data', () => {});
+ res.on('error', () => {});
+ res.on('end', () => {
+ throughput++;
+ const diff = process.hrtime(start);
+ if (duration > 0 && diff[0] < duration) {
+ run();
+ } else {
+ console.log(JSON.stringify({ throughput }));
+ }
+ });
+}
+
+function run() {
+ http.get(url, request);
+}
+
+run();
diff --git a/benchmark/arrays/var-int.js b/benchmark/arrays/var-int.js
index a8acbf5ccbbcb1..e36a909a3b9e76 100644
--- a/benchmark/arrays/var-int.js
+++ b/benchmark/arrays/var-int.js
@@ -17,10 +17,8 @@ const bench = common.createBenchmark(main, {
n: [25]
});
-function main(conf) {
- const type = conf.type;
+function main({ type, n }) {
const clazz = global[type];
- const n = +conf.n;
bench.start();
const arr = new clazz(n * 1e6);
diff --git a/benchmark/arrays/zero-float.js b/benchmark/arrays/zero-float.js
index c8d7dbf7ed57d2..073460e0efb8fc 100644
--- a/benchmark/arrays/zero-float.js
+++ b/benchmark/arrays/zero-float.js
@@ -17,10 +17,8 @@ const bench = common.createBenchmark(main, {
n: [25]
});
-function main(conf) {
- const type = conf.type;
+function main({ type, n }) {
const clazz = global[type];
- const n = +conf.n;
bench.start();
const arr = new clazz(n * 1e6);
diff --git a/benchmark/arrays/zero-int.js b/benchmark/arrays/zero-int.js
index b16a6b6b72064e..78fd34ae6c0bf4 100644
--- a/benchmark/arrays/zero-int.js
+++ b/benchmark/arrays/zero-int.js
@@ -17,10 +17,8 @@ const bench = common.createBenchmark(main, {
n: [25]
});
-function main(conf) {
- const type = conf.type;
+function main({ type, n }) {
const clazz = global[type];
- const n = +conf.n;
bench.start();
const arr = new clazz(n * 1e6);
diff --git a/benchmark/assert/deepequal-buffer.js b/benchmark/assert/deepequal-buffer.js
index d4495af69b48ef..0e7494544d3387 100644
--- a/benchmark/assert/deepequal-buffer.js
+++ b/benchmark/assert/deepequal-buffer.js
@@ -13,9 +13,7 @@ const bench = common.createBenchmark(main, {
]
});
-function main(conf) {
- const n = +conf.n;
- const len = +conf.len;
+function main({ len, n, method }) {
var i;
const data = Buffer.allocUnsafe(len + 1);
@@ -26,7 +24,7 @@ function main(conf) {
data.copy(expected);
data.copy(expectedWrong);
- switch (conf.method) {
+ switch (method) {
case '':
// Empty string falls through to next line as default, mostly for tests.
case 'deepEqual':
diff --git a/benchmark/assert/deepequal-map.js b/benchmark/assert/deepequal-map.js
index 4976f2619834bf..085274e8bfb943 100644
--- a/benchmark/assert/deepequal-map.js
+++ b/benchmark/assert/deepequal-map.js
@@ -38,14 +38,11 @@ function benchmark(method, n, values, values2) {
bench.end(n);
}
-function main(conf) {
- const n = +conf.n;
- const len = +conf.len;
-
+function main({ n, len, method }) {
const array = Array(len).fill(1);
var values, values2;
- switch (conf.method) {
+ switch (method) {
case '':
// Empty string falls through to next line as default, mostly for tests.
case 'deepEqual_primitiveOnly':
diff --git a/benchmark/assert/deepequal-object.js b/benchmark/assert/deepequal-object.js
index 2efa9452af88e1..2c2549d58485fc 100644
--- a/benchmark/assert/deepequal-object.js
+++ b/benchmark/assert/deepequal-object.js
@@ -25,10 +25,9 @@ function createObj(source, add = '') {
}));
}
-function main(conf) {
- const size = +conf.size;
- // TODO: Fix this "hack"
- const n = (+conf.n) / size;
+function main({ size, n, method }) {
+ // TODO: Fix this "hack". `n` should not be manipulated.
+ n = n / size;
var i;
const source = Array.apply(null, Array(size));
@@ -36,7 +35,7 @@ function main(conf) {
const expected = createObj(source);
const expectedWrong = createObj(source, '4');
- switch (conf.method) {
+ switch (method) {
case '':
// Empty string falls through to next line as default, mostly for tests.
case 'deepEqual':
diff --git a/benchmark/assert/deepequal-prims-and-objs-big-array-set.js b/benchmark/assert/deepequal-prims-and-objs-big-array-set.js
index 19337d7828236d..04802a76928cb2 100644
--- a/benchmark/assert/deepequal-prims-and-objs-big-array-set.js
+++ b/benchmark/assert/deepequal-prims-and-objs-big-array-set.js
@@ -15,7 +15,7 @@ const primValues = {
};
const bench = common.createBenchmark(main, {
- prim: Object.keys(primValues),
+ primitive: Object.keys(primValues),
n: [25],
len: [1e5],
method: [
@@ -30,10 +30,8 @@ const bench = common.createBenchmark(main, {
]
});
-function main(conf) {
- const prim = primValues[conf.prim];
- const n = +conf.n;
- const len = +conf.len;
+function main({ n, len, primitive, method }) {
+ const prim = primValues[primitive];
const actual = [];
const expected = [];
const expectedWrong = [];
@@ -52,7 +50,7 @@ function main(conf) {
const expectedSet = new Set(expected);
const expectedWrongSet = new Set(expectedWrong);
- switch (conf.method) {
+ switch (method) {
case '':
// Empty string falls through to next line as default, mostly for tests.
case 'deepEqual_Array':
diff --git a/benchmark/assert/deepequal-prims-and-objs-big-loop.js b/benchmark/assert/deepequal-prims-and-objs-big-loop.js
index 4a345f27c20f0e..09797dfaf2df21 100644
--- a/benchmark/assert/deepequal-prims-and-objs-big-loop.js
+++ b/benchmark/assert/deepequal-prims-and-objs-big-loop.js
@@ -14,7 +14,7 @@ const primValues = {
};
const bench = common.createBenchmark(main, {
- prim: Object.keys(primValues),
+ primitive: Object.keys(primValues),
n: [1e6],
method: [
'deepEqual',
@@ -24,16 +24,15 @@ const bench = common.createBenchmark(main, {
]
});
-function main(conf) {
- const prim = primValues[conf.prim];
- const n = +conf.n;
+function main({ n, primitive, method }) {
+ const prim = primValues[primitive];
const actual = prim;
const expected = prim;
const expectedWrong = 'b';
var i;
// Creates new array to avoid loop invariant code motion
- switch (conf.method) {
+ switch (method) {
case '':
// Empty string falls through to next line as default, mostly for tests.
case 'deepEqual':
diff --git a/benchmark/assert/deepequal-set.js b/benchmark/assert/deepequal-set.js
index aa0ebc064886a2..ebcf33cc6d5254 100644
--- a/benchmark/assert/deepequal-set.js
+++ b/benchmark/assert/deepequal-set.js
@@ -38,15 +38,12 @@ function benchmark(method, n, values, values2) {
bench.end(n);
}
-function main(conf) {
- const n = +conf.n;
- const len = +conf.len;
-
+function main({ n, len, method }) {
const array = Array(len).fill(1);
var values, values2;
- switch (conf.method) {
+ switch (method) {
case '':
// Empty string falls through to next line as default, mostly for tests.
case 'deepEqual_primitiveOnly':
diff --git a/benchmark/assert/deepequal-typedarrays.js b/benchmark/assert/deepequal-typedarrays.js
index 8e8cc4b083a73e..01546801ff3004 100644
--- a/benchmark/assert/deepequal-typedarrays.js
+++ b/benchmark/assert/deepequal-typedarrays.js
@@ -24,12 +24,8 @@ const bench = common.createBenchmark(main, {
len: [1e6]
});
-function main(conf) {
- const type = conf.type;
+function main({ type, n, len, method }) {
const clazz = global[type];
- const n = +conf.n;
- const len = +conf.len;
-
const actual = new clazz(len);
const expected = new clazz(len);
const expectedWrong = Buffer.alloc(len);
@@ -37,7 +33,7 @@ function main(conf) {
expectedWrong[wrongIndex] = 123;
var i;
- switch (conf.method) {
+ switch (method) {
case '':
// Empty string falls through to next line as default, mostly for tests.
case 'deepEqual':
diff --git a/benchmark/assert/throws.js b/benchmark/assert/throws.js
index 075e227f886acc..bffde7cbc1fd94 100644
--- a/benchmark/assert/throws.js
+++ b/benchmark/assert/throws.js
@@ -13,15 +13,14 @@ const bench = common.createBenchmark(main, {
]
});
-function main(conf) {
- const n = +conf.n;
+function main({ n, method }) {
const throws = () => { throw new TypeError('foobar'); };
const doesNotThrow = () => { return 'foobar'; };
const regExp = /foobar/;
const message = 'failure';
var i;
- switch (conf.method) {
+ switch (method) {
case '':
// Empty string falls through to next line as default, mostly for tests.
case 'doesNotThrow':
@@ -54,6 +53,6 @@ function main(conf) {
bench.end(n);
break;
default:
- throw new Error(`Unsupported method ${conf.method}`);
+ throw new Error(`Unsupported method ${method}`);
}
}
diff --git a/benchmark/async_hooks/gc-tracking.js b/benchmark/async_hooks/gc-tracking.js
index c71c1b07aa5431..a569fb8fa92485 100644
--- a/benchmark/async_hooks/gc-tracking.js
+++ b/benchmark/async_hooks/gc-tracking.js
@@ -21,10 +21,8 @@ function endAfterGC(n) {
});
}
-function main(conf) {
- const n = +conf.n;
-
- switch (conf.method) {
+function main({ n, method }) {
+ switch (method) {
case 'trackingEnabled':
bench.start();
for (let i = 0; i < n; i++) {
diff --git a/benchmark/buffers/buffer-base64-decode-wrapped.js b/benchmark/buffers/buffer-base64-decode-wrapped.js
index 3140cd5525ad07..61e3bb654ee7c0 100644
--- a/benchmark/buffers/buffer-base64-decode-wrapped.js
+++ b/benchmark/buffers/buffer-base64-decode-wrapped.js
@@ -6,8 +6,7 @@ const bench = common.createBenchmark(main, {
n: [32],
});
-function main(conf) {
- const n = +conf.n;
+function main({ n }) {
const charsPerLine = 76;
const linesCount = 8 << 16;
const bytesCount = charsPerLine * linesCount / 4 * 3;
diff --git a/benchmark/buffers/buffer-base64-decode.js b/benchmark/buffers/buffer-base64-decode.js
index 6a9002df383a8f..492922fb2b6eac 100644
--- a/benchmark/buffers/buffer-base64-decode.js
+++ b/benchmark/buffers/buffer-base64-decode.js
@@ -6,8 +6,7 @@ const bench = common.createBenchmark(main, {
n: [32],
});
-function main(conf) {
- const n = +conf.n;
+function main({ n }) {
const s = 'abcd'.repeat(8 << 20);
// eslint-disable-next-line no-unescaped-regexp-dot
s.match(/./); // Flatten string.
diff --git a/benchmark/buffers/buffer-base64-encode.js b/benchmark/buffers/buffer-base64-encode.js
index 509fcd9a33c9d4..d8b601bbd181f4 100644
--- a/benchmark/buffers/buffer-base64-encode.js
+++ b/benchmark/buffers/buffer-base64-encode.js
@@ -27,9 +27,7 @@ const bench = common.createBenchmark(main, {
n: [32]
});
-function main(conf) {
- const n = +conf.n;
- const len = +conf.len;
+function main({ n, len }) {
const b = Buffer.allocUnsafe(len);
let s = '';
let i;
diff --git a/benchmark/buffers/buffer-bytelength.js b/benchmark/buffers/buffer-bytelength.js
index fc6dfcf2301eaf..0617b4feb3f140 100644
--- a/benchmark/buffers/buffer-bytelength.js
+++ b/benchmark/buffers/buffer-bytelength.js
@@ -15,11 +15,7 @@ const chars = [
'𠜎𠜱𠝹𠱓𠱸𠲖𠳏𠳕𠴕𠵼𠵿𠸎𠸏𠹷𠺝𠺢' // 4 bytes
];
-function main(conf) {
- const n = conf.n | 0;
- const len = conf.len | 0;
- const encoding = conf.encoding;
-
+function main({ n, len, encoding }) {
var strings = [];
var results;
if (encoding === 'buffer') {
diff --git a/benchmark/buffers/buffer-compare-instance-method.js b/benchmark/buffers/buffer-compare-instance-method.js
index ff3bc4c1abda98..a3433803b79537 100644
--- a/benchmark/buffers/buffer-compare-instance-method.js
+++ b/benchmark/buffers/buffer-compare-instance-method.js
@@ -7,10 +7,8 @@ const bench = common.createBenchmark(main, {
millions: [1]
});
-function main(conf) {
- const iter = (conf.millions >>> 0) * 1e6;
- const size = (conf.size >>> 0);
- const args = (conf.args >>> 0);
+function main({ millions, size, args }) {
+ const iter = millions * 1e6;
const b0 = Buffer.alloc(size, 'a');
const b1 = Buffer.alloc(size, 'a');
const b0Len = b0.length;
diff --git a/benchmark/buffers/buffer-compare-offset.js b/benchmark/buffers/buffer-compare-offset.js
index 96719abfbe5618..850fe11d3f429e 100644
--- a/benchmark/buffers/buffer-compare-offset.js
+++ b/benchmark/buffers/buffer-compare-offset.js
@@ -23,13 +23,11 @@ function compareUsingOffset(b0, b1, len, iter) {
bench.end(iter / 1e6);
}
-function main(conf) {
- const iter = (conf.millions >>> 0) * 1e6;
- const size = (conf.size >>> 0);
- const method =
- conf.method === 'slice' ? compareUsingSlice : compareUsingOffset;
- method(Buffer.alloc(size, 'a'),
- Buffer.alloc(size, 'b'),
- size >> 1,
- iter);
+function main({ millions, size, method }) {
+ const iter = millions * 1e6;
+ const fn = method === 'slice' ? compareUsingSlice : compareUsingOffset;
+ fn(Buffer.alloc(size, 'a'),
+ Buffer.alloc(size, 'b'),
+ size >> 1,
+ iter);
}
diff --git a/benchmark/buffers/buffer-compare.js b/benchmark/buffers/buffer-compare.js
index ad6519cd102340..f7abb4b3d94ffa 100644
--- a/benchmark/buffers/buffer-compare.js
+++ b/benchmark/buffers/buffer-compare.js
@@ -27,9 +27,8 @@ const bench = common.createBenchmark(main, {
millions: [1]
});
-function main(conf) {
- const iter = (conf.millions >>> 0) * 1e6;
- const size = (conf.size >>> 0);
+function main({ millions, size }) {
+ const iter = millions * 1e6;
const b0 = Buffer.alloc(size, 'a');
const b1 = Buffer.alloc(size, 'a');
diff --git a/benchmark/buffers/buffer-concat.js b/benchmark/buffers/buffer-concat.js
index a27e132193ef41..3f9cffc06a6a7e 100644
--- a/benchmark/buffers/buffer-concat.js
+++ b/benchmark/buffers/buffer-concat.js
@@ -8,15 +8,11 @@ const bench = common.createBenchmark(main, {
n: [1024]
});
-function main(conf) {
- const n = +conf.n;
- const size = +conf.pieceSize;
- const pieces = +conf.pieces;
-
+function main({ n, pieces, pieceSize, withTotalLength }) {
const list = new Array(pieces);
- list.fill(Buffer.allocUnsafe(size));
+ list.fill(Buffer.allocUnsafe(pieceSize));
- const totalLength = conf.withTotalLength ? pieces * size : undefined;
+ const totalLength = withTotalLength ? pieces * pieceSize : undefined;
bench.start();
for (var i = 0; i < n * 1024; i++) {
diff --git a/benchmark/buffers/buffer-creation.js b/benchmark/buffers/buffer-creation.js
index 4ca0a049228f6c..73e620955e91db 100644
--- a/benchmark/buffers/buffer-creation.js
+++ b/benchmark/buffers/buffer-creation.js
@@ -15,10 +15,8 @@ const bench = common.createBenchmark(main, {
n: [1024]
});
-function main(conf) {
- const len = +conf.len;
- const n = +conf.n;
- switch (conf.type) {
+function main({ len, n, type }) {
+ switch (type) {
case '':
case 'fast-alloc':
bench.start();
diff --git a/benchmark/buffers/buffer-from.js b/benchmark/buffers/buffer-from.js
index 50cfbc887aa0b9..6f2358bcf296ab 100644
--- a/benchmark/buffers/buffer-from.js
+++ b/benchmark/buffers/buffer-from.js
@@ -18,10 +18,7 @@ const bench = common.createBenchmark(main, {
n: [2048]
});
-function main(conf) {
- const len = +conf.len;
- const n = +conf.n;
-
+function main({ len, n, source }) {
const array = new Array(len).fill(42);
const arrayBuf = new ArrayBuffer(len);
const str = 'a'.repeat(len);
@@ -31,7 +28,7 @@ function main(conf) {
var i;
- switch (conf.source) {
+ switch (source) {
case 'array':
bench.start();
for (i = 0; i < n * 1024; i++) {
diff --git a/benchmark/buffers/buffer-hex.js b/benchmark/buffers/buffer-hex.js
index d05bb832b3068c..1bdef81139ffe7 100644
--- a/benchmark/buffers/buffer-hex.js
+++ b/benchmark/buffers/buffer-hex.js
@@ -7,9 +7,7 @@ const bench = common.createBenchmark(main, {
n: [1e7]
});
-function main(conf) {
- const len = conf.len | 0;
- const n = conf.n | 0;
+function main({ len, n }) {
const buf = Buffer.alloc(len);
for (let i = 0; i < buf.length; i++)
diff --git a/benchmark/buffers/buffer-indexof-number.js b/benchmark/buffers/buffer-indexof-number.js
index 2e6e10b9f33d40..91bff0d54bb7eb 100644
--- a/benchmark/buffers/buffer-indexof-number.js
+++ b/benchmark/buffers/buffer-indexof-number.js
@@ -8,16 +8,14 @@ const bench = common.createBenchmark(main, {
n: [1e7]
});
-function main(conf) {
- const n = +conf.n;
- const search = +conf.value;
+function main({ n, value }) {
const aliceBuffer = fs.readFileSync(
path.resolve(__dirname, '../fixtures/alice.html')
);
bench.start();
for (var i = 0; i < n; i++) {
- aliceBuffer.indexOf(search, 0, undefined);
+ aliceBuffer.indexOf(value, 0, undefined);
}
bench.end(n);
}
diff --git a/benchmark/buffers/buffer-indexof.js b/benchmark/buffers/buffer-indexof.js
index 1545475269a025..c98d15320aaaae 100644
--- a/benchmark/buffers/buffer-indexof.js
+++ b/benchmark/buffers/buffer-indexof.js
@@ -25,16 +25,13 @@ const bench = common.createBenchmark(main, {
search: searchStrings,
encoding: ['undefined', 'utf8', 'ucs2', 'binary'],
type: ['buffer', 'string'],
- iter: [1]
+ iter: [100000]
});
-function main(conf) {
- const iter = (conf.iter) * 100000;
+function main({ iter, search, encoding, type }) {
var aliceBuffer = fs.readFileSync(
path.resolve(__dirname, '../fixtures/alice.html')
);
- var search = conf.search;
- var encoding = conf.encoding;
if (encoding === 'undefined') {
encoding = undefined;
@@ -44,7 +41,7 @@ function main(conf) {
aliceBuffer = Buffer.from(aliceBuffer.toString(), encoding);
}
- if (conf.type === 'buffer') {
+ if (type === 'buffer') {
search = Buffer.from(Buffer.from(search).toString(), encoding);
}
diff --git a/benchmark/buffers/buffer-iterate.js b/benchmark/buffers/buffer-iterate.js
index 4e911caa72ce14..8531e1cae82115 100644
--- a/benchmark/buffers/buffer-iterate.js
+++ b/benchmark/buffers/buffer-iterate.js
@@ -16,14 +16,11 @@ const methods = {
'iterator': benchIterator
};
-function main(conf) {
- const len = +conf.size;
- const clazz = conf.type === 'fast' ? Buffer : SlowBuffer;
- const buffer = new clazz(len);
+function main({ size, type, method, n }) {
+ const clazz = type === 'fast' ? Buffer : SlowBuffer;
+ const buffer = new clazz(size);
buffer.fill(0);
-
- const method = conf.method || 'for';
- methods[method](buffer, conf.n);
+ methods[method || 'for'](buffer, n);
}
diff --git a/benchmark/buffers/buffer-read.js b/benchmark/buffers/buffer-read.js
index 339da75befce4d..41e842f3123623 100644
--- a/benchmark/buffers/buffer-read.js
+++ b/benchmark/buffers/buffer-read.js
@@ -25,13 +25,12 @@ const bench = common.createBenchmark(main, {
millions: [1]
});
-function main(conf) {
- const noAssert = conf.noAssert === 'true';
- const len = +conf.millions * 1e6;
- const clazz = conf.buf === 'fast' ? Buffer : require('buffer').SlowBuffer;
+function main({ noAssert, millions, buf, type }) {
+ noAssert = noAssert === 'true';
+ const len = millions * 1e6;
+ const clazz = buf === 'fast' ? Buffer : require('buffer').SlowBuffer;
const buff = new clazz(8);
- const type = conf.type || 'UInt8';
- const fn = `read${type}`;
+ const fn = `read${type || 'UInt8'}`;
buff.writeDoubleLE(0, 0, noAssert);
const testFunction = new Function('buff', `
diff --git a/benchmark/buffers/buffer-slice.js b/benchmark/buffers/buffer-slice.js
index 0067d02d8c7931..2e52475da91866 100644
--- a/benchmark/buffers/buffer-slice.js
+++ b/benchmark/buffers/buffer-slice.js
@@ -10,9 +10,8 @@ const bench = common.createBenchmark(main, {
const buf = Buffer.allocUnsafe(1024);
const slowBuf = new SlowBuffer(1024);
-function main(conf) {
- const n = +conf.n;
- const b = conf.type === 'fast' ? buf : slowBuf;
+function main({ n, type }) {
+ const b = type === 'fast' ? buf : slowBuf;
bench.start();
for (var i = 0; i < n * 1024; i++) {
b.slice(10, 256);
diff --git a/benchmark/buffers/buffer-swap.js b/benchmark/buffers/buffer-swap.js
index 05cde002943f4a..8f6e1f51d3a0f2 100644
--- a/benchmark/buffers/buffer-swap.js
+++ b/benchmark/buffers/buffer-swap.js
@@ -72,13 +72,9 @@ function genMethod(method) {
return (new Function(fnString))();
}
-function main(conf) {
- const method = conf.method || 'swap16';
- const len = conf.len | 0;
- const n = conf.n | 0;
- const aligned = conf.aligned || 'true';
+function main({ method, len, n, aligned = 'true' }) {
const buf = createBuffer(len, aligned === 'true');
- const bufferSwap = genMethod(method);
+ const bufferSwap = genMethod(method || 'swap16');
bufferSwap(n, buf);
bench.start();
diff --git a/benchmark/buffers/buffer-tojson.js b/benchmark/buffers/buffer-tojson.js
index 19a6fe89474838..71936fb622eae6 100644
--- a/benchmark/buffers/buffer-tojson.js
+++ b/benchmark/buffers/buffer-tojson.js
@@ -7,9 +7,8 @@ const bench = common.createBenchmark(main, {
len: [0, 10, 256, 4 * 1024]
});
-function main(conf) {
- const n = +conf.n;
- const buf = Buffer.allocUnsafe(+conf.len);
+function main({ n, len }) {
+ const buf = Buffer.allocUnsafe(len);
bench.start();
for (var i = 0; i < n; ++i)
diff --git a/benchmark/buffers/buffer-tostring.js b/benchmark/buffers/buffer-tostring.js
index 49916fca4023ca..b2a14d8aec55ce 100644
--- a/benchmark/buffers/buffer-tostring.js
+++ b/benchmark/buffers/buffer-tostring.js
@@ -9,11 +9,7 @@ const bench = common.createBenchmark(main, {
n: [1e7]
});
-function main(conf) {
- var encoding = conf.encoding;
- const args = conf.args | 0;
- const len = conf.len | 0;
- const n = conf.n | 0;
+function main({ encoding, args, len, n }) {
const buf = Buffer.alloc(len, 42);
if (encoding.length === 0)
diff --git a/benchmark/buffers/buffer-write-string.js b/benchmark/buffers/buffer-write-string.js
index 927aa0b68466ef..37d4fda52c04e3 100644
--- a/benchmark/buffers/buffer-write-string.js
+++ b/benchmark/buffers/buffer-write-string.js
@@ -10,12 +10,7 @@ const bench = common.createBenchmark(main, {
n: [1e7]
});
-function main(conf) {
- const len = +conf.len;
- const n = +conf.n;
- const encoding = conf.encoding;
- const args = conf.args;
-
+function main({ len, n, encoding, args }) {
const string = 'a'.repeat(len);
const buf = Buffer.allocUnsafe(len);
diff --git a/benchmark/buffers/buffer-write.js b/benchmark/buffers/buffer-write.js
index b500a13dedcccd..ce2fbe3103cb83 100644
--- a/benchmark/buffers/buffer-write.js
+++ b/benchmark/buffers/buffer-write.js
@@ -45,13 +45,11 @@ const mod = {
writeUInt32LE: UINT32
};
-function main(conf) {
- const noAssert = conf.noAssert === 'true';
- const len = +conf.millions * 1e6;
- const clazz = conf.buf === 'fast' ? Buffer : require('buffer').SlowBuffer;
+function main({ noAssert, millions, buf, type }) {
+ const len = millions * 1e6;
+ const clazz = buf === 'fast' ? Buffer : require('buffer').SlowBuffer;
const buff = new clazz(8);
- const type = conf.type || 'UInt8';
- const fn = `write${type}`;
+ const fn = `write${type || 'UInt8'}`;
if (/Int/.test(fn))
benchInt(buff, fn, len, noAssert);
@@ -63,7 +61,7 @@ function benchInt(buff, fn, len, noAssert) {
const m = mod[fn];
const testFunction = new Function('buff', `
for (var i = 0; i !== ${len}; i++) {
- buff.${fn}(i & ${m}, 0, ${JSON.stringify(noAssert)});
+ buff.${fn}(i & ${m}, 0, ${noAssert});
}
`);
bench.start();
@@ -74,7 +72,7 @@ function benchInt(buff, fn, len, noAssert) {
function benchFloat(buff, fn, len, noAssert) {
const testFunction = new Function('buff', `
for (var i = 0; i !== ${len}; i++) {
- buff.${fn}(i, 0, ${JSON.stringify(noAssert)});
+ buff.${fn}(i, 0, ${noAssert});
}
`);
bench.start();
diff --git a/benchmark/buffers/buffer_zero.js b/benchmark/buffers/buffer_zero.js
index 06ca50bbb99ee7..06b68c313f1241 100644
--- a/benchmark/buffers/buffer_zero.js
+++ b/benchmark/buffers/buffer_zero.js
@@ -10,13 +10,12 @@ const bench = common.createBenchmark(main, {
const zeroBuffer = Buffer.alloc(0);
const zeroString = '';
-function main(conf) {
- const n = +conf.n;
+function main({ n, type }) {
bench.start();
- if (conf.type === 'buffer')
+ if (type === 'buffer')
for (let i = 0; i < n * 1024; i++) Buffer.from(zeroBuffer);
- else if (conf.type === 'string')
+ else if (type === 'string')
for (let i = 0; i < n * 1024; i++) Buffer.from(zeroString);
bench.end(n);
diff --git a/benchmark/buffers/dataview-set.js b/benchmark/buffers/dataview-set.js
index 0dd4598ab7f1c5..ee5acfb1c1f72c 100644
--- a/benchmark/buffers/dataview-set.js
+++ b/benchmark/buffers/dataview-set.js
@@ -39,11 +39,11 @@ const mod = {
setUint32: UINT32
};
-function main(conf) {
- const len = +conf.millions * 1e6;
+function main({ millions, type }) {
+ type = type || 'Uint8';
+ const len = millions * 1e6;
const ab = new ArrayBuffer(8);
const dv = new DataView(ab, 0, 8);
- const type = conf.type || 'Uint8';
const le = /LE$/.test(type);
const fn = `set${type.replace(/[LB]E$/, '')}`;
diff --git a/benchmark/child_process/child-process-exec-stdout.js b/benchmark/child_process/child-process-exec-stdout.js
index 1e78d445f8376d..a891026b86971f 100644
--- a/benchmark/child_process/child-process-exec-stdout.js
+++ b/benchmark/child_process/child-process-exec-stdout.js
@@ -12,12 +12,10 @@ const bench = common.createBenchmark(childProcessExecStdout, {
dur: [5]
});
-function childProcessExecStdout(conf) {
+function childProcessExecStdout({ dur, len }) {
bench.start();
- const maxDuration = conf.dur * 1000;
- const len = +conf.len;
-
+ const maxDuration = dur * 1000;
const cmd = `yes "${'.'.repeat(len)}"`;
const child = exec(cmd, { 'stdio': ['ignore', 'pipe', 'ignore'] });
diff --git a/benchmark/child_process/child-process-params.js b/benchmark/child_process/child-process-params.js
index 644b2136a0f03f..df930395b2a015 100644
--- a/benchmark/child_process/child-process-params.js
+++ b/benchmark/child_process/child-process-params.js
@@ -20,11 +20,7 @@ const configs = {
const bench = common.createBenchmark(main, configs);
-function main(conf) {
- const n = +conf.n;
- const methodName = conf.methodName;
- const params = +conf.params;
-
+function main({ n, methodName, params }) {
const method = cp[methodName];
switch (methodName) {
diff --git a/benchmark/child_process/child-process-read-ipc.js b/benchmark/child_process/child-process-read-ipc.js
index e6fb9b19c202dc..3971eb8b39663e 100644
--- a/benchmark/child_process/child-process-read-ipc.js
+++ b/benchmark/child_process/child-process-read-ipc.js
@@ -18,11 +18,9 @@ if (process.argv[2] === 'child') {
dur: [5]
});
const spawn = require('child_process').spawn;
- function main(conf) {
- bench.start();
- const dur = +conf.dur;
- const len = +conf.len;
+ function main({ dur, len }) {
+ bench.start();
const options = { 'stdio': ['ignore', 1, 2, 'ipc'] };
const child = spawn(process.argv[0],
diff --git a/benchmark/child_process/child-process-read.js b/benchmark/child_process/child-process-read.js
index 91c9964e8d1414..0ff08af79483b8 100644
--- a/benchmark/child_process/child-process-read.js
+++ b/benchmark/child_process/child-process-read.js
@@ -17,12 +17,9 @@ const bench = common.createBenchmark(main, {
dur: [5]
});
-function main(conf) {
+function main({ dur, len }) {
bench.start();
- const dur = +conf.dur;
- const len = +conf.len;
-
const msg = `"${'.'.repeat(len)}"`;
const options = { 'stdio': ['ignore', 'pipe', 'ignore'] };
const child = child_process.spawn('yes', [msg], options);
diff --git a/benchmark/child_process/spawn-echo.js b/benchmark/child_process/spawn-echo.js
index 1ce40c3abf4541..62f46fb4c0e8b4 100644
--- a/benchmark/child_process/spawn-echo.js
+++ b/benchmark/child_process/spawn-echo.js
@@ -5,9 +5,7 @@ const bench = common.createBenchmark(main, {
});
const spawn = require('child_process').spawn;
-function main(conf) {
- const n = +conf.n;
-
+function main({ n }) {
bench.start();
go(n, n);
}
diff --git a/benchmark/cluster/echo.js b/benchmark/cluster/echo.js
index 07096d251db489..90ae7f9fb0b677 100644
--- a/benchmark/cluster/echo.js
+++ b/benchmark/cluster/echo.js
@@ -10,22 +10,19 @@ if (cluster.isMaster) {
n: [1e5]
});
- function main(conf) {
- const n = +conf.n;
- const workers = +conf.workers;
- const sends = +conf.sendsPerBroadcast;
- const expectedPerBroadcast = sends * workers;
- var payload;
+ function main({ n, workers, sendsPerBroadcast, payload }) {
+ const expectedPerBroadcast = sendsPerBroadcast * workers;
var readies = 0;
var broadcasts = 0;
var msgCount = 0;
+ var data;
- switch (conf.payload) {
+ switch (payload) {
case 'string':
- payload = 'hello world!';
+ data = 'hello world!';
break;
case 'object':
- payload = { action: 'pewpewpew', powerLevel: 9001 };
+ data = { action: 'pewpewpew', powerLevel: 9001 };
break;
default:
throw new Error('Unsupported payload type');
@@ -51,8 +48,8 @@ if (cluster.isMaster) {
}
for (id in cluster.workers) {
const worker = cluster.workers[id];
- for (var i = 0; i < sends; ++i)
- worker.send(payload);
+ for (var i = 0; i < sendsPerBroadcast; ++i)
+ worker.send(data);
}
}
diff --git a/benchmark/compare.R b/benchmark/compare.R
index 5085f4ea73b71a..1527d680c38182 100644
--- a/benchmark/compare.R
+++ b/benchmark/compare.R
@@ -35,6 +35,21 @@ if (!is.null(plot.filename)) {
ggsave(plot.filename, p);
}
+# computes the shared standard error, as used in the Welch t-test
+welch.sd = function (old.rate, new.rate) {
+ old.se.squared = var(old.rate) / length(old.rate)
+ new.se.squared = var(new.rate) / length(new.rate)
+ return(sqrt(old.se.squared + new.se.squared))
+}
+
+# calculate the improvement confidence interval. The improvement is calculated
+# by dividing by old.mu and not new.mu, because old.mu is what the mean
+# improvement is calculated relative to.
+confidence.interval = function (shared.se, old.mu, w, risk) {
+ interval = qt(1 - (risk / 2), w$parameter) * shared.se;
+ return(sprintf("±%.2f%%", (interval / old.mu) * 100))
+}
+
# Print a table with results
statistics = ddply(dat, "name", function(subdat) {
old.rate = subset(subdat, binary == "old")$rate;
@@ -45,33 +60,42 @@ statistics = ddply(dat, "name", function(subdat) {
new.mu = mean(new.rate);
improvement = sprintf("%.2f %%", ((new.mu - old.mu) / old.mu * 100));
- p.value = NA;
- confidence = 'NA';
+ r = list(
+ confidence = "NA",
+ improvement = improvement,
+ "accuracy (*)" = "NA",
+ "(**)" = "NA",
+ "(***)" = "NA"
+ );
+
# Check if there is enough data to calculate the calculate the p-value
if (length(old.rate) > 1 && length(new.rate) > 1) {
# Perform a statistics test to see of there actually is a difference in
# performance.
w = t.test(rate ~ binary, data=subdat);
- p.value = w$p.value;
+ shared.se = welch.sd(old.rate, new.rate)
# Add user friendly stars to the table. There should be at least one star
# before you can say that there is an improvement.
confidence = '';
- if (p.value < 0.001) {
+ if (w$p.value < 0.001) {
confidence = '***';
- } else if (p.value < 0.01) {
+ } else if (w$p.value < 0.01) {
confidence = '**';
- } else if (p.value < 0.05) {
+ } else if (w$p.value < 0.05) {
confidence = '*';
}
+
+ r = list(
+ confidence = confidence,
+ improvement = improvement,
+ "accuracy (*)" = confidence.interval(shared.se, old.mu, w, 0.05),
+ "(**)" = confidence.interval(shared.se, old.mu, w, 0.01),
+ "(***)" = confidence.interval(shared.se, old.mu, w, 0.001)
+ );
}
- r = list(
- improvement = improvement,
- confidence = confidence,
- p.value = p.value
- );
- return(data.frame(r));
+ return(data.frame(r, check.names=FALSE));
});
@@ -81,3 +105,16 @@ statistics$name = NULL;
options(width = 200);
print(statistics);
+cat("\n")
+cat(sprintf(
+"Be aware that when doing many comparisons the risk of a false-positive
+result increases. In this case there are %d comparisons, you can thus
+expect the following number of false-positive results:
+ %.2f false positives, when considering a 5%% risk acceptance (*, **, ***),
+ %.2f false positives, when considering a 1%% risk acceptance (**, ***),
+ %.2f false positives, when considering a 0.1%% risk acceptance (***)
+",
+nrow(statistics),
+nrow(statistics) * 0.05,
+nrow(statistics) * 0.01,
+nrow(statistics) * 0.001))
diff --git a/benchmark/dgram/array-vs-concat.js b/benchmark/dgram/array-vs-concat.js
index 7ee4e2d3acb6e1..a7843a9c7f7c28 100644
--- a/benchmark/dgram/array-vs-concat.js
+++ b/benchmark/dgram/array-vs-concat.js
@@ -2,6 +2,7 @@
'use strict';
const common = require('../common.js');
+const dgram = require('dgram');
const PORT = common.PORT;
// `num` is the number of send requests to queue up each time.
@@ -15,34 +16,15 @@ const bench = common.createBenchmark(main, {
dur: [5]
});
-var dur;
-var len;
-var num;
-var type;
-var chunk;
-var chunks;
-
-function main(conf) {
- dur = +conf.dur;
- len = +conf.len;
- num = +conf.num;
- type = conf.type;
- chunks = +conf.chunks;
-
- chunk = [];
+function main({ dur, len, num, type, chunks }) {
+ const chunk = [];
for (var i = 0; i < chunks; i++) {
chunk.push(Buffer.allocUnsafe(Math.round(len / chunks)));
}
- server();
-}
-
-const dgram = require('dgram');
-
-function server() {
+ // Server
var sent = 0;
const socket = dgram.createSocket('udp4');
-
const onsend = type === 'concat' ? onsendConcat : onsendMulti;
function onsendConcat() {
diff --git a/benchmark/dgram/bind-params.js b/benchmark/dgram/bind-params.js
index 411bef98adcf7c..5f7999f7a39241 100644
--- a/benchmark/dgram/bind-params.js
+++ b/benchmark/dgram/bind-params.js
@@ -12,10 +12,9 @@ const configs = {
const bench = common.createBenchmark(main, configs);
const noop = () => {};
-function main(conf) {
- const n = +conf.n;
- const port = conf.port === 'true' ? 0 : undefined;
- const address = conf.address === 'true' ? '0.0.0.0' : undefined;
+function main({ n, port, address }) {
+ port = port === 'true' ? 0 : undefined;
+ address = address === 'true' ? '0.0.0.0' : undefined;
if (port !== undefined && address !== undefined) {
bench.start();
diff --git a/benchmark/dgram/multi-buffer.js b/benchmark/dgram/multi-buffer.js
index 15f70760abfd9e..ee74c584e45278 100644
--- a/benchmark/dgram/multi-buffer.js
+++ b/benchmark/dgram/multi-buffer.js
@@ -2,6 +2,7 @@
'use strict';
const common = require('../common.js');
+const dgram = require('dgram');
const PORT = common.PORT;
// `num` is the number of send requests to queue up each time.
@@ -15,31 +16,11 @@ const bench = common.createBenchmark(main, {
dur: [5]
});
-var dur;
-var len;
-var num;
-var type;
-var chunk;
-var chunks;
-
-function main(conf) {
- dur = +conf.dur;
- len = +conf.len;
- num = +conf.num;
- type = conf.type;
- chunks = +conf.chunks;
-
- chunk = [];
+function main({ dur, len, num, type, chunks }) {
+ const chunk = [];
for (var i = 0; i < chunks; i++) {
chunk.push(Buffer.allocUnsafe(Math.round(len / chunks)));
}
-
- server();
-}
-
-const dgram = require('dgram');
-
-function server() {
var sent = 0;
var received = 0;
const socket = dgram.createSocket('udp4');
diff --git a/benchmark/dgram/offset-length.js b/benchmark/dgram/offset-length.js
index 7f5a02afe58107..8a2df9ac67c1b6 100644
--- a/benchmark/dgram/offset-length.js
+++ b/benchmark/dgram/offset-length.js
@@ -2,6 +2,7 @@
'use strict';
const common = require('../common.js');
+const dgram = require('dgram');
const PORT = common.PORT;
// `num` is the number of send requests to queue up each time.
@@ -14,24 +15,8 @@ const bench = common.createBenchmark(main, {
dur: [5]
});
-var dur;
-var len;
-var num;
-var type;
-var chunk;
-
-function main(conf) {
- dur = +conf.dur;
- len = +conf.len;
- num = +conf.num;
- type = conf.type;
- chunk = Buffer.allocUnsafe(len);
- server();
-}
-
-const dgram = require('dgram');
-
-function server() {
+function main({ dur, len, num, type }) {
+ const chunk = Buffer.allocUnsafe(len);
var sent = 0;
var received = 0;
const socket = dgram.createSocket('udp4');
diff --git a/benchmark/dgram/single-buffer.js b/benchmark/dgram/single-buffer.js
index 454662b5425df7..0bf650d265c177 100644
--- a/benchmark/dgram/single-buffer.js
+++ b/benchmark/dgram/single-buffer.js
@@ -2,6 +2,7 @@
'use strict';
const common = require('../common.js');
+const dgram = require('dgram');
const PORT = common.PORT;
// `num` is the number of send requests to queue up each time.
@@ -14,24 +15,8 @@ const bench = common.createBenchmark(main, {
dur: [5]
});
-var dur;
-var len;
-var num;
-var type;
-var chunk;
-
-function main(conf) {
- dur = +conf.dur;
- len = +conf.len;
- num = +conf.num;
- type = conf.type;
- chunk = Buffer.allocUnsafe(len);
- server();
-}
-
-const dgram = require('dgram');
-
-function server() {
+function main({ dur, len, num, type }) {
+ const chunk = Buffer.allocUnsafe(len);
var sent = 0;
var received = 0;
const socket = dgram.createSocket('udp4');
diff --git a/benchmark/dns/lookup.js b/benchmark/dns/lookup.js
index bb562d528c5b37..3cc228c5669265 100644
--- a/benchmark/dns/lookup.js
+++ b/benchmark/dns/lookup.js
@@ -9,13 +9,10 @@ const bench = common.createBenchmark(main, {
n: [5e6]
});
-function main(conf) {
- const name = conf.name;
- const n = +conf.n;
- const all = conf.all === 'true' ? true : false;
+function main({ name, n, all }) {
var i = 0;
- if (all) {
+ if (all === 'true') {
const opts = { all: true };
bench.start();
(function cb() {
diff --git a/benchmark/domain/domain-fn-args.js b/benchmark/domain/domain-fn-args.js
index 0b98d17674064c..fe912e34d206e8 100644
--- a/benchmark/domain/domain-fn-args.js
+++ b/benchmark/domain/domain-fn-args.js
@@ -3,17 +3,15 @@ const common = require('../common.js');
const domain = require('domain');
const bench = common.createBenchmark(main, {
- arguments: [0, 1, 2, 3],
+ args: [0, 1, 2, 3],
n: [10]
});
const bdomain = domain.create();
const gargs = [1, 2, 3];
-function main(conf) {
-
- const n = +conf.n;
- const myArguments = gargs.slice(0, conf.arguments);
+function main({ n, args }) {
+ const myArguments = gargs.slice(0, args);
bench.start();
bdomain.enter();
diff --git a/benchmark/es/defaultparams-bench.js b/benchmark/es/defaultparams-bench.js
index 1393abbe54395c..ce2132718ca369 100644
--- a/benchmark/es/defaultparams-bench.js
+++ b/benchmark/es/defaultparams-bench.js
@@ -38,10 +38,10 @@ function runDefaultParams(n) {
bench.end(n / 1e6);
}
-function main(conf) {
- const n = +conf.millions * 1e6;
+function main({ millions, method }) {
+ const n = millions * 1e6;
- switch (conf.method) {
+ switch (method) {
case '':
// Empty string falls through to next line as default, mostly for tests.
case 'withoutdefaults':
diff --git a/benchmark/es/destructuring-bench.js b/benchmark/es/destructuring-bench.js
index a6c9a81ae02895..f244506860d248 100644
--- a/benchmark/es/destructuring-bench.js
+++ b/benchmark/es/destructuring-bench.js
@@ -34,10 +34,10 @@ function runSwapDestructured(n) {
bench.end(n / 1e6);
}
-function main(conf) {
- const n = +conf.millions * 1e6;
+function main({ millions, method }) {
+ const n = millions * 1e6;
- switch (conf.method) {
+ switch (method) {
case '':
// Empty string falls through to next line as default, mostly for tests.
case 'swap':
diff --git a/benchmark/es/destructuring-object-bench.js b/benchmark/es/destructuring-object-bench.js
index 63e085a2424430..73687f018de9dd 100644
--- a/benchmark/es/destructuring-object-bench.js
+++ b/benchmark/es/destructuring-object-bench.js
@@ -33,10 +33,10 @@ function runDestructured(n) {
bench.end(n / 1e6);
}
-function main(conf) {
- const n = +conf.millions * 1e6;
+function main({ millions, method }) {
+ const n = millions * 1e6;
- switch (conf.method) {
+ switch (method) {
case '':
// Empty string falls through to next line as default, mostly for tests.
case 'normal':
diff --git a/benchmark/es/foreach-bench.js b/benchmark/es/foreach-bench.js
index 62aa02236fc7ae..c7caa7cee6f461 100644
--- a/benchmark/es/foreach-bench.js
+++ b/benchmark/es/foreach-bench.js
@@ -52,17 +52,15 @@ function useForEach(n, items) {
bench.end(n / 1e6);
}
-function main(conf) {
- const n = +conf.millions * 1e6;
- const count = +conf.count;
-
+function main({ millions, count, method }) {
+ const n = millions * 1e6;
const items = new Array(count);
var i;
var fn;
for (i = 0; i < count; i++)
items[i] = i;
- switch (conf.method) {
+ switch (method) {
case '':
// Empty string falls through to next line as default, mostly for tests.
case 'for':
diff --git a/benchmark/es/map-bench.js b/benchmark/es/map-bench.js
index 035ed1a22aaf91..ba8e35c2eb934f 100644
--- a/benchmark/es/map-bench.js
+++ b/benchmark/es/map-bench.js
@@ -108,10 +108,10 @@ function runMap(n) {
bench.end(n / 1e6);
}
-function main(conf) {
- const n = +conf.millions * 1e6;
+function main({ millions, method }) {
+ const n = millions * 1e6;
- switch (conf.method) {
+ switch (method) {
case '':
// Empty string falls through to next line as default, mostly for tests.
case 'object':
diff --git a/benchmark/es/restparams-bench.js b/benchmark/es/restparams-bench.js
index 32fa985dedb806..78299d292ce6f6 100644
--- a/benchmark/es/restparams-bench.js
+++ b/benchmark/es/restparams-bench.js
@@ -60,10 +60,10 @@ function runUseArguments(n) {
bench.end(n / 1e6);
}
-function main(conf) {
- const n = +conf.millions * 1e6;
+function main({ millions, method }) {
+ const n = millions * 1e6;
- switch (conf.method) {
+ switch (method) {
case '':
// Empty string falls through to next line as default, mostly for tests.
case 'copy':
diff --git a/benchmark/es/spread-bench.js b/benchmark/es/spread-bench.js
index b6dfb5963e7acc..3c6cc93ea4f817 100644
--- a/benchmark/es/spread-bench.js
+++ b/benchmark/es/spread-bench.js
@@ -23,16 +23,16 @@ function makeTest(count, rest) {
}
}
-function main(conf) {
- const n = +conf.millions * 1e6;
- const ctx = conf.context === 'context' ? {} : null;
- var fn = makeTest(conf.count, conf.rest);
- const args = new Array(conf.count);
+function main({ millions, context, count, rest, method }) {
+ const n = millions * 1e6;
+ const ctx = context === 'context' ? {} : null;
+ var fn = makeTest(count, rest);
+ const args = new Array(count);
var i;
- for (i = 0; i < conf.count; i++)
+ for (i = 0; i < count; i++)
args[i] = i;
- switch (conf.method) {
+ switch (method) {
case '':
// Empty string falls through to next line as default, mostly for tests.
case 'apply':
diff --git a/benchmark/es/string-concatenations.js b/benchmark/es/string-concatenations.js
index b7f5c319361c6c..a40b7fa8c3b9f9 100644
--- a/benchmark/es/string-concatenations.js
+++ b/benchmark/es/string-concatenations.js
@@ -17,15 +17,13 @@ const configs = {
const bench = common.createBenchmark(main, configs);
-function main(conf) {
- const n = +conf.n;
-
+function main({ n, mode }) {
const str = 'abc';
const num = 123;
let string;
- switch (conf.mode) {
+ switch (mode) {
case '':
// Empty string falls through to next line as default, mostly for tests.
case 'multi-concat':
diff --git a/benchmark/es/string-repeat.js b/benchmark/es/string-repeat.js
index 1ddc7db78c7f86..e5bdbb5cc193c1 100644
--- a/benchmark/es/string-repeat.js
+++ b/benchmark/es/string-repeat.js
@@ -12,14 +12,12 @@ const configs = {
const bench = common.createBenchmark(main, configs);
-function main(conf) {
- const n = +conf.n;
- const size = +conf.size;
- const character = conf.encoding === 'ascii' ? 'a' : '\ud83d\udc0e'; // '🐎'
+function main({ n, size, encoding, mode }) {
+ const character = encoding === 'ascii' ? 'a' : '\ud83d\udc0e'; // '🐎'
let str;
- switch (conf.mode) {
+ switch (mode) {
case '':
// Empty string falls through to next line as default, mostly for tests.
case 'Array':
diff --git a/benchmark/events/ee-add-remove.js b/benchmark/events/ee-add-remove.js
index 7b6ec35f29b636..eee8ff4524ed1a 100644
--- a/benchmark/events/ee-add-remove.js
+++ b/benchmark/events/ee-add-remove.js
@@ -4,9 +4,7 @@ const events = require('events');
const bench = common.createBenchmark(main, { n: [25e4] });
-function main(conf) {
- const n = conf.n | 0;
-
+function main({ n }) {
const ee = new events.EventEmitter();
const listeners = [];
diff --git a/benchmark/events/ee-emit.js b/benchmark/events/ee-emit.js
index 3d7eb43b228b71..686ed10d3ecbfd 100644
--- a/benchmark/events/ee-emit.js
+++ b/benchmark/events/ee-emit.js
@@ -8,11 +8,7 @@ const bench = common.createBenchmark(main, {
listeners: [1, 5, 10],
});
-function main(conf) {
- const n = conf.n | 0;
- const argc = conf.argc | 0;
- const listeners = Math.max(conf.listeners | 0, 1);
-
+function main({ n, argc, listeners }) {
const ee = new EventEmitter();
for (var k = 0; k < listeners; k += 1)
diff --git a/benchmark/events/ee-listener-count-on-prototype.js b/benchmark/events/ee-listener-count-on-prototype.js
index 708f62f06687fe..cf6a33f44af9d1 100644
--- a/benchmark/events/ee-listener-count-on-prototype.js
+++ b/benchmark/events/ee-listener-count-on-prototype.js
@@ -4,9 +4,7 @@ const EventEmitter = require('events').EventEmitter;
const bench = common.createBenchmark(main, { n: [5e7] });
-function main(conf) {
- const n = conf.n | 0;
-
+function main({ n }) {
const ee = new EventEmitter();
for (var k = 0; k < 5; k += 1) {
diff --git a/benchmark/events/ee-listeners-many.js b/benchmark/events/ee-listeners-many.js
index 6cb0682b1ca9c3..9a1562eb2c005c 100644
--- a/benchmark/events/ee-listeners-many.js
+++ b/benchmark/events/ee-listeners-many.js
@@ -4,9 +4,7 @@ const EventEmitter = require('events').EventEmitter;
const bench = common.createBenchmark(main, { n: [5e6] });
-function main(conf) {
- const n = conf.n | 0;
-
+function main({ n }) {
const ee = new EventEmitter();
ee.setMaxListeners(101);
diff --git a/benchmark/events/ee-listeners.js b/benchmark/events/ee-listeners.js
index dff73de0b17fc4..d076dc646c93a7 100644
--- a/benchmark/events/ee-listeners.js
+++ b/benchmark/events/ee-listeners.js
@@ -4,9 +4,7 @@ const EventEmitter = require('events').EventEmitter;
const bench = common.createBenchmark(main, { n: [5e6] });
-function main(conf) {
- const n = conf.n | 0;
-
+function main({ n }) {
const ee = new EventEmitter();
for (var k = 0; k < 5; k += 1) {
diff --git a/benchmark/events/ee-once.js b/benchmark/events/ee-once.js
index d9e87a2b0843af..e1a09fb4b71167 100644
--- a/benchmark/events/ee-once.js
+++ b/benchmark/events/ee-once.js
@@ -4,9 +4,7 @@ const EventEmitter = require('events').EventEmitter;
const bench = common.createBenchmark(main, { n: [2e7] });
-function main(conf) {
- const n = conf.n | 0;
-
+function main({ n }) {
const ee = new EventEmitter();
function listener() {}
diff --git a/benchmark/fs/bench-readdir.js b/benchmark/fs/bench-readdir.js
index eb15e72724d8cb..a3e19e242dadbe 100644
--- a/benchmark/fs/bench-readdir.js
+++ b/benchmark/fs/bench-readdir.js
@@ -9,9 +9,7 @@ const bench = common.createBenchmark(main, {
});
-function main(conf) {
- const n = conf.n >>> 0;
-
+function main({ n }) {
bench.start();
(function r(cntr) {
if (cntr-- <= 0)
diff --git a/benchmark/fs/bench-readdirSync.js b/benchmark/fs/bench-readdirSync.js
index 8ba2a6ec4976c1..ef3327163e8c22 100644
--- a/benchmark/fs/bench-readdirSync.js
+++ b/benchmark/fs/bench-readdirSync.js
@@ -9,9 +9,7 @@ const bench = common.createBenchmark(main, {
});
-function main(conf) {
- const n = conf.n >>> 0;
-
+function main({ n }) {
bench.start();
for (var i = 0; i < n; i++) {
fs.readdirSync(path.resolve(__dirname, '../../lib/'));
diff --git a/benchmark/fs/bench-realpath.js b/benchmark/fs/bench-realpath.js
index 881bd0031f0024..6690d7e87b091f 100644
--- a/benchmark/fs/bench-realpath.js
+++ b/benchmark/fs/bench-realpath.js
@@ -12,10 +12,7 @@ const bench = common.createBenchmark(main, {
});
-function main(conf) {
- const n = conf.n >>> 0;
- const pathType = conf.pathType;
-
+function main({ n, pathType }) {
bench.start();
if (pathType === 'relative')
relativePath(n);
diff --git a/benchmark/fs/bench-realpathSync.js b/benchmark/fs/bench-realpathSync.js
index 2239d9748af6af..1c751156f73d53 100644
--- a/benchmark/fs/bench-realpathSync.js
+++ b/benchmark/fs/bench-realpathSync.js
@@ -14,10 +14,7 @@ const bench = common.createBenchmark(main, {
});
-function main(conf) {
- const n = conf.n >>> 0;
- const pathType = conf.pathType;
-
+function main({ n, pathType }) {
bench.start();
if (pathType === 'relative')
relativePath(n);
diff --git a/benchmark/fs/bench-stat.js b/benchmark/fs/bench-stat.js
index 05910d3fc3f83f..8a401ae0b9d857 100644
--- a/benchmark/fs/bench-stat.js
+++ b/benchmark/fs/bench-stat.js
@@ -9,9 +9,7 @@ const bench = common.createBenchmark(main, {
});
-function main(conf) {
- const n = conf.n >>> 0;
- const statType = conf.statType;
+function main({ n, statType }) {
var arg;
if (statType === 'fstat')
arg = fs.openSync(__filename, 'r');
diff --git a/benchmark/fs/bench-statSync.js b/benchmark/fs/bench-statSync.js
index 901f3f1beeaa11..bd8754a6c3d0e3 100644
--- a/benchmark/fs/bench-statSync.js
+++ b/benchmark/fs/bench-statSync.js
@@ -9,9 +9,7 @@ const bench = common.createBenchmark(main, {
});
-function main(conf) {
- const n = conf.n >>> 0;
- const statSyncType = conf.statSyncType;
+function main({ n, statSyncType }) {
const arg = (statSyncType === 'fstatSync' ?
fs.openSync(__filename, 'r') :
__dirname);
diff --git a/benchmark/fs/read-stream-throughput.js b/benchmark/fs/read-stream-throughput.js
index e0dc7edc05ea3e..3af80132725ec0 100644
--- a/benchmark/fs/read-stream-throughput.js
+++ b/benchmark/fs/read-stream-throughput.js
@@ -18,7 +18,7 @@ const bench = common.createBenchmark(main, {
function main(conf) {
encodingType = conf.encodingType;
- size = +conf.size;
+ size = conf.size;
filesize = conf.filesize;
switch (encodingType) {
diff --git a/benchmark/fs/readFileSync.js b/benchmark/fs/readFileSync.js
index 8fd0b50421e761..c28adeb229b358 100644
--- a/benchmark/fs/readFileSync.js
+++ b/benchmark/fs/readFileSync.js
@@ -7,9 +7,7 @@ const bench = common.createBenchmark(main, {
n: [60e4]
});
-function main(conf) {
- const n = +conf.n;
-
+function main({ n }) {
bench.start();
for (var i = 0; i < n; ++i)
fs.readFileSync(__filename);
diff --git a/benchmark/fs/readfile.js b/benchmark/fs/readfile.js
index 7c55073fe0f017..7da7758ed06638 100644
--- a/benchmark/fs/readfile.js
+++ b/benchmark/fs/readfile.js
@@ -15,8 +15,7 @@ const bench = common.createBenchmark(main, {
concurrent: [1, 10]
});
-function main(conf) {
- const len = +conf.len;
+function main({ len, dur, concurrent }) {
try { fs.unlinkSync(filename); } catch (e) {}
var data = Buffer.alloc(len, 'x');
fs.writeFileSync(filename, data);
@@ -30,7 +29,7 @@ function main(conf) {
bench.end(reads);
try { fs.unlinkSync(filename); } catch (e) {}
process.exit(0);
- }, +conf.dur * 1000);
+ }, dur * 1000);
function read() {
fs.readFile(filename, afterRead);
@@ -48,6 +47,5 @@ function main(conf) {
read();
}
- var cur = +conf.concurrent;
- while (cur--) read();
+ while (concurrent--) read();
}
diff --git a/benchmark/fs/write-stream-throughput.js b/benchmark/fs/write-stream-throughput.js
index 08f059156f2cd9..6fe00cde48cabb 100644
--- a/benchmark/fs/write-stream-throughput.js
+++ b/benchmark/fs/write-stream-throughput.js
@@ -13,10 +13,7 @@ const bench = common.createBenchmark(main, {
size: [2, 1024, 65535, 1024 * 1024]
});
-function main(conf) {
- const dur = +conf.dur;
- const encodingType = conf.encodingType;
- const size = +conf.size;
+function main({ dur, encodingType, size }) {
var encoding;
var chunk;
diff --git a/benchmark/http/_chunky_http_client.js b/benchmark/http/_chunky_http_client.js
index ee1f4f8caa2510..a90535e489f4c9 100644
--- a/benchmark/http/_chunky_http_client.js
+++ b/benchmark/http/_chunky_http_client.js
@@ -11,9 +11,7 @@ const bench = common.createBenchmark(main, {
});
-function main(conf) {
- const len = +conf.len;
- const num = +conf.n;
+function main({ len, n }) {
var todo = [];
const headers = [];
// Chose 7 because 9 showed "Connection error" / "Connection closed"
@@ -78,7 +76,7 @@ function main(conf) {
size = (size * mult + add) % mod;
if (did) {
count += 1;
- if (count === num) {
+ if (count === n) {
bench.end(count);
process.exit(0);
} else {
diff --git a/benchmark/http/bench-parser.js b/benchmark/http/bench-parser.js
index 1bc661e7289168..4c691d71345da3 100644
--- a/benchmark/http/bench-parser.js
+++ b/benchmark/http/bench-parser.js
@@ -15,9 +15,7 @@ const bench = common.createBenchmark(main, {
});
-function main(conf) {
- const len = conf.len >>> 0;
- const n = conf.n >>> 0;
+function main({ len, n }) {
var header = `GET /hello HTTP/1.1${CRLF}Content-Type: text/plain${CRLF}`;
for (var i = 0; i < len; i++) {
diff --git a/benchmark/http/check_invalid_header_char.js b/benchmark/http/check_invalid_header_char.js
index d71bc6fc607ef5..b9933d690e25cc 100644
--- a/benchmark/http/check_invalid_header_char.js
+++ b/benchmark/http/check_invalid_header_char.js
@@ -30,10 +30,7 @@ const bench = common.createBenchmark(main, {
n: [1e6],
});
-function main(conf) {
- const n = +conf.n;
- const key = conf.key;
-
+function main({ n, key }) {
bench.start();
for (var i = 0; i < n; i++) {
_checkInvalidHeaderChar(key);
diff --git a/benchmark/http/check_is_http_token.js b/benchmark/http/check_is_http_token.js
index 92df3445b45c45..c16993819be93b 100644
--- a/benchmark/http/check_is_http_token.js
+++ b/benchmark/http/check_is_http_token.js
@@ -40,10 +40,7 @@ const bench = common.createBenchmark(main, {
n: [1e6],
});
-function main(conf) {
- const n = +conf.n;
- const key = conf.key;
-
+function main({ n, key }) {
bench.start();
for (var i = 0; i < n; i++) {
_checkIsHttpToken(key);
diff --git a/benchmark/http/chunked.js b/benchmark/http/chunked.js
index 1056f456ef827d..5615395ee0b175 100644
--- a/benchmark/http/chunked.js
+++ b/benchmark/http/chunked.js
@@ -16,9 +16,9 @@ const bench = common.createBenchmark(main, {
c: [100]
});
-function main(conf) {
+function main({ len, n, c }) {
const http = require('http');
- const chunk = Buffer.alloc(conf.len, '8');
+ const chunk = Buffer.alloc(len, '8');
const server = http.createServer(function(req, res) {
function send(left) {
@@ -28,12 +28,12 @@ function main(conf) {
send(left - 1);
}, 0);
}
- send(conf.n);
+ send(n);
});
server.listen(common.PORT, function() {
bench.http({
- connections: conf.c
+ connections: c
}, function() {
server.close();
});
diff --git a/benchmark/http/client-request-body.js b/benchmark/http/client-request-body.js
index a6849580cfb44d..49bb9130ae3a8a 100644
--- a/benchmark/http/client-request-body.js
+++ b/benchmark/http/client-request-body.js
@@ -11,13 +11,10 @@ const bench = common.createBenchmark(main, {
method: ['write', 'end']
});
-function main(conf) {
- const dur = +conf.dur;
- const len = +conf.len;
-
+function main({ dur, len, type, method }) {
var encoding;
var chunk;
- switch (conf.type) {
+ switch (type) {
case 'buf':
chunk = Buffer.alloc(len, 'x');
break;
@@ -55,7 +52,7 @@ function main(conf) {
pummel(); // Line up next request.
res.resume();
});
- if (conf.method === 'write') {
+ if (method === 'write') {
req.write(chunk, encoding);
req.end();
} else {
diff --git a/benchmark/http/cluster.js b/benchmark/http/cluster.js
index 352b1d2645008f..56393fa1ab0518 100644
--- a/benchmark/http/cluster.js
+++ b/benchmark/http/cluster.js
@@ -15,7 +15,7 @@ if (cluster.isMaster) {
require('../fixtures/simple-http-server.js').listen(port);
}
-function main(conf) {
+function main({ type, len, c }) {
process.env.PORT = PORT;
var workers = 0;
const w1 = cluster.fork();
@@ -27,11 +27,11 @@ function main(conf) {
return;
setTimeout(function() {
- const path = `/${conf.type}/${conf.len}`;
+ const path = `/${type}/${len}`;
bench.http({
path: path,
- connections: conf.c
+ connections: c
}, function() {
w1.destroy();
w2.destroy();
diff --git a/benchmark/http/create-clientrequest.js b/benchmark/http/create-clientrequest.js
index d19a6fb43441ce..97316a7e800419 100644
--- a/benchmark/http/create-clientrequest.js
+++ b/benchmark/http/create-clientrequest.js
@@ -8,10 +8,7 @@ const bench = common.createBenchmark(main, {
n: [1e6]
});
-function main(conf) {
- const len = +conf.len;
- const n = +conf.n;
-
+function main({ len, n }) {
const path = '/'.repeat(len);
const opts = { path: path, createConnection: function() {} };
diff --git a/benchmark/http/end-vs-write-end.js b/benchmark/http/end-vs-write-end.js
index b7db1eaa7839e2..f839e5c3cd9ed9 100644
--- a/benchmark/http/end-vs-write-end.js
+++ b/benchmark/http/end-vs-write-end.js
@@ -17,11 +17,10 @@ const bench = common.createBenchmark(main, {
method: ['write', 'end']
});
-function main(conf) {
+function main({ len, type, method, c }) {
const http = require('http');
var chunk;
- const len = conf.len;
- switch (conf.type) {
+ switch (type) {
case 'buf':
chunk = Buffer.alloc(len, 'x');
break;
@@ -42,15 +41,15 @@ function main(conf) {
res.end(chunk);
}
- const method = conf.method === 'write' ? write : end;
+ const fn = method === 'write' ? write : end;
const server = http.createServer(function(req, res) {
- method(res);
+ fn(res);
});
server.listen(common.PORT, function() {
bench.http({
- connections: conf.c
+ connections: c
}, function() {
server.close();
});
diff --git a/benchmark/http/simple.js b/benchmark/http/simple.js
index 544aad49688386..d5351815fc1b7e 100644
--- a/benchmark/http/simple.js
+++ b/benchmark/http/simple.js
@@ -12,17 +12,16 @@ const bench = common.createBenchmark(main, {
res: ['normal', 'setHeader', 'setHeaderWH']
});
-function main(conf) {
+function main({ type, len, chunks, c, chunkedEnc, res }) {
process.env.PORT = PORT;
var server = require('../fixtures/simple-http-server.js')
.listen(PORT)
.on('listening', function() {
- const path =
- `/${conf.type}/${conf.len}/${conf.chunks}/${conf.res}/${conf.chunkedEnc}`;
+ const path = `/${type}/${len}/${chunks}/${res}/${chunkedEnc}`;
bench.http({
path: path,
- connections: conf.c
+ connections: c
}, function() {
server.close();
});
diff --git a/benchmark/http2/headers.js b/benchmark/http2/headers.js
index 3c8d0465acb0d0..ad1eb50007a92d 100644
--- a/benchmark/http2/headers.js
+++ b/benchmark/http2/headers.js
@@ -9,9 +9,7 @@ const bench = common.createBenchmark(main, {
benchmarker: ['h2load']
}, { flags: ['--no-warnings', '--expose-http2'] });
-function main(conf) {
- const n = +conf.n;
- const nheaders = +conf.nheaders;
+function main({ n, nheaders }) {
const http2 = require('http2');
const server = http2.createServer({
maxHeaderListPairs: 20000
diff --git a/benchmark/http2/respond-with-fd.js b/benchmark/http2/respond-with-fd.js
index 791e5f3d1e7da6..6076cf91be9d84 100644
--- a/benchmark/http2/respond-with-fd.js
+++ b/benchmark/http2/respond-with-fd.js
@@ -14,15 +14,11 @@ const bench = common.createBenchmark(main, {
benchmarker: ['h2load']
}, { flags: ['--no-warnings', '--expose-http2'] });
-function main(conf) {
-
+function main({ requests, streams, clients }) {
fs.open(file, 'r', (err, fd) => {
if (err)
throw err;
- const n = +conf.requests;
- const m = +conf.streams;
- const c = +conf.clients;
const http2 = require('http2');
const server = http2.createServer();
server.on('stream', (stream) => {
@@ -32,10 +28,10 @@ function main(conf) {
server.listen(PORT, () => {
bench.http({
path: '/',
- requests: n,
- maxConcurrentStreams: m,
- clients: c,
- threads: c
+ requests,
+ maxConcurrentStreams: streams,
+ clients,
+ threads: clients
}, () => server.close());
});
diff --git a/benchmark/http2/simple.js b/benchmark/http2/simple.js
index e8cb3ddee2dff8..37c78d340181a8 100644
--- a/benchmark/http2/simple.js
+++ b/benchmark/http2/simple.js
@@ -15,10 +15,7 @@ const bench = common.createBenchmark(main, {
benchmarker: ['h2load']
}, { flags: ['--no-warnings', '--expose-http2'] });
-function main(conf) {
- const n = +conf.requests;
- const m = +conf.streams;
- const c = +conf.clients;
+function main({ requests, streams, clients }) {
const http2 = require('http2');
const server = http2.createServer();
server.on('stream', (stream) => {
@@ -30,10 +27,10 @@ function main(conf) {
server.listen(PORT, () => {
bench.http({
path: '/',
- requests: n,
- maxConcurrentStreams: m,
- clients: c,
- threads: c
+ requests,
+ maxConcurrentStreams: streams,
+ clients,
+ threads: clients
}, () => { server.close(); });
});
}
diff --git a/benchmark/http2/write.js b/benchmark/http2/write.js
index 91b9c8f0c5c073..7a802ef84fd9ed 100644
--- a/benchmark/http2/write.js
+++ b/benchmark/http2/write.js
@@ -10,19 +10,16 @@ const bench = common.createBenchmark(main, {
benchmarker: ['h2load']
}, { flags: ['--no-warnings', '--expose-http2'] });
-function main(conf) {
- const m = +conf.streams;
- const l = +conf.length;
- const s = +conf.size;
+function main({ streams, length, size }) {
const http2 = require('http2');
const server = http2.createServer();
server.on('stream', (stream) => {
stream.respond();
let written = 0;
function write() {
- stream.write('ü'.repeat(s));
- written += s;
- if (written < l)
+ stream.write('ü'.repeat(size));
+ written += size;
+ if (written < length)
setImmediate(write);
else
stream.end();
@@ -33,7 +30,7 @@ function main(conf) {
bench.http({
path: '/',
requests: 10000,
- maxConcurrentStreams: m,
+ maxConcurrentStreams: streams,
}, () => { server.close(); });
});
}
diff --git a/benchmark/misc/freelist.js b/benchmark/misc/freelist.js
index 461f4b3e4c8960..0530255728ffeb 100644
--- a/benchmark/misc/freelist.js
+++ b/benchmark/misc/freelist.js
@@ -8,9 +8,8 @@ const bench = common.createBenchmark(main, {
flags: ['--expose-internals']
});
-function main(conf) {
+function main({ n }) {
const FreeList = require('internal/freelist');
- const n = conf.n;
const poolSize = 1000;
const list = new FreeList('test', poolSize, Object);
var i;
diff --git a/benchmark/misc/function_call/index.js b/benchmark/misc/function_call/index.js
index 6a2595d2ae188d..91efa573597cc7 100644
--- a/benchmark/misc/function_call/index.js
+++ b/benchmark/misc/function_call/index.js
@@ -31,13 +31,13 @@ const bench = common.createBenchmark(main, {
millions: [1, 10, 50]
});
-function main(conf) {
- const n = +conf.millions * 1e6;
+function main({ millions, type }) {
+ const n = millions * 1e6;
- const fn = conf.type === 'cxx' ? cxx : js;
+ const fn = type === 'cxx' ? cxx : js;
bench.start();
for (var i = 0; i < n; i++) {
fn();
}
- bench.end(+conf.millions);
+ bench.end(millions);
}
diff --git a/benchmark/misc/object-property-bench.js b/benchmark/misc/object-property-bench.js
index d6afd4e9c0bcbb..37da82d88758fd 100644
--- a/benchmark/misc/object-property-bench.js
+++ b/benchmark/misc/object-property-bench.js
@@ -59,10 +59,10 @@ function runSymbol(n) {
bench.end(n / 1e6);
}
-function main(conf) {
- const n = +conf.millions * 1e6;
+function main({ millions, method }) {
+ const n = millions * 1e6;
- switch (conf.method) {
+ switch (method) {
// '' is a default case for tests
case '':
case 'property':
diff --git a/benchmark/misc/punycode.js b/benchmark/misc/punycode.js
index 40bcd70302003c..7016fa11712bbc 100644
--- a/benchmark/misc/punycode.js
+++ b/benchmark/misc/punycode.js
@@ -62,10 +62,8 @@ function runICU(n, val) {
bench.end(n);
}
-function main(conf) {
- const n = +conf.n;
- const val = conf.val;
- switch (conf.method) {
+function main({ n, val, method }) {
+ switch (method) {
// '' is a default case for tests
case '':
case 'punycode':
diff --git a/benchmark/misc/startup.js b/benchmark/misc/startup.js
index b010f9fa469070..703146f081b3c6 100644
--- a/benchmark/misc/startup.js
+++ b/benchmark/misc/startup.js
@@ -8,8 +8,7 @@ const bench = common.createBenchmark(startNode, {
dur: [1]
});
-function startNode(conf) {
- const dur = +conf.dur;
+function startNode({ dur }) {
var go = true;
var starts = 0;
diff --git a/benchmark/misc/util-extend-vs-object-assign.js b/benchmark/misc/util-extend-vs-object-assign.js
index f2a039bc5d71fc..149619f6e1dea3 100644
--- a/benchmark/misc/util-extend-vs-object-assign.js
+++ b/benchmark/misc/util-extend-vs-object-assign.js
@@ -8,19 +8,17 @@ const bench = common.createBenchmark(main, {
n: [10e4]
});
-function main(conf) {
+function main({ n, type }) {
let fn;
- const n = conf.n | 0;
-
- if (conf.type === 'extend') {
+ if (type === 'extend') {
fn = util._extend;
- } else if (conf.type === 'assign') {
+ } else if (type === 'assign') {
fn = Object.assign;
}
// Force-optimize the method to test so that the benchmark doesn't
// get disrupted by the optimizer kicking in halfway through.
- for (var i = 0; i < conf.type.length * 10; i += 1)
+ for (var i = 0; i < type.length * 10; i += 1)
fn({}, process.env);
const obj = new Proxy({}, { set: function(a, b, c) { return true; } });
diff --git a/benchmark/module/module-loader.js b/benchmark/module/module-loader.js
index cca5fc2c229038..8393d1f92e0e6c 100644
--- a/benchmark/module/module-loader.js
+++ b/benchmark/module/module-loader.js
@@ -12,8 +12,8 @@ const bench = common.createBenchmark(main, {
useCache: ['true', 'false']
});
-function main(conf) {
- const n = +conf.thousands * 1e3;
+function main({ thousands, fullPath, useCache }) {
+ const n = thousands * 1e3;
refreshTmpDir();
try { fs.mkdirSync(benchmarkDirectory); } catch (e) {}
@@ -30,10 +30,10 @@ function main(conf) {
);
}
- if (conf.fullPath === 'true')
- measureFull(n, conf.useCache === 'true');
+ if (fullPath === 'true')
+ measureFull(n, useCache === 'true');
else
- measureDir(n, conf.useCache === 'true');
+ measureDir(n, useCache === 'true');
refreshTmpDir();
}
diff --git a/benchmark/net/net-c2s-cork.js b/benchmark/net/net-c2s-cork.js
index a6582caa16be56..55bb99f6f73d2b 100644
--- a/benchmark/net/net-c2s-cork.js
+++ b/benchmark/net/net-c2s-cork.js
@@ -2,6 +2,7 @@
'use strict';
const common = require('../common.js');
+const net = require('net');
const PORT = common.PORT;
const bench = common.createBenchmark(main, {
@@ -10,17 +11,10 @@ const bench = common.createBenchmark(main, {
dur: [5],
});
-var dur;
-var len;
-var type;
var chunk;
var encoding;
-function main(conf) {
- dur = +conf.dur;
- len = +conf.len;
- type = conf.type;
-
+function main({ dur, len, type }) {
switch (type) {
case 'buf':
chunk = Buffer.alloc(len, 'x');
@@ -37,34 +31,6 @@ function main(conf) {
throw new Error(`invalid type: ${type}`);
}
- server();
-}
-
-const net = require('net');
-
-function Writer() {
- this.received = 0;
- this.writable = true;
-}
-
-Writer.prototype.write = function(chunk, encoding, cb) {
- this.received += chunk.length;
-
- if (typeof encoding === 'function')
- encoding();
- else if (typeof cb === 'function')
- cb();
-
- return true;
-};
-
-// doesn't matter, never emits anything.
-Writer.prototype.on = function() {};
-Writer.prototype.once = function() {};
-Writer.prototype.emit = function() {};
-Writer.prototype.prependListener = function() {};
-
-function server() {
const writer = new Writer();
// the actual benchmark.
@@ -95,3 +61,25 @@ function server() {
});
});
}
+
+function Writer() {
+ this.received = 0;
+ this.writable = true;
+}
+
+Writer.prototype.write = function(chunk, encoding, cb) {
+ this.received += chunk.length;
+
+ if (typeof encoding === 'function')
+ encoding();
+ else if (typeof cb === 'function')
+ cb();
+
+ return true;
+};
+
+// doesn't matter, never emits anything.
+Writer.prototype.on = function() {};
+Writer.prototype.once = function() {};
+Writer.prototype.emit = function() {};
+Writer.prototype.prependListener = function() {};
diff --git a/benchmark/net/net-c2s.js b/benchmark/net/net-c2s.js
index 140f9612ab1ed9..4add79a1664d4a 100644
--- a/benchmark/net/net-c2s.js
+++ b/benchmark/net/net-c2s.js
@@ -2,6 +2,7 @@
'use strict';
const common = require('../common.js');
+const net = require('net');
const PORT = common.PORT;
const bench = common.createBenchmark(main, {
@@ -10,17 +11,10 @@ const bench = common.createBenchmark(main, {
dur: [5],
});
-var dur;
-var len;
-var type;
var chunk;
var encoding;
-function main(conf) {
- dur = +conf.dur;
- len = +conf.len;
- type = conf.type;
-
+function main({ dur, len, type }) {
switch (type) {
case 'buf':
chunk = Buffer.alloc(len, 'x');
@@ -37,10 +31,30 @@ function main(conf) {
throw new Error(`invalid type: ${type}`);
}
- server();
-}
+ const reader = new Reader();
+ const writer = new Writer();
-const net = require('net');
+ // the actual benchmark.
+ const server = net.createServer(function(socket) {
+ socket.pipe(writer);
+ });
+
+ server.listen(PORT, function() {
+ const socket = net.connect(PORT);
+ socket.on('connect', function() {
+ bench.start();
+
+ reader.pipe(socket);
+
+ setTimeout(function() {
+ const bytes = writer.received;
+ const gbits = (bytes * 8) / (1024 * 1024 * 1024);
+ bench.end(gbits);
+ process.exit(0);
+ }, dur * 1000);
+ });
+ });
+}
function Writer() {
this.received = 0;
@@ -84,30 +98,3 @@ Reader.prototype.pipe = function(dest) {
this.flow();
return dest;
};
-
-
-function server() {
- const reader = new Reader();
- const writer = new Writer();
-
- // the actual benchmark.
- const server = net.createServer(function(socket) {
- socket.pipe(writer);
- });
-
- server.listen(PORT, function() {
- const socket = net.connect(PORT);
- socket.on('connect', function() {
- bench.start();
-
- reader.pipe(socket);
-
- setTimeout(function() {
- const bytes = writer.received;
- const gbits = (bytes * 8) / (1024 * 1024 * 1024);
- bench.end(gbits);
- process.exit(0);
- }, dur * 1000);
- });
- });
-}
diff --git a/benchmark/net/net-pipe.js b/benchmark/net/net-pipe.js
index a8ae50edfbfde0..3dd3bb78ccf9ac 100644
--- a/benchmark/net/net-pipe.js
+++ b/benchmark/net/net-pipe.js
@@ -2,6 +2,7 @@
'use strict';
const common = require('../common.js');
+const net = require('net');
const PORT = common.PORT;
const bench = common.createBenchmark(main, {
@@ -10,17 +11,10 @@ const bench = common.createBenchmark(main, {
dur: [5],
});
-var dur;
-var len;
-var type;
var chunk;
var encoding;
-function main(conf) {
- dur = +conf.dur;
- len = +conf.len;
- type = conf.type;
-
+function main({ dur, len, type }) {
switch (type) {
case 'buf':
chunk = Buffer.alloc(len, 'x');
@@ -37,10 +31,33 @@ function main(conf) {
throw new Error(`invalid type: ${type}`);
}
- server();
-}
+ const reader = new Reader();
+ const writer = new Writer();
-const net = require('net');
+ // the actual benchmark.
+ const server = net.createServer(function(socket) {
+ socket.pipe(socket);
+ });
+
+ server.listen(PORT, function() {
+ const socket = net.connect(PORT);
+ socket.on('connect', function() {
+ bench.start();
+
+ reader.pipe(socket);
+ socket.pipe(writer);
+
+ setTimeout(function() {
+ // multiply by 2 since we're sending it first one way
+        // then back again.
+ const bytes = writer.received * 2;
+ const gbits = (bytes * 8) / (1024 * 1024 * 1024);
+ bench.end(gbits);
+ process.exit(0);
+ }, dur * 1000);
+ });
+ });
+}
function Writer() {
this.received = 0;
@@ -84,33 +101,3 @@ Reader.prototype.pipe = function(dest) {
this.flow();
return dest;
};
-
-
-function server() {
- const reader = new Reader();
- const writer = new Writer();
-
- // the actual benchmark.
- const server = net.createServer(function(socket) {
- socket.pipe(socket);
- });
-
- server.listen(PORT, function() {
- const socket = net.connect(PORT);
- socket.on('connect', function() {
- bench.start();
-
- reader.pipe(socket);
- socket.pipe(writer);
-
- setTimeout(function() {
- // multiply by 2 since we're sending it first one way
- // then then back again.
- const bytes = writer.received * 2;
- const gbits = (bytes * 8) / (1024 * 1024 * 1024);
- bench.end(gbits);
- process.exit(0);
- }, dur * 1000);
- });
- });
-}
diff --git a/benchmark/net/net-s2c.js b/benchmark/net/net-s2c.js
index 9fec2d8577c098..2ddf8fd6c5ff67 100644
--- a/benchmark/net/net-s2c.js
+++ b/benchmark/net/net-s2c.js
@@ -10,17 +10,10 @@ const bench = common.createBenchmark(main, {
dur: [5]
});
-var dur;
-var len;
-var type;
var chunk;
var encoding;
-function main(conf) {
- dur = +conf.dur;
- len = +conf.len;
- type = conf.type;
-
+function main({ dur, len, type }) {
switch (type) {
case 'buf':
chunk = Buffer.alloc(len, 'x');
@@ -37,7 +30,29 @@ function main(conf) {
throw new Error(`invalid type: ${type}`);
}
- server();
+ const reader = new Reader();
+ const writer = new Writer();
+
+ // the actual benchmark.
+ const server = net.createServer(function(socket) {
+ reader.pipe(socket);
+ });
+
+ server.listen(PORT, function() {
+ const socket = net.connect(PORT);
+ socket.on('connect', function() {
+ bench.start();
+
+ socket.pipe(writer);
+
+ setTimeout(function() {
+ const bytes = writer.received;
+ const gbits = (bytes * 8) / (1024 * 1024 * 1024);
+ bench.end(gbits);
+ process.exit(0);
+ }, dur * 1000);
+ });
+ });
}
const net = require('net');
@@ -84,30 +99,3 @@ Reader.prototype.pipe = function(dest) {
this.flow();
return dest;
};
-
-
-function server() {
- const reader = new Reader();
- const writer = new Writer();
-
- // the actual benchmark.
- const server = net.createServer(function(socket) {
- reader.pipe(socket);
- });
-
- server.listen(PORT, function() {
- const socket = net.connect(PORT);
- socket.on('connect', function() {
- bench.start();
-
- socket.pipe(writer);
-
- setTimeout(function() {
- const bytes = writer.received;
- const gbits = (bytes * 8) / (1024 * 1024 * 1024);
- bench.end(gbits);
- process.exit(0);
- }, dur * 1000);
- });
- });
-}
diff --git a/benchmark/net/net-wrap-js-stream-passthrough.js b/benchmark/net/net-wrap-js-stream-passthrough.js
new file mode 100644
index 00000000000000..05a66f4e7ab783
--- /dev/null
+++ b/benchmark/net/net-wrap-js-stream-passthrough.js
@@ -0,0 +1,96 @@
+// test the speed of .pipe() with JSStream wrapping for PassThrough streams
+'use strict';
+
+const common = require('../common.js');
+const { PassThrough } = require('stream');
+
+const bench = common.createBenchmark(main, {
+ len: [102400, 1024 * 1024 * 16],
+ type: ['utf', 'asc', 'buf'],
+ dur: [5],
+}, {
+ flags: ['--expose-internals']
+});
+
+var chunk;
+var encoding;
+
+function main({ dur, len, type }) {
+ // Can only require internals inside main().
+ const JSStreamWrap = require('internal/wrap_js_stream');
+
+ switch (type) {
+ case 'buf':
+ chunk = Buffer.alloc(len, 'x');
+ break;
+ case 'utf':
+ encoding = 'utf8';
+ chunk = 'ü'.repeat(len / 2);
+ break;
+ case 'asc':
+ encoding = 'ascii';
+ chunk = 'x'.repeat(len);
+ break;
+ default:
+ throw new Error(`invalid type: ${type}`);
+ }
+
+ const reader = new Reader();
+ const writer = new Writer();
+
+ // the actual benchmark.
+ const fakeSocket = new JSStreamWrap(new PassThrough());
+ bench.start();
+ reader.pipe(fakeSocket);
+ fakeSocket.pipe(writer);
+
+ setTimeout(function() {
+ const bytes = writer.received;
+ const gbits = (bytes * 8) / (1024 * 1024 * 1024);
+ bench.end(gbits);
+ process.exit(0);
+ }, dur * 1000);
+}
+
+function Writer() {
+ this.received = 0;
+ this.writable = true;
+}
+
+Writer.prototype.write = function(chunk, encoding, cb) {
+ this.received += chunk.length;
+
+ if (typeof encoding === 'function')
+ encoding();
+ else if (typeof cb === 'function')
+ cb();
+
+ return true;
+};
+
+// doesn't matter, never emits anything.
+Writer.prototype.on = function() {};
+Writer.prototype.once = function() {};
+Writer.prototype.emit = function() {};
+Writer.prototype.prependListener = function() {};
+
+
+function flow() {
+ const dest = this.dest;
+ const res = dest.write(chunk, encoding);
+ if (!res)
+ dest.once('drain', this.flow);
+ else
+ process.nextTick(this.flow);
+}
+
+function Reader() {
+ this.flow = flow.bind(this);
+ this.readable = true;
+}
+
+Reader.prototype.pipe = function(dest) {
+ this.dest = dest;
+ this.flow();
+ return dest;
+};
diff --git a/benchmark/net/tcp-raw-c2s.js b/benchmark/net/tcp-raw-c2s.js
index bd41be87728308..2be3bb3b538ffd 100644
--- a/benchmark/net/tcp-raw-c2s.js
+++ b/benchmark/net/tcp-raw-c2s.js
@@ -19,23 +19,7 @@ const TCPConnectWrap = process.binding('tcp_wrap').TCPConnectWrap;
const WriteWrap = process.binding('stream_wrap').WriteWrap;
const PORT = common.PORT;
-var dur;
-var len;
-var type;
-
-function main(conf) {
- dur = +conf.dur;
- len = +conf.len;
- type = conf.type;
- server();
-}
-
-
-function fail(err, syscall) {
- throw util._errnoException(err, syscall);
-}
-
-function server() {
+function main({ dur, len, type }) {
const serverHandle = new TCP(TCPConstants.SERVER);
var err = serverHandle.bind('127.0.0.1', PORT);
if (err)
@@ -73,10 +57,15 @@ function server() {
clientHandle.readStart();
};
- client();
+ client(type, len);
+}
+
+
+function fail(err, syscall) {
+ throw util._errnoException(err, syscall);
}
-function client() {
+function client(type, len) {
var chunk;
switch (type) {
case 'buf':
diff --git a/benchmark/net/tcp-raw-pipe.js b/benchmark/net/tcp-raw-pipe.js
index 4dd06ed446d6c1..2fc03f08cd4a90 100644
--- a/benchmark/net/tcp-raw-pipe.js
+++ b/benchmark/net/tcp-raw-pipe.js
@@ -14,27 +14,17 @@ const bench = common.createBenchmark(main, {
dur: [5]
});
+function fail(err, syscall) {
+ throw util._errnoException(err, syscall);
+}
+
const { TCP, constants: TCPConstants } = process.binding('tcp_wrap');
const TCPConnectWrap = process.binding('tcp_wrap').TCPConnectWrap;
const WriteWrap = process.binding('stream_wrap').WriteWrap;
const PORT = common.PORT;
-var dur;
-var len;
-var type;
-
-function main(conf) {
- dur = +conf.dur;
- len = +conf.len;
- type = conf.type;
- server();
-}
-
-function fail(err, syscall) {
- throw util._errnoException(err, syscall);
-}
-
-function server() {
+function main({ dur, len, type }) {
+ // Server
const serverHandle = new TCP(TCPConstants.SERVER);
var err = serverHandle.bind('127.0.0.1', PORT);
if (err)
@@ -70,10 +60,7 @@ function server() {
clientHandle.readStart();
};
- client();
-}
-
-function client() {
+ // Client
var chunk;
switch (type) {
case 'buf':
@@ -91,9 +78,9 @@ function client() {
const clientHandle = new TCP(TCPConstants.SOCKET);
const connectReq = new TCPConnectWrap();
- const err = clientHandle.connect(connectReq, '127.0.0.1', PORT);
var bytes = 0;
+ err = clientHandle.connect(connectReq, '127.0.0.1', PORT);
if (err)
fail(err, 'connect');
diff --git a/benchmark/net/tcp-raw-s2c.js b/benchmark/net/tcp-raw-s2c.js
index 2ca6016ce017a1..339f5e393d9254 100644
--- a/benchmark/net/tcp-raw-s2c.js
+++ b/benchmark/net/tcp-raw-s2c.js
@@ -19,22 +19,7 @@ const TCPConnectWrap = process.binding('tcp_wrap').TCPConnectWrap;
const WriteWrap = process.binding('stream_wrap').WriteWrap;
const PORT = common.PORT;
-var dur;
-var len;
-var type;
-
-function main(conf) {
- dur = +conf.dur;
- len = +conf.len;
- type = conf.type;
- server();
-}
-
-function fail(err, syscall) {
- throw util._errnoException(err, syscall);
-}
-
-function server() {
+function main({ dur, len, type }) {
const serverHandle = new TCP(TCPConstants.SERVER);
var err = serverHandle.bind('127.0.0.1', PORT);
if (err)
@@ -103,10 +88,14 @@ function server() {
}
};
- client();
+ client(dur);
+}
+
+function fail(err, syscall) {
+ throw util._errnoException(err, syscall);
}
-function client() {
+function client(dur) {
const clientHandle = new TCP(TCPConstants.SOCKET);
const connectReq = new TCPConnectWrap();
const err = clientHandle.connect(connectReq, '127.0.0.1', PORT);
diff --git a/benchmark/os/cpus.js b/benchmark/os/cpus.js
index 2a8535113c207a..da158a1b061c7f 100644
--- a/benchmark/os/cpus.js
+++ b/benchmark/os/cpus.js
@@ -7,9 +7,7 @@ const bench = common.createBenchmark(main, {
n: [3e4]
});
-function main(conf) {
- const n = +conf.n;
-
+function main({ n }) {
bench.start();
for (var i = 0; i < n; ++i)
cpus();
diff --git a/benchmark/os/loadavg.js b/benchmark/os/loadavg.js
index 6e3c57ed44b777..2cd38316b24bdd 100644
--- a/benchmark/os/loadavg.js
+++ b/benchmark/os/loadavg.js
@@ -7,9 +7,7 @@ const bench = common.createBenchmark(main, {
n: [5e6]
});
-function main(conf) {
- const n = +conf.n;
-
+function main({ n }) {
bench.start();
for (var i = 0; i < n; ++i)
loadavg();
diff --git a/benchmark/path/basename-posix.js b/benchmark/path/basename-posix.js
index 42e98c5932b028..20b734703f0746 100644
--- a/benchmark/path/basename-posix.js
+++ b/benchmark/path/basename-posix.js
@@ -1,6 +1,6 @@
'use strict';
const common = require('../common.js');
-const path = require('path');
+const { posix } = require('path');
const bench = common.createBenchmark(main, {
pathext: [
@@ -18,20 +18,17 @@ const bench = common.createBenchmark(main, {
n: [1e6]
});
-function main(conf) {
- const n = +conf.n;
- const p = path.posix;
- var input = String(conf.pathext);
+function main({ n, pathext }) {
var ext;
- const extIdx = input.indexOf('|');
+ const extIdx = pathext.indexOf('|');
if (extIdx !== -1) {
- ext = input.slice(extIdx + 1);
- input = input.slice(0, extIdx);
+ ext = pathext.slice(extIdx + 1);
+ pathext = pathext.slice(0, extIdx);
}
bench.start();
for (var i = 0; i < n; i++) {
- p.basename(input, ext);
+ posix.basename(pathext, ext);
}
bench.end(n);
}
diff --git a/benchmark/path/basename-win32.js b/benchmark/path/basename-win32.js
index 6966e4fe81e1ac..8a66f56d6e3295 100644
--- a/benchmark/path/basename-win32.js
+++ b/benchmark/path/basename-win32.js
@@ -1,6 +1,6 @@
'use strict';
const common = require('../common.js');
-const path = require('path');
+const { posix } = require('path');
const bench = common.createBenchmark(main, {
pathext: [
@@ -18,20 +18,17 @@ const bench = common.createBenchmark(main, {
n: [1e6]
});
-function main(conf) {
- const n = +conf.n;
- const p = path.win32;
- var input = String(conf.pathext);
+function main({ n, pathext }) {
var ext;
- const extIdx = input.indexOf('|');
+ const extIdx = pathext.indexOf('|');
if (extIdx !== -1) {
- ext = input.slice(extIdx + 1);
- input = input.slice(0, extIdx);
+ ext = pathext.slice(extIdx + 1);
+ pathext = pathext.slice(0, extIdx);
}
bench.start();
for (var i = 0; i < n; i++) {
- p.basename(input, ext);
+ posix.basename(pathext, ext);
}
bench.end(n);
}
diff --git a/benchmark/path/dirname-posix.js b/benchmark/path/dirname-posix.js
index 98ad67056bffe4..a045125f43c730 100644
--- a/benchmark/path/dirname-posix.js
+++ b/benchmark/path/dirname-posix.js
@@ -1,6 +1,6 @@
'use strict';
const common = require('../common.js');
-const path = require('path');
+const { posix } = require('path');
const bench = common.createBenchmark(main, {
path: [
@@ -15,14 +15,10 @@ const bench = common.createBenchmark(main, {
n: [1e6]
});
-function main(conf) {
- const n = +conf.n;
- const p = path.posix;
- const input = String(conf.path);
-
+function main({ n, path }) {
bench.start();
for (var i = 0; i < n; i++) {
- p.dirname(input);
+ posix.dirname(path);
}
bench.end(n);
}
diff --git a/benchmark/path/dirname-win32.js b/benchmark/path/dirname-win32.js
index c09a3aff98de97..f47abdd37910e2 100644
--- a/benchmark/path/dirname-win32.js
+++ b/benchmark/path/dirname-win32.js
@@ -1,6 +1,6 @@
'use strict';
const common = require('../common.js');
-const path = require('path');
+const { win32 } = require('path');
const bench = common.createBenchmark(main, {
path: [
@@ -15,14 +15,10 @@ const bench = common.createBenchmark(main, {
n: [1e6]
});
-function main(conf) {
- const n = +conf.n;
- const p = path.win32;
- const input = String(conf.path);
-
+function main({ n, path }) {
bench.start();
for (var i = 0; i < n; i++) {
- p.dirname(input);
+ win32.dirname(path);
}
bench.end(n);
}
diff --git a/benchmark/path/extname-posix.js b/benchmark/path/extname-posix.js
index 4b6e056094267b..3dde5e99005d72 100644
--- a/benchmark/path/extname-posix.js
+++ b/benchmark/path/extname-posix.js
@@ -1,6 +1,6 @@
'use strict';
const common = require('../common.js');
-const path = require('path');
+const { posix } = require('path');
const bench = common.createBenchmark(main, {
path: [
@@ -18,14 +18,10 @@ const bench = common.createBenchmark(main, {
n: [1e6]
});
-function main(conf) {
- const n = +conf.n;
- const p = path.posix;
- const input = String(conf.path);
-
+function main({ n, path }) {
bench.start();
for (var i = 0; i < n; i++) {
- p.extname(input);
+ posix.extname(path);
}
bench.end(n);
}
diff --git a/benchmark/path/extname-win32.js b/benchmark/path/extname-win32.js
index fd54d485a9c025..55602df34b4a24 100644
--- a/benchmark/path/extname-win32.js
+++ b/benchmark/path/extname-win32.js
@@ -1,6 +1,6 @@
'use strict';
const common = require('../common.js');
-const path = require('path');
+const { win32 } = require('path');
const bench = common.createBenchmark(main, {
path: [
@@ -18,14 +18,10 @@ const bench = common.createBenchmark(main, {
n: [1e6]
});
-function main(conf) {
- const n = +conf.n;
- const p = path.win32;
- const input = String(conf.path);
-
+function main({ n, path }) {
bench.start();
for (var i = 0; i < n; i++) {
- p.extname(input);
+ win32.extname(path);
}
bench.end(n);
}
diff --git a/benchmark/path/format-posix.js b/benchmark/path/format-posix.js
index fe20cc3c4fda9c..aa92c06a4d5b2f 100644
--- a/benchmark/path/format-posix.js
+++ b/benchmark/path/format-posix.js
@@ -1,6 +1,6 @@
'use strict';
const common = require('../common.js');
-const path = require('path');
+const { posix } = require('path');
const bench = common.createBenchmark(main, {
props: [
@@ -9,10 +9,8 @@ const bench = common.createBenchmark(main, {
n: [1e7]
});
-function main(conf) {
- const n = +conf.n;
- const p = path.posix;
- const props = String(conf.props).split('|');
+function main({ n, props }) {
+ props = props.split('|');
const obj = {
root: props[0] || '',
dir: props[1] || '',
@@ -23,7 +21,7 @@ function main(conf) {
bench.start();
for (var i = 0; i < n; i++) {
- p.format(obj);
+ posix.format(obj);
}
bench.end(n);
}
diff --git a/benchmark/path/format-win32.js b/benchmark/path/format-win32.js
index e59bee8669043e..5921f95cf12064 100644
--- a/benchmark/path/format-win32.js
+++ b/benchmark/path/format-win32.js
@@ -1,6 +1,6 @@
'use strict';
const common = require('../common.js');
-const path = require('path');
+const { win32 } = require('path');
const bench = common.createBenchmark(main, {
props: [
@@ -9,10 +9,8 @@ const bench = common.createBenchmark(main, {
n: [1e7]
});
-function main(conf) {
- const n = +conf.n;
- const p = path.win32;
- const props = String(conf.props).split('|');
+function main({ n, props }) {
+ props = props.split('|');
const obj = {
root: props[0] || '',
dir: props[1] || '',
@@ -23,7 +21,7 @@ function main(conf) {
bench.start();
for (var i = 0; i < n; i++) {
- p.format(obj);
+ win32.format(obj);
}
bench.end(n);
}
diff --git a/benchmark/path/isAbsolute-posix.js b/benchmark/path/isAbsolute-posix.js
index 956c8e0d1301e0..42994840487c68 100644
--- a/benchmark/path/isAbsolute-posix.js
+++ b/benchmark/path/isAbsolute-posix.js
@@ -1,6 +1,6 @@
'use strict';
const common = require('../common.js');
-const path = require('path');
+const { posix } = require('path');
const bench = common.createBenchmark(main, {
path: [
@@ -13,14 +13,10 @@ const bench = common.createBenchmark(main, {
n: [1e6]
});
-function main(conf) {
- const n = +conf.n;
- const p = path.posix;
- const input = String(conf.path);
-
+function main({ n, path }) {
bench.start();
for (var i = 0; i < n; i++) {
- p.isAbsolute(input);
+ posix.isAbsolute(path);
}
bench.end(n);
}
diff --git a/benchmark/path/isAbsolute-win32.js b/benchmark/path/isAbsolute-win32.js
index 3c93b24220fe45..350e99d48b74a5 100644
--- a/benchmark/path/isAbsolute-win32.js
+++ b/benchmark/path/isAbsolute-win32.js
@@ -1,6 +1,6 @@
'use strict';
const common = require('../common.js');
-const path = require('path');
+const { win32 } = require('path');
const bench = common.createBenchmark(main, {
path: [
@@ -14,14 +14,10 @@ const bench = common.createBenchmark(main, {
n: [1e6]
});
-function main(conf) {
- const n = +conf.n;
- const p = path.win32;
- const input = String(conf.path);
-
+function main({ n, path }) {
bench.start();
for (var i = 0; i < n; i++) {
- p.isAbsolute(input);
+ win32.isAbsolute(path);
}
bench.end(n);
}
diff --git a/benchmark/path/join-posix.js b/benchmark/path/join-posix.js
index 02b348cdff42d5..f06f74ad37fc46 100644
--- a/benchmark/path/join-posix.js
+++ b/benchmark/path/join-posix.js
@@ -1,6 +1,6 @@
'use strict';
const common = require('../common.js');
-const path = require('path');
+const { posix } = require('path');
const bench = common.createBenchmark(main, {
paths: [
@@ -9,14 +9,12 @@ const bench = common.createBenchmark(main, {
n: [1e6]
});
-function main(conf) {
- const n = +conf.n;
- const p = path.posix;
- const args = String(conf.paths).split('|');
+function main({ n, paths }) {
+ const args = paths.split('|');
bench.start();
for (var i = 0; i < n; i++) {
- p.join.apply(null, args);
+ posix.join.apply(null, args);
}
bench.end(n);
}
diff --git a/benchmark/path/join-win32.js b/benchmark/path/join-win32.js
index 96e4aeaa0ca2a9..2fa29f8ebfd356 100644
--- a/benchmark/path/join-win32.js
+++ b/benchmark/path/join-win32.js
@@ -1,6 +1,6 @@
'use strict';
const common = require('../common.js');
-const path = require('path');
+const { win32 } = require('path');
const bench = common.createBenchmark(main, {
paths: [
@@ -9,14 +9,12 @@ const bench = common.createBenchmark(main, {
n: [1e6]
});
-function main(conf) {
- const n = +conf.n;
- const p = path.win32;
- const args = String(conf.paths).split('|');
+function main({ n, paths }) {
+ const args = paths.split('|');
bench.start();
for (var i = 0; i < n; i++) {
- p.join.apply(null, args);
+ win32.join.apply(null, args);
}
bench.end(n);
}
diff --git a/benchmark/path/makeLong-win32.js b/benchmark/path/makeLong-win32.js
index 0c1ba38aedba6b..4314692eefab5e 100644
--- a/benchmark/path/makeLong-win32.js
+++ b/benchmark/path/makeLong-win32.js
@@ -1,6 +1,6 @@
'use strict';
const common = require('../common.js');
-const path = require('path');
+const { win32 } = require('path');
const bench = common.createBenchmark(main, {
path: [
@@ -12,14 +12,10 @@ const bench = common.createBenchmark(main, {
n: [1e6]
});
-function main(conf) {
- const n = +conf.n;
- const p = path.win32;
- const input = String(conf.path);
-
+function main({ n, path }) {
bench.start();
for (var i = 0; i < n; i++) {
- p._makeLong(input);
+ win32._makeLong(path);
}
bench.end(n);
}
diff --git a/benchmark/path/normalize-posix.js b/benchmark/path/normalize-posix.js
index 454a5ba9aebf05..84ac8d2c7c89d2 100644
--- a/benchmark/path/normalize-posix.js
+++ b/benchmark/path/normalize-posix.js
@@ -1,6 +1,6 @@
'use strict';
const common = require('../common.js');
-const path = require('path');
+const { posix } = require('path');
const bench = common.createBenchmark(main, {
path: [
@@ -14,14 +14,10 @@ const bench = common.createBenchmark(main, {
n: [1e6]
});
-function main(conf) {
- const n = +conf.n;
- const p = path.posix;
- const input = String(conf.path);
-
+function main({ n, path }) {
bench.start();
for (var i = 0; i < n; i++) {
- p.normalize(input);
+ posix.normalize(path);
}
bench.end(n);
}
diff --git a/benchmark/path/normalize-win32.js b/benchmark/path/normalize-win32.js
index 480856228aae6a..9b983eb9686580 100644
--- a/benchmark/path/normalize-win32.js
+++ b/benchmark/path/normalize-win32.js
@@ -1,6 +1,6 @@
'use strict';
const common = require('../common.js');
-const path = require('path');
+const { win32 } = require('path');
const bench = common.createBenchmark(main, {
path: [
@@ -14,14 +14,10 @@ const bench = common.createBenchmark(main, {
n: [1e6]
});
-function main(conf) {
- const n = +conf.n;
- const p = path.win32;
- const input = String(conf.path);
-
+function main({ n, path }) {
bench.start();
for (var i = 0; i < n; i++) {
- p.normalize(input);
+ win32.normalize(path);
}
bench.end(n);
}
diff --git a/benchmark/path/parse-posix.js b/benchmark/path/parse-posix.js
index 4f1fb898b86af2..dd1153d3c68b7e 100644
--- a/benchmark/path/parse-posix.js
+++ b/benchmark/path/parse-posix.js
@@ -1,6 +1,6 @@
'use strict';
const common = require('../common.js');
-const path = require('path');
+const { posix } = require('path');
const bench = common.createBenchmark(main, {
path: [
@@ -15,17 +15,13 @@ const bench = common.createBenchmark(main, {
n: [1e6]
});
-function main(conf) {
- const n = +conf.n;
- const p = path.posix;
- const input = String(conf.path);
-
+function main({ n, path }) {
for (var i = 0; i < n; i++) {
- p.parse(input);
+ posix.parse(path);
}
bench.start();
for (i = 0; i < n; i++) {
- p.parse(input);
+ posix.parse(path);
}
bench.end(n);
}
diff --git a/benchmark/path/parse-win32.js b/benchmark/path/parse-win32.js
index da48f78dd57b90..8c4f06272f4b05 100644
--- a/benchmark/path/parse-win32.js
+++ b/benchmark/path/parse-win32.js
@@ -1,6 +1,6 @@
'use strict';
const common = require('../common.js');
-const path = require('path');
+const { win32 } = require('path');
const bench = common.createBenchmark(main, {
path: [
@@ -16,17 +16,13 @@ const bench = common.createBenchmark(main, {
n: [1e6]
});
-function main(conf) {
- const n = +conf.n;
- const p = path.win32;
- const input = String(conf.path);
-
+function main({ n, path }) {
for (var i = 0; i < n; i++) {
- p.parse(input);
+ win32.parse(path);
}
bench.start();
for (i = 0; i < n; i++) {
- p.parse(input);
+ win32.parse(path);
}
bench.end(n);
}
diff --git a/benchmark/path/relative-posix.js b/benchmark/path/relative-posix.js
index 1280b686bc55ae..70a0e434d98313 100644
--- a/benchmark/path/relative-posix.js
+++ b/benchmark/path/relative-posix.js
@@ -1,6 +1,6 @@
'use strict';
const common = require('../common.js');
-const path = require('path');
+const { posix } = require('path');
const bench = common.createBenchmark(main, {
paths: [
@@ -15,23 +15,20 @@ const bench = common.createBenchmark(main, {
n: [1e6]
});
-function main(conf) {
- const n = +conf.n;
- const p = path.posix;
- var from = String(conf.paths);
+function main({ n, paths }) {
var to = '';
- const delimIdx = from.indexOf('|');
+ const delimIdx = paths.indexOf('|');
if (delimIdx > -1) {
- to = from.slice(delimIdx + 1);
- from = from.slice(0, delimIdx);
+ to = paths.slice(delimIdx + 1);
+ paths = paths.slice(0, delimIdx);
}
for (var i = 0; i < n; i++) {
- p.relative(from, to);
+ posix.relative(paths, to);
}
bench.start();
for (i = 0; i < n; i++) {
- p.relative(from, to);
+ posix.relative(paths, to);
}
bench.end(n);
}
diff --git a/benchmark/path/relative-win32.js b/benchmark/path/relative-win32.js
index f109cd9d96d15b..4a97e82e028a0c 100644
--- a/benchmark/path/relative-win32.js
+++ b/benchmark/path/relative-win32.js
@@ -1,6 +1,6 @@
'use strict';
const common = require('../common.js');
-const path = require('path');
+const { win32 } = require('path');
const bench = common.createBenchmark(main, {
paths: [
@@ -13,25 +13,22 @@ const bench = common.createBenchmark(main, {
n: [1e6]
});
-function main(conf) {
- const n = +conf.n;
- const p = path.win32;
- var from = String(conf.paths);
+function main({ n, paths }) {
var to = '';
- const delimIdx = from.indexOf('|');
+ const delimIdx = paths.indexOf('|');
if (delimIdx > -1) {
- to = from.slice(delimIdx + 1);
- from = from.slice(0, delimIdx);
+ to = paths.slice(delimIdx + 1);
+ paths = paths.slice(0, delimIdx);
}
// Warmup
for (var i = 0; i < n; i++) {
- p.relative(from, to);
+ win32.relative(paths, to);
}
bench.start();
for (i = 0; i < n; i++) {
- p.relative(from, to);
+ win32.relative(paths, to);
}
bench.end(n);
}
diff --git a/benchmark/path/resolve-posix.js b/benchmark/path/resolve-posix.js
index 4ef0d46e284e32..91f4c1da102a5c 100644
--- a/benchmark/path/resolve-posix.js
+++ b/benchmark/path/resolve-posix.js
@@ -1,6 +1,6 @@
'use strict';
const common = require('../common.js');
-const path = require('path');
+const { posix } = require('path');
const bench = common.createBenchmark(main, {
paths: [
@@ -12,14 +12,12 @@ const bench = common.createBenchmark(main, {
n: [1e6]
});
-function main(conf) {
- const n = +conf.n;
- const p = path.posix;
- const args = String(conf.paths).split('|');
+function main({ n, paths }) {
+ const args = paths.split('|');
bench.start();
for (var i = 0; i < n; i++) {
- p.resolve.apply(null, args);
+ posix.resolve.apply(null, args);
}
bench.end(n);
}
diff --git a/benchmark/path/resolve-win32.js b/benchmark/path/resolve-win32.js
index c7d8b4cbb75df0..1047da5184b528 100644
--- a/benchmark/path/resolve-win32.js
+++ b/benchmark/path/resolve-win32.js
@@ -1,6 +1,6 @@
'use strict';
const common = require('../common.js');
-const path = require('path');
+const { win32 } = require('path');
const bench = common.createBenchmark(main, {
paths: [
@@ -12,14 +12,12 @@ const bench = common.createBenchmark(main, {
n: [1e6]
});
-function main(conf) {
- const n = +conf.n;
- const p = path.win32;
- const args = String(conf.paths).split('|');
+function main({ n, paths }) {
+ const args = paths.split('|');
bench.start();
for (var i = 0; i < n; i++) {
- p.resolve.apply(null, args);
+ win32.resolve.apply(null, args);
}
bench.end(n);
}
diff --git a/benchmark/process/bench-env.js b/benchmark/process/bench-env.js
index 66f966f587bb7f..a332d3cbd61895 100644
--- a/benchmark/process/bench-env.js
+++ b/benchmark/process/bench-env.js
@@ -7,8 +7,7 @@ const bench = common.createBenchmark(main, {
});
-function main(conf) {
- const n = conf.n >>> 0;
+function main({ n }) {
bench.start();
for (var i = 0; i < n; i++) {
// Access every item in object to process values.
diff --git a/benchmark/process/bench-hrtime.js b/benchmark/process/bench-hrtime.js
index 8a2920a238d042..9152a32b22d213 100644
--- a/benchmark/process/bench-hrtime.js
+++ b/benchmark/process/bench-hrtime.js
@@ -8,13 +8,12 @@ const bench = common.createBenchmark(main, {
type: ['raw', 'diff']
});
-function main(conf) {
- const n = conf.n | 0;
+function main({ n, type }) {
const hrtime = process.hrtime;
var noDead = hrtime();
var i;
- if (conf.type === 'raw') {
+ if (type === 'raw') {
bench.start();
for (i = 0; i < n; i++) {
noDead = hrtime();
diff --git a/benchmark/process/memoryUsage.js b/benchmark/process/memoryUsage.js
index 8b5aea871819ef..f9b969ab885d8b 100644
--- a/benchmark/process/memoryUsage.js
+++ b/benchmark/process/memoryUsage.js
@@ -5,9 +5,7 @@ const bench = common.createBenchmark(main, {
n: [1e5]
});
-function main(conf) {
- const n = +conf.n;
-
+function main({ n }) {
bench.start();
for (var i = 0; i < n; i++) {
process.memoryUsage();
diff --git a/benchmark/process/next-tick-breadth-args.js b/benchmark/process/next-tick-breadth-args.js
index ca608f15daa743..d759b955c429fe 100644
--- a/benchmark/process/next-tick-breadth-args.js
+++ b/benchmark/process/next-tick-breadth-args.js
@@ -5,8 +5,8 @@ const bench = common.createBenchmark(main, {
millions: [4]
});
-function main(conf) {
- const N = +conf.millions * 1e6;
+function main({ millions }) {
+ const N = millions * 1e6;
var n = 0;
function cb1(arg1) {
diff --git a/benchmark/process/next-tick-breadth.js b/benchmark/process/next-tick-breadth.js
index 51951ce0afd645..aebd623869b577 100644
--- a/benchmark/process/next-tick-breadth.js
+++ b/benchmark/process/next-tick-breadth.js
@@ -5,8 +5,8 @@ const bench = common.createBenchmark(main, {
millions: [4]
});
-function main(conf) {
- const N = +conf.millions * 1e6;
+function main({ millions }) {
+ const N = millions * 1e6;
var n = 0;
function cb() {
diff --git a/benchmark/process/next-tick-depth-args.js b/benchmark/process/next-tick-depth-args.js
index de792b303cca23..1c1b95bdc84bd1 100644
--- a/benchmark/process/next-tick-depth-args.js
+++ b/benchmark/process/next-tick-depth-args.js
@@ -7,8 +7,8 @@ const bench = common.createBenchmark(main, {
process.maxTickDepth = Infinity;
-function main(conf) {
- var n = +conf.millions * 1e6;
+function main({ millions }) {
+ var n = millions * 1e6;
function cb4(arg1, arg2, arg3, arg4) {
if (--n) {
@@ -21,7 +21,7 @@ function main(conf) {
else
process.nextTick(cb1, 0);
} else
- bench.end(+conf.millions);
+ bench.end(millions);
}
function cb3(arg1, arg2, arg3) {
if (--n) {
@@ -34,7 +34,7 @@ function main(conf) {
else
process.nextTick(cb1, 0);
} else
- bench.end(+conf.millions);
+ bench.end(millions);
}
function cb2(arg1, arg2) {
if (--n) {
@@ -47,7 +47,7 @@ function main(conf) {
else
process.nextTick(cb1, 0);
} else
- bench.end(+conf.millions);
+ bench.end(millions);
}
function cb1(arg1) {
if (--n) {
@@ -60,7 +60,7 @@ function main(conf) {
else
process.nextTick(cb1, 0);
} else
- bench.end(+conf.millions);
+ bench.end(millions);
}
bench.start();
process.nextTick(cb1, true);
diff --git a/benchmark/process/next-tick-depth.js b/benchmark/process/next-tick-depth.js
index e11beb4d0b1f31..99fc83c3772276 100644
--- a/benchmark/process/next-tick-depth.js
+++ b/benchmark/process/next-tick-depth.js
@@ -6,8 +6,8 @@ const bench = common.createBenchmark(main, {
process.maxTickDepth = Infinity;
-function main(conf) {
- var n = +conf.millions * 1e6;
+function main({ millions }) {
+ var n = millions * 1e6;
bench.start();
process.nextTick(onNextTick);
@@ -15,6 +15,6 @@ function main(conf) {
if (--n)
process.nextTick(onNextTick);
else
- bench.end(+conf.millions);
+ bench.end(millions);
}
}
diff --git a/benchmark/process/next-tick-exec-args.js b/benchmark/process/next-tick-exec-args.js
index 5ff017bb29cd5b..9e8ff73838460a 100644
--- a/benchmark/process/next-tick-exec-args.js
+++ b/benchmark/process/next-tick-exec-args.js
@@ -4,8 +4,8 @@ const bench = common.createBenchmark(main, {
millions: [5]
});
-function main(conf) {
- var n = +conf.millions * 1e6;
+function main({ millions }) {
+ var n = millions * 1e6;
bench.start();
for (var i = 0; i < n; i++) {
@@ -20,6 +20,6 @@ function main(conf) {
}
function onNextTick(i) {
if (i + 1 === n)
- bench.end(+conf.millions);
+ bench.end(millions);
}
}
diff --git a/benchmark/process/next-tick-exec.js b/benchmark/process/next-tick-exec.js
index 12c9d4624a903c..a8897cd7456476 100644
--- a/benchmark/process/next-tick-exec.js
+++ b/benchmark/process/next-tick-exec.js
@@ -4,8 +4,8 @@ const bench = common.createBenchmark(main, {
millions: [5]
});
-function main(conf) {
- var n = +conf.millions * 1e6;
+function main({ millions }) {
+ var n = millions * 1e6;
bench.start();
for (var i = 0; i < n; i++) {
@@ -13,6 +13,6 @@ function main(conf) {
}
function onNextTick(i) {
if (i + 1 === n)
- bench.end(+conf.millions);
+ bench.end(millions);
}
}
diff --git a/benchmark/querystring/querystring-parse.js b/benchmark/querystring/querystring-parse.js
index 740dfc9d21b173..db650165eb9cda 100644
--- a/benchmark/querystring/querystring-parse.js
+++ b/benchmark/querystring/querystring-parse.js
@@ -8,9 +8,7 @@ const bench = common.createBenchmark(main, {
n: [1e6],
});
-function main(conf) {
- const type = conf.type;
- const n = conf.n | 0;
+function main({ type, n }) {
const input = inputs[type];
var i;
// Execute the function a "sufficient" number of times before the timed
diff --git a/benchmark/querystring/querystring-stringify.js b/benchmark/querystring/querystring-stringify.js
index 97b8f1fc703ba2..cd1debd4df622d 100644
--- a/benchmark/querystring/querystring-stringify.js
+++ b/benchmark/querystring/querystring-stringify.js
@@ -7,10 +7,7 @@ const bench = common.createBenchmark(main, {
n: [1e7],
});
-function main(conf) {
- const type = conf.type;
- const n = conf.n | 0;
-
+function main({ type, n }) {
const inputs = {
noencode: {
foo: 'bar',
diff --git a/benchmark/querystring/querystring-unescapebuffer.js b/benchmark/querystring/querystring-unescapebuffer.js
index e37af180eef0ea..4f73ed024b11b1 100644
--- a/benchmark/querystring/querystring-unescapebuffer.js
+++ b/benchmark/querystring/querystring-unescapebuffer.js
@@ -12,10 +12,7 @@ const bench = common.createBenchmark(main, {
n: [10e6],
});
-function main(conf) {
- const input = conf.input;
- const n = conf.n | 0;
-
+function main({ input, n }) {
bench.start();
for (var i = 0; i < n; i += 1)
querystring.unescapeBuffer(input);
diff --git a/benchmark/streams/readable-bigread.js b/benchmark/streams/readable-bigread.js
index 34d478fb478478..99213afaeb8f28 100644
--- a/benchmark/streams/readable-bigread.js
+++ b/benchmark/streams/readable-bigread.js
@@ -7,8 +7,7 @@ const bench = common.createBenchmark(main, {
n: [100e1]
});
-function main(conf) {
- const n = +conf.n;
+function main({ n }) {
const b = new Buffer(32);
const s = new Readable();
function noop() {}
diff --git a/benchmark/streams/readable-bigunevenread.js b/benchmark/streams/readable-bigunevenread.js
index d176166ae4f432..e2f2c1406a1da0 100644
--- a/benchmark/streams/readable-bigunevenread.js
+++ b/benchmark/streams/readable-bigunevenread.js
@@ -7,8 +7,7 @@ const bench = common.createBenchmark(main, {
n: [100e1]
});
-function main(conf) {
- const n = +conf.n;
+function main({ n }) {
const b = new Buffer(32);
const s = new Readable();
function noop() {}
diff --git a/benchmark/streams/readable-boundaryread.js b/benchmark/streams/readable-boundaryread.js
index 4834da0a2c5bf8..835c7d18b51285 100644
--- a/benchmark/streams/readable-boundaryread.js
+++ b/benchmark/streams/readable-boundaryread.js
@@ -8,11 +8,10 @@ const bench = common.createBenchmark(main, {
type: ['string', 'buffer']
});
-function main(conf) {
- const n = +conf.n;
+function main({ n, type }) {
const s = new Readable();
var data = 'a'.repeat(32);
- if (conf.type === 'buffer')
+ if (type === 'buffer')
data = Buffer.from(data);
s._read = function() {};
diff --git a/benchmark/streams/readable-readall.js b/benchmark/streams/readable-readall.js
index be34afbeabc090..5715e42017c795 100644
--- a/benchmark/streams/readable-readall.js
+++ b/benchmark/streams/readable-readall.js
@@ -7,8 +7,7 @@ const bench = common.createBenchmark(main, {
n: [50e2]
});
-function main(conf) {
- const n = +conf.n;
+function main({ n }) {
const b = new Buffer(32);
const s = new Readable();
function noop() {}
diff --git a/benchmark/streams/readable-unevenread.js b/benchmark/streams/readable-unevenread.js
index ebbc727ad23ec3..d7a408b1c56a31 100644
--- a/benchmark/streams/readable-unevenread.js
+++ b/benchmark/streams/readable-unevenread.js
@@ -7,8 +7,7 @@ const bench = common.createBenchmark(main, {
n: [100e1]
});
-function main(conf) {
- const n = +conf.n;
+function main({ n }) {
const b = new Buffer(32);
const s = new Readable();
function noop() {}
diff --git a/benchmark/streams/transform-creation.js b/benchmark/streams/transform-creation.js
index bd5ac96fa71e32..abfab0c8e25321 100644
--- a/benchmark/streams/transform-creation.js
+++ b/benchmark/streams/transform-creation.js
@@ -13,9 +13,7 @@ function MyTransform() {
inherits(MyTransform, Transform);
MyTransform.prototype._transform = function() {};
-function main(conf) {
- const n = +conf.n;
-
+function main({ n }) {
bench.start();
for (var i = 0; i < n; ++i)
new MyTransform();
diff --git a/benchmark/streams/writable-manywrites.js b/benchmark/streams/writable-manywrites.js
index fadafe86e4cf70..6fcb07e849d615 100644
--- a/benchmark/streams/writable-manywrites.js
+++ b/benchmark/streams/writable-manywrites.js
@@ -7,8 +7,7 @@ const bench = common.createBenchmark(main, {
n: [2e6]
});
-function main(conf) {
- const n = +conf.n;
+function main({ n }) {
const b = Buffer.allocUnsafe(1024);
const s = new Writable();
s._write = function(chunk, encoding, cb) {
diff --git a/benchmark/string_decoder/string-decoder-create.js b/benchmark/string_decoder/string-decoder-create.js
index 17c0f6750d0721..386f99e7c0ee6f 100644
--- a/benchmark/string_decoder/string-decoder-create.js
+++ b/benchmark/string_decoder/string-decoder-create.js
@@ -9,10 +9,7 @@ const bench = common.createBenchmark(main, {
n: [25e6]
});
-function main(conf) {
- const encoding = conf.encoding;
- const n = conf.n | 0;
-
+function main({ encoding, n }) {
bench.start();
for (var i = 0; i < n; ++i) {
const sd = new StringDecoder(encoding);
diff --git a/benchmark/string_decoder/string-decoder.js b/benchmark/string_decoder/string-decoder.js
index 31cf7bf2f0a8a5..95baa893bbbf94 100644
--- a/benchmark/string_decoder/string-decoder.js
+++ b/benchmark/string_decoder/string-decoder.js
@@ -4,8 +4,8 @@ const StringDecoder = require('string_decoder').StringDecoder;
const bench = common.createBenchmark(main, {
encoding: ['ascii', 'utf8', 'base64-utf8', 'base64-ascii', 'utf16le'],
- inlen: [32, 128, 1024, 4096],
- chunk: [16, 64, 256, 1024],
+ inLen: [32, 128, 1024, 4096],
+ chunkLen: [16, 64, 256, 1024],
n: [25e5]
});
@@ -13,12 +13,7 @@ const UTF8_ALPHA = 'Blåbærsyltetøy';
const ASC_ALPHA = 'Blueberry jam';
const UTF16_BUF = Buffer.from('Blåbærsyltetøy', 'utf16le');
-function main(conf) {
- const encoding = conf.encoding;
- const inLen = conf.inlen | 0;
- const chunkLen = conf.chunk | 0;
- const n = conf.n | 0;
-
+function main({ encoding, inLen, chunkLen, n }) {
var alpha;
var buf;
const chunks = [];
diff --git a/benchmark/timers/immediate.js b/benchmark/timers/immediate.js
index bbe81555cacc97..7ddb5cb05af40d 100644
--- a/benchmark/timers/immediate.js
+++ b/benchmark/timers/immediate.js
@@ -6,9 +6,9 @@ const bench = common.createBenchmark(main, {
type: ['depth', 'depth1', 'breadth', 'breadth1', 'breadth4', 'clear']
});
-function main(conf) {
- const N = +conf.thousands * 1e3;
- switch (conf.type) {
+function main({ thousands, type }) {
+ const N = thousands * 1e3;
+ switch (type) {
case 'depth':
depth(N);
break;
diff --git a/benchmark/timers/set-immediate-breadth-args.js b/benchmark/timers/set-immediate-breadth-args.js
index 348cb62fb2cc1a..d5b5a9878066db 100644
--- a/benchmark/timers/set-immediate-breadth-args.js
+++ b/benchmark/timers/set-immediate-breadth-args.js
@@ -5,8 +5,8 @@ const bench = common.createBenchmark(main, {
millions: [5]
});
-function main(conf) {
- const N = +conf.millions * 1e6;
+function main({ millions }) {
+ const N = millions * 1e6;
process.on('exit', function() {
bench.end(N / 1e6);
diff --git a/benchmark/timers/set-immediate-breadth.js b/benchmark/timers/set-immediate-breadth.js
index 3d8b038342634d..a4b217b5bff8d6 100644
--- a/benchmark/timers/set-immediate-breadth.js
+++ b/benchmark/timers/set-immediate-breadth.js
@@ -5,8 +5,8 @@ const bench = common.createBenchmark(main, {
millions: [10]
});
-function main(conf) {
- const N = +conf.millions * 1e6;
+function main({ millions }) {
+ const N = millions * 1e6;
process.on('exit', function() {
bench.end(N / 1e6);
diff --git a/benchmark/timers/set-immediate-depth-args.js b/benchmark/timers/set-immediate-depth-args.js
index 704b1814514a93..fe1340c4bd55f2 100644
--- a/benchmark/timers/set-immediate-depth-args.js
+++ b/benchmark/timers/set-immediate-depth-args.js
@@ -5,8 +5,8 @@ const bench = common.createBenchmark(main, {
millions: [5]
});
-function main(conf) {
- const N = +conf.millions * 1e6;
+function main({ millions }) {
+ const N = millions * 1e6;
process.on('exit', function() {
bench.end(N / 1e6);
diff --git a/benchmark/timers/timers-breadth.js b/benchmark/timers/timers-breadth.js
index 02ebd5bb0d082b..b05b3f91b1859d 100644
--- a/benchmark/timers/timers-breadth.js
+++ b/benchmark/timers/timers-breadth.js
@@ -5,8 +5,8 @@ const bench = common.createBenchmark(main, {
thousands: [5000],
});
-function main(conf) {
- const N = +conf.thousands * 1e3;
+function main({ thousands }) {
+ const N = thousands * 1e3;
var n = 0;
bench.start();
function cb() {
diff --git a/benchmark/timers/timers-cancel-pooled.js b/benchmark/timers/timers-cancel-pooled.js
index 23cef153876f20..33897507c83937 100644
--- a/benchmark/timers/timers-cancel-pooled.js
+++ b/benchmark/timers/timers-cancel-pooled.js
@@ -6,8 +6,8 @@ const bench = common.createBenchmark(main, {
millions: [5],
});
-function main(conf) {
- const iterations = +conf.millions * 1e6;
+function main({ millions }) {
+ const iterations = millions * 1e6;
var timer = setTimeout(() => {}, 1);
for (var i = 0; i < iterations; i++) {
diff --git a/benchmark/timers/timers-cancel-unpooled.js b/benchmark/timers/timers-cancel-unpooled.js
index 50931e35124724..57e0139dfe1a4a 100644
--- a/benchmark/timers/timers-cancel-unpooled.js
+++ b/benchmark/timers/timers-cancel-unpooled.js
@@ -6,8 +6,8 @@ const bench = common.createBenchmark(main, {
millions: [1],
});
-function main(conf) {
- const iterations = +conf.millions * 1e6;
+function main({ millions }) {
+ const iterations = millions * 1e6;
const timersList = [];
for (var i = 0; i < iterations; i++) {
diff --git a/benchmark/timers/timers-depth.js b/benchmark/timers/timers-depth.js
index 42dc652b277781..ca74eee393fd45 100644
--- a/benchmark/timers/timers-depth.js
+++ b/benchmark/timers/timers-depth.js
@@ -5,8 +5,8 @@ const bench = common.createBenchmark(main, {
thousands: [1],
});
-function main(conf) {
- const N = +conf.thousands * 1e3;
+function main({ thousands }) {
+ const N = thousands * 1e3;
var n = 0;
bench.start();
setTimeout(cb, 1);
diff --git a/benchmark/timers/timers-insert-pooled.js b/benchmark/timers/timers-insert-pooled.js
index 8bbc84290ad9b7..59d2c490c3a9b2 100644
--- a/benchmark/timers/timers-insert-pooled.js
+++ b/benchmark/timers/timers-insert-pooled.js
@@ -5,8 +5,8 @@ const bench = common.createBenchmark(main, {
millions: [5],
});
-function main(conf) {
- const iterations = +conf.millions * 1e6;
+function main({ millions }) {
+ const iterations = millions * 1e6;
bench.start();
diff --git a/benchmark/timers/timers-insert-unpooled.js b/benchmark/timers/timers-insert-unpooled.js
index efe8e9aaa579c2..56526633358e42 100644
--- a/benchmark/timers/timers-insert-unpooled.js
+++ b/benchmark/timers/timers-insert-unpooled.js
@@ -6,8 +6,8 @@ const bench = common.createBenchmark(main, {
millions: [1],
});
-function main(conf) {
- const iterations = +conf.millions * 1e6;
+function main({ millions }) {
+ const iterations = millions * 1e6;
const timersList = [];
diff --git a/benchmark/timers/timers-timeout-pooled.js b/benchmark/timers/timers-timeout-pooled.js
index d39c8cf969a49b..df88e2784f8f91 100644
--- a/benchmark/timers/timers-timeout-pooled.js
+++ b/benchmark/timers/timers-timeout-pooled.js
@@ -8,8 +8,8 @@ const bench = common.createBenchmark(main, {
millions: [10],
});
-function main(conf) {
- const iterations = +conf.millions * 1e6;
+function main({ millions }) {
+ const iterations = millions * 1e6;
let count = 0;
// Function tracking on the hidden class in V8 can cause misleading
diff --git a/benchmark/tls/convertprotocols.js b/benchmark/tls/convertprotocols.js
index 5d561455051a0c..1ee2672bee7bd7 100644
--- a/benchmark/tls/convertprotocols.js
+++ b/benchmark/tls/convertprotocols.js
@@ -7,9 +7,7 @@ const bench = common.createBenchmark(main, {
n: [1, 50000]
});
-function main(conf) {
- const n = +conf.n;
-
+function main({ n }) {
var i = 0;
var m = {};
// First call dominates results
diff --git a/benchmark/tls/throughput.js b/benchmark/tls/throughput.js
index 51feb85cbaccc1..f63257c49693d6 100644
--- a/benchmark/tls/throughput.js
+++ b/benchmark/tls/throughput.js
@@ -6,20 +6,15 @@ const bench = common.createBenchmark(main, {
size: [2, 1024, 1024 * 1024]
});
-var dur, type, encoding, size;
-var server;
-
const path = require('path');
const fs = require('fs');
const cert_dir = path.resolve(__dirname, '../../test/fixtures');
var options;
const tls = require('tls');
-function main(conf) {
- dur = +conf.dur;
- type = conf.type;
- size = +conf.size;
-
+function main({ dur, type, size }) {
+ var encoding;
+ var server;
var chunk;
switch (type) {
case 'buf':
diff --git a/benchmark/tls/tls-connect.js b/benchmark/tls/tls-connect.js
index 628b040ee88c9b..67f2d5f8a932e0 100644
--- a/benchmark/tls/tls-connect.js
+++ b/benchmark/tls/tls-connect.js
@@ -16,10 +16,7 @@ var dur;
var concurrency;
var running = true;
-function main(conf) {
- dur = +conf.dur;
- concurrency = +conf.concurrency;
-
+function main({ dur, concurrency }) {
const cert_dir = path.resolve(__dirname, '../../test/fixtures');
const options = {
key: fs.readFileSync(`${cert_dir}/test_key.pem`),
diff --git a/benchmark/url/legacy-vs-whatwg-url-get-prop.js b/benchmark/url/legacy-vs-whatwg-url-get-prop.js
index 229a4e60652b64..93603c258cf1f2 100644
--- a/benchmark/url/legacy-vs-whatwg-url-get-prop.js
+++ b/benchmark/url/legacy-vs-whatwg-url-get-prop.js
@@ -71,11 +71,7 @@ function useWHATWG(n, input) {
return noDead;
}
-function main(conf) {
- const type = conf.type;
- const n = conf.n | 0;
- const method = conf.method;
-
+function main({ type, n, method }) {
const input = inputs[type];
if (!input) {
throw new Error('Unknown input type');
diff --git a/benchmark/url/legacy-vs-whatwg-url-parse.js b/benchmark/url/legacy-vs-whatwg-url-parse.js
index ec386b7b85597d..da42d5a189af47 100644
--- a/benchmark/url/legacy-vs-whatwg-url-parse.js
+++ b/benchmark/url/legacy-vs-whatwg-url-parse.js
@@ -31,11 +31,7 @@ function useWHATWG(n, input) {
return noDead;
}
-function main(conf) {
- const type = conf.type;
- const n = conf.n | 0;
- const method = conf.method;
-
+function main({ type, n, method }) {
const input = inputs[type];
if (!input) {
throw new Error('Unknown input type');
diff --git a/benchmark/url/legacy-vs-whatwg-url-searchparams-parse.js b/benchmark/url/legacy-vs-whatwg-url-searchparams-parse.js
index b4a80af4e5eabd..51953ec8707374 100644
--- a/benchmark/url/legacy-vs-whatwg-url-searchparams-parse.js
+++ b/benchmark/url/legacy-vs-whatwg-url-searchparams-parse.js
@@ -28,11 +28,7 @@ function useWHATWG(n, input) {
bench.end(n);
}
-function main(conf) {
- const type = conf.type;
- const n = conf.n | 0;
- const method = conf.method;
-
+function main({ type, n, method }) {
const input = inputs[type];
if (!input) {
throw new Error('Unknown input type');
diff --git a/benchmark/url/legacy-vs-whatwg-url-searchparams-serialize.js b/benchmark/url/legacy-vs-whatwg-url-searchparams-serialize.js
index 2b8d2c36a810b3..3490782a1bf421 100644
--- a/benchmark/url/legacy-vs-whatwg-url-searchparams-serialize.js
+++ b/benchmark/url/legacy-vs-whatwg-url-searchparams-serialize.js
@@ -30,11 +30,7 @@ function useWHATWG(n, input, prop) {
bench.end(n);
}
-function main(conf) {
- const type = conf.type;
- const n = conf.n | 0;
- const method = conf.method;
-
+function main({ type, n, method }) {
const input = inputs[type];
if (!input) {
throw new Error('Unknown input type');
diff --git a/benchmark/url/legacy-vs-whatwg-url-serialize.js b/benchmark/url/legacy-vs-whatwg-url-serialize.js
index 35b459a10c0e0b..e92b941b5d57e5 100644
--- a/benchmark/url/legacy-vs-whatwg-url-serialize.js
+++ b/benchmark/url/legacy-vs-whatwg-url-serialize.js
@@ -33,11 +33,7 @@ function useWHATWG(n, input, prop) {
return noDead;
}
-function main(conf) {
- const type = conf.type;
- const n = conf.n | 0;
- const method = conf.method;
-
+function main({ type, n, method }) {
const input = inputs[type];
if (!input) {
throw new Error('Unknown input type');
diff --git a/benchmark/url/url-format.js b/benchmark/url/url-format.js
index dc8e020879e400..14696af8e31c3f 100644
--- a/benchmark/url/url-format.js
+++ b/benchmark/url/url-format.js
@@ -12,10 +12,7 @@ const bench = common.createBenchmark(main, {
n: [25e6]
});
-function main(conf) {
- const type = conf.type;
- const n = conf.n | 0;
-
+function main({ type, n }) {
const input = inputs[type] || '';
// Force-optimize url.format() so that the benchmark doesn't get
diff --git a/benchmark/url/url-resolve.js b/benchmark/url/url-resolve.js
index 421a70ef6d59f1..48978574ea24ec 100644
--- a/benchmark/url/url-resolve.js
+++ b/benchmark/url/url-resolve.js
@@ -18,13 +18,12 @@ const bench = common.createBenchmark(main, {
n: [1e5]
});
-function main(conf) {
- const n = conf.n | 0;
- const href = hrefs[conf.href];
- const path = paths[conf.path];
+function main({ n, href, path }) {
+ const h = hrefs[href];
+ const p = paths[path];
bench.start();
for (var i = 0; i < n; i += 1)
- url.resolve(href, path);
+ url.resolve(h, p);
bench.end(n);
}
diff --git a/benchmark/url/url-searchparams-iteration.js b/benchmark/url/url-searchparams-iteration.js
index 0f4b71a0a183dd..2b13992bdfcfc0 100644
--- a/benchmark/url/url-searchparams-iteration.js
+++ b/benchmark/url/url-searchparams-iteration.js
@@ -44,10 +44,7 @@ function iterator(n) {
assert.strictEqual(noDead[1], '3rd');
}
-function main(conf) {
- const method = conf.method;
- const n = conf.n | 0;
-
+function main({ method, n }) {
switch (method) {
case 'forEach':
forEach(n);
diff --git a/benchmark/url/url-searchparams-read.js b/benchmark/url/url-searchparams-read.js
index 762ffcca03d69d..29235ee81e0e14 100644
--- a/benchmark/url/url-searchparams-read.js
+++ b/benchmark/url/url-searchparams-read.js
@@ -37,11 +37,7 @@ function has(n, param) {
bench.end(n);
}
-function main(conf) {
- const method = conf.method;
- const param = conf.param;
- const n = conf.n | 0;
-
+function main({ method, param, n }) {
switch (method) {
case 'get':
get(n, param);
diff --git a/benchmark/url/url-searchparams-sort.js b/benchmark/url/url-searchparams-sort.js
index 677ce511cf3ea2..524dacb6d52dc4 100644
--- a/benchmark/url/url-searchparams-sort.js
+++ b/benchmark/url/url-searchparams-sort.js
@@ -31,10 +31,9 @@ const bench = common.createBenchmark(main, {
flags: ['--expose-internals']
});
-function main(conf) {
+function main({ type, n }) {
const searchParams = require('internal/url').searchParamsSymbol;
- const input = inputs[conf.type];
- const n = conf.n | 0;
+ const input = inputs[type];
const params = new URLSearchParams();
const array = getParams(input);
diff --git a/benchmark/url/usvstring.js b/benchmark/url/usvstring.js
index 40a945037385cf..91abe8d67351c7 100644
--- a/benchmark/url/usvstring.js
+++ b/benchmark/url/usvstring.js
@@ -16,10 +16,9 @@ const bench = common.createBenchmark(main, {
flags: ['--expose-internals']
});
-function main(conf) {
+function main({ input, n }) {
const { toUSVString } = require('internal/url');
- const str = inputs[conf.input];
- const n = conf.n | 0;
+ const str = inputs[input];
bench.start();
for (var i = 0; i < n; i++)
diff --git a/benchmark/url/whatwg-url-idna.js b/benchmark/url/whatwg-url-idna.js
index 3d0ea3dc8fe516..c1e3d4a0b85cec 100644
--- a/benchmark/url/whatwg-url-idna.js
+++ b/benchmark/url/whatwg-url-idna.js
@@ -31,15 +31,13 @@ const bench = common.createBenchmark(main, {
n: [5e6]
});
-function main(conf) {
- const n = conf.n | 0;
- const to = conf.to;
- const input = inputs[conf.input][to];
+function main({ n, to, input }) {
+ const value = inputs[input][to];
const method = to === 'ascii' ? domainToASCII : domainToUnicode;
bench.start();
for (var i = 0; i < n; i++) {
- method(input);
+ method(value);
}
bench.end(n);
}
diff --git a/benchmark/url/whatwg-url-properties.js b/benchmark/url/whatwg-url-properties.js
index 3a865d2335ab3c..f526c07f139be9 100644
--- a/benchmark/url/whatwg-url-properties.js
+++ b/benchmark/url/whatwg-url-properties.js
@@ -46,11 +46,9 @@ function getAlternative(prop) {
return alternatives[prop];
}
-function main(conf) {
- const n = conf.n | 0;
- const input = inputs[conf.input];
- const url = new URL(input);
- const prop = conf.prop;
+function main({ n, input, prop }) {
+ const value = inputs[input];
+ const url = new URL(value);
switch (prop) {
case 'protocol':
diff --git a/benchmark/util/normalize-encoding.js b/benchmark/util/normalize-encoding.js
index 2cdfd54442114d..96eab1912d0761 100644
--- a/benchmark/util/normalize-encoding.js
+++ b/benchmark/util/normalize-encoding.js
@@ -47,11 +47,9 @@ function getInput(input) {
}
}
-function main(conf) {
- const normalizeEncoding = require('internal/util').normalizeEncoding;
-
- const n = conf.n | 0;
- const inputs = getInput(conf.input);
+function main({ input, n }) {
+ const { normalizeEncoding } = require('internal/util');
+ const inputs = getInput(input);
var noDead = '';
bench.start();
diff --git a/benchmark/util/type-check.js b/benchmark/util/type-check.js
index ee8dd7e4ece188..e1d1ac553fedcf 100644
--- a/benchmark/util/type-check.js
+++ b/benchmark/util/type-check.js
@@ -34,16 +34,15 @@ const bench = common.createBenchmark(main, {
flags: ['--expose-internals']
});
-function main(conf) {
+function main({ type, argument, version, n }) {
// For testing, if supplied with an empty type, default to ArrayBufferView.
- conf.type = conf.type || 'ArrayBufferView';
+ type = type || 'ArrayBufferView';
const util = process.binding('util');
const types = require('internal/util/types');
- const n = (+conf.n) | 0;
- const func = { native: util, js: types }[conf.version][`is${conf.type}`];
- const arg = args[conf.type][conf.argument];
+ const func = { native: util, js: types }[version][`is${type}`];
+ const arg = args[type][argument];
bench.start();
for (var i = 0; i < n; i++) {
diff --git a/benchmark/v8/get-stats.js b/benchmark/v8/get-stats.js
index 96de7572397161..6ee742858629c2 100644
--- a/benchmark/v8/get-stats.js
+++ b/benchmark/v8/get-stats.js
@@ -11,9 +11,7 @@ const bench = common.createBenchmark(main, {
n: [1e6]
});
-function main(conf) {
- const n = +conf.n;
- const method = conf.method;
+function main({ method, n }) {
var i = 0;
bench.start();
for (; i < n; i++)
diff --git a/benchmark/vm/run-in-context.js b/benchmark/vm/run-in-context.js
index 6e26a6d0ebeb38..da8f56a6e0153b 100644
--- a/benchmark/vm/run-in-context.js
+++ b/benchmark/vm/run-in-context.js
@@ -10,10 +10,8 @@ const bench = common.createBenchmark(main, {
const vm = require('vm');
-function main(conf) {
- const n = +conf.n;
- const options = conf.breakOnSigint ? { breakOnSigint: true } : {};
- const withSigintListener = !!conf.withSigintListener;
+function main({ n, breakOnSigint, withSigintListener }) {
+ const options = breakOnSigint ? { breakOnSigint: true } : {};
process.removeAllListeners('SIGINT');
if (withSigintListener)
diff --git a/benchmark/vm/run-in-this-context.js b/benchmark/vm/run-in-this-context.js
index a0c737f46954f1..33fd3a34d81f8f 100644
--- a/benchmark/vm/run-in-this-context.js
+++ b/benchmark/vm/run-in-this-context.js
@@ -10,10 +10,8 @@ const bench = common.createBenchmark(main, {
const vm = require('vm');
-function main(conf) {
- const n = +conf.n;
- const options = conf.breakOnSigint ? { breakOnSigint: true } : {};
- const withSigintListener = !!conf.withSigintListener;
+function main({ n, breakOnSigint, withSigintListener }) {
+ const options = breakOnSigint ? { breakOnSigint: true } : {};
process.removeAllListeners('SIGINT');
if (withSigintListener)
diff --git a/benchmark/zlib/creation.js b/benchmark/zlib/creation.js
index 5046ef50ecff06..4984bf1a86b755 100644
--- a/benchmark/zlib/creation.js
+++ b/benchmark/zlib/creation.js
@@ -10,14 +10,13 @@ const bench = common.createBenchmark(main, {
n: [5e5]
});
-function main(conf) {
- const n = +conf.n;
- const fn = zlib[`create${conf.type}`];
+function main({ n, type, options }) {
+ const fn = zlib[`create${type}`];
if (typeof fn !== 'function')
throw new Error('Invalid zlib type');
var i = 0;
- if (conf.options === 'true') {
+ if (options === 'true') {
const opts = {};
bench.start();
for (; i < n; ++i)
diff --git a/benchmark/zlib/deflate.js b/benchmark/zlib/deflate.js
index 00993b64462539..5e86d659803747 100644
--- a/benchmark/zlib/deflate.js
+++ b/benchmark/zlib/deflate.js
@@ -8,10 +8,8 @@ const bench = common.createBenchmark(main, {
n: [4e5]
});
-function main(conf) {
- const n = +conf.n;
- const method = conf.method;
- const chunk = Buffer.alloc(+conf.inputLen, 'a');
+function main({ n, method, inputLen }) {
+ const chunk = Buffer.alloc(inputLen, 'a');
var i = 0;
switch (method) {
diff --git a/common.gypi b/common.gypi
index b5e1bc0628d22c..0ee48812415799 100644
--- a/common.gypi
+++ b/common.gypi
@@ -27,7 +27,7 @@
# Reset this number to 0 on major V8 upgrades.
# Increment by one for each non-official patch applied to deps/v8.
- 'v8_embedder_string': '-node.17',
+ 'v8_embedder_string': '-node.18',
# Enable disassembler for `--print-code` v8 options
'v8_enable_disassembler': 1,
diff --git a/configure b/configure
index e434059d6f520b..fc1be60c97496c 100755
--- a/configure
+++ b/configure
@@ -878,7 +878,6 @@ def configure_node(o):
configure_mips(o)
if flavor == 'aix':
- o['variables']['node_core_target_name'] = 'node_base'
o['variables']['node_target_type'] = 'static_library'
if target_arch in ('x86', 'x64', 'ia32', 'x32'):
@@ -900,8 +899,6 @@ def configure_node(o):
if options.systemtap_includes:
o['include_dirs'] += [options.systemtap_includes]
o['variables']['node_use_dtrace'] = b(use_dtrace)
- o['variables']['uv_use_dtrace'] = b(use_dtrace)
- o['variables']['uv_parent_path'] = '/deps/uv/'
elif options.with_dtrace:
raise Exception(
'DTrace is currently only supported on SunOS, MacOS or Linux systems.')
@@ -978,7 +975,6 @@ def configure_node(o):
o['variables']['library_files'] = options.linked_module
o['variables']['asan'] = int(options.enable_asan or 0)
- o['variables']['debug_devtools'] = 'node'
if options.use_xcode and options.use_ninja:
raise Exception('--xcode and --ninja cannot be used together.')
@@ -988,6 +984,13 @@ def configure_node(o):
else:
o['variables']['coverage'] = 'false'
+ if options.shared:
+ o['variables']['node_target_type'] = 'shared_library'
+ elif options.enable_static:
+ o['variables']['node_target_type'] = 'static_library'
+ else:
+ o['variables']['node_target_type'] = 'executable'
+
def configure_library(lib, output):
shared_lib = 'shared_' + lib
output['variables']['node_' + shared_lib] = b(getattr(options, shared_lib))
@@ -1346,8 +1349,6 @@ def configure_intl(o):
# this is the input '.dat' file to use .. icudt*.dat
# may be little-endian if from a icu-project.org tarball
o['variables']['icu_data_in'] = icu_data_in
- # this is the icudt*.dat file which node will be using (platform endianness)
- o['variables']['icu_data_file'] = icu_data_file
if not os.path.isfile(icu_data_path):
print('Error: ICU prebuilt data file %s does not exist.' % icu_data_path)
print('See the README.md.')
@@ -1484,6 +1485,7 @@ config = {
'BUILDTYPE': 'Debug' if options.debug else 'Release',
'USE_XCODE': str(int(options.use_xcode or 0)),
'PYTHON': sys.executable,
+ 'NODE_TARGET_TYPE': variables['node_target_type'],
}
if options.prefix:
diff --git a/deps/uv/AUTHORS b/deps/uv/AUTHORS
index b227123e4ac35c..c826c8e13306a7 100644
--- a/deps/uv/AUTHORS
+++ b/deps/uv/AUTHORS
@@ -321,3 +321,7 @@ Pekka Nikander
Ed Schouten
Xu Meng
Matt Harrison
+Anna Henningsen
+Jérémy Lal
+Ben Wijen
+elephantp
diff --git a/deps/uv/ChangeLog b/deps/uv/ChangeLog
index 113c28aed136a7..163500245bf560 100644
--- a/deps/uv/ChangeLog
+++ b/deps/uv/ChangeLog
@@ -1,3 +1,66 @@
+2018.01.20, Version 1.19.1 (Stable), 8202d1751196c2374ad370f7f3779daef89befae
+
+Changes since version 1.19.0:
+
+* Revert "unix,tcp: avoid marking server sockets connected" (Ben Noordhuis)
+
+* Revert "unix,fs: fix for potential partial reads/writes" (Ben Noordhuis)
+
+* Revert "win: use RemoveDirectoryW() instead of _wmrmdir()" (Ben Noordhuis)
+
+* cygwin: fix compilation of ifaddrs impl (Brad King)
+
+
+2018.01.18, Version 1.19.0 (Stable), effbb7c9d29090b2e085a40867f8cdfa916a66df
+
+Changes since version 1.18.0:
+
+* core: add getter/setter functions for easier ABI compat (Anna Henningsen)
+
+* unix: make get(set)_process_title MT-safe (Matt Harrison)
+
+* unix,win: wait for threads to start (Ben Noordhuis)
+
+* test: add threadpool init/teardown test (Bartosz Sosnowski)
+
+* win, process: uv_kill improvements (Bartosz Sosnowski)
+
+* win: set _WIN32_WINNT to 0x0600 (cjihrig)
+
+* zos: implement uv_fs_event* functions (jBarz)
+
+* unix,tcp: avoid marking server sockets connected (Jameson Nash)
+
+* doc: mark Windows 7 as Tier 1 support (Bartosz Sosnowski)
+
+* win: map 0.0.0.0 and :: addresses to localhost (Bartosz Sosnowski)
+
+* build: install libuv.pc unconditionally (Ben Noordhuis)
+
+* test: remove custom timeout for thread test on ppc (Ben Noordhuis)
+
+* test: allow multicast not permitted status (Jérémy Lal)
+
+* test: allow net unreachable status in udp test (Ben Noordhuis)
+
+* unix: use SA_RESTART when setting our sighandler (Brad King)
+
+* unix,fs: fix for potential partial reads/writes (Ben Wijen)
+
+* win,build: do not build executable installer for dll (Bert Belder)
+
+* win: allow directory symlinks to be created in a non-elevated context (Bert
+ Belder)
+
+* zos,test: accept SIGKILL for flaky test (jBarz)
+
+* win: use RemoveDirectoryW() instead of _wmrmdir() (Ben Noordhuis)
+
+* unix: fix uv_cpu_info() error on FreeBSD (elephantp)
+
+* zos,test: decrease pings to avoid timeout (jBarz)
+
+
2017.12.02, Version 1.18.0 (Stable), 1489c98b7fc17f1702821a269eb0c5e730c5c813
Changes since version 1.17.0:
diff --git a/deps/uv/Makefile.am b/deps/uv/Makefile.am
index e01cf416638bfe..ae9d96bcf61ef9 100644
--- a/deps/uv/Makefile.am
+++ b/deps/uv/Makefile.am
@@ -29,6 +29,7 @@ libuv_la_SOURCES = src/fs-poll.c \
src/inet.c \
src/queue.h \
src/threadpool.c \
+ src/uv-data-getter-setters.c \
src/uv-common.c \
src/uv-common.h \
src/version.c
@@ -158,6 +159,7 @@ test_run_tests_SOURCES = test/blackhole-server.c \
test/test-close-fd.c \
test/test-close-order.c \
test/test-condvar.c \
+ test/test-connect-unspecified.c \
test/test-connection-fail.c \
test/test-cwd-and-chdir.c \
test/test-default-loop-close.c \
@@ -174,6 +176,7 @@ test_run_tests_SOURCES = test/blackhole-server.c \
test/test-fs-poll.c \
test/test-fs.c \
test/test-fork.c \
+ test/test-getters-setters.c \
test/test-get-currentexe.c \
test/test-get-loadavg.c \
test/test-get-memory.c \
@@ -220,6 +223,7 @@ test_run_tests_SOURCES = test/blackhole-server.c \
test/test-poll-closesocket.c \
test/test-poll-oob.c \
test/test-process-title.c \
+ test/test-process-title-threadsafe.c \
test/test-queue-foreach-delete.c \
test/test-ref.c \
test/test-run-nowait.c \
@@ -455,13 +459,10 @@ libuv_la_CFLAGS += -D_UNIX03_THREADS \
-qFLOAT=IEEE
libuv_la_LDFLAGS += -qXPLINK
libuv_la_SOURCES += src/unix/pthread-fixes.c \
- src/unix/no-fsevents.c \
src/unix/os390.c \
src/unix/os390-syscalls.c \
src/unix/proctitle.c
endif
-if HAVE_PKG_CONFIG
pkgconfigdir = $(libdir)/pkgconfig
pkgconfig_DATA = @PACKAGE_NAME@.pc
-endif
diff --git a/deps/uv/SUPPORTED_PLATFORMS.md b/deps/uv/SUPPORTED_PLATFORMS.md
index 08fd5f4a9a1100..c56913bbc2fff1 100644
--- a/deps/uv/SUPPORTED_PLATFORMS.md
+++ b/deps/uv/SUPPORTED_PLATFORMS.md
@@ -4,7 +4,7 @@
|---|---|---|---|
| GNU/Linux | Tier 1 | Linux >= 2.6.32 with glibc >= 2.12 | |
| macOS | Tier 1 | macOS >= 10.7 | |
-| Windows | Tier 1 | Windows >= 8.1 | MSVC 2008 and later are supported |
+| Windows | Tier 1 | >= Windows 7 | MSVC 2008 and later are supported |
| FreeBSD | Tier 1 | >= 9 (see note) | |
| AIX | Tier 2 | >= 6 | Maintainers: @libuv/aix |
| z/OS | Tier 2 | >= V2R2 | Maintainers: @libuv/zos |
diff --git a/deps/uv/appveyor.yml b/deps/uv/appveyor.yml
index f77e640eb10f09..1b018a59cad86c 100644
--- a/deps/uv/appveyor.yml
+++ b/deps/uv/appveyor.yml
@@ -28,12 +28,5 @@ build_script:
- cmd: set ARCH=%platform%
- cmd: vcbuild.bat release %ARCH% shared
-after_build:
- - '"%PROGRAMFILES(x86)%\NSIS\makensis" /DVERSION=%APPVEYOR_BUILD_VERSION% /DARCH=%ARCH% libuv.nsi'
-
-artifacts:
- - name: Installer
- path: 'libuv-*.exe'
-
cache:
- C:\projects\libuv\build\gyp
diff --git a/deps/uv/checksparse.sh b/deps/uv/checksparse.sh
index d4a983d02618b5..27eb529bcae13c 100755
--- a/deps/uv/checksparse.sh
+++ b/deps/uv/checksparse.sh
@@ -53,6 +53,7 @@ src/unix/tty.c
src/unix/udp.c
src/uv-common.c
src/uv-common.h
+src/uv-data-getter-setters.c
"
TESTS="
@@ -100,6 +101,7 @@ test/test-fs-copyfile.c
test/test-fs-event.c
test/test-fs-poll.c
test/test-fs.c
+test/test-getters-setters.c
test/test-get-currentexe.c
test/test-get-loadavg.c
test/test-get-memory.c
@@ -126,6 +128,7 @@ test/test-platform-output.c
test/test-poll-close.c
test/test-poll.c
test/test-process-title.c
+test/test-process-title-threadsafe.c
test/test-ref.c
test/test-run-nowait.c
test/test-run-once.c
diff --git a/deps/uv/configure.ac b/deps/uv/configure.ac
index 7eb1674dbaa7fb..75fb13c8ce7b23 100644
--- a/deps/uv/configure.ac
+++ b/deps/uv/configure.ac
@@ -13,7 +13,7 @@
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
AC_PREREQ(2.57)
-AC_INIT([libuv], [1.18.0], [https://github.com/libuv/libuv/issues])
+AC_INIT([libuv], [1.19.1], [https://github.com/libuv/libuv/issues])
AC_CONFIG_MACRO_DIR([m4])
m4_include([m4/libuv-extra-automake-flags.m4])
m4_include([m4/as_case.m4])
@@ -68,10 +68,5 @@ AS_CASE([$host_os],[mingw*], [
])
AS_CASE([$host_os], [netbsd*], [AC_CHECK_LIB([kvm], [kvm_open])])
AC_CHECK_HEADERS([sys/ahafs_evProds.h])
-AC_CHECK_PROG(PKG_CONFIG, pkg-config, yes)
-AM_CONDITIONAL([HAVE_PKG_CONFIG], [test "x$PKG_CONFIG" != "x"])
-AS_IF([test "x$PKG_CONFIG" != "x"], [
- AC_CONFIG_FILES([libuv.pc])
-])
-AC_CONFIG_FILES([Makefile])
+AC_CONFIG_FILES([Makefile libuv.pc])
AC_OUTPUT
diff --git a/deps/uv/docs/src/fs.rst b/deps/uv/docs/src/fs.rst
index 16d5e05c7834cf..87af828a28a7fa 100644
--- a/deps/uv/docs/src/fs.rst
+++ b/deps/uv/docs/src/fs.rst
@@ -340,6 +340,36 @@ API
.. note::
These functions are not implemented on Windows.
+.. c:function:: uv_fs_type uv_fs_get_type(const uv_fs_t* req)
+
+ Returns `req->fs_type`.
+
+ .. versionadded:: 1.19.0
+
+.. c:function:: ssize_t uv_fs_get_result(const uv_fs_t* req)
+
+ Returns `req->result`.
+
+ .. versionadded:: 1.19.0
+
+.. c:function:: void* uv_fs_get_ptr(const uv_fs_t* req)
+
+ Returns `req->ptr`.
+
+ .. versionadded:: 1.19.0
+
+.. c:function:: const char* uv_fs_get_path(const uv_fs_t* req)
+
+ Returns `req->path`.
+
+ .. versionadded:: 1.19.0
+
+.. c:function:: uv_stat_t* uv_fs_get_statbuf(uv_fs_t* req)
+
+ Returns `&req->statbuf`.
+
+ .. versionadded:: 1.19.0
+
.. seealso:: The :c:type:`uv_req_t` API functions also apply.
Helper functions
diff --git a/deps/uv/docs/src/fs_event.rst b/deps/uv/docs/src/fs_event.rst
index 2af3e9802bd0a1..bd076aaeb40494 100644
--- a/deps/uv/docs/src/fs_event.rst
+++ b/deps/uv/docs/src/fs_event.rst
@@ -19,7 +19,13 @@ the best backend for the job on each platform.
See documentation_ for more details.
+ The z/OS file system events monitoring infrastructure does not notify of file
+ creation/deletion within a directory that is being monitored.
+ See the `IBM Knowledge centre`_ for more details.
+
.. _documentation: http://www.ibm.com/developerworks/aix/library/au-aix_event_infrastructure/
+ .. _`IBM Knowledge centre`: https://www.ibm.com/support/knowledgecenter/en/SSLTBW_2.2.0/com.ibm.zos.v2r1.bpxb100/ioc.htm
+
diff --git a/deps/uv/docs/src/handle.rst b/deps/uv/docs/src/handle.rst
index a0f3d05fdb1b4a..e4cb90b5f7e14b 100644
--- a/deps/uv/docs/src/handle.rst
+++ b/deps/uv/docs/src/handle.rst
@@ -211,6 +211,38 @@ just for some handle types.
Be very careful when using this function. libuv assumes it's in control of the file
descriptor so any change to it may lead to malfunction.
+.. c:function:: uv_loop_t* uv_handle_get_loop(const uv_handle_t* handle)
+
+ Returns `handle->loop`.
+
+ .. versionadded:: 1.19.0
+
+.. c:function:: void* uv_handle_get_data(const uv_handle_t* handle)
+
+ Returns `handle->data`.
+
+ .. versionadded:: 1.19.0
+
+.. c:function:: void* uv_handle_set_data(uv_handle_t* handle, void* data)
+
+ Sets `handle->data` to `data`.
+
+ .. versionadded:: 1.19.0
+
+.. c:function:: uv_handle_type uv_handle_get_type(const uv_handle_t* handle)
+
+ Returns `handle->type`.
+
+ .. versionadded:: 1.19.0
+
+.. c:function:: const char* uv_handle_type_name(uv_handle_type type)
+
+ Returns the name for the equivalent struct for a given handle type,
+ e.g. `"pipe"` (as in :c:type:`uv_pipe_t`) for `UV_NAMED_PIPE`.
+
+ If no such handle type exists, this returns `NULL`.
+
+ .. versionadded:: 1.19.0
.. _refcount:
diff --git a/deps/uv/docs/src/loop.rst b/deps/uv/docs/src/loop.rst
index 18dd135cd63834..dcde5049ac2baa 100644
--- a/deps/uv/docs/src/loop.rst
+++ b/deps/uv/docs/src/loop.rst
@@ -222,3 +222,15 @@ API
Any previous value returned from :c:func`uv_backend_fd` is now
invalid. That function must be called again to determine the
correct backend file descriptor.
+
+.. c:function:: void* uv_loop_get_data(const uv_loop_t* loop)
+
+ Returns `loop->data`.
+
+ .. versionadded:: 1.19.0
+
+.. c:function:: void* uv_loop_set_data(uv_loop_t* loop, void* data)
+
+ Sets `loop->data` to `data`.
+
+ .. versionadded:: 1.19.0
diff --git a/deps/uv/docs/src/misc.rst b/deps/uv/docs/src/misc.rst
index a653413e0927bd..07908c98ff8e9c 100644
--- a/deps/uv/docs/src/misc.rst
+++ b/deps/uv/docs/src/misc.rst
@@ -197,8 +197,7 @@ API
`UV_EINVAL` is returned. If `size` cannot accommodate the process title and
terminating `NULL` character, the function returns `UV_ENOBUFS`.
- .. warning::
- `uv_get_process_title` is not thread safe on any platform except Windows.
+ .. versionchanged:: 1.18.1 now thread-safe on all supported platforms.
.. c:function:: int uv_set_process_title(const char* title)
@@ -208,8 +207,7 @@ API
larger than the available space. Other platforms will return `UV_ENOMEM` if
they cannot allocate enough space to duplicate the contents of `title`.
- .. warning::
- `uv_set_process_title` is not thread safe on any platform except Windows.
+ .. versionchanged:: 1.18.1 now thread-safe on all supported platforms.
.. c:function:: int uv_resident_set_memory(size_t* rss)
diff --git a/deps/uv/docs/src/process.rst b/deps/uv/docs/src/process.rst
index b0380ddfb72e5e..ecc3cbf34814eb 100644
--- a/deps/uv/docs/src/process.rst
+++ b/deps/uv/docs/src/process.rst
@@ -222,4 +222,10 @@ API
Sends the specified signal to the given PID. Check the documentation
on :c:ref:`signal` for signal support, specially on Windows.
+.. c:function:: uv_pid_t uv_process_get_pid(const uv_process_t* handle)
+
+ Returns `handle->pid`.
+
+ .. versionadded:: 1.19.0
+
.. seealso:: The :c:type:`uv_handle_t` API functions also apply.
diff --git a/deps/uv/docs/src/request.rst b/deps/uv/docs/src/request.rst
index 660b80ae9573b3..54d9a2f30939da 100644
--- a/deps/uv/docs/src/request.rst
+++ b/deps/uv/docs/src/request.rst
@@ -80,3 +80,30 @@ API
Returns the size of the given request type. Useful for FFI binding writers
who don't want to know the structure layout.
+
+.. c:function:: void* uv_req_get_data(const uv_req_t* req)
+
+ Returns `req->data`.
+
+ .. versionadded:: 1.19.0
+
+.. c:function:: void* uv_req_set_data(uv_req_t* req, void* data)
+
+ Sets `req->data` to `data`.
+
+ .. versionadded:: 1.19.0
+
+.. c:function:: uv_req_type uv_req_get_type(const uv_req_t* req)
+
+ Returns `req->type`.
+
+ .. versionadded:: 1.19.0
+
+.. c:function:: const char* uv_req_type_name(uv_req_type type)
+
+ Returns the name for the equivalent struct for a given request type,
+ e.g. `"connect"` (as in :c:type:`uv_connect_t`) for `UV_CONNECT`.
+
+ If no such request type exists, this returns `NULL`.
+
+ .. versionadded:: 1.19.0
diff --git a/deps/uv/docs/src/stream.rst b/deps/uv/docs/src/stream.rst
index 1f4e87e63a9db3..9ec23622512519 100644
--- a/deps/uv/docs/src/stream.rst
+++ b/deps/uv/docs/src/stream.rst
@@ -228,4 +228,10 @@ API
.. versionchanged:: 1.4.0 UNIX implementation added.
+.. c:function:: size_t uv_stream_get_write_queue_size(const uv_stream_t* stream)
+
+ Returns `stream->write_queue_size`.
+
+ .. versionadded:: 1.19.0
+
.. seealso:: The :c:type:`uv_handle_t` API functions also apply.
diff --git a/deps/uv/docs/src/tcp.rst b/deps/uv/docs/src/tcp.rst
index a1a5824561add9..e761b460d0e636 100644
--- a/deps/uv/docs/src/tcp.rst
+++ b/deps/uv/docs/src/tcp.rst
@@ -102,7 +102,14 @@ API
and an uninitialized :c:type:`uv_connect_t`. `addr` should point to an
initialized ``struct sockaddr_in`` or ``struct sockaddr_in6``.
+ On Windows if the `addr` is initialized to point to an unspecified address
+ (``0.0.0.0`` or ``::``) it will be changed to point to ``localhost``.
+ This is done to match the behavior of Linux systems.
+
The callback is made when the connection has been established or when a
connection error happened.
+ .. versionchanged:: 1.19.0 added ``0.0.0.0`` and ``::`` to ``localhost``
+ mapping
+
.. seealso:: The :c:type:`uv_stream_t` API functions also apply.
diff --git a/deps/uv/docs/src/udp.rst b/deps/uv/docs/src/udp.rst
index dd46603394ee7a..8148828522ee2e 100644
--- a/deps/uv/docs/src/udp.rst
+++ b/deps/uv/docs/src/udp.rst
@@ -243,6 +243,10 @@ API
with :c:func:`uv_udp_bind` it will be bound to 0.0.0.0
(the "all interfaces" IPv4 address) and a random port number.
+ On Windows if the `addr` is initialized to point to an unspecified address
+ (``0.0.0.0`` or ``::``) it will be changed to point to ``localhost``.
+ This is done to match the behavior of Linux systems.
+
:param req: UDP request handle. Need not be initialized.
:param handle: UDP handle. Should have been initialized with
@@ -259,6 +263,9 @@ API
:returns: 0 on success, or an error code < 0 on failure.
+ .. versionchanged:: 1.19.0 added ``0.0.0.0`` and ``::`` to ``localhost``
+ mapping
+
.. c:function:: int uv_udp_try_send(uv_udp_t* handle, const uv_buf_t bufs[], unsigned int nbufs, const struct sockaddr* addr)
Same as :c:func:`uv_udp_send`, but won't queue a send request if it can't
@@ -292,4 +299,16 @@ API
:returns: 0 on success, or an error code < 0 on failure.
+.. c:function:: size_t uv_udp_get_send_queue_size(const uv_udp_t* handle)
+
+ Returns `handle->send_queue_size`.
+
+ .. versionadded:: 1.19.0
+
+.. c:function:: size_t uv_udp_get_send_queue_count(const uv_udp_t* handle)
+
+ Returns `handle->send_queue_count`.
+
+ .. versionadded:: 1.19.0
+
.. seealso:: The :c:type:`uv_handle_t` API functions also apply.
diff --git a/deps/uv/include/uv-os390.h b/deps/uv/include/uv-os390.h
index 58f926111aa6af..39e7384db31a5b 100644
--- a/deps/uv/include/uv-os390.h
+++ b/deps/uv/include/uv-os390.h
@@ -27,4 +27,7 @@
#define UV_PLATFORM_LOOP_FIELDS \
void* ep; \
+#define UV_PLATFORM_FS_EVENT_FIELDS \
+ char rfis_rftok[8]; \
+
#endif /* UV_MVS_H */
diff --git a/deps/uv/include/uv-version.h b/deps/uv/include/uv-version.h
index 831ee54de4486e..581d761df98139 100644
--- a/deps/uv/include/uv-version.h
+++ b/deps/uv/include/uv-version.h
@@ -31,8 +31,8 @@
*/
#define UV_VERSION_MAJOR 1
-#define UV_VERSION_MINOR 18
-#define UV_VERSION_PATCH 0
+#define UV_VERSION_MINOR 19
+#define UV_VERSION_PATCH 1
#define UV_VERSION_IS_RELEASE 1
#define UV_VERSION_SUFFIX ""
diff --git a/deps/uv/include/uv-win.h b/deps/uv/include/uv-win.h
index b96bed22ace759..4c6c50a29c357e 100644
--- a/deps/uv/include/uv-win.h
+++ b/deps/uv/include/uv-win.h
@@ -20,7 +20,7 @@
*/
#ifndef _WIN32_WINNT
-# define _WIN32_WINNT 0x0502
+# define _WIN32_WINNT 0x0600
#endif
#if !defined(_SSIZE_T_) && !defined(_SSIZE_T_DEFINED)
diff --git a/deps/uv/include/uv.h b/deps/uv/include/uv.h
index b11666e2e65806..3a061132cce528 100644
--- a/deps/uv/include/uv.h
+++ b/deps/uv/include/uv.h
@@ -425,7 +425,17 @@ struct uv_handle_s {
};
UV_EXTERN size_t uv_handle_size(uv_handle_type type);
+UV_EXTERN uv_handle_type uv_handle_get_type(const uv_handle_t* handle);
+UV_EXTERN const char* uv_handle_type_name(uv_handle_type type);
+UV_EXTERN void* uv_handle_get_data(const uv_handle_t* handle);
+UV_EXTERN uv_loop_t* uv_handle_get_loop(const uv_handle_t* handle);
+UV_EXTERN void uv_handle_set_data(uv_handle_t* handle, void* data);
+
UV_EXTERN size_t uv_req_size(uv_req_type type);
+UV_EXTERN void* uv_req_get_data(const uv_req_t* req);
+UV_EXTERN void uv_req_set_data(uv_req_t* req, void* data);
+UV_EXTERN uv_req_type uv_req_get_type(const uv_req_t* req);
+UV_EXTERN const char* uv_req_type_name(uv_req_type type);
UV_EXTERN int uv_is_active(const uv_handle_t* handle);
@@ -465,6 +475,8 @@ struct uv_stream_s {
UV_STREAM_FIELDS
};
+UV_EXTERN size_t uv_stream_get_write_queue_size(const uv_stream_t* stream);
+
UV_EXTERN int uv_listen(uv_stream_t* stream, int backlog, uv_connection_cb cb);
UV_EXTERN int uv_accept(uv_stream_t* server, uv_stream_t* client);
@@ -642,6 +654,8 @@ UV_EXTERN int uv_udp_recv_start(uv_udp_t* handle,
uv_alloc_cb alloc_cb,
uv_udp_recv_cb recv_cb);
UV_EXTERN int uv_udp_recv_stop(uv_udp_t* handle);
+UV_EXTERN size_t uv_udp_get_send_queue_size(const uv_udp_t* handle);
+UV_EXTERN size_t uv_udp_get_send_queue_count(const uv_udp_t* handle);
/*
@@ -962,6 +976,7 @@ UV_EXTERN int uv_spawn(uv_loop_t* loop,
const uv_process_options_t* options);
UV_EXTERN int uv_process_kill(uv_process_t*, int signum);
UV_EXTERN int uv_kill(int pid, int signum);
+UV_EXTERN uv_pid_t uv_process_get_pid(const uv_process_t*);
/*
@@ -1135,6 +1150,12 @@ struct uv_fs_s {
UV_FS_PRIVATE_FIELDS
};
+UV_EXTERN uv_fs_type uv_fs_get_type(const uv_fs_t*);
+UV_EXTERN ssize_t uv_fs_get_result(const uv_fs_t*);
+UV_EXTERN void* uv_fs_get_ptr(const uv_fs_t*);
+UV_EXTERN const char* uv_fs_get_path(const uv_fs_t*);
+UV_EXTERN uv_stat_t* uv_fs_get_statbuf(uv_fs_t*);
+
UV_EXTERN void uv_fs_req_cleanup(uv_fs_t* req);
UV_EXTERN int uv_fs_close(uv_loop_t* loop,
uv_fs_t* req,
@@ -1516,6 +1537,8 @@ struct uv_loop_s {
UV_LOOP_PRIVATE_FIELDS
};
+UV_EXTERN void* uv_loop_get_data(const uv_loop_t*);
+UV_EXTERN void uv_loop_set_data(uv_loop_t*, void* data);
/* Don't export the private CPP symbols. */
#undef UV_HANDLE_TYPE_PRIVATE
diff --git a/deps/uv/libuv.nsi b/deps/uv/libuv.nsi
deleted file mode 100644
index 159756e196ce47..00000000000000
--- a/deps/uv/libuv.nsi
+++ /dev/null
@@ -1,86 +0,0 @@
-; NSIS installer script for libuv
-
-!include "MUI2.nsh"
-
-Name "libuv"
-OutFile "libuv-${ARCH}-${VERSION}.exe"
-
-!include "x64.nsh"
-# Default install location, for 32-bit files
-InstallDir "$PROGRAMFILES\libuv"
-
-# Override install and registry locations if this is a 64-bit install.
-function .onInit
- ${If} ${ARCH} == "x64"
- SetRegView 64
- StrCpy $INSTDIR "$PROGRAMFILES64\libuv"
- ${EndIf}
-functionEnd
-
-;--------------------------------
-; Installer pages
-!insertmacro MUI_PAGE_WELCOME
-!insertmacro MUI_PAGE_DIRECTORY
-!insertmacro MUI_PAGE_INSTFILES
-!insertmacro MUI_PAGE_FINISH
-
-
-;--------------------------------
-; Uninstaller pages
-!insertmacro MUI_UNPAGE_WELCOME
-!insertmacro MUI_UNPAGE_CONFIRM
-!insertmacro MUI_UNPAGE_INSTFILES
-!insertmacro MUI_UNPAGE_FINISH
-
-;--------------------------------
-; Languages
-!insertmacro MUI_LANGUAGE "English"
-
-;--------------------------------
-; Installer sections
-
-Section "Files" SecInstall
- SectionIn RO
- SetOutPath "$INSTDIR"
- File "Release\*.dll"
- File "Release\*.lib"
- File "LICENSE"
- File "README.md"
-
- SetOutPath "$INSTDIR\include"
- File "include\uv.h"
- File "include\uv-errno.h"
- File "include\uv-threadpool.h"
- File "include\uv-version.h"
- File "include\uv-win.h"
- File "include\tree.h"
-
- WriteUninstaller "$INSTDIR\Uninstall.exe"
- WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\libuv-${ARCH}-${VERSION}" "DisplayName" "libuv-${ARCH}-${VERSION}"
- WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\libuv-${ARCH}-${VERSION}" "UninstallString" "$\"$INSTDIR\Uninstall.exe$\""
- WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\libuv-${ARCH}-${VERSION}" "QuietUninstallString" "$\"$INSTDIR\Uninstall.exe$\" /S"
- WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\libuv-${ARCH}-${VERSION}" "HelpLink" "http://libuv.org/"
- WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\libuv-${ARCH}-${VERSION}" "URLInfoAbout" "http://libuv.org/"
- WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\libuv-${ARCH}-${VERSION}" "DisplayVersion" "${VERSION}"
- WriteRegDWORD HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\libuv-${ARCH}-${VERSION}" "NoModify" "1"
- WriteRegDWORD HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\libuv-${ARCH}-${VERSION}" "NoRepair" "1"
-SectionEnd
-
-Section "Uninstall"
- Delete "$INSTDIR\libuv.dll"
- Delete "$INSTDIR\libuv.lib"
- Delete "$INSTDIR\LICENSE"
- Delete "$INSTDIR\README.md"
-
- Delete "$INSTDIR\include\uv.h"
- Delete "$INSTDIR\include\uv-errno.h"
- Delete "$INSTDIR\include\uv-threadpool.h"
- Delete "$INSTDIR\include\uv-version.h"
- Delete "$INSTDIR\include\uv-win.h"
- Delete "$INSTDIR\include\tree.h"
-
- Delete "$INSTDIR\Uninstall.exe"
- RMDir "$INSTDIR"
- DeleteRegKey HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\libuv-${ARCH}-${VERSION}"
-SectionEnd
-
diff --git a/deps/uv/src/threadpool.c b/deps/uv/src/threadpool.c
index 108934112c582a..413d1c204c2660 100644
--- a/deps/uv/src/threadpool.c
+++ b/deps/uv/src/threadpool.c
@@ -38,7 +38,6 @@ static uv_thread_t* threads;
static uv_thread_t default_threads[4];
static QUEUE exit_message;
static QUEUE wq;
-static volatile int initialized;
static void uv__cancelled(struct uv__work* w) {
@@ -53,7 +52,8 @@ static void worker(void* arg) {
struct uv__work* w;
QUEUE* q;
- (void) arg;
+ uv_sem_post((uv_sem_t*) arg);
+ arg = NULL;
for (;;) {
uv_mutex_lock(&mutex);
@@ -105,7 +105,7 @@ static void post(QUEUE* q) {
UV_DESTRUCTOR(static void cleanup(void)) {
unsigned int i;
- if (initialized == 0)
+ if (nthreads == 0)
return;
post(&exit_message);
@@ -122,7 +122,6 @@ UV_DESTRUCTOR(static void cleanup(void)) {
threads = NULL;
nthreads = 0;
- initialized = 0;
}
#endif
@@ -130,6 +129,7 @@ UV_DESTRUCTOR(static void cleanup(void)) {
static void init_threads(void) {
unsigned int i;
const char* val;
+ uv_sem_t sem;
nthreads = ARRAY_SIZE(default_threads);
val = getenv("UV_THREADPOOL_SIZE");
@@ -157,11 +157,17 @@ static void init_threads(void) {
QUEUE_INIT(&wq);
+ if (uv_sem_init(&sem, 0))
+ abort();
+
for (i = 0; i < nthreads; i++)
- if (uv_thread_create(threads + i, worker, NULL))
+ if (uv_thread_create(threads + i, worker, &sem))
abort();
- initialized = 1;
+ for (i = 0; i < nthreads; i++)
+ uv_sem_wait(&sem);
+
+ uv_sem_destroy(&sem);
}
diff --git a/deps/uv/src/unix/aix.c b/deps/uv/src/unix/aix.c
index 06f19a4fc9fc54..fd413090feddf0 100644
--- a/deps/uv/src/unix/aix.c
+++ b/deps/uv/src/unix/aix.c
@@ -65,11 +65,18 @@
#define RDWR_BUF_SIZE 4096
#define EQ(a,b) (strcmp(a,b) == 0)
+static uv_mutex_t process_title_mutex;
+static uv_once_t process_title_mutex_once = UV_ONCE_INIT;
static void* args_mem = NULL;
static char** process_argv = NULL;
static int process_argc = 0;
static char* process_title_ptr = NULL;
+static void init_process_title_mutex_once(void) {
+ uv_mutex_init(&process_title_mutex);
+}
+
+
int uv__platform_loop_init(uv_loop_t* loop) {
loop->fs_fd = -1;
@@ -856,6 +863,9 @@ int uv_set_process_title(const char* title) {
if (new_title == NULL)
return -ENOMEM;
+ uv_once(&process_title_mutex_once, init_process_title_mutex_once);
+ uv_mutex_lock(&process_title_mutex);
+
/* If this is the first time this is set,
* don't free and set argv[1] to NULL.
*/
@@ -868,6 +878,8 @@ int uv_set_process_title(const char* title) {
if (process_argc > 1)
process_argv[1] = NULL;
+ uv_mutex_unlock(&process_title_mutex);
+
return 0;
}
@@ -880,8 +892,13 @@ int uv_get_process_title(char* buffer, size_t size) {
else if (size <= len)
return -ENOBUFS;
+ uv_once(&process_title_mutex_once, init_process_title_mutex_once);
+ uv_mutex_lock(&process_title_mutex);
+
memcpy(buffer, process_argv[0], len + 1);
+ uv_mutex_unlock(&process_title_mutex);
+
return 0;
}
diff --git a/deps/uv/src/unix/bsd-ifaddrs.c b/deps/uv/src/unix/bsd-ifaddrs.c
index 2593b9ff330e84..ea3166c5e977c7 100644
--- a/deps/uv/src/unix/bsd-ifaddrs.c
+++ b/deps/uv/src/unix/bsd-ifaddrs.c
@@ -36,6 +36,7 @@ static int uv__ifaddr_exclude(struct ifaddrs *ent, int exclude_type) {
return 1;
if (ent->ifa_addr == NULL)
return 1;
+#if !defined(__CYGWIN__) && !defined(__MSYS__)
/*
* If `exclude_type` is `UV__EXCLUDE_IFPHYS`, just see whether `sa_family`
* equals to `AF_LINK` or not. Otherwise, the result depends on the operation
@@ -43,6 +44,7 @@ static int uv__ifaddr_exclude(struct ifaddrs *ent, int exclude_type) {
*/
if (exclude_type == UV__EXCLUDE_IFPHYS)
return (ent->ifa_addr->sa_family != AF_LINK);
+#endif
#if defined(__APPLE__) || defined(__FreeBSD__) || defined(__DragonFly__)
/*
* On BSD getifaddrs returns information related to the raw underlying
diff --git a/deps/uv/src/unix/freebsd.c b/deps/uv/src/unix/freebsd.c
index dba94298d1c06d..f2b3f247a05e3a 100644
--- a/deps/uv/src/unix/freebsd.c
+++ b/deps/uv/src/unix/freebsd.c
@@ -47,9 +47,16 @@
# define CP_INTR 4
#endif
+static uv_mutex_t process_title_mutex;
+static uv_once_t process_title_mutex_once = UV_ONCE_INIT;
static char *process_title;
+static void init_process_title_mutex_once(void) {
+ uv_mutex_init(&process_title_mutex);
+}
+
+
int uv__platform_loop_init(uv_loop_t* loop) {
return uv__kqueue_init(loop);
}
@@ -163,8 +170,15 @@ int uv_set_process_title(const char* title) {
char* new_title;
new_title = uv__strdup(title);
- if (process_title == NULL)
+
+ uv_once(&process_title_mutex_once, init_process_title_mutex_once);
+ uv_mutex_lock(&process_title_mutex);
+
+ if (process_title == NULL) {
+ uv_mutex_unlock(&process_title_mutex);
return -ENOMEM;
+ }
+
uv__free(process_title);
process_title = new_title;
@@ -180,6 +194,8 @@ int uv_set_process_title(const char* title) {
process_title,
strlen(process_title) + 1);
+ uv_mutex_unlock(&process_title_mutex);
+
return 0;
}
@@ -190,17 +206,24 @@ int uv_get_process_title(char* buffer, size_t size) {
if (buffer == NULL || size == 0)
return -EINVAL;
+ uv_once(&process_title_mutex_once, init_process_title_mutex_once);
+ uv_mutex_lock(&process_title_mutex);
+
if (process_title) {
len = strlen(process_title) + 1;
- if (size < len)
+ if (size < len) {
+ uv_mutex_unlock(&process_title_mutex);
return -ENOBUFS;
+ }
memcpy(buffer, process_title, len);
} else {
len = 0;
}
+ uv_mutex_unlock(&process_title_mutex);
+
buffer[len] = '\0';
return 0;
@@ -253,6 +276,7 @@ int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
uv_cpu_info_t* cpu_info;
const char* maxcpus_key;
const char* cptimes_key;
+ const char* model_key;
char model[512];
long* cp_times;
int numcpus;
@@ -271,8 +295,20 @@ int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
cptimes_key = "kern.cp_times";
#endif
+#if defined(__arm__) || defined(__aarch64__)
+ /* The key hw.model and hw.clockrate are not available on FreeBSD ARM. */
+ model_key = "hw.machine";
+ cpuspeed = 0;
+#else
+ model_key = "hw.model";
+
+ size = sizeof(cpuspeed);
+ if (sysctlbyname("hw.clockrate", &cpuspeed, &size, NULL, 0))
+ return -errno;
+#endif
+
size = sizeof(model);
- if (sysctlbyname("hw.model", &model, &size, NULL, 0))
+ if (sysctlbyname(model_key, &model, &size, NULL, 0))
return -errno;
size = sizeof(numcpus);
@@ -285,12 +321,6 @@ int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
*count = numcpus;
- size = sizeof(cpuspeed);
- if (sysctlbyname("hw.clockrate", &cpuspeed, &size, NULL, 0)) {
- uv__free(*cpu_infos);
- return -errno;
- }
-
/* kern.cp_times on FreeBSD i386 gives an array up to maxcpus instead of
* ncpu.
*/
diff --git a/deps/uv/src/unix/netbsd.c b/deps/uv/src/unix/netbsd.c
index d9066349c1d623..742507233144a1 100644
--- a/deps/uv/src/unix/netbsd.c
+++ b/deps/uv/src/unix/netbsd.c
@@ -40,9 +40,16 @@
#include
#include
+static uv_mutex_t process_title_mutex;
+static uv_once_t process_title_mutex_once = UV_ONCE_INIT;
static char *process_title;
+static void init_process_title_mutex_once(void) {
+ uv_mutex_init(&process_title_mutex);
+}
+
+
int uv__platform_loop_init(uv_loop_t* loop) {
return uv__kqueue_init(loop);
}
@@ -137,12 +144,21 @@ int uv_set_process_title(const char* title) {
char* new_title;
new_title = uv__strdup(title);
- if (process_title == NULL)
+
+ uv_once(&process_title_mutex_once, init_process_title_mutex_once);
+ uv_mutex_lock(&process_title_mutex);
+
+ if (process_title == NULL) {
+ uv_mutex_unlock(&process_title_mutex);
return -ENOMEM;
+ }
+
uv__free(process_title);
process_title = new_title;
setproctitle("%s", title);
+ uv_mutex_unlock(&process_title_mutex);
+
return 0;
}
@@ -153,17 +169,24 @@ int uv_get_process_title(char* buffer, size_t size) {
if (buffer == NULL || size == 0)
return -EINVAL;
+ uv_once(&process_title_mutex_once, init_process_title_mutex_once);
+ uv_mutex_lock(&process_title_mutex);
+
if (process_title) {
len = strlen(process_title) + 1;
- if (size < len)
+ if (size < len) {
+ uv_mutex_unlock(&process_title_mutex);
return -ENOBUFS;
+ }
memcpy(buffer, process_title, len);
} else {
len = 0;
}
+ uv_mutex_unlock(&process_title_mutex);
+
buffer[len] = '\0';
return 0;
diff --git a/deps/uv/src/unix/openbsd.c b/deps/uv/src/unix/openbsd.c
index d1c90289e5691e..c0ffa564b4c2e9 100644
--- a/deps/uv/src/unix/openbsd.c
+++ b/deps/uv/src/unix/openbsd.c
@@ -36,9 +36,16 @@
#include
+static uv_mutex_t process_title_mutex;
+static uv_once_t process_title_mutex_once = UV_ONCE_INIT;
static char *process_title;
+static void init_process_title_mutex_once(void) {
+ uv_mutex_init(&process_title_mutex);
+}
+
+
int uv__platform_loop_init(uv_loop_t* loop) {
return uv__kqueue_init(loop);
}
@@ -149,11 +156,21 @@ int uv_set_process_title(const char* title) {
char* new_title;
new_title = uv__strdup(title);
- if (process_title == NULL)
+
+ uv_once(&process_title_mutex_once, init_process_title_mutex_once);
+ uv_mutex_lock(&process_title_mutex);
+
+ if (process_title == NULL) {
+ uv_mutex_unlock(&process_title_mutex);
return -ENOMEM;
+ }
+
uv__free(process_title);
process_title = new_title;
setproctitle("%s", title);
+
+ uv_mutex_unlock(&process_title_mutex);
+
return 0;
}
@@ -164,17 +181,24 @@ int uv_get_process_title(char* buffer, size_t size) {
if (buffer == NULL || size == 0)
return -EINVAL;
+ uv_once(&process_title_mutex_once, init_process_title_mutex_once);
+ uv_mutex_lock(&process_title_mutex);
+
if (process_title) {
len = strlen(process_title) + 1;
- if (size < len)
+ if (size < len) {
+ uv_mutex_unlock(&process_title_mutex);
return -ENOBUFS;
+ }
memcpy(buffer, process_title, len);
} else {
len = 0;
}
+ uv_mutex_unlock(&process_title_mutex);
+
buffer[len] = '\0';
return 0;
diff --git a/deps/uv/src/unix/os390-syscalls.c b/deps/uv/src/unix/os390-syscalls.c
index 5bc489387ef3c5..21558ea8689a00 100644
--- a/deps/uv/src/unix/os390-syscalls.c
+++ b/deps/uv/src/unix/os390-syscalls.c
@@ -25,6 +25,8 @@
#include
#include
#include
+#include <sys/msg.h>
+#include <sys/ipc.h>
#define CW_CONDVAR 32
@@ -103,10 +105,19 @@ static void maybe_resize(uv__os390_epoll* lst, unsigned int len) {
unsigned int newsize;
unsigned int i;
struct pollfd* newlst;
+ struct pollfd event;
if (len <= lst->size)
return;
+ if (lst->size == 0)
+ event.fd = -1;
+ else {
+ /* Extract the message queue at the end. */
+ event = lst->items[lst->size - 1];
+ lst->items[lst->size - 1].fd = -1;
+ }
+
newsize = next_power_of_two(len);
newlst = uv__realloc(lst->items, newsize * sizeof(lst->items[0]));
@@ -115,11 +126,40 @@ static void maybe_resize(uv__os390_epoll* lst, unsigned int len) {
for (i = lst->size; i < newsize; ++i)
newlst[i].fd = -1;
+ /* Restore the message queue at the end */
+ newlst[newsize - 1] = event;
+
lst->items = newlst;
lst->size = newsize;
}
+static void init_message_queue(uv__os390_epoll* lst) {
+ struct {
+ long int header;
+ char body;
+ } msg;
+
+ /* initialize message queue */
+ lst->msg_queue = msgget(IPC_PRIVATE, 0622 | IPC_CREAT);
+ if (lst->msg_queue == -1)
+ abort();
+
+ /*
+ On z/OS, the message queue will be affiliated with the process only
+ when a send is performed on it. Once this is done, the system
+ can be queried for all message queues belonging to our process id.
+ */
+ msg.header = 1;
+ if (msgsnd(lst->msg_queue, &msg, sizeof(msg.body), 0) != 0)
+ abort();
+
+ /* Clean up the dummy message sent above */
+ if (msgrcv(lst->msg_queue, &msg, sizeof(msg.body), 0, 0) != sizeof(msg.body))
+ abort();
+}
+
+
static void before_fork(void) {
uv_mutex_lock(&global_epoll_lock);
}
@@ -139,8 +179,13 @@ static void child_fork(void) {
/* reset epoll list */
while (!QUEUE_EMPTY(&global_epoll_queue)) {
+ uv__os390_epoll* lst;
q = QUEUE_HEAD(&global_epoll_queue);
QUEUE_REMOVE(q);
+ lst = QUEUE_DATA(q, uv__os390_epoll, member);
+ uv__free(lst->items);
+ lst->items = NULL;
+ lst->size = 0;
}
uv_mutex_unlock(&global_epoll_lock);
@@ -166,6 +211,10 @@ uv__os390_epoll* epoll_create1(int flags) {
/* initialize list */
lst->size = 0;
lst->items = NULL;
+ init_message_queue(lst);
+ maybe_resize(lst, 1);
+ lst->items[lst->size - 1].fd = lst->msg_queue;
+ lst->items[lst->size - 1].events = POLLIN;
uv_once(&once, epoll_init);
uv_mutex_lock(&global_epoll_lock);
QUEUE_INSERT_TAIL(&global_epoll_queue, &lst->member);
@@ -182,15 +231,20 @@ int epoll_ctl(uv__os390_epoll* lst,
struct epoll_event *event) {
uv_mutex_lock(&global_epoll_lock);
- if(op == EPOLL_CTL_DEL) {
+ if (op == EPOLL_CTL_DEL) {
if (fd >= lst->size || lst->items[fd].fd == -1) {
uv_mutex_unlock(&global_epoll_lock);
errno = ENOENT;
return -1;
}
lst->items[fd].fd = -1;
- } else if(op == EPOLL_CTL_ADD) {
- maybe_resize(lst, fd + 1);
+ } else if (op == EPOLL_CTL_ADD) {
+
+ /* Resizing to 'fd + 1' would expand the list to contain at least
+ * 'fd'. But we need to guarantee that the last index on the list
+ * is reserved for the message queue. So specify 'fd + 2' instead.
+ */
+ maybe_resize(lst, fd + 2);
if (lst->items[fd].fd != -1) {
uv_mutex_unlock(&global_epoll_lock);
errno = EEXIST;
@@ -198,7 +252,7 @@ int epoll_ctl(uv__os390_epoll* lst,
}
lst->items[fd].fd = fd;
lst->items[fd].events = event->events;
- } else if(op == EPOLL_CTL_MOD) {
+ } else if (op == EPOLL_CTL_MOD) {
if (fd >= lst->size || lst->items[fd].fd == -1) {
uv_mutex_unlock(&global_epoll_lock);
errno = ENOENT;
@@ -215,17 +269,19 @@ int epoll_ctl(uv__os390_epoll* lst,
int epoll_wait(uv__os390_epoll* lst, struct epoll_event* events,
int maxevents, int timeout) {
- size_t size;
+ nmsgsfds_t size;
struct pollfd* pfds;
int pollret;
int reventcount;
- size = lst->size;
+ size = _SET_FDS_MSGS(size, 1, lst->size - 1);
pfds = lst->items;
pollret = poll(pfds, size, timeout);
if (pollret <= 0)
return pollret;
+ pollret = _NFDS(pollret) + _NMSGS(pollret);
+
reventcount = 0;
for (int i = 0;
i < lst->size && i < maxevents && reventcount < pollret; ++i) {
@@ -261,9 +317,14 @@ int epoll_file_close(int fd) {
}
void epoll_queue_close(uv__os390_epoll* lst) {
+ /* Remove epoll instance from global queue */
uv_mutex_lock(&global_epoll_lock);
QUEUE_REMOVE(&lst->member);
uv_mutex_unlock(&global_epoll_lock);
+
+ /* Free resources */
+ msgctl(lst->msg_queue, IPC_RMID, NULL);
+ lst->msg_queue = -1;
uv__free(lst->items);
lst->items = NULL;
}
diff --git a/deps/uv/src/unix/os390-syscalls.h b/deps/uv/src/unix/os390-syscalls.h
index 5ce6a681bf1cb3..6e34a88cb95d1b 100644
--- a/deps/uv/src/unix/os390-syscalls.h
+++ b/deps/uv/src/unix/os390-syscalls.h
@@ -50,6 +50,7 @@ typedef struct {
QUEUE member;
struct pollfd* items;
unsigned long size;
+ int msg_queue;
} uv__os390_epoll;
/* epoll api */
diff --git a/deps/uv/src/unix/os390.c b/deps/uv/src/unix/os390.c
index 127656db8789e6..081438e8e73d3c 100644
--- a/deps/uv/src/unix/os390.c
+++ b/deps/uv/src/unix/os390.c
@@ -26,6 +26,8 @@
#include
#include
#include
+#include <sys/msg.h>
+#include <sys/ipc.h>
#if defined(__clang__)
#include "csrsic.h"
#else
@@ -684,11 +686,124 @@ int uv__io_check_fd(uv_loop_t* loop, int fd) {
return 0;
}
+
+void uv__fs_event_close(uv_fs_event_t* handle) {
+ uv_fs_event_stop(handle);
+}
+
+
+int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) {
+ uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_EVENT);
+ return 0;
+}
+
+
+int uv_fs_event_start(uv_fs_event_t* handle, uv_fs_event_cb cb,
+ const char* filename, unsigned int flags) {
+ uv__os390_epoll* ep;
+ _RFIS reg_struct;
+ char* path;
+ int rc;
+
+ if (uv__is_active(handle))
+ return -EINVAL;
+
+ ep = handle->loop->ep;
+ assert(ep->msg_queue != -1);
+
+ reg_struct.__rfis_cmd = _RFIS_REG;
+ reg_struct.__rfis_qid = ep->msg_queue;
+ reg_struct.__rfis_type = 1;
+ memcpy(reg_struct.__rfis_utok, &handle, sizeof(handle));
+
+ path = uv__strdup(filename);
+ if (path == NULL)
+ return -ENOMEM;
+
+  rc = __w_pioctl(path, _IOCC_REGFILEINT, sizeof(reg_struct), &reg_struct);
+ if (rc != 0)
+ return -errno;
+
+ uv__handle_start(handle);
+ handle->path = path;
+ handle->cb = cb;
+ memcpy(handle->rfis_rftok, reg_struct.__rfis_rftok,
+ sizeof(handle->rfis_rftok));
+
+ return 0;
+}
+
+
+int uv_fs_event_stop(uv_fs_event_t* handle) {
+ uv__os390_epoll* ep;
+ _RFIS reg_struct;
+ int rc;
+
+ if (!uv__is_active(handle))
+ return 0;
+
+ ep = handle->loop->ep;
+ assert(ep->msg_queue != -1);
+
+ reg_struct.__rfis_cmd = _RFIS_UNREG;
+ reg_struct.__rfis_qid = ep->msg_queue;
+ reg_struct.__rfis_type = 1;
+ memcpy(reg_struct.__rfis_rftok, handle->rfis_rftok,
+ sizeof(handle->rfis_rftok));
+
+ /*
+ * This call will take "/" as the path argument in case we
+ * don't care to supply the correct path. The system will simply
+ * ignore it.
+ */
+  rc = __w_pioctl("/", _IOCC_REGFILEINT, sizeof(reg_struct), &reg_struct);
+ if (rc != 0 && errno != EALREADY && errno != ENOENT)
+ abort();
+
+ uv__handle_stop(handle);
+
+ return 0;
+}
+
+
+static int os390_message_queue_handler(uv__os390_epoll* ep) {
+ uv_fs_event_t* handle;
+ int msglen;
+ int events;
+ _RFIM msg;
+
+ if (ep->msg_queue == -1)
+ return 0;
+
+ msglen = msgrcv(ep->msg_queue, &msg, sizeof(msg), 0, IPC_NOWAIT);
+
+ if (msglen == -1 && errno == ENOMSG)
+ return 0;
+
+ if (msglen == -1)
+ abort();
+
+ events = 0;
+ if (msg.__rfim_event == _RFIM_ATTR || msg.__rfim_event == _RFIM_WRITE)
+ events = UV_CHANGE;
+ else if (msg.__rfim_event == _RFIM_RENAME)
+ events = UV_RENAME;
+ else
+ /* Some event that we are not interested in. */
+ return 0;
+
+ handle = *(uv_fs_event_t**)(msg.__rfim_utok);
+ handle->cb(handle, uv__basename_r(handle->path), events, 0);
+ return 1;
+}
+
+
void uv__io_poll(uv_loop_t* loop, int timeout) {
static const int max_safe_timeout = 1789569;
struct epoll_event events[1024];
struct epoll_event* pe;
struct epoll_event e;
+ uv__os390_epoll* ep;
int real_timeout;
QUEUE* q;
uv__io_t* w;
@@ -802,6 +917,12 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
if (fd == -1)
continue;
+ ep = loop->ep;
+ if (fd == ep->msg_queue) {
+ os390_message_queue_handler(ep);
+ continue;
+ }
+
assert(fd >= 0);
assert((unsigned) fd < loop->nwatchers);
@@ -866,7 +987,12 @@ void uv__set_process_title(const char* title) {
}
int uv__io_fork(uv_loop_t* loop) {
- uv__platform_loop_delete(loop);
+ /*
+ Nullify the msg queue but don't close it because
+ it is still being used by the parent.
+ */
+ loop->ep = NULL;
+ uv__platform_loop_delete(loop);
return uv__platform_loop_init(loop);
}
diff --git a/deps/uv/src/unix/proctitle.c b/deps/uv/src/unix/proctitle.c
index 2ed0b21c6625bd..1b3a798820e282 100644
--- a/deps/uv/src/unix/proctitle.c
+++ b/deps/uv/src/unix/proctitle.c
@@ -26,6 +26,8 @@
extern void uv__set_process_title(const char* title);
+static uv_mutex_t process_title_mutex;
+static uv_once_t process_title_mutex_once = UV_ONCE_INIT;
static void* args_mem;
static struct {
@@ -34,6 +36,11 @@ static struct {
} process_title;
+static void init_process_title_mutex_once(void) {
+ uv_mutex_init(&process_title_mutex);
+}
+
+
char** uv_setup_args(int argc, char** argv) {
char** new_argv;
size_t size;
@@ -81,12 +88,16 @@ char** uv_setup_args(int argc, char** argv) {
int uv_set_process_title(const char* title) {
- if (process_title.len == 0)
- return 0;
+ uv_once(&process_title_mutex_once, init_process_title_mutex_once);
+ uv_mutex_lock(&process_title_mutex);
+
+ if (process_title.len != 0) {
+ /* No need to terminate, byte after is always '\0'. */
+ strncpy(process_title.str, title, process_title.len);
+ uv__set_process_title(title);
+ }
- /* No need to terminate, byte after is always '\0'. */
- strncpy(process_title.str, title, process_title.len);
- uv__set_process_title(title);
+ uv_mutex_unlock(&process_title_mutex);
return 0;
}
@@ -95,14 +106,22 @@ int uv_set_process_title(const char* title) {
int uv_get_process_title(char* buffer, size_t size) {
if (buffer == NULL || size == 0)
return -EINVAL;
- else if (size <= process_title.len)
+
+ uv_once(&process_title_mutex_once, init_process_title_mutex_once);
+ uv_mutex_lock(&process_title_mutex);
+
+ if (size <= process_title.len) {
+ uv_mutex_unlock(&process_title_mutex);
return -ENOBUFS;
+ }
if (process_title.len != 0)
memcpy(buffer, process_title.str, process_title.len + 1);
buffer[process_title.len] = '\0';
+ uv_mutex_unlock(&process_title_mutex);
+
return 0;
}
diff --git a/deps/uv/src/unix/signal.c b/deps/uv/src/unix/signal.c
index cb09ead50a4c45..3759778011223f 100644
--- a/deps/uv/src/unix/signal.c
+++ b/deps/uv/src/unix/signal.c
@@ -28,6 +28,9 @@
#include
#include
+#ifndef SA_RESTART
+# define SA_RESTART 0
+#endif
typedef struct {
uv_signal_t* handle;
@@ -216,7 +219,9 @@ static int uv__signal_register_handler(int signum, int oneshot) {
if (sigfillset(&sa.sa_mask))
abort();
sa.sa_handler = uv__signal_handler;
- sa.sa_flags = oneshot ? SA_RESETHAND : 0;
+ sa.sa_flags = SA_RESTART;
+ if (oneshot)
+ sa.sa_flags |= SA_RESETHAND;
/* XXX save old action so we can restore it later on? */
if (sigaction(signum, &sa, NULL))
diff --git a/deps/uv/src/uv-data-getter-setters.c b/deps/uv/src/uv-data-getter-setters.c
new file mode 100644
index 00000000000000..533e4a2fe12bb3
--- /dev/null
+++ b/deps/uv/src/uv-data-getter-setters.c
@@ -0,0 +1,96 @@
+#include "uv.h"
+
+const char* uv_handle_type_name(uv_handle_type type) {
+ switch (type) {
+#define XX(uc,lc) case UV_##uc: return #lc;
+ UV_HANDLE_TYPE_MAP(XX)
+#undef XX
+ case UV_FILE: return "file";
+ case UV_HANDLE_TYPE_MAX:
+ case UV_UNKNOWN_HANDLE: return NULL;
+ }
+ return NULL;
+}
+
+uv_handle_type uv_handle_get_type(const uv_handle_t* handle) {
+ return handle->type;
+}
+
+void* uv_handle_get_data(const uv_handle_t* handle) {
+ return handle->data;
+}
+
+uv_loop_t* uv_handle_get_loop(const uv_handle_t* handle) {
+ return handle->loop;
+}
+
+void uv_handle_set_data(uv_handle_t* handle, void* data) {
+ handle->data = data;
+}
+
+const char* uv_req_type_name(uv_req_type type) {
+ switch (type) {
+#define XX(uc,lc) case UV_##uc: return #lc;
+ UV_REQ_TYPE_MAP(XX)
+#undef XX
+ case UV_REQ_TYPE_MAX:
+ case UV_UNKNOWN_REQ: return NULL;
+ }
+ return NULL;
+}
+
+uv_req_type uv_req_get_type(const uv_req_t* req) {
+ return req->type;
+}
+
+void* uv_req_get_data(const uv_req_t* req) {
+ return req->data;
+}
+
+void uv_req_set_data(uv_req_t* req, void* data) {
+ req->data = data;
+}
+
+size_t uv_stream_get_write_queue_size(const uv_stream_t* stream) {
+ return stream->write_queue_size;
+}
+
+size_t uv_udp_get_send_queue_size(const uv_udp_t* handle) {
+ return handle->send_queue_size;
+}
+
+size_t uv_udp_get_send_queue_count(const uv_udp_t* handle) {
+ return handle->send_queue_count;
+}
+
+uv_pid_t uv_process_get_pid(const uv_process_t* proc) {
+ return proc->pid;
+}
+
+uv_fs_type uv_fs_get_type(const uv_fs_t* req) {
+ return req->fs_type;
+}
+
+ssize_t uv_fs_get_result(const uv_fs_t* req) {
+ return req->result;
+}
+
+void* uv_fs_get_ptr(const uv_fs_t* req) {
+ return req->ptr;
+}
+
+const char* uv_fs_get_path(const uv_fs_t* req) {
+ return req->path;
+}
+
+uv_stat_t* uv_fs_get_statbuf(uv_fs_t* req) {
+ return &req->statbuf;
+}
+
+void* uv_loop_get_data(const uv_loop_t* loop) {
+ return loop->data;
+}
+
+void uv_loop_set_data(uv_loop_t* loop, void* data) {
+ loop->data = data;
+}
diff --git a/deps/uv/src/win/fs.c b/deps/uv/src/win/fs.c
index 11c7c13edd04d6..097b00e08d50d5 100644
--- a/deps/uv/src/win/fs.c
+++ b/deps/uv/src/win/fs.c
@@ -1785,7 +1785,7 @@ static void fs__symlink(uv_fs_t* req) {
}
if (req->fs.info.file_flags & UV_FS_SYMLINK_DIR)
- flags = SYMBOLIC_LINK_FLAG_DIRECTORY;
+ flags = SYMBOLIC_LINK_FLAG_DIRECTORY | uv__file_symlink_usermode_flag;
else
flags = uv__file_symlink_usermode_flag;
diff --git a/deps/uv/src/win/process.c b/deps/uv/src/win/process.c
index 764250e138c48d..cc06d9e22abcb6 100644
--- a/deps/uv/src/win/process.c
+++ b/deps/uv/src/win/process.c
@@ -1173,6 +1173,10 @@ int uv_spawn(uv_loop_t* loop,
static int uv__kill(HANDLE process_handle, int signum) {
+ if (signum < 0 || signum >= NSIG) {
+ return UV_EINVAL;
+ }
+
switch (signum) {
case SIGTERM:
case SIGKILL:
@@ -1237,8 +1241,15 @@ int uv_process_kill(uv_process_t* process, int signum) {
int uv_kill(int pid, int signum) {
int err;
- HANDLE process_handle = OpenProcess(PROCESS_TERMINATE |
- PROCESS_QUERY_INFORMATION, FALSE, pid);
+ HANDLE process_handle;
+
+ if (pid == 0) {
+ process_handle = GetCurrentProcess();
+ } else {
+ process_handle = OpenProcess(PROCESS_TERMINATE | PROCESS_QUERY_INFORMATION,
+ FALSE,
+ pid);
+ }
if (process_handle == NULL) {
err = GetLastError();
diff --git a/deps/uv/src/win/tcp.c b/deps/uv/src/win/tcp.c
index e63a63e7712af1..fd6efbaf891d64 100644
--- a/deps/uv/src/win/tcp.c
+++ b/deps/uv/src/win/tcp.c
@@ -747,10 +747,15 @@ static int uv_tcp_try_connect(uv_connect_t* req,
uv_connect_cb cb) {
uv_loop_t* loop = handle->loop;
const struct sockaddr* bind_addr;
+ struct sockaddr_storage converted;
BOOL success;
DWORD bytes;
int err;
+ err = uv__convert_to_localhost_if_unspecified(addr, &converted);
+ if (err)
+ return err;
+
if (handle->delayed_error) {
return handle->delayed_error;
}
@@ -782,12 +787,12 @@ static int uv_tcp_try_connect(uv_connect_t* req,
memset(&req->u.io.overlapped, 0, sizeof(req->u.io.overlapped));
success = handle->tcp.conn.func_connectex(handle->socket,
- addr,
- addrlen,
- NULL,
- 0,
- &bytes,
- &req->u.io.overlapped);
+ (const struct sockaddr*) &converted,
+ addrlen,
+ NULL,
+ 0,
+ &bytes,
+ &req->u.io.overlapped);
if (UV_SUCCEEDED_WITHOUT_IOCP(success)) {
/* Process the req without IOCP. */
diff --git a/deps/uv/src/win/udp.c b/deps/uv/src/win/udp.c
index 21348f3796926a..cd1d0e07b23cb9 100644
--- a/deps/uv/src/win/udp.c
+++ b/deps/uv/src/win/udp.c
@@ -923,10 +923,15 @@ int uv__udp_try_send(uv_udp_t* handle,
unsigned int addrlen) {
DWORD bytes;
const struct sockaddr* bind_addr;
+ struct sockaddr_storage converted;
int err;
assert(nbufs > 0);
+ err = uv__convert_to_localhost_if_unspecified(addr, &converted);
+ if (err)
+ return err;
+
/* Already sending a message.*/
if (handle->send_queue_count != 0)
return UV_EAGAIN;
@@ -948,7 +953,7 @@ int uv__udp_try_send(uv_udp_t* handle,
nbufs,
&bytes,
0,
- addr,
+ (const struct sockaddr*) &converted,
addrlen,
NULL,
NULL);
diff --git a/deps/uv/src/win/winsock.c b/deps/uv/src/win/winsock.c
index e86d76b131caa4..7cfa90f8af5127 100644
--- a/deps/uv/src/win/winsock.c
+++ b/deps/uv/src/win/winsock.c
@@ -559,3 +559,31 @@ int WSAAPI uv_msafd_poll(SOCKET socket, AFD_POLL_INFO* info_in,
return SOCKET_ERROR;
}
}
+
+int uv__convert_to_localhost_if_unspecified(const struct sockaddr* addr,
+ struct sockaddr_storage* storage) {
+ struct sockaddr_in* dest4;
+ struct sockaddr_in6* dest6;
+
+ if (addr == NULL)
+ return UV_EINVAL;
+
+ switch (addr->sa_family) {
+ case AF_INET:
+ dest4 = (struct sockaddr_in*) storage;
+ memcpy(dest4, addr, sizeof(*dest4));
+ if (dest4->sin_addr.s_addr == 0)
+ dest4->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
+ return 0;
+ case AF_INET6:
+ dest6 = (struct sockaddr_in6*) storage;
+ memcpy(dest6, addr, sizeof(*dest6));
+ if (memcmp(&dest6->sin6_addr,
+ &uv_addr_ip6_any_.sin6_addr,
+ sizeof(uv_addr_ip6_any_.sin6_addr)) == 0)
+ dest6->sin6_addr = (struct in6_addr) IN6ADDR_LOOPBACK_INIT;
+ return 0;
+ default:
+ return UV_EINVAL;
+ }
+}
diff --git a/deps/uv/src/win/winsock.h b/deps/uv/src/win/winsock.h
index 7c007ab4934608..7ecb755bfb061b 100644
--- a/deps/uv/src/win/winsock.h
+++ b/deps/uv/src/win/winsock.h
@@ -187,4 +187,7 @@ typedef struct _IP_ADAPTER_UNICAST_ADDRESS_LH {
#endif
+int uv__convert_to_localhost_if_unspecified(const struct sockaddr* addr,
+ struct sockaddr_storage* storage);
+
#endif /* UV_WIN_WINSOCK_H_ */
diff --git a/deps/uv/test/task.h b/deps/uv/test/task.h
index 67eb9804926824..af99d92fb45414 100644
--- a/deps/uv/test/task.h
+++ b/deps/uv/test/task.h
@@ -209,7 +209,7 @@ UNUSED static int can_ipv6(void) {
return supported;
}
-#if defined(__MVS__) || defined(__CYGWIN__) || defined(__MSYS__)
+#if defined(__CYGWIN__) || defined(__MSYS__)
# define NO_FS_EVENTS "Filesystem watching not supported on this platform."
#endif
diff --git a/deps/uv/test/test-connect-unspecified.c b/deps/uv/test/test-connect-unspecified.c
new file mode 100644
index 00000000000000..04e1c8a5f7c682
--- /dev/null
+++ b/deps/uv/test/test-connect-unspecified.c
@@ -0,0 +1,61 @@
+/* Copyright libuv project contributors. All rights reserved.
+*
+* Permission is hereby granted, free of charge, to any person obtaining a copy
+* of this software and associated documentation files (the "Software"), to
+* deal in the Software without restriction, including without limitation the
+* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+* sell copies of the Software, and to permit persons to whom the Software is
+* furnished to do so, subject to the following conditions:
+*
+* The above copyright notice and this permission notice shall be included in
+* all copies or substantial portions of the Software.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+* IN THE SOFTWARE.
+*/
+
+#include "uv.h"
+#include "task.h"
+
+static void connect_4(uv_connect_t* req, int status) {
+ ASSERT(status != UV_EADDRNOTAVAIL);
+}
+
+static void connect_6(uv_connect_t* req, int status) {
+ ASSERT(status != UV_EADDRNOTAVAIL);
+}
+
+TEST_IMPL(connect_unspecified) {
+ uv_loop_t* loop;
+ uv_tcp_t socket4;
+ struct sockaddr_in addr4;
+ uv_connect_t connect4;
+ uv_tcp_t socket6;
+ struct sockaddr_in6 addr6;
+ uv_connect_t connect6;
+
+ loop = uv_default_loop();
+
+ ASSERT(uv_tcp_init(loop, &socket4) == 0);
+ ASSERT(uv_ip4_addr("0.0.0.0", TEST_PORT, &addr4) == 0);
+ ASSERT(uv_tcp_connect(&connect4,
+ &socket4,
+ (const struct sockaddr*) &addr4,
+ connect_4) == 0);
+
+ ASSERT(uv_tcp_init(loop, &socket6) == 0);
+ ASSERT(uv_ip6_addr("::", TEST_PORT, &addr6) == 0);
+ ASSERT(uv_tcp_connect(&connect6,
+ &socket6,
+ (const struct sockaddr*) &addr6,
+ connect_6) == 0);
+
+ ASSERT(uv_run(loop, UV_RUN_DEFAULT) == 0);
+
+ return 0;
+}
diff --git a/deps/uv/test/test-fork.c b/deps/uv/test/test-fork.c
index ba85b531064ae5..924c65b2141134 100644
--- a/deps/uv/test/test-fork.c
+++ b/deps/uv/test/test-fork.c
@@ -533,10 +533,12 @@ TEST_IMPL(fork_fs_events_file_parent_child) {
#if defined(NO_FS_EVENTS)
RETURN_SKIP(NO_FS_EVENTS);
#endif
-#if defined(__sun) || defined(_AIX)
+#if defined(__sun) || defined(_AIX) || defined(__MVS__)
/* It's not possible to implement this without additional
* bookkeeping on SunOS. For AIX it is possible, but has to be
* written. See https://github.com/libuv/libuv/pull/846#issuecomment-287170420
+ * TODO: On z/OS, we need to open another message queue and subscribe to the
+ * same events as the parent.
*/
return 0;
#else
diff --git a/deps/uv/test/test-fs-event.c b/deps/uv/test/test-fs-event.c
index fba6b5440b0fc3..dc47b3a62d2399 100644
--- a/deps/uv/test/test-fs-event.c
+++ b/deps/uv/test/test-fs-event.c
@@ -396,6 +396,8 @@ static void timer_cb_watch_twice(uv_timer_t* handle) {
TEST_IMPL(fs_event_watch_dir) {
#if defined(NO_FS_EVENTS)
RETURN_SKIP(NO_FS_EVENTS);
+#elif defined(__MVS__)
+ RETURN_SKIP("Directory watching not supported on this platform.");
#endif
uv_loop_t* loop = uv_default_loop();
@@ -820,6 +822,8 @@ static void fs_event_cb_close(uv_fs_event_t* handle, const char* filename,
TEST_IMPL(fs_event_close_in_callback) {
#if defined(NO_FS_EVENTS)
RETURN_SKIP(NO_FS_EVENTS);
+#elif defined(__MVS__)
+ RETURN_SKIP("Directory watching not supported on this platform.");
#endif
uv_loop_t* loop;
int r;
diff --git a/deps/uv/test/test-fs.c b/deps/uv/test/test-fs.c
index cae02dd1fddec8..7c481f0711978f 100644
--- a/deps/uv/test/test-fs.c
+++ b/deps/uv/test/test-fs.c
@@ -1861,7 +1861,7 @@ TEST_IMPL(fs_symlink) {
}
-TEST_IMPL(fs_symlink_dir) {
+int test_symlink_dir_impl(int type) {
uv_fs_t req;
int r;
char* test_dir;
@@ -1895,8 +1895,12 @@ TEST_IMPL(fs_symlink_dir) {
test_dir = "test_dir";
#endif
- r = uv_fs_symlink(NULL, &req, test_dir, "test_dir_symlink",
- UV_FS_SYMLINK_JUNCTION, NULL);
+ r = uv_fs_symlink(NULL, &req, test_dir, "test_dir_symlink", type, NULL);
+ if (type == UV_FS_SYMLINK_DIR && (r == UV_ENOTSUP || r == UV_EPERM)) {
+ uv_fs_req_cleanup(&req);
+ RETURN_SKIP("this version of Windows doesn't support unprivileged "
+ "creation of directory symlinks");
+ }
fprintf(stderr, "r == %i\n", r);
ASSERT(r == 0);
ASSERT(req.result == 0);
@@ -2005,6 +2009,13 @@ TEST_IMPL(fs_symlink_dir) {
return 0;
}
+TEST_IMPL(fs_symlink_dir) {
+ return test_symlink_dir_impl(UV_FS_SYMLINK_DIR);
+}
+
+TEST_IMPL(fs_symlink_junction) {
+ return test_symlink_dir_impl(UV_FS_SYMLINK_JUNCTION);
+}
#ifdef _WIN32
TEST_IMPL(fs_non_symlink_reparse_point) {
diff --git a/deps/uv/test/test-getters-setters.c b/deps/uv/test/test-getters-setters.c
new file mode 100644
index 00000000000000..60a1b9264da179
--- /dev/null
+++ b/deps/uv/test/test-getters-setters.c
@@ -0,0 +1,88 @@
+#include "uv.h"
+#include "task.h"
+#include <string.h>
+#include <stdlib.h>
+
+int cookie1;
+int cookie2;
+int cookie3;
+
+
+TEST_IMPL(handle_type_name) {
+ ASSERT(strcmp(uv_handle_type_name(UV_NAMED_PIPE), "pipe") == 0);
+ ASSERT(strcmp(uv_handle_type_name(UV_UDP), "udp") == 0);
+ ASSERT(strcmp(uv_handle_type_name(UV_FILE), "file") == 0);
+ ASSERT(uv_handle_type_name(UV_HANDLE_TYPE_MAX) == NULL);
+ ASSERT(uv_handle_type_name(UV_HANDLE_TYPE_MAX + 1) == NULL);
+ ASSERT(uv_handle_type_name(UV_UNKNOWN_HANDLE) == NULL);
+ return 0;
+}
+
+
+TEST_IMPL(req_type_name) {
+ ASSERT(strcmp(uv_req_type_name(UV_REQ), "req") == 0);
+ ASSERT(strcmp(uv_req_type_name(UV_UDP_SEND), "udp_send") == 0);
+ ASSERT(strcmp(uv_req_type_name(UV_WORK), "work") == 0);
+ ASSERT(uv_req_type_name(UV_REQ_TYPE_MAX) == NULL);
+ ASSERT(uv_req_type_name(UV_REQ_TYPE_MAX + 1) == NULL);
+ ASSERT(uv_req_type_name(UV_UNKNOWN_REQ) == NULL);
+ return 0;
+}
+
+
+TEST_IMPL(getters_setters) {
+ uv_loop_t* loop;
+ uv_pipe_t* pipe;
+ uv_fs_t* fs;
+ int r;
+
+ loop = malloc(uv_loop_size());
+ ASSERT(loop != NULL);
+ r = uv_loop_init(loop);
+ ASSERT(r == 0);
+
+ uv_loop_set_data(loop, &cookie1);
+ ASSERT(loop->data == &cookie1);
+ ASSERT(uv_loop_get_data(loop) == &cookie1);
+
+ pipe = malloc(uv_handle_size(UV_NAMED_PIPE));
+ r = uv_pipe_init(loop, pipe, 0);
+ ASSERT(uv_handle_get_type((uv_handle_t*)pipe) == UV_NAMED_PIPE);
+
+ ASSERT(uv_handle_get_loop((uv_handle_t*)pipe) == loop);
+ pipe->data = &cookie2;
+ ASSERT(uv_handle_get_data((uv_handle_t*)pipe) == &cookie2);
+ uv_handle_set_data((uv_handle_t*)pipe, &cookie1);
+ ASSERT(uv_handle_get_data((uv_handle_t*)pipe) == &cookie1);
+ ASSERT(pipe->data == &cookie1);
+
+ ASSERT(uv_stream_get_write_queue_size((uv_stream_t*)pipe) == 0);
+ pipe->write_queue_size++;
+ ASSERT(uv_stream_get_write_queue_size((uv_stream_t*)pipe) == 1);
+ pipe->write_queue_size--;
+ uv_close((uv_handle_t*)pipe, NULL);
+
+ r = uv_run(loop, UV_RUN_DEFAULT);
+ ASSERT(r == 0);
+
+ fs = malloc(uv_req_size(UV_FS));
+ uv_fs_stat(loop, fs, ".", NULL);
+
+ r = uv_run(loop, UV_RUN_DEFAULT);
+ ASSERT(r == 0);
+
+ ASSERT(uv_fs_get_type(fs) == UV_FS_STAT);
+ ASSERT(uv_fs_get_result(fs) == 0);
+ ASSERT(uv_fs_get_ptr(fs) == uv_fs_get_statbuf(fs));
+ ASSERT(uv_fs_get_statbuf(fs)->st_mode & S_IFDIR);
+ ASSERT(strcmp(uv_fs_get_path(fs), ".") == 0);
+ uv_fs_req_cleanup(fs);
+
+ r = uv_loop_close(loop);
+ ASSERT(r == 0);
+
+ free(pipe);
+ free(fs);
+ free(loop);
+ return 0;
+}
diff --git a/deps/uv/test/test-list.h b/deps/uv/test/test-list.h
index 2adbe6a017cfc3..5a50ec6713f03f 100644
--- a/deps/uv/test/test-list.h
+++ b/deps/uv/test/test-list.h
@@ -28,6 +28,7 @@ TEST_DECLARE (run_once)
TEST_DECLARE (run_nowait)
TEST_DECLARE (loop_alive)
TEST_DECLARE (loop_close)
+TEST_DECLARE (loop_instant_close)
TEST_DECLARE (loop_stop)
TEST_DECLARE (loop_update_time)
TEST_DECLARE (loop_backend_timeout)
@@ -54,6 +55,7 @@ TEST_DECLARE (tty_file)
TEST_DECLARE (tty_pty)
TEST_DECLARE (stdio_over_pipes)
TEST_DECLARE (ip6_pton)
+TEST_DECLARE (connect_unspecified)
TEST_DECLARE (ipc_listen_before_write)
TEST_DECLARE (ipc_listen_after_write)
#ifndef _WIN32
@@ -214,6 +216,7 @@ TEST_DECLARE (async_null_cb)
TEST_DECLARE (eintr_handling)
TEST_DECLARE (get_currentexe)
TEST_DECLARE (process_title)
+TEST_DECLARE (process_title_threadsafe)
TEST_DECLARE (cwd_and_chdir)
TEST_DECLARE (get_memory)
TEST_DECLARE (get_passwd)
@@ -264,6 +267,7 @@ TEST_DECLARE (spawn_tcp_server)
TEST_DECLARE (fs_poll)
TEST_DECLARE (fs_poll_getpath)
TEST_DECLARE (kill)
+TEST_DECLARE (kill_invalid_signum)
TEST_DECLARE (fs_file_noent)
TEST_DECLARE (fs_file_nametoolong)
TEST_DECLARE (fs_file_loop)
@@ -285,6 +289,7 @@ TEST_DECLARE (fs_realpath)
TEST_DECLARE (fs_symlink)
TEST_DECLARE (fs_symlink_dir)
#ifdef _WIN32
+TEST_DECLARE (fs_symlink_junction)
TEST_DECLARE (fs_non_symlink_reparse_point)
#endif
TEST_DECLARE (fs_utime)
@@ -397,6 +402,10 @@ HELPER_DECLARE (pipe_echo_server)
TEST_DECLARE (queue_foreach_delete)
+TEST_DECLARE (handle_type_name)
+TEST_DECLARE (req_type_name)
+TEST_DECLARE (getters_setters)
+
#ifndef _WIN32
TEST_DECLARE (fork_timer)
TEST_DECLARE (fork_socketpair)
@@ -422,6 +431,7 @@ TASK_LIST_START
TEST_ENTRY (run_nowait)
TEST_ENTRY (loop_alive)
TEST_ENTRY (loop_close)
+ TEST_ENTRY (loop_instant_close)
TEST_ENTRY (loop_stop)
TEST_ENTRY (loop_update_time)
TEST_ENTRY (loop_backend_timeout)
@@ -459,6 +469,7 @@ TASK_LIST_START
TEST_ENTRY (tty_pty)
TEST_ENTRY (stdio_over_pipes)
TEST_ENTRY (ip6_pton)
+ TEST_ENTRY (connect_unspecified)
TEST_ENTRY (ipc_listen_before_write)
TEST_ENTRY (ipc_listen_after_write)
#ifndef _WIN32
@@ -668,6 +679,7 @@ TASK_LIST_START
TEST_ENTRY (get_currentexe)
TEST_ENTRY (process_title)
+ TEST_ENTRY (process_title_threadsafe)
TEST_ENTRY (cwd_and_chdir)
@@ -748,6 +760,7 @@ TASK_LIST_START
TEST_ENTRY (fs_poll)
TEST_ENTRY (fs_poll_getpath)
TEST_ENTRY (kill)
+ TEST_ENTRY (kill_invalid_signum)
TEST_ENTRY (poll_close_doesnt_corrupt_stack)
TEST_ENTRY (poll_closesocket)
@@ -803,6 +816,7 @@ TASK_LIST_START
TEST_ENTRY (fs_symlink)
TEST_ENTRY (fs_symlink_dir)
#ifdef _WIN32
+ TEST_ENTRY (fs_symlink_junction)
TEST_ENTRY (fs_non_symlink_reparse_point)
#endif
TEST_ENTRY (fs_stat_missing_path)
@@ -843,14 +857,7 @@ TASK_LIST_START
TEST_ENTRY (get_osfhandle_valid_handle)
TEST_ENTRY (threadpool_queue_work_simple)
TEST_ENTRY (threadpool_queue_work_einval)
-#if defined(__PPC__) || defined(__PPC64__) /* For linux PPC and AIX */
- /* pthread_join takes a while, especially on AIX.
- * Therefore being gratuitous with timeout.
- */
- TEST_ENTRY_CUSTOM (threadpool_multiple_event_loops, 0, 0, 120000)
-#else
TEST_ENTRY (threadpool_multiple_event_loops)
-#endif
TEST_ENTRY (threadpool_cancel_getaddrinfo)
TEST_ENTRY (threadpool_cancel_getnameinfo)
TEST_ENTRY (threadpool_cancel_work)
@@ -870,6 +877,10 @@ TASK_LIST_START
TEST_ENTRY (queue_foreach_delete)
+ TEST_ENTRY (handle_type_name)
+ TEST_ENTRY (req_type_name)
+ TEST_ENTRY (getters_setters)
+
#ifndef _WIN32
TEST_ENTRY (fork_timer)
TEST_ENTRY (fork_socketpair)
diff --git a/deps/uv/test/test-loop-close.c b/deps/uv/test/test-loop-close.c
index 971c9d725bec02..f0f3e627f971e2 100644
--- a/deps/uv/test/test-loop-close.c
+++ b/deps/uv/test/test-loop-close.c
@@ -55,3 +55,21 @@ TEST_IMPL(loop_close) {
return 0;
}
+
+static void loop_instant_close_work_cb(uv_work_t* req) {
+}
+
+static void loop_instant_close_after_work_cb(uv_work_t* req, int status) {
+}
+
+TEST_IMPL(loop_instant_close) {
+ static uv_loop_t loop;
+ static uv_work_t req;
+ ASSERT(0 == uv_loop_init(&loop));
+ ASSERT(0 == uv_queue_work(&loop,
+ &req,
+ loop_instant_close_work_cb,
+ loop_instant_close_after_work_cb));
+ MAKE_VALGRIND_HAPPY();
+ return 0;
+}
diff --git a/deps/uv/test/test-ping-pong.c b/deps/uv/test/test-ping-pong.c
index bdc967151ed8f2..508f0db67bcf77 100644
--- a/deps/uv/test/test-ping-pong.c
+++ b/deps/uv/test/test-ping-pong.c
@@ -27,7 +27,7 @@
static int completed_pingers = 0;
-#if defined(__CYGWIN__) || defined(__MSYS__)
+#if defined(__CYGWIN__) || defined(__MSYS__) || defined(__MVS__)
#define NUM_PINGS 100 /* fewer pings to avoid timeout */
#else
#define NUM_PINGS 1000
diff --git a/deps/uv/test/test-process-title-threadsafe.c b/deps/uv/test/test-process-title-threadsafe.c
new file mode 100644
index 00000000000000..d986576ed93c02
--- /dev/null
+++ b/deps/uv/test/test-process-title-threadsafe.c
@@ -0,0 +1,90 @@
+/* Copyright libuv project contributors. All rights reserved.
+*
+* Permission is hereby granted, free of charge, to any person obtaining a copy
+* of this software and associated documentation files (the "Software"), to
+* deal in the Software without restriction, including without limitation the
+* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+* sell copies of the Software, and to permit persons to whom the Software is
+* furnished to do so, subject to the following conditions:
+*
+* The above copyright notice and this permission notice shall be included in
+* all copies or substantial portions of the Software.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+* IN THE SOFTWARE.
+*/
+
+
+#include "uv.h"
+#include "task.h"
+
+#include <string.h>
+
+#ifdef __APPLE__
+# define NUM_ITERATIONS 20
+#else
+# define NUM_ITERATIONS 50
+#endif
+
+static const char* titles[] = {
+ "8L2NY0Kdj0XyNFZnmUZigIOfcWjyNr0SkMmUhKw99VLUsZFrvCQQC3XIRfNR8pjyMjXObllled",
+ "jUAcscJN49oLSN8GdmXj2Wo34XX2T2vp2j5khfajNQarlOulp57cE130yiY53ipJFnPyTn5i82",
+ "9niCI5icXGFS72XudhXqo5alftmZ1tpE7B3cwUmrq0CCDjC84FzBNB8XAHqvpNQfI2QAQG6ztT",
+ "n8qXVXuG6IEHDpabJgTEiwtpY6LHMZ8MgznnMpdHARu5EywufA6hcBaQfetb0YhEsK0ykDd7JU"
+};
+
+static void getter_thread_body(void* arg) {
+ char buffer[512];
+
+ for (;;) {
+ ASSERT(0 == uv_get_process_title(buffer, sizeof(buffer)));
+ ASSERT(
+ 0 == strcmp(buffer, titles[0]) ||
+ 0 == strcmp(buffer, titles[1]) ||
+ 0 == strcmp(buffer, titles[2]) ||
+ 0 == strcmp(buffer, titles[3]));
+
+ uv_sleep(0);
+ }
+}
+
+
+static void setter_thread_body(void* arg) {
+ int i;
+
+ for (i = 0; i < NUM_ITERATIONS; i++) {
+ ASSERT(0 == uv_set_process_title(titles[0]));
+ ASSERT(0 == uv_set_process_title(titles[1]));
+ ASSERT(0 == uv_set_process_title(titles[2]));
+ ASSERT(0 == uv_set_process_title(titles[3]));
+ }
+}
+
+
+TEST_IMPL(process_title_threadsafe) {
+ uv_thread_t setter_threads[4];
+ uv_thread_t getter_thread;
+ int i;
+
+#if defined(__sun) || defined(__CYGWIN__) || defined(__MSYS__) || \
+ defined(__MVS__)
+ RETURN_SKIP("uv_(get|set)_process_title is not implemented.");
+#else
+
+ ASSERT(0 == uv_set_process_title(titles[0]));
+ ASSERT(0 == uv_thread_create(&getter_thread, getter_thread_body, NULL));
+
+ for (i = 0; i < (int) ARRAY_SIZE(setter_threads); i++)
+ ASSERT(0 == uv_thread_create(&setter_threads[i], setter_thread_body, NULL));
+
+ for (i = 0; i < (int) ARRAY_SIZE(setter_threads); i++)
+ ASSERT(0 == uv_thread_join(&setter_threads[i]));
+
+ return 0;
+#endif
+}
diff --git a/deps/uv/test/test-signal.c b/deps/uv/test/test-signal.c
index 9a881510c72151..c2ce5ec0e0a85e 100644
--- a/deps/uv/test/test-signal.c
+++ b/deps/uv/test/test-signal.c
@@ -22,6 +22,26 @@
#include "uv.h"
#include "task.h"
+#ifndef _WIN32
+#include <unistd.h>
+#endif
+
+TEST_IMPL(kill_invalid_signum) {
+ uv_pid_t pid;
+
+ pid = uv_os_getpid();
+
+ ASSERT(uv_kill(pid, -1) == UV_EINVAL);
+#ifdef _WIN32
+ /* NSIG is not available on all platforms. */
+ ASSERT(uv_kill(pid, NSIG) == UV_EINVAL);
+#endif
+ ASSERT(uv_kill(pid, 4096) == UV_EINVAL);
+
+ MAKE_VALGRIND_HAPPY();
+ return 0;
+}
+
/* For Windows we test only signum handling */
#ifdef _WIN32
static void signum_test_cb(uv_signal_t* handle, int signum) {
diff --git a/deps/uv/test/test-spawn.c b/deps/uv/test/test-spawn.c
index 4b138265a5bc51..4a2869a18afa43 100644
--- a/deps/uv/test/test-spawn.c
+++ b/deps/uv/test/test-spawn.c
@@ -92,7 +92,7 @@ static void kill_cb(uv_process_t* process,
#else
ASSERT(exit_status == 0);
#endif
-#if defined(__APPLE__)
+#if defined(__APPLE__) || defined(__MVS__)
/*
* At least starting with Darwin Kernel Version 16.4.0, sending a SIGTERM to a
* process that is still starting up kills it with SIGKILL instead of SIGTERM.
@@ -805,6 +805,8 @@ TEST_IMPL(spawn_detached) {
ASSERT(exit_cb_called == 0);
+ ASSERT(process.pid == uv_process_get_pid(&process));
+
r = uv_kill(process.pid, 0);
ASSERT(r == 0);
@@ -1560,9 +1562,6 @@ TEST_IMPL(spawn_fs_open) {
#ifndef _WIN32
TEST_IMPL(closed_fd_events) {
-#if defined(__MVS__)
- RETURN_SKIP("Filesystem watching not supported on this platform.");
-#endif
uv_stdio_container_t stdio[3];
uv_pipe_t pipe_handle;
int fd[2];
diff --git a/deps/uv/test/test-udp-multicast-interface.c b/deps/uv/test/test-udp-multicast-interface.c
index 71001a77e03e18..0b3c0e62da559f 100644
--- a/deps/uv/test/test-udp-multicast-interface.c
+++ b/deps/uv/test/test-udp-multicast-interface.c
@@ -44,7 +44,7 @@ static void close_cb(uv_handle_t* handle) {
static void sv_send_cb(uv_udp_send_t* req, int status) {
ASSERT(req != NULL);
- ASSERT(status == 0 || status == UV_ENETUNREACH);
+ ASSERT(status == 0 || status == UV_ENETUNREACH || status == UV_EPERM);
CHECK_HANDLE(req->handle);
sv_send_cb_called++;
diff --git a/deps/uv/test/test-udp-multicast-ttl.c b/deps/uv/test/test-udp-multicast-ttl.c
index 7f1af9b9dd9bd2..e92608be4809bf 100644
--- a/deps/uv/test/test-udp-multicast-ttl.c
+++ b/deps/uv/test/test-udp-multicast-ttl.c
@@ -44,7 +44,7 @@ static void close_cb(uv_handle_t* handle) {
static void sv_send_cb(uv_udp_send_t* req, int status) {
ASSERT(req != NULL);
- ASSERT(status == 0 || status == UV_ENETUNREACH);
+ ASSERT(status == 0 || status == UV_ENETUNREACH || status == UV_EPERM);
CHECK_HANDLE(req->handle);
sv_send_cb_called++;
diff --git a/deps/uv/test/test-udp-send-hang-loop.c b/deps/uv/test/test-udp-send-hang-loop.c
index 6253ff7a4134c0..bf4dfebfb35098 100644
--- a/deps/uv/test/test-udp-send-hang-loop.c
+++ b/deps/uv/test/test-udp-send-hang-loop.c
@@ -67,7 +67,7 @@ static void idle_cb(uv_idle_t* handle) {
static void send_cb(uv_udp_send_t* req, int status) {
ASSERT(req != NULL);
- ASSERT(status == 0);
+ ASSERT(status == 0 || status == UV_ENETUNREACH);
CHECK_OBJECT(req->handle, uv_udp_t, client);
CHECK_OBJECT(req, uv_udp_send_t, send_req);
req->handle = NULL;
diff --git a/deps/uv/uv.gyp b/deps/uv/uv.gyp
index 96fb801a77b034..46606c5bda868d 100644
--- a/deps/uv/uv.gyp
+++ b/deps/uv/uv.gyp
@@ -78,6 +78,7 @@
'src/inet.c',
'src/queue.h',
'src/threadpool.c',
+ 'src/uv-data-getter-setters.c',
'src/uv-common.c',
'src/uv-common.h',
'src/version.c'
@@ -339,7 +340,6 @@
['OS=="zos"', {
'sources': [
'src/unix/pthread-fixes.c',
- 'src/unix/no-fsevents.c',
'src/unix/os390.c',
'src/unix/os390-syscalls.c'
]
@@ -366,6 +366,7 @@
'test/test-callback-order.c',
'test/test-close-fd.c',
'test/test-close-order.c',
+ 'test/test-connect-unspecified.c',
'test/test-connection-fail.c',
'test/test-cwd-and-chdir.c',
'test/test-default-loop-close.c',
@@ -380,6 +381,7 @@
'test/test-fs.c',
'test/test-fs-copyfile.c',
'test/test-fs-event.c',
+ 'test/test-getters-setters.c',
'test/test-get-currentexe.c',
'test/test-get-memory.c',
'test/test-get-passwd.c',
@@ -425,6 +427,7 @@
'test/test-poll-closesocket.c',
'test/test-poll-oob.c',
'test/test-process-title.c',
+ 'test/test-process-title-threadsafe.c',
'test/test-queue-foreach-delete.c',
'test/test-ref.c',
'test/test-run-nowait.c',
diff --git a/deps/v8/src/parsing/parser-base.h b/deps/v8/src/parsing/parser-base.h
index f555dbdbe0e5a7..1788eba51c8fe6 100644
--- a/deps/v8/src/parsing/parser-base.h
+++ b/deps/v8/src/parsing/parser-base.h
@@ -3634,6 +3634,7 @@ void ParserBase::ParseFormalParameter(FormalParametersT* parameters,
// BindingElement[?Yield, ?GeneratorParameter]
bool is_rest = parameters->has_rest;
+ FuncNameInferrer::State fni_state(fni_);
ExpressionT pattern = ParsePrimaryExpression(CHECK_OK_CUSTOM(Void));
ValidateBindingPattern(CHECK_OK_CUSTOM(Void));
diff --git a/deps/v8/test/message/fail/func-name-inferrer-arg-1.js b/deps/v8/test/message/fail/func-name-inferrer-arg-1.js
new file mode 100644
index 00000000000000..6c28367d921433
--- /dev/null
+++ b/deps/v8/test/message/fail/func-name-inferrer-arg-1.js
@@ -0,0 +1,10 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function (param = function() { throw new Error('boom') }) {
+ (() => {
+ param();
+ })();
+
+})();
diff --git a/deps/v8/test/message/fail/func-name-inferrer-arg-1.out b/deps/v8/test/message/fail/func-name-inferrer-arg-1.out
new file mode 100644
index 00000000000000..3c19121a0a6efa
--- /dev/null
+++ b/deps/v8/test/message/fail/func-name-inferrer-arg-1.out
@@ -0,0 +1,8 @@
+*%(basename)s:5: Error: boom
+(function (param = function() { throw new Error('boom') }) {
+ ^
+Error: boom
+ at param (*%(basename)s:5:39)
+ at *%(basename)s:7:5
+ at *%(basename)s:8:5
+ at *%(basename)s:10:3
\ No newline at end of file
diff --git a/deps/v8/test/message/fail/func-name-inferrer-arg.js b/deps/v8/test/message/fail/func-name-inferrer-arg.js
new file mode 100644
index 00000000000000..3fcd044b9b1831
--- /dev/null
+++ b/deps/v8/test/message/fail/func-name-inferrer-arg.js
@@ -0,0 +1,10 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function (param) {
+ (() => {
+ throw new Error('boom');
+ })();
+
+})();
diff --git a/deps/v8/test/message/fail/func-name-inferrer-arg.out b/deps/v8/test/message/fail/func-name-inferrer-arg.out
new file mode 100644
index 00000000000000..06e001d1d5e641
--- /dev/null
+++ b/deps/v8/test/message/fail/func-name-inferrer-arg.out
@@ -0,0 +1,7 @@
+*%(basename)s:7: Error: boom
+ throw new Error('boom');
+ ^
+Error: boom
+ at *%(basename)s:7:11
+ at *%(basename)s:8:5
+ at *%(basename)s:10:3
\ No newline at end of file
diff --git a/doc/STYLE_GUIDE.md b/doc/STYLE_GUIDE.md
index 8ea9c8fe81b732..7fc2d6e0b1bb13 100644
--- a/doc/STYLE_GUIDE.md
+++ b/doc/STYLE_GUIDE.md
@@ -3,7 +3,7 @@
* Documentation is written in markdown files with names formatted as
`lowercase-with-dashes.md`.
* Underscores in filenames are allowed only when they are present in the
- topic the document will describe (e.g., `child_process`).
+ topic the document will describe (e.g. `child_process`).
* Some files, such as top-level markdown files, are exceptions.
* Documents should be word-wrapped at 80 characters.
* The formatting described in `.editorconfig` is preferred.
diff --git a/doc/api/addons.md b/doc/api/addons.md
index e2df5f30e9a32a..c6802530f6dc67 100644
--- a/doc/api/addons.md
+++ b/doc/api/addons.md
@@ -221,7 +221,7 @@ illustration of how it can be used.
> Stability: 1 - Experimental
N-API is an API for building native Addons. It is independent from
-the underlying JavaScript runtime (e.g., V8) and is maintained as part of
+the underlying JavaScript runtime (e.g. V8) and is maintained as part of
Node.js itself. This API will be Application Binary Interface (ABI) stable
across version of Node.js. It is intended to insulate Addons from
changes in the underlying JavaScript engine and allow modules
diff --git a/doc/api/assert.md b/doc/api/assert.md
index f4654bf67eae3f..3df420cab64ff1 100644
--- a/doc/api/assert.md
+++ b/doc/api/assert.md
@@ -375,6 +375,8 @@ argument in callbacks.
```js
const assert = require('assert');
+assert.ifError(null);
+// OK
assert.ifError(0);
// OK
assert.ifError(1);
diff --git a/doc/api/async_hooks.md b/doc/api/async_hooks.md
index e8cb9344c4c123..4fa23f28d116b0 100644
--- a/doc/api/async_hooks.md
+++ b/doc/api/async_hooks.md
@@ -244,10 +244,10 @@ RANDOMBYTESREQUEST, TLSWRAP, Timeout, Immediate, TickObject
There is also the `PROMISE` resource type, which is used to track `Promise`
instances and asynchronous work scheduled by them.
-Users are be able to define their own `type` when using the public embedder API.
+Users are able to define their own `type` when using the public embedder API.
*Note:* It is possible to have type name collisions. Embedders are encouraged
-to use a unique prefixes, such as the npm package name, to prevent collisions
+to use unique prefixes, such as the npm package name, to prevent collisions
when listening to the hooks.
###### `triggerId`
@@ -470,6 +470,14 @@ init for PROMISE with id 6, trigger id: 5 # the Promise returned by then()
#### `async_hooks.executionAsyncId()`
+
+
* Returns: {number} The `asyncId` of the current execution context. Useful to
track when something calls.
@@ -484,7 +492,7 @@ fs.open(path, 'r', (err, fd) => {
});
```
-The ID returned fom `executionAsyncId()` is related to execution timing, not
+The ID returned from `executionAsyncId()` is related to execution timing, not
causality (which is covered by `triggerAsyncId()`). For example:
```js
diff --git a/doc/api/buffer.md b/doc/api/buffer.md
index 41b4e643055af2..1e5b23cb4b9982 100644
--- a/doc/api/buffer.md
+++ b/doc/api/buffer.md
@@ -1583,7 +1583,7 @@ endian format (`readDoubleBE()` returns big endian, `readDoubleLE()` returns
little endian).
Setting `noAssert` to `true` allows `offset` to be beyond the end of `buf`, but
-the result should be considered undefined behavior.
+the resulting behavior is undefined.
Examples:
@@ -1619,7 +1619,7 @@ endian format (`readFloatBE()` returns big endian, `readFloatLE()` returns
little endian).
Setting `noAssert` to `true` allows `offset` to be beyond the end of `buf`, but
-the result should be considered undefined behavior.
+the resulting behavior is undefined.
Examples:
@@ -1652,7 +1652,7 @@ added: v0.5.0
Reads a signed 8-bit integer from `buf` at the specified `offset`.
Setting `noAssert` to `true` allows `offset` to be beyond the end of `buf`, but
-the result should be considered undefined behavior.
+the resulting behavior is undefined.
Integers read from a `Buffer` are interpreted as two's complement signed values.
@@ -1686,7 +1686,7 @@ the specified endian format (`readInt16BE()` returns big endian,
`readInt16LE()` returns little endian).
Setting `noAssert` to `true` allows `offset` to be beyond the end of `buf`, but
-the result should be considered undefined behavior.
+the resulting behavior is undefined.
Integers read from a `Buffer` are interpreted as two's complement signed values.
@@ -1720,7 +1720,7 @@ the specified endian format (`readInt32BE()` returns big endian,
`readInt32LE()` returns little endian).
Setting `noAssert` to `true` allows `offset` to be beyond the end of `buf`, but
-the result should be considered undefined behavior.
+the resulting behavior is undefined.
Integers read from a `Buffer` are interpreted as two's complement signed values.
@@ -1755,7 +1755,7 @@ and interprets the result as a two's complement signed value. Supports up to 48
bits of accuracy.
Setting `noAssert` to `true` allows `offset` to be beyond the end of `buf`, but
-the result should be considered undefined behavior.
+the resulting behavior is undefined.
Examples:
@@ -1784,7 +1784,7 @@ added: v0.5.0
Reads an unsigned 8-bit integer from `buf` at the specified `offset`.
Setting `noAssert` to `true` allows `offset` to be beyond the end of `buf`, but
-the result should be considered undefined behavior.
+the resulting behavior is undefined.
Examples:
@@ -1816,7 +1816,7 @@ specified endian format (`readUInt16BE()` returns big endian, `readUInt16LE()`
returns little endian).
Setting `noAssert` to `true` allows `offset` to be beyond the end of `buf`, but
-the result should be considered undefined behavior.
+the resulting behavior is undefined.
Examples:
@@ -1854,7 +1854,7 @@ specified endian format (`readUInt32BE()` returns big endian,
`readUInt32LE()` returns little endian).
Setting `noAssert` to `true` allows `offset` to be beyond the end of `buf`, but
-the result should be considered undefined behavior.
+the resulting behavior is undefined.
Examples:
@@ -1887,7 +1887,7 @@ and interprets the result as an unsigned integer. Supports up to 48
bits of accuracy.
Setting `noAssert` to `true` allows `offset` to be beyond the end of `buf`, but
-the result should be considered undefined behavior.
+the resulting behavior is undefined.
Examples:
@@ -2225,7 +2225,7 @@ endian). `value` *should* be a valid 64-bit double. Behavior is undefined when
`value` is anything other than a 64-bit double.
Setting `noAssert` to `true` allows the encoded form of `value` to extend beyond
-the end of `buf`, but the result should be considered undefined behavior.
+the end of `buf`, but the resulting behavior is undefined.
Examples:
@@ -2260,7 +2260,7 @@ endian). `value` *should* be a valid 32-bit float. Behavior is undefined when
`value` is anything other than a 32-bit float.
Setting `noAssert` to `true` allows the encoded form of `value` to extend beyond
-the end of `buf`, but the result should be considered undefined behavior.
+the end of `buf`, but the resulting behavior is undefined.
Examples:
@@ -2293,7 +2293,7 @@ signed 8-bit integer. Behavior is undefined when `value` is anything other than
a signed 8-bit integer.
Setting `noAssert` to `true` allows the encoded form of `value` to extend beyond
-the end of `buf`, but the result should be considered undefined behavior.
+the end of `buf`, but the resulting behavior is undefined.
`value` is interpreted and written as a two's complement signed integer.
@@ -2326,7 +2326,7 @@ endian). `value` *should* be a valid signed 16-bit integer. Behavior is undefine
when `value` is anything other than a signed 16-bit integer.
Setting `noAssert` to `true` allows the encoded form of `value` to extend beyond
-the end of `buf`, but the result should be considered undefined behavior.
+the end of `buf`, but the resulting behavior is undefined.
`value` is interpreted and written as a two's complement signed integer.
@@ -2359,7 +2359,7 @@ endian). `value` *should* be a valid signed 32-bit integer. Behavior is undefine
when `value` is anything other than a signed 32-bit integer.
Setting `noAssert` to `true` allows the encoded form of `value` to extend beyond
-the end of `buf`, but the result should be considered undefined behavior.
+the end of `buf`, but the resulting behavior is undefined.
`value` is interpreted and written as a two's complement signed integer.
@@ -2393,7 +2393,7 @@ Supports up to 48 bits of accuracy. Behavior is undefined when `value` is
anything other than a signed integer.
Setting `noAssert` to `true` allows the encoded form of `value` to extend beyond
-the end of `buf`, but the result should be considered undefined behavior.
+the end of `buf`, but the resulting behavior is undefined.
Examples:
@@ -2426,7 +2426,7 @@ valid unsigned 8-bit integer. Behavior is undefined when `value` is anything
other than an unsigned 8-bit integer.
Setting `noAssert` to `true` allows the encoded form of `value` to extend beyond
-the end of `buf`, but the result should be considered undefined behavior.
+the end of `buf`, but the resulting behavior is undefined.
Examples:
@@ -2459,7 +2459,7 @@ endian). `value` should be a valid unsigned 16-bit integer. Behavior is
undefined when `value` is anything other than an unsigned 16-bit integer.
Setting `noAssert` to `true` allows the encoded form of `value` to extend beyond
-the end of `buf`, but the result should be considered undefined behavior.
+the end of `buf`, but the resulting behavior is undefined.
Examples:
@@ -2496,7 +2496,7 @@ endian). `value` should be a valid unsigned 32-bit integer. Behavior is
undefined when `value` is anything other than an unsigned 32-bit integer.
Setting `noAssert` to `true` allows the encoded form of `value` to extend beyond
-the end of `buf`, but the result should be considered undefined behavior.
+the end of `buf`, but the resulting behavior is undefined.
Examples:
@@ -2532,7 +2532,7 @@ Supports up to 48 bits of accuracy. Behavior is undefined when `value` is
anything other than an unsigned integer.
Setting `noAssert` to `true` allows the encoded form of `value` to extend beyond
-the end of `buf`, but the result should be considered undefined behavior.
+the end of `buf`, but the resulting behavior is undefined.
Examples:
diff --git a/doc/api/child_process.md b/doc/api/child_process.md
index 2f86d5156fc438..fcfd79150abf53 100644
--- a/doc/api/child_process.md
+++ b/doc/api/child_process.md
@@ -25,7 +25,7 @@ ls.on('close', (code) => {
});
```
-By default, pipes for `stdin`, `stdout` and `stderr` are established between
+By default, pipes for `stdin`, `stdout`, and `stderr` are established between
the parent Node.js process and the spawned child. It is possible to stream data
through these pipes in a non-blocking way. *Note, however, that some programs
use line-buffered I/O internally. While that does not affect Node.js, it can
@@ -170,7 +170,7 @@ exec('echo "The \\$HOME variable is $HOME"');
//The $HOME variable is escaped in the first instance, but not in the second
```
-*Note*: Never pass unsanitised user input to this function. Any input
+*Note*: Never pass unsanitized user input to this function. Any input
containing shell metacharacters may be used to trigger arbitrary command
execution.
@@ -418,7 +418,7 @@ The `child_process.spawn()` method spawns a new process using the given
`command`, with command line arguments in `args`. If omitted, `args` defaults
to an empty array.
-*Note*: If the `shell` option is enabled, do not pass unsanitised user input to
+*Note*: If the `shell` option is enabled, do not pass unsanitized user input to
this function. Any input containing shell metacharacters may be used to
trigger arbitrary command execution.
@@ -661,7 +661,7 @@ The [`child_process.spawnSync()`][], [`child_process.execSync()`][], and
the Node.js event loop, pausing execution of any additional code until the
spawned process exits.
-Blocking calls like these are mostly useful for simplifying general purpose
+Blocking calls like these are mostly useful for simplifying general-purpose
scripting tasks and for simplifying the loading/processing of application
configuration at startup.
@@ -715,7 +715,7 @@ completely exited.
does not exit, the parent process will still wait until the child process has
exited.
-If the process times out, or has a non-zero exit code, this method ***will***
+If the process times out or has a non-zero exit code, this method ***will***
throw an [`Error`][] that will include the full result of the underlying
[`child_process.spawnSync()`][].
@@ -767,11 +767,11 @@ exited. *Note that if the child process intercepts and handles the `SIGTERM`
signal and doesn't exit, the parent process will wait until the child
process has exited.*
-If the process times out, or has a non-zero exit code, this method ***will***
+If the process times out or has a non-zero exit code, this method ***will***
throw. The [`Error`][] object will contain the entire result from
[`child_process.spawnSync()`][]
-*Note*: Never pass unsanitised user input to this function. Any input
+*Note*: Never pass unsanitized user input to this function. Any input
containing shell metacharacters may be used to trigger arbitrary command
execution.
@@ -839,7 +839,7 @@ completely exited. Note that if the process intercepts and handles the
`SIGTERM` signal and doesn't exit, the parent process will wait until the child
process has exited.
-*Note*: If the `shell` option is enabled, do not pass unsanitised user input
+*Note*: If the `shell` option is enabled, do not pass unsanitized user input
to this function. Any input containing shell metacharacters may be used to
trigger arbitrary command execution.
@@ -1124,10 +1124,10 @@ process.send({ foo: 'bar', baz: NaN });
Child Node.js processes will have a [`process.send()`][] method of their own that
allows the child to send messages back to the parent.
-There is a special case when sending a `{cmd: 'NODE_foo'}` message. All messages
-containing a `NODE_` prefix in its `cmd` property are considered to be reserved
-for use within Node.js core and will not be emitted in the child's
-[`process.on('message')`][] event. Rather, such messages are emitted using the
+There is a special case when sending a `{cmd: 'NODE_foo'}` message. Messages
+containing a `NODE_` prefix in the `cmd` property are reserved for use within
+Node.js core and will not be emitted in the child's [`process.on('message')`][]
+event. Rather, such messages are emitted using the
`process.on('internalMessage')` event and are consumed internally by Node.js.
Applications should avoid using such messages or listening for
`'internalMessage'` events as it is subject to change without notice.
diff --git a/doc/api/cluster.md b/doc/api/cluster.md
index 016fdf6c185c68..8e165e2c33d7c8 100644
--- a/doc/api/cluster.md
+++ b/doc/api/cluster.md
@@ -711,6 +711,8 @@ changes:
* `exec` {string} File path to worker file. **Default:** `process.argv[1]`
* `args` {Array} String arguments passed to worker.
**Default:** `process.argv.slice(2)`
+ * `cwd` {string} Current working directory of the worker process. **Default:**
+ `undefined` (inherits from parent process)
* `silent` {boolean} Whether or not to send output to parent's stdio.
**Default:** `false`
* `stdio` {Array} Configures the stdio of forked processes. Because the
diff --git a/doc/api/crypto.md b/doc/api/crypto.md
index cc9838e9625bb5..f264f4ed20538e 100644
--- a/doc/api/crypto.md
+++ b/doc/api/crypto.md
@@ -1250,7 +1250,7 @@ password always creates the same key. The low iteration count and
non-cryptographically secure hash algorithm allow passwords to be tested very
rapidly.
-In line with OpenSSL's recommendation to use pbkdf2 instead of
+In line with OpenSSL's recommendation to use PBKDF2 instead of
[`EVP_BytesToKey`][] it is recommended that developers derive a key and IV on
their own using [`crypto.pbkdf2()`][] and to use [`crypto.createCipheriv()`][]
to create the `Cipher` object. Users should not use ciphers with counter mode
@@ -1312,7 +1312,7 @@ password always creates the same key. The low iteration count and
non-cryptographically secure hash algorithm allow passwords to be tested very
rapidly.
-In line with OpenSSL's recommendation to use pbkdf2 instead of
+In line with OpenSSL's recommendation to use PBKDF2 instead of
[`EVP_BytesToKey`][] it is recommended that developers derive a key and IV on
their own using [`crypto.pbkdf2()`][] and to use [`crypto.createDecipheriv()`][]
to create the `Decipher` object.
diff --git a/doc/api/dgram.md b/doc/api/dgram.md
index 0aa6a67668c32b..e44bf3eea5a166 100644
--- a/doc/api/dgram.md
+++ b/doc/api/dgram.md
@@ -161,7 +161,7 @@ added: v0.11.14
-->
* `options` {Object} Required. Supports the following properties:
- * `port` {Integer}
+ * `port` {integer}
* `address` {string}
* `exclusive` {boolean}
* `callback` {Function}
@@ -390,7 +390,7 @@ packets may be sent to a local interface's broadcast address.
added: v8.6.0
-->
-* `multicastInterface` {String}
+* `multicastInterface` {string}
*Note: All references to scope in this section are referring to
[IPv6 Zone Indices][], which are defined by [RFC 4007][]. In string form, an IP
diff --git a/doc/api/dns.md b/doc/api/dns.md
index 4ae44a211a0e06..c1ec1cfa51c41c 100644
--- a/doc/api/dns.md
+++ b/doc/api/dns.md
@@ -483,7 +483,7 @@ added: v0.1.27
Uses the DNS protocol to resolve text queries (`TXT` records) for the
`hostname`. The `records` argument passed to the `callback` function is a
-two-dimensional array of the text records available for `hostname` (e.g.,
+two-dimensional array of the text records available for `hostname` (e.g.
`[ ['v=spf1 ip4:0.0.0.0 ', '~all' ] ]`). Each sub-array contains TXT chunks of
one record. Depending on the use case, these could be either joined together or
treated separately.
diff --git a/doc/api/errors.md b/doc/api/errors.md
index 73fd1d52cbeedc..134f52265e0f6d 100644
--- a/doc/api/errors.md
+++ b/doc/api/errors.md
@@ -1265,6 +1265,24 @@ While using `N-API`, a constructor passed was not a function.
While using `N-API`, `Constructor.prototype` was not an object.
+
+### ERR_NAPI_INVALID_DATAVIEW_ARGS
+
+While calling `napi_create_dataview()`, a given `offset` was outside the bounds
+of the dataview or `offset + length` was larger than the length of the given
+`buffer`.
+
+
+### ERR_NAPI_INVALID_TYPEDARRAY_ALIGNMENT
+
+While calling `napi_create_typedarray()`, the provided `offset` was not a
+multiple of the element size.
+
+
+### ERR_NAPI_INVALID_TYPEDARRAY_LENGTH
+
+While calling `napi_create_typedarray()`, `(length * size_of_element) +
+byte_offset` was larger than the length of given `buffer`.
+
### ERR_NO_CRYPTO
@@ -1284,11 +1302,6 @@ A Node.js API was called in an unsupported manner.
For example: `Buffer.write(string, encoding, offset[, length])`
-
-### ERR_OUTOFMEMORY
-
-An operation caused an out-of-memory condition.
-
### ERR_OUT_OF_RANGE
diff --git a/doc/api/esm.md b/doc/api/esm.md
index 08d3ea6b3060b9..b90927c0d57cec 100644
--- a/doc/api/esm.md
+++ b/doc/api/esm.md
@@ -33,14 +33,15 @@ node --experimental-modules my-app.mjs
### Supported
Only the CLI argument for the main entry point to the program can be an entry
-point into an ESM graph. Dynamic import can also be used with the flag
-`--harmony-dynamic-import` to create entry points into ESM graphs at run time.
+point into an ESM graph. In the future `import()` can be used to create entry
+points into ESM graphs at run time.
### Unsupported
| Feature | Reason |
| --- | --- |
-| `require('./foo.mjs')` | ES Modules have differing resolution and timing, use dynamic import |
+| `require('./foo.mjs')` | ES Modules have differing resolution and timing, use language standard `import()` |
+| `import()` | pending newer V8 release used in Node.js |
| `import.meta` | pending V8 implementation |
## Notable differences between `import` and `require`
@@ -132,7 +133,7 @@ module. This can be one of the following:
| `format` | Description |
| --- | --- |
| `"esm"` | Load a standard JavaScript module |
-| `"cjs"` | Load a node-style CommonJS module |
+| `"commonjs"` | Load a node-style CommonJS module |
| `"builtin"` | Load a node builtin CommonJS module |
| `"json"` | Load a JSON file |
| `"addon"` | Load a [C++ Addon][addons] |
diff --git a/doc/api/fs.md b/doc/api/fs.md
index e1dabde956c9d1..f26dc6efbd204b 100644
--- a/doc/api/fs.md
+++ b/doc/api/fs.md
@@ -843,7 +843,7 @@ changes:
description: The `file` parameter can be a file descriptor now.
-->
-* `file` {string|Buffer|number} filename or file descriptor
+* `file` {string|Buffer|URL|number} filename or file descriptor
* `data` {string|Buffer}
* `options` {Object|string}
* `encoding` {string|null} **Default:** `'utf8'`
@@ -898,7 +898,7 @@ changes:
description: The `file` parameter can be a file descriptor now.
-->
-* `file` {string|Buffer|number} filename or file descriptor
+* `file` {string|Buffer|URL|number} filename or file descriptor
* `data` {string|Buffer}
* `options` {Object|string}
* `encoding` {string|null} **Default:** `'utf8'`
@@ -1334,7 +1334,7 @@ deprecated: v1.0.0
* `path` {string|Buffer|URL}
* `callback` {Function}
- * `exists` {Boolean}
+ * `exists` {boolean}
Test whether or not the given path exists by checking with the file system.
Then call the `callback` argument with either true or false. Example:
@@ -1709,7 +1709,7 @@ changes:
it will emit a deprecation warning.
-->
-* `path` {string|Buffer}
+* `path` {string|Buffer|URL}
* `mode` {integer}
* `callback` {Function}
* `err` {Error}
@@ -1724,7 +1724,7 @@ Only available on macOS.
deprecated: v0.4.7
-->
-* `path` {string|Buffer}
+* `path` {string|Buffer|URL}
* `mode` {integer}
Synchronous lchmod(2). Returns `undefined`.
@@ -1739,7 +1739,7 @@ changes:
it will emit a deprecation warning.
-->
-* `path` {string|Buffer}
+* `path` {string|Buffer|URL}
* `uid` {integer}
* `gid` {integer}
* `callback` {Function}
@@ -1753,7 +1753,7 @@ to the completion callback.
deprecated: v0.4.7
-->
-* `path` {string|Buffer}
+* `path` {string|Buffer|URL}
* `uid` {integer}
* `gid` {integer}
@@ -2694,7 +2694,7 @@ changes:
it will emit a deprecation warning.
-->
-* `path` {string|Buffer}
+* `path` {string|Buffer|URL}
* `len` {integer} **Default:** `0`
* `callback` {Function}
* `err` {Error}
@@ -2711,7 +2711,7 @@ being thrown in the future.
added: v0.8.6
-->
-* `path` {string|Buffer}
+* `path` {string|Buffer|URL}
* `len` {integer} **Default:** `0`
Synchronous truncate(2). Returns `undefined`. A file descriptor can also be
@@ -2760,10 +2760,9 @@ Synchronous unlink(2). Returns `undefined`.
added: v0.1.31
-->
-* `filename` {string|Buffer}
-* `listener` {Function|undefined} **Default:** `undefined`
- * `eventType` {string}
- * `filename` {string|Buffer}
+* `filename` {string|Buffer|URL}
+* `listener` {Function} Optional, a listener previously attached using
+ `fs.watchFile()`
Stop watching for changes on `filename`. If `listener` is specified, only that
particular listener is removed. Otherwise, *all* listeners are removed,
@@ -3121,7 +3120,7 @@ changes:
description: The `file` parameter can be a file descriptor now.
-->
-* `file` {string|Buffer|integer} filename or file descriptor
+* `file` {string|Buffer|URL|integer} filename or file descriptor
* `data` {string|Buffer|Uint8Array}
* `options` {Object|string}
* `encoding` {string|null} **Default:** `'utf8'`
@@ -3172,7 +3171,7 @@ changes:
description: The `file` parameter can be a file descriptor now.
-->
-* `file` {string|Buffer|integer} filename or file descriptor
+* `file` {string|Buffer|URL|integer} filename or file descriptor
* `data` {string|Buffer|Uint8Array}
* `options` {Object|string}
* `encoding` {string|null} **Default:** `'utf8'`
diff --git a/doc/api/http.md b/doc/api/http.md
index 536d247226f3a2..02c78550e0d703 100644
--- a/doc/api/http.md
+++ b/doc/api/http.md
@@ -622,7 +622,7 @@ Once a socket is assigned to this request and is connected
added: v0.5.9
-->
-* `timeout` {number} Milliseconds before a request is considered to be timed out.
+* `timeout` {number} Milliseconds before a request times out.
* `callback` {Function} Optional function to be called when a timeout occurs. Same as binding to the `timeout` event.
Once a socket is assigned to this request and is connected
diff --git a/doc/api/http2.md b/doc/api/http2.md
index a67fc387d832a2..4f37400c0cc260 100644
--- a/doc/api/http2.md
+++ b/doc/api/http2.md
@@ -229,8 +229,8 @@ added: v8.4.0
The `'stream'` event is emitted when a new `Http2Stream` is created. When
invoked, the handler function will receive a reference to the `Http2Stream`
-object, a [Headers Object][], and numeric flags associated with the creation
-of the stream.
+object, an [HTTP2 Headers Object][], and numeric flags associated with the
+creation of the stream.
```js
const http2 = require('http2');
@@ -382,7 +382,7 @@ Transmits a `GOAWAY` frame to the connected peer *without* shutting down the
added: v8.4.0
-->
-* Value: {[Settings Object][]}
+* Value: {HTTP2 Settings Object}
A prototype-less object describing the current local settings of this
`Http2Session`. The local settings are local to *this* `Http2Session` instance.
@@ -461,7 +461,7 @@ instance's underlying [`net.Socket`].
added: v8.4.0
-->
-* Value: {[Settings Object][]}
+* Value: {HTTP2 Settings Object}
A prototype-less object describing the current remote settings of this
`Http2Session`. The remote settings are set by the *connected* HTTP/2 peer.
@@ -540,16 +540,28 @@ All other interactions will be routed directly to the socket.
added: v8.4.0
-->
+Provides miscellaneous information about the current state of the
+`Http2Session`.
+
* Value: {Object}
- * `effectiveLocalWindowSize` {number}
- * `effectiveRecvDataLength` {number}
- * `nextStreamID` {number}
- * `localWindowSize` {number}
- * `lastProcStreamID` {number}
- * `remoteWindowSize` {number}
- * `outboundQueueSize` {number}
- * `deflateDynamicTableSize` {number}
- * `inflateDynamicTableSize` {number}
+ * `effectiveLocalWindowSize` {number} The current local (receive)
+ flow control window size for the `Http2Session`.
+ * `effectiveRecvDataLength` {number} The current number of bytes
+ that have been received since the last flow control `WINDOW_UPDATE`.
+ * `nextStreamID` {number} The numeric identifier to be used the
+ next time a new `Http2Stream` is created by this `Http2Session`.
+ * `localWindowSize` {number} The number of bytes that the remote peer can
+ send without receiving a `WINDOW_UPDATE`.
+ * `lastProcStreamID` {number} The numeric id of the `Http2Stream`
+ for which a `HEADERS` or `DATA` frame was most recently received.
+ * `remoteWindowSize` {number} The number of bytes that this `Http2Session`
+ may send without receiving a `WINDOW_UPDATE`.
+ * `outboundQueueSize` {number} The number of frames currently within the
+ outbound queue for this `Http2Session`.
+ * `deflateDynamicTableSize` {number} The current size in bytes of the
+ outbound header compression state table.
+ * `inflateDynamicTableSize` {number} The current size in bytes of the
+ inbound header compression state table.
An object describing the current status of this `Http2Session`.
@@ -558,7 +570,7 @@ An object describing the current status of this `Http2Session`.
added: v8.4.0
-->
-* `settings` {[Settings Object][]}
+* `settings` {HTTP2 Settings Object}
* Returns {undefined}
Updates the current local settings for this `Http2Session` and sends a new
@@ -695,7 +707,7 @@ client.on('altsvc', (alt, origin, stream) => {
added: v8.4.0
-->
-* `headers` {[Headers Object][]}
+* `headers` {HTTP2 Headers Object}
* `options` {Object}
* `endStream` {boolean} `true` if the `Http2Stream` *writable* side should
be closed initially, such as when sending a `GET` request that should not
@@ -883,7 +895,7 @@ added: v8.4.0
The `'trailers'` event is emitted when a block of headers associated with
trailing header fields is received. The listener callback is passed the
-[Headers Object][] and flags associated with the headers.
+[HTTP2 Headers Object][] and flags associated with the headers.
```js
stream.on('trailers', (headers, flags) => {
@@ -907,7 +919,7 @@ added: v8.4.0
-->
* code {number} Unsigned 32-bit integer identifying the error code. **Default:**
- `http2.constant.NGHTTP2_NO_ERROR` (`0x00`)
+ `http2.constants.NGHTTP2_NO_ERROR` (`0x00`)
* `callback` {Function} An optional function registered to listen for the
`'close'` event.
* Returns: {undefined}
@@ -977,6 +989,34 @@ destroyed after either receiving an `RST_STREAM` frame from the connected peer,
calling `http2stream.close()`, or `http2stream.destroy()`. Will be
`undefined` if the `Http2Stream` has not been closed.
+#### http2stream.sentHeaders
+
+
+* Value: {HTTP2 Headers Object}
+
+An object containing the outbound headers sent for this `Http2Stream`.
+
+#### http2stream.sentInfoHeaders
+
+
+* Value: {HTTP2 Headers Object[]}
+
+An array of objects containing the outbound informational (additional) headers
+sent for this `Http2Stream`.
+
+#### http2stream.sentTrailers
+
+
+* Value: {HTTP2 Headers Object}
+
+An object containing the outbound trailers sent for this `Http2Stream`.
+
#### http2stream.session
+Provides miscellaneous information about the current state of the
+`Http2Stream`.
* Value: {Object}
- * `localWindowSize` {number}
- * `state` {number}
- * `localClose` {number}
- * `remoteClose` {number}
- * `sumDependencyWeight` {number}
- * `weight` {number}
+ * `localWindowSize` {number} The number of bytes the connected peer may send
+ for this `Http2Stream` without receiving a `WINDOW_UPDATE`.
+ * `state` {number} A flag indicating the low-level current state of the
+ `Http2Stream` as determined by nghttp2.
+ * `localClose` {number} `true` if this `Http2Stream` has been closed locally.
+ * `remoteClose` {number} `true` if this `Http2Stream` has been closed
+ remotely.
+ * `sumDependencyWeight` {number} The sum weight of all `Http2Stream`
+ instances that depend on this `Http2Stream` as specified using
+ `PRIORITY` frames.
+ * `weight` {number} The priority weight of this `Http2Stream`.
A current state of this `Http2Stream`.
@@ -1049,8 +1096,8 @@ added: v8.4.0
The `'headers'` event is emitted when an additional block of headers is received
for a stream, such as when a block of `1xx` informational headers is received.
-The listener callback is passed the [Headers Object][] and flags associated with
-the headers.
+The listener callback is passed the [HTTP2 Headers Object][] and flags
+associated with the headers.
```js
stream.on('headers', (headers, flags) => {
@@ -1064,8 +1111,8 @@ added: v8.4.0
-->
The `'push'` event is emitted when response headers for a Server Push stream
-are received. The listener callback is passed the [Headers Object][] and flags
-associated with the headers.
+are received. The listener callback is passed the [HTTP2 Headers Object][] and
+flags associated with the headers.
```js
stream.on('push', (headers, flags) => {
@@ -1081,7 +1128,7 @@ added: v8.4.0
The `'response'` event is emitted when a response `HEADERS` frame has been
received for this stream from the connected HTTP/2 server. The listener is
invoked with two arguments: an Object containing the received
-[Headers Object][], and flags associated with the headers.
+[HTTP2 Headers Object][], and flags associated with the headers.
For example:
@@ -1111,7 +1158,7 @@ provide additional methods such as `http2stream.pushStream()` and
added: v8.4.0
-->
-* `headers` {[Headers Object][]}
+* `headers` {HTTP2 Headers Object}
* Returns: {undefined}
Sends an additional informational `HEADERS` frame to the connected HTTP/2 peer.
@@ -1142,7 +1189,7 @@ accepts push streams, `false` otherwise. Settings are the same for every
added: v8.4.0
-->
-* `headers` {[Headers Object][]}
+* `headers` {HTTP2 Headers Object}
* `options` {Object}
* `exclusive` {boolean} When `true` and `parent` identifies a parent Stream,
the created stream is made the sole direct dependency of the parent, with
@@ -1152,17 +1199,23 @@ added: v8.4.0
created stream is dependent on.
* `callback` {Function} Callback that is called once the push stream has been
initiated.
+ * `err` {Error}
+ * `pushStream` {ServerHttp2Stream} The returned pushStream object.
+ * `headers` {HTTP2 Headers Object} Headers object the pushStream was
+ initiated with.
* Returns: {undefined}
Initiates a push stream. The callback is invoked with the new `Http2Stream`
-instance created for the push stream.
+instance created for the push stream passed as the second argument, or an
+`Error` passed as the first argument.
```js
const http2 = require('http2');
const server = http2.createServer();
server.on('stream', (stream) => {
stream.respond({ ':status': 200 });
- stream.pushStream({ ':path': '/' }, (pushStream) => {
+ stream.pushStream({ ':path': '/' }, (err, pushStream, headers) => {
+ if (err) throw err;
pushStream.respond({ ':status': 200 });
pushStream.end('some pushed data');
});
@@ -1179,7 +1232,7 @@ a `weight` value to `http2stream.priority` with the `silent` option set to
added: v8.4.0
-->
-* `headers` {[Headers Object][]}
+* `headers` {HTTP2 Headers Object}
* `options` {Object}
* `endStream` {boolean} Set to `true` to indicate that the response will not
include payload data.
@@ -1225,7 +1278,7 @@ added: v8.4.0
-->
* `fd` {number} A readable file descriptor.
-* `headers` {[Headers Object][]}
+* `headers` {HTTP2 Headers Object}
* `options` {Object}
* `statCheck` {Function}
* `getTrailers` {Function} Callback function invoked to collect trailer
@@ -1309,7 +1362,7 @@ added: v8.4.0
-->
* `path` {string|Buffer|URL}
-* `headers` {[Headers Object][]}
+* `headers` {HTTP2 Headers Object}
* `options` {Object}
* `statCheck` {Function}
* `onError` {Function} Callback function invoked in the case of an
@@ -1675,7 +1728,7 @@ changes:
* `selectPadding` {Function} When `options.paddingStrategy` is equal to
`http2.constants.PADDING_STRATEGY_CALLBACK`, provides the callback function
used to determine the padding. See [Using options.selectPadding][].
- * `settings` {[Settings Object][]} The initial settings to send to the
+ * `settings` {HTTP2 Settings Object} The initial settings to send to the
remote peer upon connection.
* `onRequestHandler` {Function} See [Compatibility API][]
* Returns: {Http2Server}
@@ -1762,7 +1815,7 @@ changes:
* `selectPadding` {Function} When `options.paddingStrategy` is equal to
`http2.constants.PADDING_STRATEGY_CALLBACK`, provides the callback function
used to determine the padding. See [Using options.selectPadding][].
- * `settings` {[Settings Object][]} The initial settings to send to the
+ * `settings` {HTTP2 Settings Object} The initial settings to send to the
remote peer upon connection.
* ...: Any [`tls.createServer()`][] options can be provided. For
servers, the identity options (`pfx` or `key`/`cert`) are usually required.
@@ -1858,7 +1911,7 @@ changes:
* `selectPadding` {Function} When `options.paddingStrategy` is equal to
`http2.constants.PADDING_STRATEGY_CALLBACK`, provides the callback function
used to determine the padding. See [Using options.selectPadding][].
- * `settings` {[Settings Object][]} The initial settings to send to the
+ * `settings` {HTTP2 Settings Object} The initial settings to send to the
remote peer upon connection.
* `createConnection` {Function} An optional callback that receives the `URL`
instance passed to `connect` and the `options` object, and returns any
@@ -1911,7 +1964,7 @@ a given number of milliseconds set using `http2server.setTimeout()`.
added: v8.4.0
-->
-* Returns: {[Settings Object][]}
+* Returns: {HTTP2 Settings Object}
Returns an object containing the default settings for an `Http2Session`
instance. This method returns a new object instance every time it is called
@@ -1922,7 +1975,7 @@ so instances returned may be safely modified for use.
added: v8.4.0
-->
-* `settings` {[Settings Object][]}
+* `settings` {HTTP2 Settings Object}
* Returns: {Buffer}
Returns a `Buffer` instance containing serialized representation of the given
@@ -1944,10 +1997,10 @@ added: v8.4.0
-->
* `buf` {Buffer|Uint8Array} The packed settings.
-* Returns: {[Settings Object][]}
+* Returns: {HTTP2 Settings Object}
-Returns a [Settings Object][] containing the deserialized settings from the
-given `Buffer` as generated by `http2.getPackedSettings()`.
+Returns an [HTTP2 Settings Object][] containing the deserialized settings from
+the given `Buffer` as generated by `http2.getPackedSettings()`.
### Headers Object
@@ -2319,7 +2372,7 @@ Example:
console.log(request.headers);
```
-See [Headers Object][].
+See [HTTP2 Headers Object][].
*Note*: In HTTP/2, the request path, hostname, protocol, and method are
represented as special headers prefixed with the `:` character (e.g. `':path'`).
@@ -3041,13 +3094,13 @@ following additional properties:
[Compatibility API]: #http2_compatibility_api
[HTTP/1]: http.html
[HTTP/2]: https://tools.ietf.org/html/rfc7540
+[HTTP2 Headers Object]: #http2_headers_object
+[HTTP2 Settings Object]: #http2_settings_object
[HTTPS]: https.html
-[Headers Object]: #http2_headers_object
[Http2Session and Sockets]: #http2_http2session_and_sockets
[Performance Observer]: perf_hooks.html
[Readable Stream]: stream.html#stream_class_stream_readable
[RFC 7838]: https://tools.ietf.org/html/rfc7838
-[Settings Object]: #http2_settings_object
[Using options.selectPadding]: #http2_using_options_selectpadding
[Writable Stream]: stream.html#stream_writable_streams
[`'checkContinue'`]: #http2_event_checkcontinue
diff --git a/doc/api/modules.md b/doc/api/modules.md
index 458ba73c3df064..157ec3b6f715b9 100644
--- a/doc/api/modules.md
+++ b/doc/api/modules.md
@@ -468,7 +468,7 @@ added: v0.1.27
* {string}
-The directory name of the current module. This the same as the
+The directory name of the current module. This is the same as the
[`path.dirname()`][] of the [`__filename`][].
Example: running `node example.js` from `/Users/mjr`
@@ -626,9 +626,11 @@ added: v8.9.0
-->
* `request` {string} The module path whose lookup paths are being retrieved.
-* Returns: {Array}
+* Returns: {Array|null}
-Returns an array containing the paths searched during resolution of `request`.
+Returns an array containing the paths searched during resolution of `request` or
+null if the `request` string references a core module, for example `http` or
+`fs`.
## The `module` Object
@@ -1038,7 +1038,7 @@ JavaScript arrays are described in
[Section 22.1](https://tc39.github.io/ecma262/#sec-array-objects) of the
ECMAScript Language Specification.
-#### *napi_create_array_with_length*
+#### napi_create_array_with_length
@@ -1067,7 +1067,7 @@ JavaScript arrays are described in
[Section 22.1](https://tc39.github.io/ecma262/#sec-array-objects) of the
ECMAScript Language Specification.
-#### *napi_create_arraybuffer*
+#### napi_create_arraybuffer
@@ -1099,7 +1099,7 @@ JavaScript ArrayBuffer objects are described in
[Section 24.1](https://tc39.github.io/ecma262/#sec-arraybuffer-objects)
of the ECMAScript Language Specification.
-#### *napi_create_buffer*
+#### napi_create_buffer
@@ -1120,7 +1120,7 @@ Returns `napi_ok` if the API succeeded.
This API allocates a `node::Buffer` object. While this is still a
fully-supported data structure, in most cases using a TypedArray will suffice.
-#### *napi_create_buffer_copy*
+#### napi_create_buffer_copy
@@ -1145,7 +1145,7 @@ This API allocates a `node::Buffer` object and initializes it with data copied
from the passed-in buffer. While this is still a fully-supported data
structure, in most cases using a TypedArray will suffice.
-#### *napi_create_external*
+#### napi_create_external
@@ -1212,7 +1212,7 @@ JavaScript ArrayBuffers are described in
[Section 24.1](https://tc39.github.io/ecma262/#sec-arraybuffer-objects)
of the ECMAScript Language Specification.
-#### *napi_create_external_buffer*
+#### napi_create_external_buffer
@@ -1243,7 +1243,7 @@ structure, in most cases using a TypedArray will suffice.
*Note*: For Node.js >=4 `Buffers` are Uint8Arrays.
-#### *napi_create_function*
+#### napi_create_function
@@ -1276,7 +1276,7 @@ JavaScript Functions are described in
[Section 19.2](https://tc39.github.io/ecma262/#sec-function-objects)
of the ECMAScript Language Specification.
-#### *napi_create_object*
+#### napi_create_object
@@ -1296,7 +1296,7 @@ The JavaScript Object type is described in
[Section 6.1.7](https://tc39.github.io/ecma262/#sec-object-type) of the
ECMAScript Language Specification.
-#### *napi_create_symbol*
+#### napi_create_symbol
@@ -1319,7 +1319,7 @@ The JavaScript Symbol type is described in
[Section 19.4](https://tc39.github.io/ecma262/#sec-symbol-objects)
of the ECMAScript Language Specification.
-#### *napi_create_typedarray*
+#### napi_create_typedarray
@@ -1355,7 +1355,7 @@ JavaScript TypedArray Objects are described in
of the ECMAScript Language Specification.
-#### *napi_create_dataview*
+#### napi_create_dataview
@@ -1389,7 +1389,7 @@ JavaScript DataView Objects are described in
[Section 24.3][] of the ECMAScript Language Specification.
### Functions to convert from C types to N-API
-#### *napi_create_int32*
+#### napi_create_int32
@@ -1410,7 +1410,7 @@ The JavaScript Number type is described in
[Section 6.1.6](https://tc39.github.io/ecma262/#sec-ecmascript-language-types-number-type)
of the ECMAScript Language Specification.
-#### *napi_create_uint32*
+#### napi_create_uint32
@@ -1431,7 +1431,7 @@ The JavaScript Number type is described in
[Section 6.1.6](https://tc39.github.io/ecma262/#sec-ecmascript-language-types-number-type)
of the ECMAScript Language Specification.
-#### *napi_create_int64*
+#### napi_create_int64
@@ -1458,7 +1458,7 @@ outside the range of
[`Number.MAX_SAFE_INTEGER`](https://tc39.github.io/ecma262/#sec-number.max_safe_integer)
(2^53 - 1) will lose precision.
-#### *napi_create_double*
+#### napi_create_double
@@ -1479,7 +1479,7 @@ The JavaScript Number type is described in
[Section 6.1.6](https://tc39.github.io/ecma262/#sec-ecmascript-language-types-number-type)
of the ECMAScript Language Specification.
-#### *napi_create_string_latin1*
+#### napi_create_string_latin1
@@ -1504,7 +1504,7 @@ The JavaScript String type is described in
[Section 6.1.4](https://tc39.github.io/ecma262/#sec-ecmascript-language-types-string-type)
of the ECMAScript Language Specification.
-#### *napi_create_string_utf16*
+#### napi_create_string_utf16
@@ -1529,7 +1529,7 @@ The JavaScript String type is described in
[Section 6.1.4](https://tc39.github.io/ecma262/#sec-ecmascript-language-types-string-type)
of the ECMAScript Language Specification.
-#### *napi_create_string_utf8*
+#### napi_create_string_utf8
@@ -1555,7 +1555,7 @@ The JavaScript String type is described in
of the ECMAScript Language Specification.
### Functions to convert from N-API to C types
-#### *napi_get_array_length*
+#### napi_get_array_length
@@ -1578,7 +1578,7 @@ Array length is described in
[Section 22.1.4.1](https://tc39.github.io/ecma262/#sec-properties-of-array-instances-length)
of the ECMAScript Language Specification.
-#### *napi_get_arraybuffer_info*
+#### napi_get_arraybuffer_info
@@ -1606,7 +1606,7 @@ which can be used to guarantee control over the lifetime of the
ArrayBuffer. It's also safe to use the returned data buffer within the same
callback as long as there are no calls to other APIs that might trigger a GC.
-#### *napi_get_buffer_info*
+#### napi_get_buffer_info
@@ -1630,7 +1630,7 @@ and it's length.
*Warning*: Use caution while using this API since the underlying data buffer's
lifetime is not guaranteed if it's managed by the VM.
-#### *napi_get_prototype*
+#### napi_get_prototype
@@ -1648,7 +1648,7 @@ not the same as the function's `prototype` property).
Returns `napi_ok` if the API succeeded.
-#### *napi_get_typedarray_info*
+#### napi_get_typedarray_info
@@ -1680,7 +1680,7 @@ is managed by the VM
-#### *napi_get_dataview_info*
+#### napi_get_dataview_info
@@ -1708,7 +1708,7 @@ Returns `napi_ok` if the API succeeded.
This API returns various properties of a DataView.
-#### *napi_get_value_bool*
+#### napi_get_value_bool
@@ -1727,7 +1727,7 @@ passed in it returns `napi_boolean_expected`.
This API returns the C boolean primitive equivalent of the given JavaScript
Boolean.
-#### *napi_get_value_double*
+#### napi_get_value_double
@@ -1749,7 +1749,7 @@ This API returns the C double primitive equivalent of the given JavaScript
Number.
-#### *napi_get_value_external*
+#### napi_get_value_external
@@ -1769,7 +1769,7 @@ passed in it returns `napi_invalid_arg`.
This API retrieves the external data pointer that was previously passed to
`napi_create_external()`.
-#### *napi_get_value_int32*
+#### napi_get_value_int32
@@ -1792,7 +1792,7 @@ of the given JavaScript Number. If the number exceeds the range of the
bottom 32 bits. This can result in a large positive number becoming
a negative number if the value is > 2^31 -1.
-#### *napi_get_value_int64*
+#### napi_get_value_int64
@@ -1812,7 +1812,7 @@ is passed in it returns `napi_number_expected`.
This API returns the C int64 primitive equivalent of the given
JavaScript Number
-#### *napi_get_value_string_latin1*
+#### napi_get_value_string_latin1
@@ -1839,7 +1839,7 @@ is passed in it returns `napi_string_expected`.
This API returns the ISO-8859-1-encoded string corresponding the value passed
in.
-#### *napi_get_value_string_utf8*
+#### napi_get_value_string_utf8
@@ -1865,7 +1865,7 @@ is passed in it returns `napi_string_expected`.
This API returns the UTF8-encoded string corresponding the value passed in.
-#### *napi_get_value_string_utf16*
+#### napi_get_value_string_utf16
@@ -1891,7 +1891,7 @@ is passed in it returns `napi_string_expected`.
This API returns the UTF16-encoded string corresponding the value passed in.
-#### *napi_get_value_uint32*
+#### napi_get_value_uint32
@@ -1913,7 +1913,7 @@ This API returns the C primitive equivalent of the given `napi_value` as a
`uint32_t`.
### Functions to get global instances
-#### *napi_get_boolean*
+#### napi_get_boolean
@@ -1931,7 +1931,7 @@ Returns `napi_ok` if the API succeeded.
This API is used to return the JavaScript singleton object that is used to
represent the given boolean value
-#### *napi_get_global*
+#### napi_get_global
@@ -1946,7 +1946,7 @@ Returns `napi_ok` if the API succeeded.
This API returns the global Object.
-#### *napi_get_null*
+#### napi_get_null
@@ -1961,7 +1961,7 @@ Returns `napi_ok` if the API succeeded.
This API returns the null Object.
-#### *napi_get_undefined*
+#### napi_get_undefined
@@ -1989,7 +1989,7 @@ These APIs support doing one of the following:
2. Check the type of a JavaScript value
3. Check for equality between two JavaScript values
-### *napi_coerce_to_bool*
+### napi_coerce_to_bool
@@ -2010,7 +2010,7 @@ This API implements the abstract operation ToBoolean as defined in
of the ECMAScript Language Specification.
This API can be re-entrant if getters are defined on the passed-in Object.
-### *napi_coerce_to_number*
+### napi_coerce_to_number
@@ -2031,7 +2031,7 @@ This API implements the abstract operation ToNumber as defined in
of the ECMAScript Language Specification.
This API can be re-entrant if getters are defined on the passed-in Object.
-### *napi_coerce_to_object*
+### napi_coerce_to_object
@@ -2052,7 +2052,7 @@ This API implements the abstract operation ToObject as defined in
of the ECMAScript Language Specification.
This API can be re-entrant if getters are defined on the passed-in Object.
-### *napi_coerce_to_string*
+### napi_coerce_to_string
@@ -2073,7 +2073,7 @@ This API implements the abstract operation ToString as defined in
of the ECMAScript Language Specification.
This API can be re-entrant if getters are defined on the passed-in Object.
-### *napi_typeof*
+### napi_typeof
@@ -2094,7 +2094,7 @@ the object as defined in [Section 12.5.5][] of the ECMAScript Language
Specification. However, it has support for detecting an External value.
If `value` has a type that is invalid, an error is returned.
-### *napi_instanceof*
+### napi_instanceof
@@ -2119,7 +2119,7 @@ defined in
[Section 12.10.4](https://tc39.github.io/ecma262/#sec-instanceofoperator)
of the ECMAScript Language Specification.
-### *napi_is_array*
+### napi_is_array
@@ -2137,7 +2137,7 @@ This API represents invoking the `IsArray` operation on the object
as defined in [Section 7.2.2](https://tc39.github.io/ecma262/#sec-isarray)
of the ECMAScript Language Specification.
-### *napi_is_arraybuffer*
+### napi_is_arraybuffer
@@ -2153,7 +2153,7 @@ Returns `napi_ok` if the API succeeded.
This API checks if the Object passsed in is an array buffer.
-### *napi_is_buffer*
+### napi_is_buffer
@@ -2170,7 +2170,7 @@ Returns `napi_ok` if the API succeeded.
This API checks if the Object passsed in is a buffer.
-### *napi_is_error*
+### napi_is_error
@@ -2186,7 +2186,7 @@ Returns `napi_ok` if the API succeeded.
This API checks if the Object passsed in is an Error.
-### *napi_is_typedarray*
+### napi_is_typedarray
@@ -2204,7 +2204,7 @@ This API checks if the Object passsed in is a typed array.
-### *napi_is_dataview*
+### napi_is_dataview
@@ -2221,7 +2221,7 @@ Returns `napi_ok` if the API succeeded.
This API checks if the Object passed in is a DataView.
-### *napi_strict_equals*
+### napi_strict_equals
@@ -2375,7 +2375,7 @@ if (status != napi_ok) return status;
```
### Structures
-#### *napi_property_attributes*
+#### napi_property_attributes
```C
typedef enum {
napi_default = 0,
@@ -2409,7 +2409,7 @@ a static property on a class as opposed to an instance property, which is the
default. This is used only by [`napi_define_class`][]. It is ignored by
`napi_define_properties`.
-#### *napi_property_descriptor*
+#### napi_property_descriptor
```C
typedef struct {
// One of utf8name or name should be NULL.
@@ -2455,7 +2455,7 @@ this function is invoked.
See [`napi_property_attributes`](#n_api_napi_property_attributes).
### Functions
-#### *napi_get_property_names*
+#### napi_get_property_names
@@ -2476,7 +2476,7 @@ Returns `napi_ok` if the API succeeded.
This API returns the array of propertys for the Object passed in
-#### *napi_set_property*
+#### napi_set_property
@@ -2496,7 +2496,7 @@ Returns `napi_ok` if the API succeeded.
This API set a property on the Object passed in.
-#### *napi_get_property*
+#### napi_get_property
@@ -2517,7 +2517,7 @@ Returns `napi_ok` if the API succeeded.
This API gets the requested property from the Object passed in.
-#### *napi_has_property*
+#### napi_has_property
@@ -2538,7 +2538,7 @@ Returns `napi_ok` if the API succeeded.
This API checks if the Object passed in has the named property.
-#### *napi_delete_property*
+#### napi_delete_property
@@ -2560,7 +2560,7 @@ Returns `napi_ok` if the API succeeded.
This API attempts to delete the `key` own property from `object`.
-#### *napi_has_own_property*
+#### napi_has_own_property
@@ -2583,7 +2583,7 @@ be a string or a Symbol, or an error will be thrown. N-API will not perform any
conversion between data types.
-#### *napi_set_named_property*
+#### napi_set_named_property
@@ -2604,7 +2604,7 @@ Returns `napi_ok` if the API succeeded.
This method is equivalent to calling [`napi_set_property`][] with a `napi_value`
created from the string passed in as `utf8Name`
-#### *napi_get_named_property*
+#### napi_get_named_property
@@ -2625,7 +2625,7 @@ Returns `napi_ok` if the API succeeded.
This method is equivalent to calling [`napi_get_property`][] with a `napi_value`
created from the string passed in as `utf8Name`
-#### *napi_has_named_property*
+#### napi_has_named_property
@@ -2646,7 +2646,7 @@ Returns `napi_ok` if the API succeeded.
This method is equivalent to calling [`napi_has_property`][] with a `napi_value`
created from the string passed in as `utf8Name`
-#### *napi_set_element*
+#### napi_set_element
@@ -2666,7 +2666,7 @@ Returns `napi_ok` if the API succeeded.
This API sets and element on the Object passed in.
-#### *napi_get_element*
+#### napi_get_element
@@ -2686,7 +2686,7 @@ Returns `napi_ok` if the API succeeded.
This API gets the element at the requested index.
-#### *napi_has_element*
+#### napi_has_element
@@ -2707,7 +2707,7 @@ Returns `napi_ok` if the API succeeded.
This API returns if the Object passed in has an element at the
requested index.
-#### *napi_delete_element*
+#### napi_delete_element
@@ -2728,7 +2728,7 @@ Returns `napi_ok` if the API succeeded.
This API attempts to delete the specified `index` from `object`.
-#### *napi_define_properties*
+#### napi_define_properties
@@ -2771,7 +2771,7 @@ like a regular JavaScript function call, or as a constructor
function.
-### *napi_call_function*
+### napi_call_function
@@ -2837,7 +2837,7 @@ status = napi_get_value_int32(env, return_val, &result);
if (status != napi_ok) return;
```
-### *napi_create_function*
+### napi_create_function
@@ -2905,7 +2905,7 @@ myaddon.sayHello();
`NAPI_MODULE` in the earlier snippet but the name of the target in `binding.gyp`
responsible for creating the `.node` file.
-### *napi_get_cb_info*
+### napi_get_cb_info
@@ -2935,7 +2935,7 @@ Returns `napi_ok` if the API succeeded.
This method is used within a callback function to retrieve details about the
call like the arguments and the `this` pointer from a given callback info.
-### *napi_get_new_target*
+### napi_get_new_target
@@ -2954,7 +2954,7 @@ Returns `napi_ok` if the API succeeded.
This API returns the `new.target` of the constructor call. If the current
callback is not a constructor call, the result is `NULL`.
-### *napi_new_instance*
+### napi_new_instance
@@ -3049,7 +3049,7 @@ if (is_instance) {
The reference must be freed once it is no longer needed.
-### *napi_define_class*
+### napi_define_class
@@ -3105,7 +3105,7 @@ case, to prevent the function value from being garbage-collected, create a
persistent reference to it using [`napi_create_reference`][] and ensure the
reference count is kept >= 1.
-### *napi_wrap*
+### napi_wrap
@@ -3167,7 +3167,7 @@ native instance associated with it by virtue of a previous call to
another native instance with the given object, call `napi_remove_wrap()` on it
first.
-### *napi_unwrap*
+### napi_unwrap
@@ -3192,7 +3192,7 @@ method or accessor, then the `this` argument to the callback is the wrapper
object; the wrapped C++ instance that is the target of the call can be obtained
then by calling `napi_unwrap()` on the wrapper object.
-### *napi_remove_wrap*
+### napi_remove_wrap
@@ -3283,7 +3283,7 @@ napi_status napi_create_async_work(napi_env env,
- `[in] env`: The environment that the API is invoked under.
- `[in] async_resource`: An optional object associated with the async work
that will be passed to possible async_hooks [`init` hooks][].
-- `[in] async_resource_name`: An identifier for the kind of resource that is
+- `[in] async_resource_name`: Identifier for the kind of resource that is
being provided for diagnostic information exposed by the `async_hooks` API.
- `[in] execute`: The native function which should be called to excute
the logic asynchronously.
@@ -3368,7 +3368,7 @@ scenario, because with those the async execution still happens on the main
event loop. When using any other async mechanism, the following APIs are
necessary to ensure an async operation is properly tracked by the runtime.
-### *napi_async_init**
+### napi_async_init
@@ -3382,14 +3382,14 @@ napi_status napi_async_init(napi_env env,
- `[in] env`: The environment that the API is invoked under.
- `[in] async_resource`: An optional object associated with the async work
that will be passed to possible `async_hooks` [`init` hooks][].
-- `[in] async_resource_name`: Required identifier for the kind of resource
+- `[in] async_resource_name`: Identifier for the kind of resource
that is being provided for diagnostic information exposed by the
`async_hooks` API.
- `[out] result`: The initialized async context.
Returns `napi_ok` if the API succeeded.
-### *napi_async_destroy**
+### napi_async_destroy
@@ -3403,7 +3403,7 @@ napi_status napi_async_destroy(napi_env env,
Returns `napi_ok` if the API succeeded.
-### *napi_make_callback*
+### napi_make_callback
-* `path` {String} Path the server should listen to. See
+* `path` {string} Path the server should listen to. See
[Identifying paths for IPC connections][].
* `backlog` {number} Common parameter of [`server.listen()`][] functions.
* `callback` {Function} Common parameter of [`server.listen()`][] functions.
diff --git a/doc/api/perf_hooks.md b/doc/api/perf_hooks.md
index 5ce94624cb9d22..6d98d25af37cbc 100644
--- a/doc/api/perf_hooks.md
+++ b/doc/api/perf_hooks.md
@@ -29,6 +29,14 @@ added: v8.5.0
The `Performance` provides access to performance metric data. A single
instance of this class is provided via the `performance` property.
+### performance.clearEntries(name)
+
+
+Remove all performance entry objects with `entryType` equal to `name` from the
+Performance Timeline.
+
### performance.clearFunctions([name])
+
+Remove all performance entry objects with `entryType` equal to `gc` from the
+Performance Timeline.
+
### performance.clearMarks([name])
+
+* {boolean}
+
+The `process.noDeprecation` property indicates whether the `--no-deprecation`
+flag is set on the current Node.js process. See the documentation for
+the [`warning` event][process_warning] and the
+[`emitWarning` method][process_emit_warning] for more information about this
+flag's behavior.
+
## process.pid
+
+* {boolean}
+
+The `process.throwDeprecation` property indicates whether the
+`--throw-deprecation` flag is set on the current Node.js process. See the
+documentation for the [`warning` event][process_warning] and the
+[`emitWarning` method][process_emit_warning] for more information about this
+flag's behavior.
+
## process.title
+
+* {boolean}
+
+The `process.traceDeprecation` property indicates whether the
+`--trace-deprecation` flag is set on the current Node.js process. See the
+documentation for the [`warning` event][process_warning] and the
+[`emitWarning` method][process_emit_warning] for more information about this
+flag's behavior.
+
## process.umask([mask])
```js
-{
- http_parser: '2.3.0',
- node: '1.1.1',
- v8: '6.1.534.42-node.0',
- uv: '1.3.0',
- zlib: '1.2.8',
- ares: '1.10.0-DEV',
- modules: '43',
- icu: '55.1',
- openssl: '1.0.1k',
- unicode: '8.0',
- cldr: '29.0',
+{ http_parser: '2.7.0',
+ node: '8.9.0',
+ v8: '6.3.292.48-node.6',
+ uv: '1.18.0',
+ zlib: '1.2.11',
+ ares: '1.13.0',
+ modules: '60',
+ nghttp2: '1.29.0',
+ napi: '2',
+ openssl: '1.0.2n',
+ icu: '60.1',
+ unicode: '10.0',
+ cldr: '32.0',
tz: '2016b' }
```
@@ -1977,5 +2030,6 @@ cases:
[Readable]: stream.html#stream_readable_streams
[Signal Events]: #process_signal_events
[Stream compatibility]: stream.html#stream_compatibility_with_older_node_js_versions
+[Supported platforms]: https://github.com/nodejs/node/blob/master/BUILDING.md#supported-platforms-1
[TTY]: tty.html#tty_tty
[Writable]: stream.html#stream_writable_streams
diff --git a/doc/api/readline.md b/doc/api/readline.md
index 8619d75132836d..7ba1277dce2d9c 100644
--- a/doc/api/readline.md
+++ b/doc/api/readline.md
@@ -60,8 +60,8 @@ The `'close'` event is emitted when one of the following occur:
The listener function is called without passing any arguments.
-The `readline.Interface` instance should be considered to be "finished" once
-the `'close'` event is emitted.
+The `readline.Interface` instance is finished once the `'close'` event is
+emitted.
### Event: 'line'
-The `replServer.clearBufferedComand()` method clears any command that has been
+The `replServer.clearBufferedCommand()` method clears any command that has been
buffered but not yet executed. This method is primarily intended to be
called from within the action function for commands registered using the
`replServer.defineCommand()` method.
diff --git a/doc/api/stream.md b/doc/api/stream.md
index 87c43dca8c0913..c0351a3679445e 100644
--- a/doc/api/stream.md
+++ b/doc/api/stream.md
@@ -1474,7 +1474,7 @@ added: v8.0.0
argument.
The `_destroy()` method is called by [`writable.destroy()`][writable-destroy].
-It can be overriden by child classes but it **must not** be called directly.
+It can be overridden by child classes but it **must not** be called directly.
#### writable.\_final(callback)
+
+* **Version**:
+* **Platform**:
+* **Subsystem**:
+
+
+```
+
+If you believe that you have uncovered a bug in Node.js, please fill out this
+form, following the template to the best of your ability. Do not worry if you
+cannot answer every detail, just fill in what you can.
+
+The two most important pieces of information we need in order to properly
+evaluate the report are a description of the behavior you are seeing and a
+simple test case we can use to recreate the problem on our own. If we cannot
+recreate the issue, it becomes impossible for us to fix.
+
+In order to rule out the possibility of bugs introduced by userland code, test
+cases should be limited, as much as possible, to using *only* Node.js APIs.
+If the bug occurs only when you're using a specific userland module, there is
+a very good chance that either (a) the module has a bug or (b) something in
+Node.js changed that broke the module.
+
+See [How to create a Minimal, Complete, and Verifiable example](https://stackoverflow.com/help/mcve).
+
+## Triaging a Bug Report
+
+Once an issue has been opened, it is not uncommon for there to be discussion
+around it. Some contributors may have differing opinions about the issue,
+including whether the behavior being seen is a bug or a feature. This discussion
+is part of the process and should be kept focused, helpful, and professional.
+
+Short, clipped responses—that provide neither additional context nor supporting
+detail—are not helpful or professional. To many, such responses are simply
+annoying and unfriendly.
+
+Contributors are encouraged to help one another make forward progress as much
+as possible, empowering one another to solve issues collaboratively. If you
+choose to comment on an issue that you feel either is not a problem that needs
+to be fixed, or if you encounter information in an issue that you feel is
+incorrect, explain *why* you feel that way with additional supporting context,
+and be willing to be convinced that you may be wrong. By doing so, we can often
+reach the correct outcome much faster.
+
+## Resolving a Bug Report
+
+In the vast majority of cases, issues are resolved by opening a Pull Request.
+The process for opening and reviewing a Pull Request is similar to that of
+opening and triaging issues, but carries with it a necessary review and approval
+workflow that ensures that the proposed changes meet the minimal quality and
+functional guidelines of the Node.js project.
+
+[Node.js help repository]: https://github.com/nodejs/help/issues
+[Technical Steering Committee (TSC) repository]: https://github.com/nodejs/TSC/issues
diff --git a/doc/guides/contributing/pull-requests.md b/doc/guides/contributing/pull-requests.md
new file mode 100644
index 00000000000000..5812c8c54645e2
--- /dev/null
+++ b/doc/guides/contributing/pull-requests.md
@@ -0,0 +1,663 @@
+# Pull Requests
+
+There are two fundamental components of the Pull Request process: one concrete
+and technical, and one more process oriented. The concrete and technical
+component involves the specific details of setting up your local environment
+so that you can make the actual changes. This is where we will start.
+
+* [Dependencies](#dependencies)
+* [Setting up your local environment](#setting-up-your-local-environment)
+ * [Step 1: Fork](#step-1-fork)
+ * [Step 2: Branch](#step-2-branch)
+* [The Process of Making Changes](#the-process-of-making-changes)
+ * [Step 3: Code](#step-3-code)
+ * [Step 4: Commit](#step-4-commit)
+ * [Commit message guidelines](#commit-message-guidelines)
+ * [Step 5: Rebase](#step-5-rebase)
+ * [Step 6: Test](#step-6-test)
+ * [Test Coverage](#test-coverage)
+ * [Step 7: Push](#step-7-push)
+ * [Step 8: Opening the Pull Request](#step-8-opening-the-pull-request)
+ * [Step 9: Discuss and Update](#step-9-discuss-and-update)
+ * [Approval and Request Changes Workflow](#approval-and-request-changes-workflow)
+ * [Step 10: Landing](#step-10-landing)
+* [Reviewing Pull Requests](#reviewing-pull-requests)
+ * [Review a bit at a time](#review-a-bit-at-a-time)
+ * [Be aware of the person behind the code](#be-aware-of-the-person-behind-the-code)
+ * [Respect the minimum wait time for comments](#respect-the-minimum-wait-time-for-comments)
+ * [Abandoned or Stalled Pull Requests](#abandoned-or-stalled-pull-requests)
+ * [Approving a change](#approving-a-change)
+ * [Accept that there are different opinions about what belongs in Node.js](#accept-that-there-are-different-opinions-about-what-belongs-in-nodejs)
+ * [Performance is not everything](#performance-is-not-everything)
+ * [Continuous Integration Testing](#continuous-integration-testing)
+* [Additional Notes](#additional-notes)
+ * [Commit Squashing](#commit-squashing)
+ * [Getting Approvals for your Pull Request](#getting-approvals-for-your-pull-request)
+ * [CI Testing](#ci-testing)
+ * [Waiting Until the Pull Request Gets Landed](#waiting-until-the-pull-request-gets-landed)
+ * [Check Out the Collaborator's Guide](#check-out-the-collaborators-guide)
+
+## Dependencies
+
+Node.js has several bundled dependencies in the *deps/* and the *tools/*
+directories that are not part of the project proper. Changes to files in those
+directories should be sent to their respective projects. Do not send a patch to
+Node.js. We cannot accept such patches.
+
+In case of doubt, open an issue in the
+[issue tracker](https://github.com/nodejs/node/issues/) or contact one of the
+[project Collaborators](https://github.com/nodejs/node/#current-project-team-members).
+Node.js has two IRC channels:
+[#node.js](https://webchat.freenode.net/?channels=node.js) for general help and
+questions, and
+[#node-dev](https://webchat.freenode.net/?channels=node-dev) for development of
+Node.js core specifically.
+
+## Setting up your local environment
+
+To get started, you will need to have `git` installed locally. Depending on
+your operating system, there are also a number of other dependencies required.
+These are detailed in the [Building guide][].
+
+Once you have `git` and are sure you have all of the necessary dependencies,
+it's time to create a fork.
+
+Before getting started, it is recommended to configure `git` so that it knows
+who you are:
+
+```text
+$ git config --global user.name "J. Random User"
+$ git config --global user.email "j.random.user@example.com"
+```
+Please make sure this local email is also added to your
+[GitHub email list](https://github.com/settings/emails) so that your commits
+will be properly associated with your account and you will be promoted
+to Contributor once your first commit is landed.
+
+### Step 1: Fork
+
+Fork the project [on GitHub](https://github.com/nodejs/node) and clone your fork
+locally.
+
+```text
+$ git clone git@github.com:username/node.git
+$ cd node
+$ git remote add upstream https://github.com/nodejs/node.git
+$ git fetch upstream
+```
+
+### Step 2: Branch
+
+As a best practice to keep your development environment as organized as
+possible, create local branches to work within. These should also be created
+directly off of the `master` branch.
+
+```text
+$ git checkout -b my-branch -t upstream/master
+```
+
+## The Process of Making Changes
+
+### Step 3: Code
+
+The vast majority of Pull Requests opened against the `nodejs/node`
+repository include changes to either the C/C++ code contained in the `src`
+directory, the JavaScript code contained in the `lib` directory, the
+documentation in `doc/api` or tests within the `test` directory.
+
+If you are modifying code, please be sure to run `make lint` from time to
+time to ensure that the changes follow the Node.js code style guide.
+
+Any documentation you write (including code comments and API documentation)
+should follow the [Style Guide](doc/STYLE_GUIDE.md). Code samples included
+in the API docs will also be checked when running `make lint` (or
+`vcbuild.bat lint` on Windows).
+
+For contributing C++ code, you may want to look at the
+[C++ Style Guide](CPP_STYLE_GUIDE.md).
+
+### Step 4: Commit
+
+It is a recommended best practice to keep your changes as logically grouped
+as possible within individual commits. There is no limit to the number of
+commits any single Pull Request may have, and many contributors find it easier
+to review changes that are split across multiple commits.
+
+```text
+$ git add my/changed/files
+$ git commit
+```
+
+Note that multiple commits often get squashed when they are landed (see the
+notes about [commit squashing](#commit-squashing)).
+
+#### Commit message guidelines
+
+A good commit message should describe what changed and why.
+
+1. The first line should:
+ - contain a short description of the change (preferably 50 characters or less,
+ and no more than 72 characters)
+ - be entirely in lowercase with the exception of proper nouns, acronyms, and
+ the words that refer to code, like function/variable names
+ - be prefixed with the name of the changed subsystem and start with an
+ imperative verb. Check the output of `git log --oneline files/you/changed` to
+ find out what subsystems your changes touch.
+
+ Examples:
+ - `net: add localAddress and localPort to Socket`
+ - `src: fix typos in node_lttng_provider.h`
+
+
+2. Keep the second line blank.
+3. Wrap all other lines at 72 columns.
+
+4. If your patch fixes an open issue, you can add a reference to it at the end
+of the log. Use the `Fixes:` prefix and the full issue URL. For other references
+use `Refs:`.
+
+ Examples:
+ - `Fixes: https://github.com/nodejs/node/issues/1337`
+ - `Refs: http://eslint.org/docs/rules/space-in-parens.html`
+ - `Refs: https://github.com/nodejs/node/pull/3615`
+
+5. If your commit introduces a breaking change (`semver-major`), it should
+contain an explanation about the reason of the breaking change, which
+situation would trigger the breaking change and what is the exact change.
+
+Breaking changes will be listed in the wiki with the aim to make upgrading
+easier. Please have a look at [Breaking Changes](https://github.com/nodejs/node/wiki/Breaking-changes-between-v4-LTS-and-v6-LTS)
+for the level of detail that's suitable.
+
+Sample complete commit message:
+
+```txt
+subsystem: explain the commit in one line
+
+Body of commit message is a few lines of text, explaining things
+in more detail, possibly giving some background about the issue
+being fixed, etc.
+
+The body of the commit message can be several paragraphs, and
+please do proper word-wrap and keep columns shorter than about
+72 characters or so. That way, `git log` will show things
+nicely even when it is indented.
+
+Fixes: https://github.com/nodejs/node/issues/1337
+Refs: http://eslint.org/docs/rules/space-in-parens.html
+```
+
+If you are new to contributing to Node.js, please try to do your best at
+conforming to these guidelines, but do not worry if you get something wrong.
+One of the existing contributors will help get things situated and the
+contributor landing the Pull Request will ensure that everything follows
+the project guidelines.
+
+See [core-validate-commit](https://github.com/evanlucas/core-validate-commit) -
+A utility that ensures commits follow the commit formatting guidelines.
+
+### Step 5: Rebase
+
+As a best practice, once you have committed your changes, it is a good idea
+to use `git rebase` (not `git merge`) to synchronize your work with the main
+repository.
+
+```text
+$ git fetch upstream
+$ git rebase upstream/master
+```
+
+This ensures that your working branch has the latest changes from `nodejs/node`
+master.
+
+### Step 6: Test
+
+Bug fixes and features should always come with tests. A
+[guide for writing tests in Node.js][] has been
+provided to make the process easier. Looking at other tests to see how they
+should be structured can also help.
+
+The `test` directory within the `nodejs/node` repository is complex and it is
+often not clear where a new test file should go. When in doubt, add new tests
+to the `test/parallel/` directory and the right location will be sorted out
+later.
+
+Before submitting your changes in a Pull Request, always run the full Node.js
+test suite. To run the tests (including code linting) on Unix / macOS:
+
+```text
+$ ./configure && make -j4 test
+```
+
+And on Windows:
+
+```text
+> vcbuild test
+```
+
+(See the [Building guide][] for more details.)
+
+Make sure the linter does not report any issues and that all tests pass. Please
+do not submit patches that fail either check.
+
+If you want to run the linter without running tests, use
+`make lint`/`vcbuild lint`. It will run both JavaScript linting and
+C++ linting.
+
+If you are updating tests and just want to run a single test to check it:
+
+```text
+$ python tools/test.py -J --mode=release parallel/test-stream2-transform
+```
+
+You can execute the entire suite of tests for a given subsystem
+by providing the name of a subsystem:
+
+```text
+$ python tools/test.py -J --mode=release child-process
+```
+
+If you want to check the other options, please refer to the help by using
+the `--help` option.
+
+```text
+$ python tools/test.py --help
+```
+
+You can usually run tests directly with node:
+
+```text
+$ ./node ./test/parallel/test-stream2-transform.js
+```
+
+Remember to recompile with `make -j4` in between test runs if you change code in
+the `lib` or `src` directories.
+
+#### Test Coverage
+
+It's good practice to ensure any code you add or change is covered by tests.
+You can do so by running the test suite with coverage enabled:
+
+```text
+$ ./configure --coverage && make coverage
+```
+
+A detailed coverage report will be written to `coverage/index.html` for
+JavaScript coverage and to `coverage/cxxcoverage.html` for C++ coverage.
+
+_Note that generating a test coverage report can take several minutes._
+
+To collect coverage for a subset of tests you can set the `CI_JS_SUITES` and
+`CI_NATIVE_SUITES` variables:
+
+```text
+$ CI_JS_SUITES=child-process CI_NATIVE_SUITES= make coverage
+```
+
+The above command executes tests for the `child-process` subsystem and
+outputs the resulting coverage report.
+
+Running tests with coverage will create and modify several directories
+and files. To clean up afterwards, run:
+
+```text
+make coverage-clean
+./configure && make -j4
+```
+
+### Step 7: Push
+
+Once you are sure your commits are ready to go, with passing tests and linting,
+begin the process of opening a Pull Request by pushing your working branch to
+your fork on GitHub.
+
+```text
+$ git push origin my-branch
+```
+
+### Step 8: Opening the Pull Request
+
+From within GitHub, opening a new Pull Request will present you with a template
+that should be filled out:
+
+```markdown
+
+
+#### Checklist
+
+
+- [ ] `make -j4 test` (UNIX), or `vcbuild test` (Windows) passes
+- [ ] tests and/or benchmarks are included
+- [ ] documentation is changed or added
+- [ ] commit message follows [commit guidelines](https://github.com/nodejs/node/blob/master/doc/guides/contributing/pull-requests.md#commit-message-guidelines)
+
+#### Affected core subsystem(s)
+
+```
+
+Please try to do your best at filling out the details, but feel free to skip
+parts if you're not sure what to put.
+
+Once opened, Pull Requests are usually reviewed within a few days.
+
+### Step 9: Discuss and Update
+
+You will probably get feedback or requests for changes to your Pull Request.
+This is a big part of the submission process so don't be discouraged! Some
+contributors may sign off on the Pull Request right away, others may have
+more detailed comments or feedback. This is a necessary part of the process
+in order to evaluate whether the changes are correct and necessary.
+
+To make changes to an existing Pull Request, make the changes to your local
+branch, add a new commit with those changes, and push those to your fork.
+GitHub will automatically update the Pull Request.
+
+```text
+$ git add my/changed/files
+$ git commit
+$ git push origin my-branch
+```
+
+It is also frequently necessary to synchronize your Pull Request with other
+changes that have landed in `master` by using `git rebase`:
+
+```text
+$ git fetch --all
+$ git rebase origin/master
+$ git push --force-with-lease origin my-branch
+```
+
+**Important:** The `git push --force-with-lease` command is one of the few ways
+to delete history in `git`. Before you use it, make sure you understand the
+risks. If in doubt, you can always ask for guidance in the Pull Request or on
+[IRC in the #node-dev channel][].
+
+If you happen to make a mistake in any of your commits, do not worry. You can
+amend the last commit (for example if you want to change the commit log).
+
+```text
+$ git add any/changed/files
+$ git commit --amend
+$ git push --force-with-lease origin my-branch
+```
+
+There are a number of more advanced mechanisms for managing commits using
+`git rebase` that can be used, but are beyond the scope of this guide.
+
+Feel free to post a comment in the Pull Request to ping reviewers if you are
+awaiting an answer on something. If you encounter words or acronyms that
+seem unfamiliar, refer to this
+[glossary](https://sites.google.com/a/chromium.org/dev/glossary).
+
+#### Approval and Request Changes Workflow
+
+All Pull Requests require "sign off" in order to land. Whenever a contributor
+reviews a Pull Request they may find specific details that they would like to
+see changed or fixed. These may be as simple as fixing a typo, or may involve
+substantive changes to the code you have written. In general, such requests
+are intended to be helpful, but at times may come across as abrupt or unhelpful,
+especially requests to change things that do not include concrete suggestions
+on *how* to change them.
+
+Try not to be discouraged. If you feel that a particular review is unfair,
+say so, or contact one of the other contributors in the project and seek their
+input. Often such comments are the result of the reviewer having only taken a
+short amount of time to review and are not ill-intended. Such issues can often
+be resolved with a bit of patience. That said, reviewers should be expected to
+be helpful in their feedback, and feedback that is simply vague, dismissive and
+unhelpful is likely safe to ignore.
+
+### Step 10: Landing
+
+In order to land, a Pull Request needs to be reviewed and [approved][] by
+at least one Node.js Collaborator and pass a
+[CI (Continuous Integration) test run][]. After that, as long as there are no
+objections from other contributors, the Pull Request can be merged. If you find
+your Pull Request waiting longer than you expect, see the
+[notes about the waiting time](#waiting-until-the-pull-request-gets-landed).
+
+When a collaborator lands your Pull Request, they will post
+a comment to the Pull Request page mentioning the commit(s) it
+landed as. GitHub often shows the Pull Request as `Closed` at this
+point, but don't worry. If you look at the branch you raised your
+Pull Request against (probably `master`), you should see a commit with
+your name on it. Congratulations and thanks for your contribution!
+
+## Reviewing Pull Requests
+
+All Node.js contributors who choose to review and provide feedback on Pull
+Requests have a responsibility to both the project and the individual making the
+contribution. Reviews and feedback must be helpful, insightful, and geared
+towards improving the contribution as opposed to simply blocking it. If there
+are reasons why you feel the PR should not land, explain what those are. Do not
+expect to be able to block a Pull Request from advancing simply because you say
+"No" without giving an explanation. Be open to having your mind changed. Be open
+to working with the contributor to make the Pull Request better.
+
+Reviews that are dismissive or disrespectful of the contributor or any other
+reviewers are strictly counter to the [Code of Conduct][].
+
+When reviewing a Pull Request, the primary goals are for the codebase to improve
+and for the person submitting the request to succeed. Even if a Pull Request
+does not land, the submitters should come away from the experience feeling like
+their effort was not wasted or unappreciated. Every Pull Request from a new
+contributor is an opportunity to grow the community.
+
+### Review a bit at a time
+
+Do not overwhelm new contributors.
+
+It is tempting to micro-optimize and make everything about relative performance,
+perfect grammar, or exact style matches. Do not succumb to that temptation.
+
+Focus first on the most significant aspects of the change:
+
+1. Does this change make sense for Node.js?
+2. Does this change make Node.js better, even if only incrementally?
+3. Are there clear bugs or larger scale issues that need attending to?
+4. Is the commit message readable and correct? If it contains a breaking change, is it clear enough?
+
+When changes are necessary, *request* them, do not *demand* them, and do not
+assume that the submitter already knows how to add a test or run a benchmark.
+
+Specific performance optimization techniques, coding styles and conventions
+change over time. The first impression you give to a new contributor never does.
+
+Nits (requests for small changes that are not essential) are fine, but try to
+avoid stalling the Pull Request. Most nits can typically be fixed by the
+Node.js Collaborator landing the Pull Request but they can also be an
+opportunity for the contributor to learn a bit more about the project.
+
+It is always good to clearly indicate nits when you comment: e.g.
+`Nit: change foo() to bar(). But this is not blocking.`
+
+### Be aware of the person behind the code
+
+Be aware that *how* you communicate requests and reviews in your feedback can
+have a significant impact on the success of the Pull Request. Yes, we may land
+a particular change that makes Node.js better, but the individual might just
+not want to have anything to do with Node.js ever again. The goal is not just
+having good code.
+
+### Respect the minimum wait time for comments
+
+There is a minimum waiting time which we try to respect for non-trivial
+changes, so that people who may have important input in such a distributed
+project are able to respond.
+
+For non-trivial changes, Pull Requests must be left open for *at least* 48
+hours during the week, and 72 hours on a weekend. In most cases, when the
+PR is relatively small and focused on a narrow set of changes, these periods
+provide more than enough time to adequately review. Sometimes changes take far
+longer to review, or need more specialized review from subject matter experts.
+When in doubt, do not rush.
+
+Trivial changes, typically limited to small formatting changes or fixes to
+documentation, may be landed within the minimum 48 hour window.
+
+### Abandoned or Stalled Pull Requests
+
+If a Pull Request appears to be abandoned or stalled, it is polite to first
+check with the contributor to see if they intend to continue the work before
+checking if they would mind if you took it over (especially if it just has
+nits left). When doing so, it is courteous to give the original contributor
+credit for the work they started (either by preserving their name and email
+address in the commit log, or by using an `Author: ` meta-data tag in the
+commit).
+
+### Approving a change
+
+Any Node.js core Collaborator (any GitHub user with commit rights in the
+`nodejs/node` repository) is authorized to approve any other contributor's
+work. Collaborators are not permitted to approve their own Pull Requests.
+
+Collaborators indicate that they have reviewed and approve of the changes in
+a Pull Request either by using GitHub's Approval Workflow, which is preferred,
+or by leaving an `LGTM` ("Looks Good To Me") comment.
+
+When explicitly using the "Changes requested" component of the GitHub Approval
+Workflow, show empathy. That is, do not be rude or abrupt with your feedback
+and offer concrete suggestions for improvement, if possible. If you're not
+sure *how* a particular change can be improved, say so.
+
+Most importantly, after leaving such requests, it is courteous to make yourself
+available later to check whether your comments have been addressed.
+
+If you see that requested changes have been made, you can clear another
+collaborator's `Changes requested` review.
+
+Change requests that are vague, dismissive, or unconstructive may also be
+dismissed if requests for greater clarification go unanswered within a
+reasonable period of time.
+
+If you do not believe that the Pull Request should land at all, use
+`Changes requested` to indicate that you are considering some of your comments
+to block the PR from landing. When doing so, explain *why* you believe the
+Pull Request should not land along with an explanation of what may be an
+acceptable alternative course, if any.
+
+### Accept that there are different opinions about what belongs in Node.js
+
+Opinions on this vary, even among the members of the Technical Steering
+Committee.
+
+One general rule of thumb is that if Node.js itself needs it (due to historic
+or functional reasons), then it belongs in Node.js. For instance, `url`
+parsing is in Node.js because of HTTP protocol support.
+
+Also, functionality that either cannot be implemented outside of core in any
+reasonable way, or only with significant pain, belongs in core.
+
+It is not uncommon for contributors to suggest new features they feel would
+make Node.js better. These may or may not make sense to add, but as with all
+changes, be courteous in how you communicate your stance on these. Comments
+that make the contributor feel they should have "known better", or that
+ridicule them for even trying, run counter to the [Code of Conduct][].
+
+### Performance is not everything
+
+Node.js has always optimized for speed of execution. If a particular change
+can be shown to make some part of Node.js faster, it's quite likely to be
+accepted. Claims that a particular Pull Request will make things faster will
+almost always be met by requests for performance [benchmark results][] that
+demonstrate the improvement.
+
+That said, performance is not the only factor to consider. Node.js also
+optimizes in favor of not breaking existing code in the ecosystem, and not
+changing working functional code just for the sake of changing.
+
+If a particular Pull Request introduces a performance or functional
+regression, rather than simply rejecting the Pull Request, take the time to
+work *with* the contributor on improving the change. Offer feedback and
+advice on what would make the Pull Request acceptable, and do not assume that
+the contributor should already know how to do that. Be explicit in your
+feedback.
+
+### Continuous Integration Testing
+
+All Pull Requests that contain changes to code must be run through
+continuous integration (CI) testing at [https://ci.nodejs.org/][].
+
+Only Node.js core Collaborators with commit rights to the `nodejs/node`
+repository may start a CI testing run. The specific details of how to do
+this are included in the new Collaborator [Onboarding guide][].
+
+Ideally, the code change will pass ("be green") on all platform configurations
+supported by Node.js (there are over 30 platform configurations currently).
+This means that all tests pass and there are no linting errors. In reality,
+however, it is not uncommon for the CI infrastructure itself to fail on
+specific platforms or for so-called "flaky" tests to fail ("be red"). It is
+vital to visually inspect the results of all failed ("red") tests to determine
+whether the failure was caused by the changes in the Pull Request.
+
+## Additional Notes
+
+### Commit Squashing
+
+When the commits in your Pull Request land, they may be squashed
+into one commit per logical change. Metadata will be added to the commit
+message (including links to the Pull Request, links to relevant issues,
+and the names of the reviewers). The commit history of your Pull Request,
+however, will stay intact on the Pull Request page.
+
+For the size of "one logical change",
+[0b5191f](https://github.com/nodejs/node/commit/0b5191f15d0f311c804d542b67e2e922d98834f8)
+can be a good example. It touches the implementation, the documentation,
+and the tests, but is still one logical change. In general, the tests should
+always pass when each individual commit lands on the master branch.
+
+### Getting Approvals for Your Pull Request
+
+A Pull Request is approved either by saying LGTM, which stands for
+"Looks Good To Me", or by using GitHub's Approve button.
+GitHub's Pull Request review feature can be used during the process.
+For more information, check out
+[the video tutorial](https://www.youtube.com/watch?v=HW0RPaJqm4g)
+or [the official documentation](https://help.github.com/articles/reviewing-changes-in-pull-requests/).
+
+After you push new changes to your branch, you need to get
+approval for these new changes again, even if GitHub shows "Approved"
+because the reviewers have hit the buttons before.
+
+### CI Testing
+
+Every Pull Request needs to be tested
+to make sure that it works on the platforms that Node.js
+supports. This is done by running the code through the CI system.
+
+Only a Collaborator can start a CI run. Usually one of them will do it
+for you as approvals for the Pull Request come in.
+If not, you can ask a Collaborator to start a CI run.
+
+### Waiting Until the Pull Request Gets Landed
+
+A Pull Request needs to stay open for at least 48 hours (72 hours on a
+weekend) from when it is submitted, even after it gets approved and
+passes the CI. This is to make sure that everyone has a chance to
+weigh in. If the changes are trivial, collaborators may decide it
+doesn't need to wait. A Pull Request may well take longer to be
+merged in. All these precautions are important because Node.js is
+widely used, so don't be discouraged!
+
+### Check Out the Collaborator's Guide
+
+If you want to know more about the code review and the landing process,
+you can take a look at the
+[collaborator's guide](https://github.com/nodejs/node/blob/master/COLLABORATOR_GUIDE.md).
+
+[approved]: #getting-approvals-for-your-pull-request
+[benchmark results]: ../writing-and-running-benchmarks.md
+[Building guide]: ../../../BUILDING.md
+[CI (Continuous Integration) test run]: #ci-testing
+[Code of Conduct]: https://github.com/nodejs/admin/blob/master/CODE_OF_CONDUCT.md
+[guide for writing tests in Node.js]: ../writing-tests.md
+[https://ci.nodejs.org/]: https://ci.nodejs.org/
+[IRC in the #node-dev channel]: https://webchat.freenode.net?channels=node-dev&uio=d4
+[Onboarding guide]: ../onboarding.md
diff --git a/doc/guides/maintaining-V8.md b/doc/guides/maintaining-V8.md
index d45fa072074aee..04bc155660105b 100644
--- a/doc/guides/maintaining-V8.md
+++ b/doc/guides/maintaining-V8.md
@@ -29,8 +29,7 @@ For example, at the time of this writing:
released as part of the Chromium **canary** builds. This branch will be
promoted to beta next when V8 5.5 ships as stable.
-All older branches are considered **abandoned**, and are not maintained by the
-V8 team.
+All older branches are abandoned and are not maintained by the V8 team.
### V8 merge process overview
@@ -144,9 +143,10 @@ includes the following branches1 :
-The versions of V8 used in Node.js v4.x and v6.x have already been abandoned by
-upstream V8. However, Node.js needs to continue supporting these branches for
-many months (Current branches) or several years (LTS branches).
+The versions of V8 used in Node.js v4.x, v6.x, and 8.x have already been
+abandoned by upstream V8. However, Node.js needs to continue supporting
+these branches for many months (Current branches) or several
+years (LTS branches).
## Maintenance Process
diff --git a/doc/guides/maintaining-the-build-files.md b/doc/guides/maintaining-the-build-files.md
index c9dd155dd80269..ca5ee090a69dcc 100644
--- a/doc/guides/maintaining-the-build-files.md
+++ b/doc/guides/maintaining-the-build-files.md
@@ -15,7 +15,7 @@ There are three main build files that may be directly run when building Node.js:
Makefile mentioned below is maintained separately by humans). For a detailed
guide on this script, see [configure](#configure).
- `vcbuild.bat`: A Windows Batch Script that locates build tools, provides a
- subset of the targets avilable in the [Makefile](#makefile), and a few targets
+ subset of the targets available in the [Makefile](#makefile), and a few targets
of its own. For a detailed guide on this script, see
[vcbuild.bat](#vcbuild.bat).
- `Makefile`: A Makefile that can be run with GNU Make. It provides a set of
diff --git a/doc/guides/using-internal-errors.md b/doc/guides/using-internal-errors.md
index 3ae11e901a62ba..90962757bb0bb2 100644
--- a/doc/guides/using-internal-errors.md
+++ b/doc/guides/using-internal-errors.md
@@ -119,8 +119,8 @@ likely be required.
### Class: errors.Error(key[, args...])
-* `key` {String} The static error identifier
-* `args...` {Any} Zero or more optional arguments
+* `key` {string} The static error identifier
+* `args...` {any} Zero or more optional arguments
```js
const errors = require('internal/errors');
@@ -139,8 +139,8 @@ The `myError` object will have a `code` property equal to the `key` and a
### Class: errors.TypeError(key[, args...])
-* `key` {String} The static error identifier
-* `args...` {Any} Zero or more optional arguments
+* `key` {string} The static error identifier
+* `args...` {any} Zero or more optional arguments
```js
const errors = require('internal/errors');
@@ -159,8 +159,8 @@ The `myError` object will have a `code` property equal to the `key` and a
### Class: errors.RangeError(key[, args...])
-* `key` {String} The static error identifier
-* `args...` {Any} Zero or more optional arguments
+* `key` {string} The static error identifier
+* `args...` {any} Zero or more optional arguments
```js
const errors = require('internal/errors');
@@ -179,8 +179,8 @@ The `myError` object will have a `code` property equal to the `key` and a
### Method: errors.message(key, args)
-* `key` {String} The static error identifier
+* `key` {string} The static error identifier
* `args` {Array} Zero or more optional arguments passed as an Array
-* Returns: {String}
+* Returns: {string}
Returns the formatted error message string for the given `key`.
diff --git a/doc/onboarding-extras.md b/doc/onboarding-extras.md
index 9b00b2cecfcbb3..4d389376c98d20 100644
--- a/doc/onboarding-extras.md
+++ b/doc/onboarding-extras.md
@@ -2,45 +2,45 @@
## Who to CC in issues
-| Subsystem | Maintainers |
-| --- | --- |
-| `benchmark/*` | @nodejs/benchmarking, @mscdex |
-| `bootstrap_node.js` | @fishrock123 |
-| `doc/*`, `*.md` | @nodejs/documentation |
-| `lib/assert` | @nodejs/testing |
-| `lib/async_hooks` | @nodejs/async\_hooks for bugs/reviews (+ @nodejs/diagnostics for API) |
-| `lib/buffer` | @nodejs/buffer |
-| `lib/child_process` | @bnoordhuis, @cjihrig |
-| `lib/cluster` | @bnoordhuis, @cjihrig, @mcollina |
-| `lib/{crypto,tls,https}` | @nodejs/crypto |
-| `lib/dgram` | @cjihrig, @mcollina |
-| `lib/domains` | @misterdjules |
-| `lib/fs`, `src/{fs,file}` | @nodejs/fs |
-| `lib/{_}http{*}` | @nodejs/http |
-| `lib/inspector.js`, `src/inspector_*` | @nodejs/v8-inspector |
-| `lib/internal/url`, `src/node_url` | @nodejs/url |
-| `lib/net` | @bnoordhuis, @indutny, @nodejs/streams |
-| `lib/repl` | @addaleax, @fishrock123 |
-| `lib/{_}stream{*}` | @nodejs/streams |
-| `lib/timers` | @fishrock123, @misterdjules |
-| `lib/util` | @bnoordhuis, @cjihrig, @evanlucas |
-| `lib/zlib` | @addaleax, @bnoordhuis, @indutny |
-| `src/async-wrap.*` | @nodejs/async\_hooks |
-| `src/node_api.*` | @nodejs/n-api |
-| `src/node_crypto.*` | @nodejs/crypto |
-| `test/*` | @nodejs/testing |
-| `tools/eslint`, `.eslintrc` | @not-an-aardvark, @silverwind, @trott |
-| build | @nodejs/build |
-| ES Modules | @bmeck, @Fishrock123, @guybedford, @MylesBorins, @targos |
-| GYP | @nodejs/gyp |
-| performance | @nodejs/performance |
-| platform specific | @nodejs/platform-{aix,arm,freebsd,macos,ppc,smartos,s390,windows} |
-| python code | @nodejs/python |
-| upgrading c-ares | @jbergstroem |
-| upgrading http-parser | @jbergstroem, @nodejs/http |
-| upgrading libuv | @saghul |
-| upgrading npm | @fishrock123, @MylesBorins |
-| upgrading V8 | @nodejs/v8, @nodejs/post-mortem |
+| Subsystem | Maintainers |
+| --- | --- |
+| `benchmark/*` | @nodejs/benchmarking, @mscdex |
+| `bootstrap_node.js` | @fishrock123 |
+| `doc/*`, `*.md` | @nodejs/documentation |
+| `lib/assert` | @nodejs/testing |
+| `lib/async_hooks` | @nodejs/async\_hooks for bugs/reviews (+ @nodejs/diagnostics for API) |
+| `lib/buffer` | @nodejs/buffer |
+| `lib/child_process` | @bnoordhuis, @cjihrig |
+| `lib/cluster` | @bnoordhuis, @cjihrig, @mcollina |
+| `lib/{crypto,tls,https}` | @nodejs/crypto |
+| `lib/dgram` | @cjihrig, @mcollina |
+| `lib/domains` | @misterdjules |
+| `lib/fs`, `src/{fs,file}` | @nodejs/fs |
+| `lib/{_}http{*}` | @nodejs/http |
+| `lib/inspector.js`, `src/inspector_*` | @nodejs/v8-inspector |
+| `lib/internal/url`, `src/node_url` | @nodejs/url |
+| `lib/net` | @bnoordhuis, @indutny, @nodejs/streams |
+| `lib/repl` | @addaleax, @fishrock123 |
+| `lib/{_}stream{*}` | @nodejs/streams |
+| `lib/timers` | @fishrock123, @misterdjules |
+| `lib/util` | @bnoordhuis, @cjihrig, @evanlucas |
+| `lib/zlib` | @addaleax, @bnoordhuis, @indutny |
+| `src/async-wrap.*` | @nodejs/async\_hooks |
+| `src/node_api.*` | @nodejs/n-api |
+| `src/node_crypto.*` | @nodejs/crypto |
+| `test/*` | @nodejs/testing |
+| `tools/node_modules/eslint`, `.eslintrc` | @not-an-aardvark, @silverwind, @trott |
+| build | @nodejs/build |
+| ES Modules | @bmeck, @Fishrock123, @guybedford, @MylesBorins, @targos |
+| GYP | @nodejs/gyp |
+| performance | @nodejs/performance |
+| platform specific | @nodejs/platform-{aix,arm,freebsd,macos,ppc,smartos,s390,windows} |
+| python code | @nodejs/python |
+| upgrading c-ares | @jbergstroem |
+| upgrading http-parser | @jbergstroem, @nodejs/http |
+| upgrading libuv | @saghul |
+| upgrading npm | @fishrock123, @MylesBorins |
+| upgrading V8 | @nodejs/v8, @nodejs/post-mortem |
When things need extra attention, are controversial, or `semver-major`:
@nodejs/tsc
diff --git a/doc/onboarding.md b/doc/onboarding.md
index e67f6638d3a0e4..296f9db7d02ef8 100644
--- a/doc/onboarding.md
+++ b/doc/onboarding.md
@@ -9,6 +9,8 @@ onboarding session.
GitHub account. Unless two-factor authentication is enabled, do not give an
account elevated privileges such as the ability to land code in the main
repository or to start continuous integration (CI) jobs.
+* Announce the accepted nomination in a TSC meeting and in the TSC
+ mailing list.
## Fifteen minutes before the onboarding session
@@ -199,12 +201,13 @@ onboarding session.
* Optionally, include your personal pronouns.
* Label your pull request with the `doc` subsystem label.
* Run CI on your PR.
-* After one or two approvals, land the PR.
+* After one or two approvals, land the PR (PRs of this type do not need to wait
+ for 48/72 hours to land).
* Be sure to add the `PR-URL: ` and appropriate `Reviewed-By:`
- metadata!
- * [`core-validate-commit`][] helps a lot with this – install and use it if you
- can!
- * [`node-core-utils`][] fetches the metadata for you.
+ metadata.
+ * [`core-validate-commit`][] automates the validation of commit messages.
+ * [`node-core-utils`][] automates the generation of metadata and the landing
+ process. See the documentation of [`git-node`][].
## Final notes
@@ -227,6 +230,7 @@ onboarding session.
[Code of Conduct]: https://github.com/nodejs/admin/blob/master/CODE_OF_CONDUCT.md
[`core-validate-commit`]: https://github.com/evanlucas/core-validate-commit
+[`git-node`]: https://github.com/nodejs/node-core-utils#git-node
[`node-core-utils`]: https://github.com/nodejs/node-core-utils
[Landing Pull Requests]: https://github.com/nodejs/node/blob/master/COLLABORATOR_GUIDE.md#landing-pull-requests
[https://github.com/nodejs/node/commit/ce986de829457c39257cd205067602e765768fb0]: https://github.com/nodejs/node/commit/ce986de829457c39257cd205067602e765768fb0
diff --git a/lib/_http_client.js b/lib/_http_client.js
index 09925b22791972..2287c2751b8ce6 100644
--- a/lib/_http_client.js
+++ b/lib/_http_client.js
@@ -70,7 +70,7 @@ function isInvalidPath(s) {
}
function validateHost(host, name) {
- if (host != null && typeof host !== 'string') {
+ if (host !== null && host !== undefined && typeof host !== 'string') {
throw new errors.TypeError('ERR_INVALID_ARG_TYPE', `options.${name}`,
['string', 'undefined', 'null'], host);
}
@@ -145,7 +145,7 @@ function ClientRequest(options, cb) {
var method = options.method;
var methodIsString = (typeof method === 'string');
- if (method != null && !methodIsString) {
+ if (method !== null && method !== undefined && !methodIsString) {
throw new errors.TypeError('ERR_INVALID_ARG_TYPE', 'method',
'string', method);
}
@@ -452,6 +452,7 @@ function socketOnData(d) {
socket.removeListener('data', socketOnData);
socket.removeListener('end', socketOnEnd);
parser.finish();
+ freeParser(parser, req, socket);
var bodyHead = d.slice(bytesParsed, d.length);
@@ -472,7 +473,6 @@ function socketOnData(d) {
// Got Upgrade header or CONNECT method, but have no handler.
socket.destroy();
}
- freeParser(parser, req, socket);
} else if (parser.incoming && parser.incoming.complete &&
// When the status code is 100 (Continue), the server will
// send a final response after this client sends a request
@@ -490,7 +490,6 @@ function parserOnIncomingClient(res, shouldKeepAlive) {
var socket = this.socket;
var req = socket._httpMessage;
-
// propagate "domain" setting...
if (req.domain && !res.domain) {
debug('setting "res.domain"');
@@ -503,29 +502,22 @@ function parserOnIncomingClient(res, shouldKeepAlive) {
// We already have a response object, this means the server
// sent a double response.
socket.destroy();
- return;
+ return 0; // No special treatment.
}
req.res = res;
// Responses to CONNECT request is handled as Upgrade.
- if (req.method === 'CONNECT') {
+ const method = req.method;
+ if (method === 'CONNECT') {
res.upgrade = true;
- return 2; // skip body, and the rest
+ return 2; // Skip body and treat as Upgrade.
}
- // Responses to HEAD requests are crazy.
- // HEAD responses aren't allowed to have an entity-body
- // but *can* have a content-length which actually corresponds
- // to the content-length of the entity-body had the request
- // been a GET.
- var isHeadResponse = req.method === 'HEAD';
- debug('AGENT isHeadResponse', isHeadResponse);
-
if (res.statusCode === 100) {
// restart the parser, as this is a continue message.
req.res = null; // Clear res so that we don't hit double-responses.
req.emit('continue');
- return true;
+ return 1; // Skip body but don't treat as Upgrade.
}
if (req.shouldKeepAlive && !shouldKeepAlive && !req.upgradeOrConnect) {
@@ -535,7 +527,6 @@ function parserOnIncomingClient(res, shouldKeepAlive) {
req.shouldKeepAlive = false;
}
-
DTRACE_HTTP_CLIENT_RESPONSE(socket, req);
LTTNG_HTTP_CLIENT_RESPONSE(socket, req);
COUNTER_HTTP_CLIENT_RESPONSE();
@@ -553,7 +544,10 @@ function parserOnIncomingClient(res, shouldKeepAlive) {
if (!handled)
res._dump();
- return isHeadResponse;
+ if (method === 'HEAD')
+ return 1; // Skip body but don't treat as Upgrade.
+
+ return 0; // No special treatment.
}
// client
@@ -579,7 +573,7 @@ function responseKeepAlive(res, req) {
socket.removeListener('error', socketErrorListener);
socket.once('error', freeSocketErrorListener);
// There are cases where _handle === null. Avoid those. Passing null to
- // nextTick() will call initTriggerId() to retrieve the id.
+ // nextTick() will call getDefaultTriggerAsyncId() to retrieve the id.
const asyncId = socket._handle ? socket._handle.getAsyncId() : null;
// Mark this socket as available, AFTER user-added end
// handlers have a chance to run.
diff --git a/lib/_http_common.js b/lib/_http_common.js
index cf37bbebe36197..b4caf5939e5afc 100644
--- a/lib/_http_common.js
+++ b/lib/_http_common.js
@@ -106,19 +106,10 @@ function parserOnHeadersComplete(versionMajor, versionMinor, headers, method,
parser.incoming.upgrade = upgrade;
- var skipBody = 0; // response to HEAD or CONNECT
+ if (upgrade)
+ return 2; // Skip body and treat as Upgrade.
- if (!upgrade) {
- // For upgraded connections and CONNECT method request, we'll emit this
- // after parser.execute so that we can capture the first part of the new
- // protocol.
- skipBody = parser.onIncoming(parser.incoming, shouldKeepAlive);
- }
-
- if (typeof skipBody !== 'number')
- return skipBody ? 1 : 0;
- else
- return skipBody;
+ return parser.onIncoming(parser.incoming, shouldKeepAlive);
}
// XXX This is a mess.
@@ -190,6 +181,7 @@ var parsers = new FreeList('parsers', 1000, function() {
return parser;
});
+function closeParserInstance(parser) { parser.close(); }
// Free the parser and also break any links that it
// might have to any other things.
@@ -212,7 +204,9 @@ function freeParser(parser, req, socket) {
parser.outgoing = null;
parser[kOnExecute] = null;
if (parsers.free(parser) === false) {
- parser.close();
+ // Make sure the parser's stack has unwound before deleting the
+ // corresponding C++ object through .close().
+ setImmediate(closeParserInstance, parser);
} else {
// Since the Parser destructor isn't going to run the destroy() callbacks
// it needs to be triggered manually.
diff --git a/lib/_http_outgoing.js b/lib/_http_outgoing.js
index 628504b4433158..8fecfc8a8db878 100644
--- a/lib/_http_outgoing.js
+++ b/lib/_http_outgoing.js
@@ -38,6 +38,8 @@ const errors = require('internal/errors');
const { CRLF, debug } = common;
const { utcDate } = internalHttp;
+const kIsCorked = Symbol('isCorked');
+
var RE_FIELDS =
/^(?:Connection|Transfer-Encoding|Content-Length|Date|Expect|Trailer|Upgrade)$/i;
var RE_CONN_VALUES = /(?:^|\W)close|upgrade(?:$|\W)/ig;
@@ -99,6 +101,7 @@ function OutgoingMessage() {
this.finished = false;
this._headerSent = false;
+ this[kIsCorked] = false;
this.socket = null;
this.connection = null;
@@ -657,9 +660,10 @@ function write_(msg, chunk, encoding, callback, fromEnd) {
// signal the user to keep writing.
if (chunk.length === 0) return true;
- if (!fromEnd && msg.connection && !msg.connection.corked) {
+ if (!fromEnd && msg.connection && !msg[kIsCorked]) {
msg.connection.cork();
- process.nextTick(connectionCorkNT, msg.connection);
+ msg[kIsCorked] = true;
+ process.nextTick(connectionCorkNT, msg, msg.connection);
}
var len, ret;
@@ -688,7 +692,8 @@ function writeAfterEndNT(err, callback) {
}
-function connectionCorkNT(conn) {
+function connectionCorkNT(msg, conn) {
+ msg[kIsCorked] = false;
conn.uncork();
}
diff --git a/lib/_http_server.js b/lib/_http_server.js
index 5857e43d79c787..c60119822a98d5 100644
--- a/lib/_http_server.js
+++ b/lib/_http_server.js
@@ -37,6 +37,10 @@ const {
} = require('_http_common');
const { OutgoingMessage } = require('_http_outgoing');
const { outHeadersKey, ondrain } = require('internal/http');
+const {
+ defaultTriggerAsyncIdScope,
+ getOrSetAsyncId
+} = require('internal/async_hooks');
const errors = require('internal/errors');
const Buffer = require('buffer').Buffer;
@@ -292,6 +296,12 @@ Server.prototype.setTimeout = function setTimeout(msecs, callback) {
function connectionListener(socket) {
+ defaultTriggerAsyncIdScope(
+ getOrSetAsyncId(socket), connectionListenerInternal, this, socket
+ );
+}
+
+function connectionListenerInternal(server, socket) {
debug('SERVER new http connection');
httpSocketSetup(socket);
@@ -299,13 +309,13 @@ function connectionListener(socket) {
// Ensure that the server property of the socket is correctly set.
// See https://github.com/nodejs/node/issues/13435
if (socket.server === null)
- socket.server = this;
+ socket.server = server;
// If the user has added a listener to the server,
// request, or response, then it's their responsibility.
// otherwise, destroy on timeout by default
- if (this.timeout && typeof socket.setTimeout === 'function')
- socket.setTimeout(this.timeout);
+ if (server.timeout && typeof socket.setTimeout === 'function')
+ socket.setTimeout(server.timeout);
socket.on('timeout', socketOnTimeout);
var parser = parsers.alloc();
@@ -315,8 +325,8 @@ function connectionListener(socket) {
parser.incoming = null;
// Propagate headers limit from server instance to parser
- if (typeof this.maxHeadersCount === 'number') {
- parser.maxHeaderPairs = this.maxHeadersCount << 1;
+ if (typeof server.maxHeadersCount === 'number') {
+ parser.maxHeaderPairs = server.maxHeadersCount << 1;
} else {
// Set default value because parser may be reused from FreeList
parser.maxHeaderPairs = 2000;
@@ -336,8 +346,8 @@ function connectionListener(socket) {
outgoingData: 0,
keepAliveTimeoutSet: false
};
- state.onData = socketOnData.bind(undefined, this, socket, parser, state);
- state.onEnd = socketOnEnd.bind(undefined, this, socket, parser, state);
+ state.onData = socketOnData.bind(undefined, server, socket, parser, state);
+ state.onEnd = socketOnEnd.bind(undefined, server, socket, parser, state);
state.onClose = socketOnClose.bind(undefined, socket, state);
state.onDrain = socketOnDrain.bind(undefined, socket, state);
socket.on('data', state.onData);
@@ -345,7 +355,7 @@ function connectionListener(socket) {
socket.on('end', state.onEnd);
socket.on('close', state.onClose);
socket.on('drain', state.onDrain);
- parser.onIncoming = parserOnIncoming.bind(undefined, this, socket, state);
+ parser.onIncoming = parserOnIncoming.bind(undefined, server, socket, state);
// We are consuming socket, so it won't get any actual data
socket.on('resume', onSocketResume);
@@ -364,7 +374,7 @@ function connectionListener(socket) {
}
}
parser[kOnExecute] =
- onParserExecute.bind(undefined, this, socket, parser, state);
+ onParserExecute.bind(undefined, server, socket, parser, state);
socket._paused = false;
}
@@ -617,7 +627,7 @@ function parserOnIncoming(server, socket, state, req, keepAlive) {
} else {
server.emit('request', req, res);
}
- return false; // Not a HEAD response. (Not even a response!)
+ return 0; // No special treatment.
}
function resetSocketTimeout(server, socket, state) {
diff --git a/lib/_stream_readable.js b/lib/_stream_readable.js
index 21598efa65f254..500071203b0f3b 100644
--- a/lib/_stream_readable.js
+++ b/lib/_stream_readable.js
@@ -652,8 +652,8 @@ Readable.prototype.pipe = function(dest, pipeOpts) {
if (((state.pipesCount === 1 && state.pipes === dest) ||
(state.pipesCount > 1 && state.pipes.indexOf(dest) !== -1)) &&
!cleanedUp) {
- debug('false write response, pause', src._readableState.awaitDrain);
- src._readableState.awaitDrain++;
+ debug('false write response, pause', state.awaitDrain);
+ state.awaitDrain++;
increasedAwaitDrain = true;
}
src.pause();
diff --git a/lib/_stream_writable.js b/lib/_stream_writable.js
index 549bff1599a911..895563bb39de25 100644
--- a/lib/_stream_writable.js
+++ b/lib/_stream_writable.js
@@ -283,7 +283,7 @@ Writable.prototype.write = function(chunk, encoding, cb) {
if (typeof cb !== 'function')
cb = nop;
- if (state.ended)
+ if (state.ending)
writeAfterEnd(this, cb);
else if (isBuf || validChunk(this, state, chunk, cb)) {
state.pendingcb++;
diff --git a/lib/async_hooks.js b/lib/async_hooks.js
index 78dff7218c13ea..340863a83c432e 100644
--- a/lib/async_hooks.js
+++ b/lib/async_hooks.js
@@ -19,8 +19,7 @@ const {
disableHooks,
// Sensitive Embedder API
newUid,
- initTriggerId,
- setInitTriggerId,
+ getDefaultTriggerAsyncId,
emitInit,
emitBefore,
emitAfter,
@@ -152,7 +151,7 @@ class AsyncResource {
if (typeof opts === 'number') {
opts = { triggerAsyncId: opts, requireManualDestroy: false };
} else if (opts.triggerAsyncId === undefined) {
- opts.triggerAsyncId = initTriggerId();
+ opts.triggerAsyncId = getDefaultTriggerAsyncId();
}
// Unlike emitInitScript, AsyncResource doesn't supports null as the
@@ -245,18 +244,11 @@ Object.defineProperty(module.exports, 'newUid', {
Object.defineProperty(module.exports, 'initTriggerId', {
get: internalUtil.deprecate(function() {
- return initTriggerId;
+ return getDefaultTriggerAsyncId;
}, 'async_hooks.initTriggerId is deprecated. ' +
'Use the AsyncResource default instead.', 'DEP0085')
});
-Object.defineProperty(module.exports, 'setInitTriggerId', {
- get: internalUtil.deprecate(function() {
- return setInitTriggerId;
- }, 'async_hooks.setInitTriggerId is deprecated. ' +
- 'Use the triggerAsyncId parameter in AsyncResource instead.', 'DEP0085')
-});
-
Object.defineProperty(module.exports, 'emitInit', {
get: internalUtil.deprecate(function() {
return emitInit;
diff --git a/lib/buffer.js b/lib/buffer.js
index 2cb1785d45bd11..91b30012ef65d5 100644
--- a/lib/buffer.js
+++ b/lib/buffer.js
@@ -1204,7 +1204,7 @@ Buffer.prototype.readInt32BE = function readInt32BE(offset, noAssert) {
//
// An all-bits-one exponent is either a positive or negative infinity, if
// the fraction is zero, or NaN when it is non-zero. The standard allows
-// both quiet and signalling NaNs but since NaN is a canonical value in
+// both quiet and signaling NaNs but since NaN is a canonical value in
// JavaScript, we cannot (and do not) distinguish between the two.
//
// Other exponents are regular numbers and are computed by subtracting the bias
diff --git a/lib/dgram.js b/lib/dgram.js
index bfd024bfee6c48..cd70dc5be91f0d 100644
--- a/lib/dgram.js
+++ b/lib/dgram.js
@@ -28,7 +28,7 @@ const dns = require('dns');
const util = require('util');
const { isUint8Array } = require('internal/util/types');
const EventEmitter = require('events');
-const { setInitTriggerId } = require('internal/async_hooks');
+const { defaultTriggerAsyncIdScope } = require('internal/async_hooks');
const { UV_UDP_REUSEADDR } = process.binding('constants').os;
const { async_id_symbol } = process.binding('async_wrap');
const { nextTick } = require('internal/process/next_tick');
@@ -448,21 +448,24 @@ Socket.prototype.send = function(buffer,
}
const afterDns = (ex, ip) => {
- doSend(ex, this, ip, list, address, port, callback);
+ defaultTriggerAsyncIdScope(
+ this[async_id_symbol],
+ doSend,
+ ex, this, ip, list, address, port, callback
+ );
};
this._handle.lookup(address, afterDns);
};
-
function doSend(ex, self, ip, list, address, port, callback) {
if (ex) {
if (typeof callback === 'function') {
- callback(ex);
+ process.nextTick(callback, ex);
return;
}
- self.emit('error', ex);
+ process.nextTick(() => self.emit('error', ex));
return;
} else if (!self._handle) {
return;
@@ -476,20 +479,18 @@ function doSend(ex, self, ip, list, address, port, callback) {
req.callback = callback;
req.oncomplete = afterSend;
}
- // node::SendWrap isn't instantiated and attached to the JS instance of
- // SendWrap above until send() is called. So don't set the init trigger id
- // until now.
- setInitTriggerId(self[async_id_symbol]);
+
var err = self._handle.send(req,
list,
list.length,
port,
ip,
!!callback);
+
if (err && callback) {
// don't emit as error, dgram_legacy.js compatibility
const ex = exceptionWithHostPort(err, 'send', address, port);
- nextTick(self[async_id_symbol], callback, ex);
+ process.nextTick(callback, ex);
}
}
diff --git a/lib/fs.js b/lib/fs.js
index d4988eb649f2e1..b2043d45f40afc 100644
--- a/lib/fs.js
+++ b/lib/fs.js
@@ -1110,20 +1110,16 @@ if (constants.O_SYMLINK !== undefined) {
};
fs.lchmodSync = function(path, mode) {
- var fd = fs.openSync(path, constants.O_WRONLY | constants.O_SYMLINK);
+ const fd = fs.openSync(path, constants.O_WRONLY | constants.O_SYMLINK);
// Prefer to return the chmod error, if one occurs,
// but still try to close, and report closing errors if they occur.
- var ret;
+ let ret;
try {
ret = fs.fchmodSync(fd, mode);
- } catch (err) {
- try {
- fs.closeSync(fd);
- } catch (ignore) {}
- throw err;
+ } finally {
+ fs.closeSync(fd);
}
- fs.closeSync(fd);
return ret;
};
}
@@ -1155,13 +1151,25 @@ if (constants.O_SYMLINK !== undefined) {
callback(err);
return;
}
- fs.fchown(fd, uid, gid, callback);
+ // Prefer to return the chown error, if one occurs,
+ // but still try to close, and report closing errors if they occur.
+ fs.fchown(fd, uid, gid, function(err) {
+ fs.close(fd, function(err2) {
+ callback(err || err2);
+ });
+ });
});
};
fs.lchownSync = function(path, uid, gid) {
- var fd = fs.openSync(path, constants.O_WRONLY | constants.O_SYMLINK);
- return fs.fchownSync(fd, uid, gid);
+ const fd = fs.openSync(path, constants.O_WRONLY | constants.O_SYMLINK);
+ let ret;
+ try {
+ ret = fs.fchownSync(fd, uid, gid);
+ } finally {
+ fs.closeSync(fd);
+ }
+ return ret;
};
}
@@ -1997,7 +2005,8 @@ function ReadStream(path, options) {
this.flags = options.flags === undefined ? 'r' : options.flags;
this.mode = options.mode === undefined ? 0o666 : options.mode;
- this.start = options.start;
+ this.start = typeof this.fd !== 'number' && options.start === undefined ?
+ 0 : options.start;
this.end = options.end;
this.autoClose = options.autoClose === undefined ? true : options.autoClose;
this.pos = undefined;
diff --git a/lib/internal/async_hooks.js b/lib/internal/async_hooks.js
index 46cc1806b94a8d..26da41cacf354a 100644
--- a/lib/internal/async_hooks.js
+++ b/lib/internal/async_hooks.js
@@ -14,20 +14,26 @@ const async_wrap = process.binding('async_wrap');
* kTriggerAsyncId: The trigger_async_id of the resource responsible for
* the current execution stack.
* kAsyncIdCounter: Incremental counter tracking the next assigned async_id.
- * kInitTriggerAsyncId: Written immediately before a resource's constructor
+ * kDefaultTriggerAsyncId: Written immediately before a resource's constructor
* that sets the value of the init()'s triggerAsyncId. The order of
* retrieving the triggerAsyncId value is passing directly to the
- * constructor -> value set in kInitTriggerAsyncId -> executionAsyncId of
+ * constructor -> value set in kDefaultTriggerAsyncId -> executionAsyncId of
* the current resource.
+ *
+ * async_ids_fast_stack is a Float64Array that contains part of the async ID
+ * stack. Each pushAsyncIds() call adds two doubles to it, and each
+ * popAsyncIds() call removes two doubles from it.
+ * It has a fixed size, so if that is exceeded, calls to the native
+ * side are used instead in pushAsyncIds() and popAsyncIds().
*/
-const { async_hook_fields, async_id_fields } = async_wrap;
+const { async_id_symbol, async_hook_fields, async_id_fields } = async_wrap;
// Store the pair executionAsyncId and triggerAsyncId in a std::stack on
// Environment::AsyncHooks::ids_stack_ tracks the resource responsible for the
// current execution stack. This is unwound as each resource exits. In the case
// of a fatal exception this stack is emptied after calling each hook's after()
// callback.
-const { pushAsyncIds, popAsyncIds } = async_wrap;
-// For performance reasons, only track Proimses when a hook is enabled.
+const { pushAsyncIds: pushAsyncIds_, popAsyncIds: popAsyncIds_ } = async_wrap;
+// For performance reasons, only track Promises when a hook is enabled.
const { enablePromiseHook, disablePromiseHook } = async_wrap;
// Properties in active_hooks are used to keep track of the set of hooks being
// executed in case another hook is enabled/disabled. The new set of hooks is
@@ -60,8 +66,8 @@ const active_hooks = {
// async execution. These are tracked so if the user didn't include callbacks
// for a given step, that step can bail out early.
const { kInit, kBefore, kAfter, kDestroy, kPromiseResolve,
- kCheck, kExecutionAsyncId, kAsyncIdCounter,
- kInitTriggerAsyncId } = async_wrap.constants;
+ kCheck, kExecutionAsyncId, kAsyncIdCounter, kTriggerAsyncId,
+ kDefaultTriggerAsyncId, kStackLength } = async_wrap.constants;
// Used in AsyncHook and AsyncResource.
const init_symbol = Symbol('init');
@@ -242,25 +248,41 @@ function newUid() {
return ++async_id_fields[kAsyncIdCounter];
}
+function getOrSetAsyncId(object) {
+ if (object.hasOwnProperty(async_id_symbol)) {
+ return object[async_id_symbol];
+ }
+
+ return object[async_id_symbol] = newUid();
+}
+
// Return the triggerAsyncId meant for the constructor calling it. It's up to
// the user to safeguard this call and make sure it's zero'd out when the
// constructor is complete.
-function initTriggerId() {
- var triggerAsyncId = async_id_fields[kInitTriggerAsyncId];
- // Reset value after it's been called so the next constructor doesn't
- // inherit it by accident.
- async_id_fields[kInitTriggerAsyncId] = 0;
- if (triggerAsyncId <= 0)
- triggerAsyncId = async_id_fields[kExecutionAsyncId];
- return triggerAsyncId;
+function getDefaultTriggerAsyncId() {
+ var defaultTriggerAsyncId = async_id_fields[kDefaultTriggerAsyncId];
+ // If defaultTriggerAsyncId isn't set, use the executionAsyncId
+ if (defaultTriggerAsyncId < 0)
+ defaultTriggerAsyncId = async_id_fields[kExecutionAsyncId];
+ return defaultTriggerAsyncId;
}
-function setInitTriggerId(triggerAsyncId) {
+function defaultTriggerAsyncIdScope(triggerAsyncId, block, ...args) {
// CHECK(Number.isSafeInteger(triggerAsyncId))
// CHECK(triggerAsyncId > 0)
- async_id_fields[kInitTriggerAsyncId] = triggerAsyncId;
+ const oldDefaultTriggerAsyncId = async_id_fields[kDefaultTriggerAsyncId];
+ async_id_fields[kDefaultTriggerAsyncId] = triggerAsyncId;
+
+ var ret;
+ try {
+ ret = Reflect.apply(block, null, args);
+ } finally {
+ async_id_fields[kDefaultTriggerAsyncId] = oldDefaultTriggerAsyncId;
+ }
+
+ return ret;
}
@@ -279,13 +301,9 @@ function emitInitScript(asyncId, type, triggerAsyncId, resource) {
return;
// This can run after the early return check b/c running this function
- // manually means that the embedder must have used initTriggerId().
+ // manually means that the embedder must have used getDefaultTriggerAsyncId().
if (triggerAsyncId === null) {
- triggerAsyncId = initTriggerId();
- } else {
- // If a triggerAsyncId was passed, any kInitTriggerAsyncId still must be
- // null'd.
- async_id_fields[kInitTriggerAsyncId] = 0;
+ triggerAsyncId = getDefaultTriggerAsyncId();
}
emitInitNative(asyncId, type, triggerAsyncId, resource);
@@ -326,6 +344,38 @@ function emitDestroyScript(asyncId) {
}
+// This is the equivalent of the native push_async_ids() call.
+function pushAsyncIds(asyncId, triggerAsyncId) {
+ const offset = async_hook_fields[kStackLength];
+ if (offset * 2 >= async_wrap.async_ids_stack.length)
+ return pushAsyncIds_(asyncId, triggerAsyncId);
+ async_wrap.async_ids_stack[offset * 2] = async_id_fields[kExecutionAsyncId];
+ async_wrap.async_ids_stack[offset * 2 + 1] = async_id_fields[kTriggerAsyncId];
+ async_hook_fields[kStackLength]++;
+ async_id_fields[kExecutionAsyncId] = asyncId;
+ async_id_fields[kTriggerAsyncId] = triggerAsyncId;
+}
+
+
+// This is the equivalent of the native pop_async_ids() call.
+function popAsyncIds(asyncId) {
+ if (async_hook_fields[kStackLength] === 0) return false;
+ const stackLength = async_hook_fields[kStackLength];
+
+ if (async_hook_fields[kCheck] > 0 &&
+ async_id_fields[kExecutionAsyncId] !== asyncId) {
+ // Do the same thing as the native code (i.e. crash hard).
+ return popAsyncIds_(asyncId);
+ }
+
+ const offset = stackLength - 1;
+ async_id_fields[kExecutionAsyncId] = async_wrap.async_ids_stack[2 * offset];
+ async_id_fields[kTriggerAsyncId] = async_wrap.async_ids_stack[2 * offset + 1];
+ async_hook_fields[kStackLength] = offset;
+ return offset > 0;
+}
+
+
module.exports = {
// Private API
getHookArrays,
@@ -337,8 +387,9 @@ module.exports = {
disableHooks,
// Sensitive Embedder API
newUid,
- initTriggerId,
- setInitTriggerId,
+ getOrSetAsyncId,
+ getDefaultTriggerAsyncId,
+ defaultTriggerAsyncIdScope,
emitInit: emitInitScript,
emitBefore: emitBeforeScript,
emitAfter: emitAfterScript,
diff --git a/lib/internal/bootstrap_node.js b/lib/internal/bootstrap_node.js
index 72101cb6408cc9..a6ac7b66afbc0b 100644
--- a/lib/internal/bootstrap_node.js
+++ b/lib/internal/bootstrap_node.js
@@ -21,9 +21,6 @@
setupProcessObject();
- internalBinding = process._internalBinding;
- delete process._internalBinding;
-
// do this good and early, since it handles errors.
setupProcessFatal();
@@ -245,6 +242,54 @@
perf.markMilestone(NODE_PERFORMANCE_MILESTONE_BOOTSTRAP_COMPLETE);
}
+ const moduleLoadList = [];
+ Object.defineProperty(process, 'moduleLoadList', {
+ value: moduleLoadList,
+ configurable: true,
+ enumerable: true,
+ writable: false
+ });
+
+ {
+ const bindingObj = Object.create(null);
+
+ const getBinding = process.binding;
+ process.binding = function binding(module) {
+ module = String(module);
+ let mod = bindingObj[module];
+ if (typeof mod !== 'object') {
+ mod = bindingObj[module] = getBinding(module);
+ moduleLoadList.push(`Binding ${module}`);
+ }
+ return mod;
+ };
+
+ const getLinkedBinding = process._linkedBinding;
+ process._linkedBinding = function _linkedBinding(module) {
+ module = String(module);
+ let mod = bindingObj[module];
+ if (typeof mod !== 'object')
+ mod = bindingObj[module] = getLinkedBinding(module);
+ return mod;
+ };
+ }
+
+ {
+ const bindingObj = Object.create(null);
+
+ const getInternalBinding = process._internalBinding;
+ delete process._internalBinding;
+
+ internalBinding = function internalBinding(module) {
+ let mod = bindingObj[module];
+ if (typeof mod !== 'object') {
+ mod = bindingObj[module] = getInternalBinding(module);
+ moduleLoadList.push(`Internal Binding ${module}`);
+ }
+ return mod;
+ };
+ }
+
function setupProcessObject() {
process._setupProcessObject(pushValueToArray);
@@ -366,16 +411,16 @@
// Arrays containing hook flags and ids for async_hook calls.
const { async_hook_fields, async_id_fields } = async_wrap;
// Internal functions needed to manipulate the stack.
- const { clearAsyncIdStack, asyncIdStackSize } = async_wrap;
+ const { clearAsyncIdStack } = async_wrap;
const { kAfter, kExecutionAsyncId,
- kInitTriggerAsyncId } = async_wrap.constants;
+ kDefaultTriggerAsyncId, kStackLength } = async_wrap.constants;
process._fatalException = function(er) {
var caught;
- // It's possible that kInitTriggerAsyncId was set for a constructor call
- // that threw and was never cleared. So clear it now.
- async_id_fields[kInitTriggerAsyncId] = 0;
+ // It's possible that kDefaultTriggerAsyncId was set for a constructor
+ // call that threw and was never cleared. So clear it now.
+ async_id_fields[kDefaultTriggerAsyncId] = -1;
if (exceptionHandlerState.captureFn !== null) {
exceptionHandlerState.captureFn(er);
@@ -406,7 +451,7 @@
do {
NativeModule.require('internal/async_hooks').emitAfter(
async_id_fields[kExecutionAsyncId]);
- } while (asyncIdStackSize() > 0);
+ } while (async_hook_fields[kStackLength] > 0);
// Or completely empty the id stack.
} else {
clearAsyncIdStack();
@@ -542,7 +587,7 @@
throw err;
}
- process.moduleLoadList.push(`NativeModule ${id}`);
+ moduleLoadList.push(`NativeModule ${id}`);
const nativeModule = new NativeModule(id);
diff --git a/lib/internal/cluster/child.js b/lib/internal/cluster/child.js
index 98c5e7b5597f74..40c1a12327558f 100644
--- a/lib/internal/cluster/child.js
+++ b/lib/internal/cluster/child.js
@@ -1,6 +1,7 @@
'use strict';
const assert = require('assert');
const util = require('util');
+const path = require('path');
const EventEmitter = require('events');
const Worker = require('internal/cluster/worker');
const { internal, sendHelper } = require('internal/cluster/utils');
@@ -48,7 +49,14 @@ cluster._setupWorker = function() {
// obj is a net#Server or a dgram#Socket object.
cluster._getServer = function(obj, options, cb) {
- const indexesKey = [options.address,
+ let address = options.address;
+
+ // Resolve unix socket paths to absolute paths
+ if (options.port < 0 && typeof address === 'string' &&
+ process.platform !== 'win32')
+ address = path.resolve(address);
+
+ const indexesKey = [address,
options.port,
options.addressType,
options.fd ].join(':');
@@ -64,6 +72,8 @@ cluster._getServer = function(obj, options, cb) {
data: null
}, options);
+ message.address = address;
+
// Set custom data on handle (i.e. tls tickets key)
if (obj._getServerData)
message.data = obj._getServerData();
diff --git a/lib/internal/cluster/master.js b/lib/internal/cluster/master.js
index 408b31c2b77805..570cf7bc6f93d4 100644
--- a/lib/internal/cluster/master.js
+++ b/lib/internal/cluster/master.js
@@ -2,6 +2,7 @@
const assert = require('assert');
const { fork } = require('child_process');
const util = require('util');
+const path = require('path');
const EventEmitter = require('events');
const RoundRobinHandle = require('internal/cluster/round_robin_handle');
const SharedHandle = require('internal/cluster/shared_handle');
@@ -125,6 +126,7 @@ function createWorkerProcess(id, env) {
}
return fork(cluster.settings.exec, cluster.settings.args, {
+ cwd: cluster.settings.cwd,
env: workerEnv,
silent: cluster.settings.silent,
windowsHide: cluster.settings.windowsHide,
@@ -276,6 +278,18 @@ function queryServer(worker, message) {
var handle = handles[key];
if (handle === undefined) {
+ let address = message.address;
+
+ // Find shortest path for unix sockets because of the ~100 byte limit
+ if (message.port < 0 && typeof address === 'string' &&
+ process.platform !== 'win32') {
+
+ address = path.relative(process.cwd(), address);
+
+ if (message.address.length < address.length)
+ address = message.address;
+ }
+
var constructor = RoundRobinHandle;
// UDP is exempt from round-robin connection balancing for what should
// be obvious reasons: it's connectionless. There is nothing to send to
@@ -287,7 +301,7 @@ function queryServer(worker, message) {
}
handles[key] = handle = new constructor(key,
- message.address,
+ address,
message.port,
message.addressType,
message.fd,
diff --git a/lib/internal/errors.js b/lib/internal/errors.js
index 807e8e76909b76..70569fd8439611 100644
--- a/lib/internal/errors.js
+++ b/lib/internal/errors.js
@@ -316,10 +316,15 @@ E('ERR_MODULE_RESOLUTION_LEGACY', '%s not found by import in %s.' +
E('ERR_MULTIPLE_CALLBACK', 'Callback called multiple times');
E('ERR_NAPI_CONS_FUNCTION', 'Constructor must be a function');
E('ERR_NAPI_CONS_PROTOTYPE_OBJECT', 'Constructor.prototype must be an object');
+E('ERR_NAPI_INVALID_DATAVIEW_ARGS',
+  'byte_offset + byte_length should be less than or equal to the size in ' +
+ 'bytes of the array passed in');
+E('ERR_NAPI_INVALID_TYPEDARRAY_ALIGNMENT', 'start offset of %s should be a ' +
+ 'multiple of %s');
+E('ERR_NAPI_INVALID_TYPEDARRAY_LENGTH', 'Invalid typed array length');
E('ERR_NO_CRYPTO', 'Node.js is not compiled with OpenSSL crypto support');
E('ERR_NO_ICU', '%s is not supported on Node.js compiled without ICU');
E('ERR_NO_LONGER_SUPPORTED', '%s is no longer supported');
-E('ERR_OUTOFMEMORY', 'Out of memory');
E('ERR_OUT_OF_RANGE', 'The "%s" argument is out of range');
E('ERR_PARSE_HISTORY_DATA', 'Could not parse history data in %s');
E('ERR_REQUIRE_ESM', 'Must use import to load ES Module: %s');
diff --git a/lib/internal/http2/core.js b/lib/internal/http2/core.js
index e818085afb763a..aade7e83be3511 100644
--- a/lib/internal/http2/core.js
+++ b/lib/internal/http2/core.js
@@ -74,6 +74,7 @@ const kEncrypted = Symbol('encrypted');
const kHandle = Symbol('handle');
const kID = Symbol('id');
const kInit = Symbol('init');
+const kInfoHeaders = Symbol('sent-info-headers');
const kMaybeDestroy = Symbol('maybe-destroy');
const kLocalSettings = Symbol('local-settings');
const kOptions = Symbol('options');
@@ -82,6 +83,8 @@ const kProceed = Symbol('proceed');
const kProtocol = Symbol('protocol');
const kProxySocket = Symbol('proxy-socket');
const kRemoteSettings = Symbol('remote-settings');
+const kSentHeaders = Symbol('sent-headers');
+const kSentTrailers = Symbol('sent-trailers');
const kServer = Symbol('server');
const kSession = Symbol('session');
const kState = Symbol('state');
@@ -256,6 +259,7 @@ function onStreamTrailers() {
stream.destroy(headersList);
return [];
}
+ stream[kSentTrailers] = trailers;
return headersList;
}
@@ -273,7 +277,7 @@ function submitRstStream(code) {
// point, close them. If there is an open fd for file send, close that also.
// At this point the underlying node::http2:Http2Stream handle is no
// longer usable so destroy it also.
-function onStreamClose(code, hasData) {
+function onStreamClose(code) {
const stream = this[kOwner];
if (stream.destroyed)
return;
@@ -281,8 +285,7 @@ function onStreamClose(code, hasData) {
const state = stream[kState];
debug(`Http2Stream ${stream[kID]} [Http2Session ` +
- `${sessionName(stream[kSession][kType])}]: closed with code ${code}` +
- ` [has data? ${hasData}]`);
+ `${sessionName(stream[kSession][kType])}]: closed with code ${code}`);
if (!stream.closed) {
// Unenroll from timeouts
@@ -300,13 +303,14 @@ function onStreamClose(code, hasData) {
if (state.fd !== undefined)
tryClose(state.fd);
- stream[kMaybeDestroy](null, code, hasData);
+ stream.push(null);
+ stream[kMaybeDestroy](null, code);
}
// Receives a chunk of data for a given stream and forwards it on
// to the Http2Stream Duplex for processing.
-function onStreamRead(nread, buf, handle) {
- const stream = handle[kOwner];
+function onStreamRead(nread, buf) {
+ const stream = this[kOwner];
if (nread >= 0 && !stream.destroyed) {
debug(`Http2Stream ${stream[kID]} [Http2Session ` +
`${sessionName(stream[kSession][kType])}]: receiving data chunk ` +
@@ -314,7 +318,7 @@ function onStreamRead(nread, buf, handle) {
stream[kUpdateTimer]();
if (!stream.push(buf)) {
if (!stream.destroyed) // we have to check a second time
- handle.readStop();
+ this.readStop();
}
return;
}
@@ -1344,6 +1348,7 @@ class ClientHttp2Session extends Http2Session {
throw headersList;
const stream = new ClientHttp2Stream(this, undefined, undefined, {});
+ stream[kSentHeaders] = headers;
// Close the writable side of the stream if options.endStream is set.
if (options.endStream)
@@ -1422,13 +1427,8 @@ function streamOnResume() {
}
function streamOnPause() {
- // if (!this.destroyed && !this.pending)
- // this[kHandle].readStop();
-}
-
-function handleFlushData(self) {
if (!this.destroyed && !this.pending)
- this[kHandle].flushData();
+ this[kHandle].readStop();
}
// If the writable side of the Http2Stream is still open, emit the
@@ -1507,6 +1507,18 @@ class Http2Stream extends Duplex {
return `Http2Stream ${util.format(obj)}`;
}
+ get sentHeaders() {
+ return this[kSentHeaders];
+ }
+
+ get sentTrailers() {
+ return this[kSentTrailers];
+ }
+
+ get sentInfoHeaders() {
+ return this[kInfoHeaders];
+ }
+
get pending() {
return this[kID] === undefined;
}
@@ -1549,7 +1561,7 @@ class Http2Stream extends Duplex {
return !!(this[kState].flags & STREAM_FLAGS_HEADERS_SENT);
}
- // true if the Http2Stream was aborted abornomally.
+ // true if the Http2Stream was aborted abnormally.
get aborted() {
return !!(this[kState].flags & STREAM_FLAGS_ABORTED);
}
@@ -1662,11 +1674,10 @@ class Http2Stream extends Duplex {
this.push(null);
return;
}
- const flushfn = handleFlushData.bind(this);
if (!this.pending) {
- flushfn();
+ streamOnResume.call(this);
} else {
- this.once('ready', flushfn);
+ this.once('ready', streamOnResume);
}
}
@@ -1805,10 +1816,10 @@ class Http2Stream extends Duplex {
// The Http2Stream can be destroyed if it has closed and if the readable
// side has received the final chunk.
- [kMaybeDestroy](error, code = NGHTTP2_NO_ERROR, hasData = true) {
+ [kMaybeDestroy](error, code = NGHTTP2_NO_ERROR) {
if (error == null) {
if (code === NGHTTP2_NO_ERROR &&
- ((!this._readableState.ended && hasData) ||
+ (!this._readableState.ended ||
!this._writableState.ended ||
this._writableState.pendingcb > 0 ||
!this.closed)) {
@@ -1846,6 +1857,7 @@ function processRespondWithFD(self, fd, headers, offset = 0, length = -1,
state.flags |= STREAM_FLAGS_HEADERS_SENT;
const headersList = mapToHeaders(headers, assertValidPseudoHeaderResponse);
+ self[kSentHeaders] = headers;
if (!Array.isArray(headersList)) {
self.destroy(headersList);
return;
@@ -2076,6 +2088,7 @@ class ServerHttp2Stream extends Http2Stream {
const id = ret.id();
const stream = new ServerHttp2Stream(session, ret, id, options, headers);
+ stream[kSentHeaders] = headers;
if (options.endStream)
stream.end();
@@ -2135,6 +2148,7 @@ class ServerHttp2Stream extends Http2Stream {
const headersList = mapToHeaders(headers, assertValidPseudoHeaderResponse);
if (!Array.isArray(headersList))
throw headersList;
+ this[kSentHeaders] = headers;
state.flags |= STREAM_FLAGS_HEADERS_SENT;
@@ -2320,6 +2334,10 @@ class ServerHttp2Stream extends Http2Stream {
const headersList = mapToHeaders(headers, assertValidPseudoHeaderResponse);
if (!Array.isArray(headersList))
throw headersList;
+ if (!this[kInfoHeaders])
+ this[kInfoHeaders] = [headers];
+ else
+ this[kInfoHeaders].push(headers);
const ret = this[kHandle].info(headersList);
if (ret < 0)
@@ -2380,7 +2398,7 @@ Object.defineProperty(Http2Session.prototype, 'setTimeout', setTimeout);
// When the socket emits an error, destroy the associated Http2Session and
-// foward it the same error.
+// forward it the same error.
function socketOnError(error) {
const session = this[kSession];
if (session !== undefined) {
diff --git a/lib/internal/process.js b/lib/internal/process.js
index e58b83d21631ff..757c8de8e685f1 100644
--- a/lib/internal/process.js
+++ b/lib/internal/process.js
@@ -178,24 +178,23 @@ function setupKillAndExit() {
function setupSignalHandlers() {
- // Load events module in order to access prototype elements on process like
- // process.addListener.
- const signalWraps = {};
+ const signalWraps = Object.create(null);
+ let Signal;
function isSignal(event) {
return typeof event === 'string' && constants[event] !== undefined;
}
// Detect presence of a listener for the special signal types
- process.on('newListener', function(type, listener) {
- if (isSignal(type) &&
- !signalWraps.hasOwnProperty(type)) {
- const Signal = process.binding('signal_wrap').Signal;
+ process.on('newListener', function(type) {
+ if (isSignal(type) && signalWraps[type] === undefined) {
+ if (Signal === undefined)
+ Signal = process.binding('signal_wrap').Signal;
const wrap = new Signal();
wrap.unref();
- wrap.onsignal = function() { process.emit(type, type); };
+ wrap.onsignal = process.emit.bind(process, type, type);
const signum = constants[type];
const err = wrap.start(signum);
@@ -208,8 +207,8 @@ function setupSignalHandlers() {
}
});
- process.on('removeListener', function(type, listener) {
- if (signalWraps.hasOwnProperty(type) && this.listenerCount(type) === 0) {
+ process.on('removeListener', function(type) {
+ if (signalWraps[type] !== undefined && this.listenerCount(type) === 0) {
signalWraps[type].close();
delete signalWraps[type];
}
@@ -227,14 +226,7 @@ function setupChannel() {
// Make sure it's not accidentally inherited by child processes.
delete process.env.NODE_CHANNEL_FD;
- const cp = require('child_process');
-
- // Load tcp_wrap to avoid situation where we might immediately receive
- // a message.
- // FIXME is this really necessary?
- process.binding('tcp_wrap');
-
- cp._forkChild(fd);
+ require('child_process')._forkChild(fd);
assert(process.send);
}
}
diff --git a/lib/internal/process/next_tick.js b/lib/internal/process/next_tick.js
index 260aa70b431b86..bf7d0bc94dc4ce 100644
--- a/lib/internal/process/next_tick.js
+++ b/lib/internal/process/next_tick.js
@@ -48,7 +48,7 @@ function setupNextTick() {
const promises = require('internal/process/promises');
const errors = require('internal/errors');
const emitPendingUnhandledRejections = promises.setup(scheduleMicrotasks);
- const initTriggerId = async_hooks.initTriggerId;
+ const getDefaultTriggerAsyncId = async_hooks.getDefaultTriggerAsyncId;
// Two arrays that share state between C++ and JS.
const { async_hook_fields, async_id_fields } = async_wrap;
// Used to change the state of the async id stack.
@@ -210,7 +210,7 @@ function setupNextTick() {
nextTickQueue.push(new TickObject(callback,
args,
++async_id_fields[kAsyncIdCounter],
- initTriggerId()));
+ getDefaultTriggerAsyncId()));
}
// `internalNextTick()` will not enqueue any callback when the process is
@@ -237,7 +237,7 @@ function setupNextTick() {
}
if (triggerAsyncId === null)
- triggerAsyncId = initTriggerId();
+ triggerAsyncId = getDefaultTriggerAsyncId();
// In V8 6.2, moving tickInfo & async_id_fields[kAsyncIdCounter] into the
// TickObject incurs a significant performance penalty in the
// next-tick-breadth-args benchmark (revisit later)
diff --git a/lib/internal/streams/BufferList.js b/lib/internal/streams/BufferList.js
index 23d5a8a2db0eb7..b2daf82e74190b 100644
--- a/lib/internal/streams/BufferList.js
+++ b/lib/internal/streams/BufferList.js
@@ -61,8 +61,6 @@ module.exports = class BufferList {
concat(n) {
if (this.length === 0)
return Buffer.alloc(0);
- if (this.length === 1)
- return this.head.data;
const ret = Buffer.allocUnsafe(n >>> 0);
var p = this.head;
var i = 0;
diff --git a/lib/internal/trace_events_async_hooks.js b/lib/internal/trace_events_async_hooks.js
index 6d996b083fb49b..704da98e14412b 100644
--- a/lib/internal/trace_events_async_hooks.js
+++ b/lib/internal/trace_events_async_hooks.js
@@ -4,7 +4,7 @@ const trace_events = process.binding('trace_events');
const async_wrap = process.binding('async_wrap');
const async_hooks = require('async_hooks');
-// Use small letters such that chrome://traceing groups by the name.
+// Use small letters such that chrome://tracing groups by the name.
// The behavior is not only useful but the same as the events emitted using
// the specific C++ macros.
const BEFORE_EVENT = 'b'.charCodeAt(0);
diff --git a/lib/internal/url.js b/lib/internal/url.js
index caa4c3d0283a3e..b395e77b046f10 100644
--- a/lib/internal/url.js
+++ b/lib/internal/url.js
@@ -6,7 +6,7 @@ const {
isHexTable
} = require('internal/querystring');
-const { getConstructorOf } = require('internal/util');
+const { getConstructorOf, removeColors } = require('internal/util');
const errors = require('internal/errors');
const querystring = require('querystring');
@@ -181,9 +181,8 @@ class URLSearchParams {
for (var i = 0; i < list.length; i += 2)
output.push(`${innerInspect(list[i])} => ${innerInspect(list[i + 1])}`);
- var colorRe = /\u001b\[\d\d?m/g;
var length = output.reduce(
- (prev, cur) => prev + cur.replace(colorRe, '').length + separator.length,
+ (prev, cur) => prev + removeColors(cur).length + separator.length,
-separator.length
);
if (length > ctx.breakLength) {
diff --git a/lib/internal/util.js b/lib/internal/util.js
index 9f32785fbbfe53..e4d184e117175e 100644
--- a/lib/internal/util.js
+++ b/lib/internal/util.js
@@ -12,6 +12,12 @@ const noCrypto = !process.versions.openssl;
const experimentalWarnings = new Set();
+const colorRegExp = /\u001b\[\d\d?m/g;
+
+function removeColors(str) {
+ return str.replace(colorRegExp, '');
+}
+
function isError(e) {
return objectToString(e) === '[object Error]' || e instanceof Error;
}
@@ -297,6 +303,7 @@ module.exports = {
objectToString,
promisify,
spliceOne,
+ removeColors,
// Symbol used to customize promisify conversion
customPromisifyArgs: kCustomPromisifyArgsSymbol,
diff --git a/lib/internal/wrap_js_stream.js b/lib/internal/wrap_js_stream.js
index 611095655b5a65..1c494e57e1f9dd 100644
--- a/lib/internal/wrap_js_stream.js
+++ b/lib/internal/wrap_js_stream.js
@@ -8,6 +8,15 @@ const uv = process.binding('uv');
const debug = util.debuglog('stream_wrap');
const errors = require('internal/errors');
+const kCurrentWriteRequest = Symbol('kCurrentWriteRequest');
+const kCurrentShutdownRequest = Symbol('kCurrentShutdownRequest');
+
+function isClosing() { return this.owner.isClosing(); }
+function onreadstart() { return this.owner.readStart(); }
+function onreadstop() { return this.owner.readStop(); }
+function onshutdown(req) { return this.owner.doShutdown(req); }
+function onwrite(req, bufs) { return this.owner.doWrite(req, bufs); }
+
/* This class serves as a wrapper for when the C++ side of Node wants access
* to a standard JS stream. For example, TLS or HTTP do not operate on network
* resources conceptually, although that is the common case and what we are
@@ -27,12 +36,13 @@ class JSStreamWrap extends Socket {
debug('close');
this.doClose(cb);
};
- handle.isAlive = () => this.isAlive();
- handle.isClosing = () => this.isClosing();
- handle.onreadstart = () => this.readStart();
- handle.onreadstop = () => this.readStop();
- handle.onshutdown = (req) => this.doShutdown(req);
- handle.onwrite = (req, bufs) => this.doWrite(req, bufs);
+ // Inside of the following functions, `this` refers to the handle
+ // and `this.owner` refers to this JSStreamWrap instance.
+ handle.isClosing = isClosing;
+ handle.onreadstart = onreadstart;
+ handle.onreadstop = onreadstop;
+ handle.onshutdown = onshutdown;
+ handle.onwrite = onwrite;
stream.pause();
stream.on('error', (err) => this.emit('error', err));
@@ -60,7 +70,10 @@ class JSStreamWrap extends Socket {
super({ handle, manualStart: true });
this.stream = stream;
- this._list = null;
+ this[kCurrentWriteRequest] = null;
+ this[kCurrentShutdownRequest] = null;
+
+ // Start reading.
this.read(0);
}
@@ -69,10 +82,6 @@ class JSStreamWrap extends Socket {
return JSStreamWrap;
}
- isAlive() {
- return true;
- }
-
isClosing() {
return !this.readable || !this.writable;
}
@@ -88,33 +97,56 @@ class JSStreamWrap extends Socket {
}
doShutdown(req) {
+ assert.strictEqual(this[kCurrentShutdownRequest], null);
+ this[kCurrentShutdownRequest] = req;
+
+ // TODO(addaleax): It might be nice if we could get into a state where
+ // DoShutdown() is not called on streams while a write is still pending.
+ //
+ // Currently, the only part of the code base where that happens is the
+ // TLS implementation, which calls both DoWrite() and DoShutdown() on the
+ // underlying network stream inside of its own DoShutdown() method.
+ // Working around that on the native side is not quite trivial (yet?),
+ // so for now that is supported here.
+
+ if (this[kCurrentWriteRequest] !== null)
+ return this.on('drain', () => this.doShutdown(req));
+ assert.strictEqual(this[kCurrentWriteRequest], null);
+
const handle = this._handle;
- const item = this._enqueue('shutdown', req);
this.stream.end(() => {
// Ensure that write was dispatched
setImmediate(() => {
- if (!this._dequeue(item))
- return;
-
- handle.finishShutdown(req, 0);
+ this.finishShutdown(handle, 0);
});
});
return 0;
}
+ // handle === this._handle except when called from doClose().
+ finishShutdown(handle, errCode) {
+ // The shutdown request might already have been cancelled.
+ if (this[kCurrentShutdownRequest] === null)
+ return;
+ const req = this[kCurrentShutdownRequest];
+ this[kCurrentShutdownRequest] = null;
+ handle.finishShutdown(req, errCode);
+ }
+
doWrite(req, bufs) {
- const self = this;
- const handle = this._handle;
+ assert.strictEqual(this[kCurrentWriteRequest], null);
+ assert.strictEqual(this[kCurrentShutdownRequest], null);
+ this[kCurrentWriteRequest] = req;
- var pending = bufs.length;
+ const handle = this._handle;
+ const self = this;
- // Queue the request to be able to cancel it
- const item = this._enqueue('write', req);
+ let pending = bufs.length;
this.stream.cork();
- for (var n = 0; n < bufs.length; n++)
- this.stream.write(bufs[n], done);
+ for (var i = 0; i < bufs.length; ++i)
+ this.stream.write(bufs[i], done);
this.stream.uncork();
function done(err) {
@@ -126,93 +158,42 @@ class JSStreamWrap extends Socket {
let errCode = 0;
if (err) {
- const code = uv[`UV_${err.code}`];
- errCode = (err.code && code) ? code : uv.UV_EPIPE;
+ errCode = uv[`UV_${err.code}`] || uv.UV_EPIPE;
}
// Ensure that write was dispatched
- setImmediate(function() {
- // Do not invoke callback twice
- if (!self._dequeue(item))
- return;
-
- handle.finishWrite(req, errCode);
+ setImmediate(() => {
+ self.finishWrite(handle, errCode);
});
}
return 0;
}
- _enqueue(type, req) {
- const item = new QueueItem(type, req);
- if (this._list === null) {
- this._list = item;
- return item;
- }
-
- item.next = this._list.next;
- item.prev = this._list;
- item.next.prev = item;
- item.prev.next = item;
-
- return item;
- }
-
- _dequeue(item) {
- assert(item instanceof QueueItem);
-
- var next = item.next;
- var prev = item.prev;
-
- if (next === null && prev === null)
- return false;
-
- item.next = null;
- item.prev = null;
-
- if (next === item) {
- prev = null;
- next = null;
- } else {
- prev.next = next;
- next.prev = prev;
- }
-
- if (this._list === item)
- this._list = next;
+ // handle === this._handle except when called from doClose().
+ finishWrite(handle, errCode) {
+ // The write request might already have been cancelled.
+ if (this[kCurrentWriteRequest] === null)
+ return;
+ const req = this[kCurrentWriteRequest];
+ this[kCurrentWriteRequest] = null;
- return true;
+ handle.finishWrite(req, errCode);
}
doClose(cb) {
const handle = this._handle;
setImmediate(() => {
- while (this._list !== null) {
- const item = this._list;
- const req = item.req;
- this._dequeue(item);
-
- const errCode = uv.UV_ECANCELED;
- if (item.type === 'write') {
- handle.finishWrite(req, errCode);
- } else if (item.type === 'shutdown') {
- handle.finishShutdown(req, errCode);
- }
- }
-
// Should be already set by net.js
assert.strictEqual(this._handle, null);
+
+ this.finishWrite(handle, uv.UV_ECANCELED);
+ this.finishShutdown(handle, uv.UV_ECANCELED);
+
cb();
});
}
}
-function QueueItem(type, req) {
- this.type = type;
- this.req = req;
- this.prev = this;
- this.next = this;
-}
-
module.exports = JSStreamWrap;
diff --git a/lib/net.js b/lib/net.js
index 540de753641e88..886365092c05a6 100644
--- a/lib/net.js
+++ b/lib/net.js
@@ -43,7 +43,7 @@ const { TCPConnectWrap } = process.binding('tcp_wrap');
const { PipeConnectWrap } = process.binding('pipe_wrap');
const { ShutdownWrap, WriteWrap } = process.binding('stream_wrap');
const { async_id_symbol } = process.binding('async_wrap');
-const { newUid, setInitTriggerId } = require('internal/async_hooks');
+const { newUid, defaultTriggerAsyncIdScope } = require('internal/async_hooks');
const { nextTick } = require('internal/process/next_tick');
const errors = require('internal/errors');
const dns = require('dns');
@@ -274,6 +274,14 @@ Socket.prototype._unrefTimer = function _unrefTimer() {
timers._unrefActive(s);
};
+
+function shutdownSocket(self, callback) {
+ var req = new ShutdownWrap();
+ req.oncomplete = callback;
+ req.handle = self._handle;
+ return self._handle.shutdown(req);
+}
+
// the user has called .end(), and all the bytes have been
// sent out to the other side.
function onSocketFinish() {
@@ -295,14 +303,9 @@ function onSocketFinish() {
if (!this._handle || !this._handle.shutdown)
return this.destroy();
- var req = new ShutdownWrap();
- req.oncomplete = afterShutdown;
- req.handle = this._handle;
- // node::ShutdownWrap isn't instantiated and attached to the JS instance of
- // ShutdownWrap above until shutdown() is called. So don't set the init
- // trigger id until now.
- setInitTriggerId(this[async_id_symbol]);
- var err = this._handle.shutdown(req);
+ var err = defaultTriggerAsyncIdScope(
+ this[async_id_symbol], shutdownSocket, this, afterShutdown
+ );
if (err)
return this.destroy(errnoException(err, 'shutdown'));
@@ -936,23 +939,15 @@ function internalConnect(
req.localAddress = localAddress;
req.localPort = localPort;
- // node::TCPConnectWrap isn't instantiated and attached to the JS instance
- // of TCPConnectWrap above until connect() is called. So don't set the init
- // trigger id until now.
- setInitTriggerId(self[async_id_symbol]);
if (addressType === 4)
err = self._handle.connect(req, address, port);
else
err = self._handle.connect6(req, address, port);
-
} else {
const req = new PipeConnectWrap();
req.address = address;
req.oncomplete = afterConnect;
- // node::PipeConnectWrap isn't instantiated and attached to the JS instance
- // of PipeConnectWrap above until connect() is called. So don't set the
- // init trigger id until now.
- setInitTriggerId(self[async_id_symbol]);
+
err = self._handle.connect(req, address, afterConnect);
}
@@ -1021,7 +1016,9 @@ Socket.prototype.connect = function(...args) {
'string',
path);
}
- internalConnect(this, path);
+ defaultTriggerAsyncIdScope(
+ this[async_id_symbol], internalConnect, this, path
+ );
} else {
lookupAndConnect(this, options);
}
@@ -1064,7 +1061,11 @@ function lookupAndConnect(self, options) {
if (addressType) {
nextTick(self[async_id_symbol], function() {
if (self.connecting)
- internalConnect(self, host, port, addressType, localAddress, localPort);
+ defaultTriggerAsyncIdScope(
+ self[async_id_symbol],
+ internalConnect,
+ self, host, port, addressType, localAddress, localPort
+ );
});
return;
}
@@ -1091,33 +1092,33 @@ function lookupAndConnect(self, options) {
debug('connect: dns options', dnsopts);
self._host = host;
var lookup = options.lookup || dns.lookup;
- setInitTriggerId(self[async_id_symbol]);
- lookup(host, dnsopts, function emitLookup(err, ip, addressType) {
- self.emit('lookup', err, ip, addressType, host);
+ defaultTriggerAsyncIdScope(self[async_id_symbol], function() {
+ lookup(host, dnsopts, function emitLookup(err, ip, addressType) {
+ self.emit('lookup', err, ip, addressType, host);
- // It's possible we were destroyed while looking this up.
- // XXX it would be great if we could cancel the promise returned by
- // the look up.
- if (!self.connecting) return;
+ // It's possible we were destroyed while looking this up.
+ // XXX it would be great if we could cancel the promise returned by
+ // the look up.
+ if (!self.connecting) return;
- if (err) {
- // net.createConnection() creates a net.Socket object and
- // immediately calls net.Socket.connect() on it (that's us).
- // There are no event listeners registered yet so defer the
- // error event to the next tick.
- err.host = options.host;
- err.port = options.port;
- err.message = err.message + ' ' + options.host + ':' + options.port;
- process.nextTick(connectErrorNT, self, err);
- } else {
- self._unrefTimer();
- internalConnect(self,
- ip,
- port,
- addressType,
- localAddress,
- localPort);
- }
+ if (err) {
+ // net.createConnection() creates a net.Socket object and
+ // immediately calls net.Socket.connect() on it (that's us).
+ // There are no event listeners registered yet so defer the
+ // error event to the next tick.
+ err.host = options.host;
+ err.port = options.port;
+ err.message = err.message + ' ' + options.host + ':' + options.port;
+ process.nextTick(connectErrorNT, self, err);
+ } else {
+ self._unrefTimer();
+ defaultTriggerAsyncIdScope(
+ self[async_id_symbol],
+ internalConnect,
+ self, ip, port, addressType, localAddress, localPort
+ );
+ }
+ });
});
}
diff --git a/lib/path.js b/lib/path.js
index e9f76bce9973c5..eca4fcb9d21718 100644
--- a/lib/path.js
+++ b/lib/path.js
@@ -51,19 +51,14 @@ function normalizeStringWin32(path, allowAboveRoot) {
res.charCodeAt(res.length - 1) !== 46/*.*/ ||
res.charCodeAt(res.length - 2) !== 46/*.*/) {
if (res.length > 2) {
- const start = res.length - 1;
- var j = start;
- for (; j >= 0; --j) {
- if (res.charCodeAt(j) === 92/*\*/)
- break;
- }
- if (j !== start) {
- if (j === -1) {
+ const lastSlashIndex = res.lastIndexOf('\\');
+ if (lastSlashIndex !== res.length - 1) {
+ if (lastSlashIndex === -1) {
res = '';
lastSegmentLength = 0;
} else {
- res = res.slice(0, j);
- lastSegmentLength = j;
+ res = res.slice(0, lastSlashIndex);
+ lastSegmentLength = res.length - 1 - res.lastIndexOf('\\');
}
lastSlash = i;
dots = 0;
@@ -124,19 +119,14 @@ function normalizeStringPosix(path, allowAboveRoot) {
res.charCodeAt(res.length - 1) !== 46/*.*/ ||
res.charCodeAt(res.length - 2) !== 46/*.*/) {
if (res.length > 2) {
- const start = res.length - 1;
- var j = start;
- for (; j >= 0; --j) {
- if (res.charCodeAt(j) === 47/*/*/)
- break;
- }
- if (j !== start) {
- if (j === -1) {
+ const lastSlashIndex = res.lastIndexOf('/');
+ if (lastSlashIndex !== res.length - 1) {
+ if (lastSlashIndex === -1) {
res = '';
lastSegmentLength = 0;
} else {
- res = res.slice(0, j);
- lastSegmentLength = j;
+ res = res.slice(0, lastSlashIndex);
+ lastSegmentLength = res.length - 1 - res.lastIndexOf('/');
}
lastSlash = i;
dots = 0;
diff --git a/lib/perf_hooks.js b/lib/perf_hooks.js
index 4a05f7ccba7bc8..15256a63c0b97c 100644
--- a/lib/perf_hooks.js
+++ b/lib/perf_hooks.js
@@ -471,6 +471,10 @@ class Performance extends PerformanceObserverEntryList {
this[kClearEntry]('function', name);
}
+ clearEntries(name) {
+ this[kClearEntry](name);
+ }
+
timerify(fn) {
if (typeof fn !== 'function') {
const errors = lazyErrors();
diff --git a/lib/readline.js b/lib/readline.js
index 9b9a054fd66fb7..bca9c25ab2348b 100644
--- a/lib/readline.js
+++ b/lib/readline.js
@@ -760,7 +760,8 @@ Interface.prototype._ttyWrite = function(s, key) {
key = key || {};
this._previousKey = key;
- // Ignore escape key - Fixes #2876
+ // Ignore escape key, fixes
+ // https://github.com/nodejs/node-v0.x-archive/issues/2876.
if (key.name === 'escape') return;
if (key.ctrl && key.shift) {
diff --git a/lib/timers.js b/lib/timers.js
index 1560215fe0ad5d..d637ca91ae48e9 100644
--- a/lib/timers.js
+++ b/lib/timers.js
@@ -34,7 +34,7 @@ const kOnTimeout = TimerWrap.kOnTimeout | 0;
// Two arrays that share state between C++ and JS.
const { async_hook_fields, async_id_fields } = async_wrap;
const {
- initTriggerId,
+ getDefaultTriggerAsyncId,
// The needed emit*() functions.
emitInit,
emitBefore,
@@ -181,7 +181,7 @@ function insert(item, unrefed) {
if (!item[async_id_symbol] || item._destroyed) {
item._destroyed = false;
item[async_id_symbol] = ++async_id_fields[kAsyncIdCounter];
- item[trigger_async_id_symbol] = initTriggerId();
+ item[trigger_async_id_symbol] = getDefaultTriggerAsyncId();
if (async_hook_fields[kInit] > 0) {
emitInit(item[async_id_symbol],
'Timeout',
@@ -207,11 +207,11 @@ function TimersList(msecs, unrefed) {
if (unrefed === true)
timer.unref();
timer.start(msecs);
-
- timer[kOnTimeout] = listOnTimeout;
}
-function listOnTimeout() {
+// adds listOnTimeout to the C++ object prototype, as
+// V8 would not inline it otherwise.
+TimerWrap.prototype[kOnTimeout] = function listOnTimeout() {
var list = this._list;
var msecs = list.msecs;
@@ -280,7 +280,7 @@ function listOnTimeout() {
return;
this.close();
-}
+};
// An optimization so that the try/finally only de-optimizes (since at least v8
@@ -552,7 +552,7 @@ function Timeout(callback, after, args, isRepeat) {
this._destroyed = false;
this[async_id_symbol] = ++async_id_fields[kAsyncIdCounter];
- this[trigger_async_id_symbol] = initTriggerId();
+ this[trigger_async_id_symbol] = getDefaultTriggerAsyncId();
if (async_hook_fields[kInit] > 0) {
emitInit(this[async_id_symbol],
'Timeout',
@@ -769,7 +769,7 @@ function Immediate(callback, args) {
this._destroyed = false;
this[async_id_symbol] = ++async_id_fields[kAsyncIdCounter];
- this[trigger_async_id_symbol] = initTriggerId();
+ this[trigger_async_id_symbol] = getDefaultTriggerAsyncId();
if (async_hook_fields[kInit] > 0) {
emitInit(this[async_id_symbol],
'Immediate',
diff --git a/lib/util.js b/lib/util.js
index 27a9d983c6cc47..68ef730759bb06 100644
--- a/lib/util.js
+++ b/lib/util.js
@@ -58,7 +58,8 @@ const {
getConstructorOf,
isError,
promisify,
- join
+ join,
+ removeColors
} = require('internal/util');
const inspectDefaultOptions = Object.seal({
@@ -84,7 +85,6 @@ const strEscapeSequencesRegExp = /[\x00-\x1f\x27\x5c]/;
const strEscapeSequencesReplacer = /[\x00-\x1f\x27\x5c]/g;
/* eslint-enable */
const keyStrRegExp = /^[a-zA-Z_][a-zA-Z_0-9]*$/;
-const colorRegExp = /\u001b\[\d\d?m/g;
const numberRegExp = /^(0|[1-9][0-9]*)$/;
// Escaped special characters. Use empty strings to fill up unused entries.
@@ -843,7 +843,7 @@ function reduceToSingleString(ctx, output, base, braces, addLn) {
var length = 0;
for (var i = 0; i < output.length && length <= breakLength; i++) {
if (ctx.colors) {
- length += output[i].replace(colorRegExp, '').length + 1;
+ length += removeColors(output[i]).length + 1;
} else {
length += output[i].length + 1;
}
diff --git a/node.gyp b/node.gyp
index 46796d01fd800f..ca5eb730129224 100644
--- a/node.gyp
+++ b/node.gyp
@@ -22,6 +22,8 @@
'node_v8_options%': '',
'node_enable_v8_vtunejit%': 'false',
'node_core_target_name%': 'node',
+ 'node_lib_target_name%': 'node_lib',
+ 'node_intermediate_lib_type%': 'static_library',
'library_files': [
'lib/internal/bootstrap_node.js',
'lib/async_hooks.js',
@@ -156,6 +158,17 @@
'conditions': [
[ 'node_shared=="true"', {
'node_target_type%': 'shared_library',
+ 'conditions': [
+ ['OS=="aix"', {
+ # For AIX, always generate static library first,
+ # It needs an extra step to generate exp and
+ # then use both static lib and exp to create
+ # shared lib.
+ 'node_intermediate_lib_type': 'static_library',
+ }, {
+ 'node_intermediate_lib_type': 'shared_library',
+ }],
+ ],
}, {
'node_target_type%': 'executable',
}],
@@ -172,7 +185,81 @@
'targets': [
{
'target_name': '<(node_core_target_name)',
- 'type': '<(node_target_type)',
+ 'type': 'executable',
+ 'sources': [
+ 'src/node_main.cc'
+ ],
+ 'include_dirs': [
+ 'src',
+ 'deps/v8/include',
+ ],
+ 'conditions': [
+ [ 'node_intermediate_lib_type=="static_library" and '
+ 'node_shared=="true" and OS=="aix"', {
+ # For AIX, shared lib is linked by static lib and .exp. In the
+ # case here, the executable needs to link to shared lib.
+ # Therefore, use 'node_aix_shared' target to generate the
+ # shared lib and then executable.
+ 'dependencies': [ 'node_aix_shared' ],
+ }, {
+ 'dependencies': [ '<(node_lib_target_name)' ],
+ }],
+ [ 'node_intermediate_lib_type=="static_library" and '
+ 'node_shared=="false"', {
+ 'includes': [
+ 'node.gypi'
+ ],
+ 'xcode_settings': {
+ 'OTHER_LDFLAGS': [
+ '-Wl,-force_load,<(PRODUCT_DIR)/<(STATIC_LIB_PREFIX)'
+ '<(node_core_target_name)<(STATIC_LIB_SUFFIX)',
+ ],
+ },
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'AdditionalOptions': [
+ '/WHOLEARCHIVE:<(PRODUCT_DIR)\\lib\\'
+ '<(node_core_target_name)<(STATIC_LIB_SUFFIX)',
+ ],
+ },
+ },
+ 'conditions': [
+ ['OS in "linux freebsd openbsd solaris android"', {
+ 'ldflags': [
+ '-Wl,--whole-archive,<(OBJ_DIR)/<(STATIC_LIB_PREFIX)'
+ '<(node_core_target_name)<(STATIC_LIB_SUFFIX)',
+ '-Wl,--no-whole-archive',
+ ],
+ }],
+ [ 'OS=="win"', {
+ 'sources': [ 'src/res/node.rc' ],
+ 'conditions': [
+ [ 'node_use_etw=="true"', {
+ 'sources': [
+ 'tools/msvs/genfiles/node_etw_provider.rc'
+ ],
+ }],
+ [ 'node_use_perfctr=="true"', {
+ 'sources': [
+ 'tools/msvs/genfiles/node_perfctr_provider.rc',
+ ],
+ }]
+ ],
+ }],
+ ],
+ }],
+ [ 'node_intermediate_lib_type=="shared_library" and OS=="win"', {
+ # On Windows, having the same name for both executable and shared
+ # lib causes filename collision. Need a different PRODUCT_NAME for
+ # the executable and rename it back to node.exe later
+ 'product_name': '<(node_core_target_name)-win',
+ }],
+ ],
+ },
+ {
+ 'target_name': '<(node_lib_target_name)',
+ 'type': '<(node_intermediate_lib_type)',
+ 'product_name': '<(node_core_target_name)',
'dependencies': [
'node_js2c#host',
@@ -184,7 +271,6 @@
'include_dirs': [
'src',
- 'tools/msvs/genfiles',
'<(SHARED_INTERMEDIATE_DIR)' # for node_natives.h
],
@@ -210,7 +296,6 @@
'src/node_file.cc',
'src/node_http2.cc',
'src/node_http_parser.cc',
- 'src/node_main.cc',
'src/node_os.cc',
'src/node_platform.cc',
'src/node_perf.cc',
@@ -314,6 +399,9 @@
[ 'node_shared=="true" and node_module_version!="" and OS!="win"', {
'product_extension': '<(shlib_suffix)',
}],
+ ['node_shared=="true" and OS=="aix"', {
+ 'product_name': 'node_base',
+ }],
[ 'v8_enable_inspector==1', {
'defines': [
'HAVE_INSPECTOR=1',
@@ -344,7 +432,7 @@
'src/backtrace_win32.cc',
],
'conditions': [
- [ 'node_target_type!="static_library"', {
+ [ 'node_intermediate_lib_type!="static_library"', {
'sources': [
'src/res/node.rc',
],
@@ -357,6 +445,10 @@
'FD_SETSIZE=1024',
# we need to use node's preferred "win32" rather than gyp's preferred "win"
'NODE_PLATFORM="win32"',
+ # Stop <windows.h> from defining macros that conflict with
+ # std::min() and std::max(). We don't use <windows.h> (much)
+ # but we still inherit it from uv.h.
+ 'NOMINMAX',
'_UNICODE=1',
],
'libraries': [ '-lpsapi.lib' ]
@@ -364,6 +456,64 @@
'defines': [ '__POSIX__' ],
'sources': [ 'src/backtrace_posix.cc' ],
}],
+ [ 'node_use_etw=="true"', {
+ 'defines': [ 'HAVE_ETW=1' ],
+ 'dependencies': [ 'node_etw' ],
+ 'include_dirs': [
+ 'src',
+ 'tools/msvs/genfiles',
+ '<(SHARED_INTERMEDIATE_DIR)' # for node_natives.h
+ ],
+ 'sources': [
+ 'src/node_win32_etw_provider.h',
+ 'src/node_win32_etw_provider-inl.h',
+ 'src/node_win32_etw_provider.cc',
+ 'src/node_dtrace.cc',
+ 'tools/msvs/genfiles/node_etw_provider.h',
+ ],
+ 'conditions': [
+ ['node_intermediate_lib_type != "static_library"', {
+ 'sources': [
+ 'tools/msvs/genfiles/node_etw_provider.rc',
+ ],
+ }],
+ ],
+ }],
+ [ 'node_use_perfctr=="true"', {
+ 'defines': [ 'HAVE_PERFCTR=1' ],
+ 'dependencies': [ 'node_perfctr' ],
+ 'include_dirs': [
+ 'src',
+ 'tools/msvs/genfiles',
+ '<(SHARED_INTERMEDIATE_DIR)' # for node_natives.h
+ ],
+ 'sources': [
+ 'src/node_win32_perfctr_provider.h',
+ 'src/node_win32_perfctr_provider.cc',
+ 'src/node_counters.cc',
+ 'src/node_counters.h',
+ ],
+ 'conditions': [
+ ['node_intermediate_lib_type != "static_library"', {
+ 'sources': [
+ 'tools/msvs/genfiles/node_perfctr_provider.rc',
+ ],
+ }],
+ ],
+ }],
+ [ 'node_use_lttng=="true"', {
+ 'defines': [ 'HAVE_LTTNG=1' ],
+ 'include_dirs': [ '<(SHARED_INTERMEDIATE_DIR)' ],
+ 'libraries': [ '-llttng-ust' ],
+ 'include_dirs': [
+ 'src',
+ 'tools/msvs/genfiles',
+ '<(SHARED_INTERMEDIATE_DIR)' # for node_natives.h
+ ],
+ 'sources': [
+ 'src/node_lttng.cc'
+ ],
+ }],
[ 'node_use_dtrace=="true"', {
'defines': [ 'HAVE_DTRACE=1' ],
'dependencies': [
@@ -404,7 +554,6 @@
] ]
} ],
[ 'node_use_openssl=="true"', {
- 'defines': [ 'HAVE_OPENSSL=1' ],
'sources': [
'src/node_crypto.cc',
'src/node_crypto_bio.cc',
@@ -415,49 +564,6 @@
'src/tls_wrap.cc',
'src/tls_wrap.h'
],
- 'conditions': [
- ['openssl_fips != ""', {
- 'defines': [ 'NODE_FIPS_MODE' ],
- }],
- [ 'node_shared_openssl=="false"', {
- 'dependencies': [
- './deps/openssl/openssl.gyp:openssl',
-
- # For tests
- './deps/openssl/openssl.gyp:openssl-cli',
- ],
- 'conditions': [
- # -force_load or --whole-archive are not applicable for
- # the static library
- [ 'node_target_type!="static_library"', {
- 'xcode_settings': {
- 'OTHER_LDFLAGS': [
- '-Wl,-force_load,<(PRODUCT_DIR)/<(OPENSSL_PRODUCT)',
- ],
- },
- 'conditions': [
- ['OS in "linux freebsd" and node_shared=="false"', {
- 'ldflags': [
- '-Wl,--whole-archive,'
- '<(OBJ_DIR)/deps/openssl/'
- '<(OPENSSL_PRODUCT)',
- '-Wl,--no-whole-archive',
- ],
- }],
- # openssl.def is based on zlib.def, zlib symbols
- # are always exported.
- ['use_openssl_def==1', {
- 'sources': ['<(SHARED_INTERMEDIATE_DIR)/openssl.def'],
- }],
- ['OS=="win" and use_openssl_def==0', {
- 'sources': ['deps/zlib/win32/zlib.def'],
- }],
- ],
- }],
- ],
- }]]
- }, {
- 'defines': [ 'HAVE_OPENSSL=0' ]
}],
],
},
@@ -515,7 +621,7 @@
'target_name': 'node_etw',
'type': 'none',
'conditions': [
- [ 'node_use_etw=="true" and node_target_type!="static_library"', {
+ [ 'node_use_etw=="true"', {
'actions': [
{
'action_name': 'node_etw',
@@ -536,7 +642,7 @@
'target_name': 'node_perfctr',
'type': 'none',
'conditions': [
- [ 'node_use_perfctr=="true" and node_target_type!="static_library"', {
+ [ 'node_use_perfctr=="true"', {
'actions': [
{
'action_name': 'node_perfctr_man',
@@ -598,15 +704,13 @@
'<(SHARED_INTERMEDIATE_DIR)/node_javascript.cc',
],
'conditions': [
- [ 'node_use_dtrace=="false" and node_use_etw=="false" or '
- 'node_target_type=="static_library"', {
+ [ 'node_use_dtrace=="false" and node_use_etw=="false"', {
'inputs': [ 'src/notrace_macros.py' ]
}],
- ['node_use_lttng=="false" or node_target_type=="static_library"', {
+ [ 'node_use_lttng=="false"', {
'inputs': [ 'src/nolttng_macros.py' ]
}],
- [ 'node_use_perfctr=="false" or '
- 'node_target_type=="static_library"', {
+ [ 'node_use_perfctr=="false"', {
'inputs': [ 'src/noperfctr_macros.py' ]
}]
],
@@ -656,10 +760,10 @@
{
'action_name': 'node_dtrace_provider_o',
'inputs': [
- '<(OBJ_DIR)/node/src/node_dtrace.o',
+ '<(OBJ_DIR)/<(node_lib_target_name)/src/node_dtrace.o',
],
'outputs': [
- '<(OBJ_DIR)/node/src/node_dtrace_provider.o'
+ '<(OBJ_DIR)/<(node_lib_target_name)/src/node_dtrace_provider.o'
],
'action': [ 'dtrace', '-G', '-xnolibs', '-s', 'src/node_provider.d',
'<@(_inputs)', '-o', '<@(_outputs)' ]
@@ -709,7 +813,7 @@
'<(SHARED_INTERMEDIATE_DIR)/v8constants.h'
],
'outputs': [
- '<(OBJ_DIR)/node/src/node_dtrace_ustack.o'
+ '<(OBJ_DIR)/<(node_lib_target_name)/src/node_dtrace_ustack.o'
],
'conditions': [
[ 'target_arch=="ia32" or target_arch=="arm"', {
@@ -756,12 +860,41 @@
} ],
]
},
+ {
+ # When using shared lib to build executable in Windows, in order to avoid
+ # filename collision, the executable name is node-win.exe. Need to rename
+ # it back to node.exe
+ 'target_name': 'rename_node_bin_win',
+ 'type': 'none',
+ 'dependencies': [
+ '<(node_core_target_name)',
+ ],
+ 'conditions': [
+ [ 'OS=="win" and node_intermediate_lib_type=="shared_library"', {
+ 'actions': [
+ {
+ 'action_name': 'rename_node_bin_win',
+ 'inputs': [
+ '<(PRODUCT_DIR)/<(node_core_target_name)-win.exe'
+ ],
+ 'outputs': [
+ '<(PRODUCT_DIR)/<(node_core_target_name).exe',
+ ],
+ 'action': [
+ 'mv', '<@(_inputs)', '<@(_outputs)',
+ ],
+ },
+ ],
+ } ],
+ ]
+ },
{
'target_name': 'cctest',
'type': 'executable',
'dependencies': [
'<(node_core_target_name)',
+ 'rename_node_bin_win',
'deps/gtest/gtest.gyp:gtest',
'node_js2c#host',
'node_dtrace_header',
@@ -770,9 +903,9 @@
],
'variables': {
- 'OBJ_PATH': '<(OBJ_DIR)/node/src',
- 'OBJ_GEN_PATH': '<(OBJ_DIR)/node/gen',
- 'OBJ_TRACING_PATH': '<(OBJ_DIR)/node/src/tracing',
+ 'OBJ_PATH': '<(OBJ_DIR)/<(node_lib_target_name)/src',
+ 'OBJ_GEN_PATH': '<(OBJ_DIR)/<(node_lib_target_name)/gen',
+ 'OBJ_TRACING_PATH': '<(OBJ_DIR)/<(node_lib_target_name)/src/tracing',
'OBJ_SUFFIX': 'o',
'OBJ_SEPARATOR': '/',
'conditions': [
@@ -783,18 +916,19 @@
'OBJ_PATH': '<(OBJ_DIR)/src',
'OBJ_GEN_PATH': '<(OBJ_DIR)/gen',
'OBJ_TRACING_PATH': '<(OBJ_DIR)/src/tracing',
- 'OBJ_SEPARATOR': '/node.',
+ 'OBJ_SEPARATOR': '/<(node_lib_target_name).',
}, {
'conditions': [
['OS=="win"', {
- 'OBJ_PATH': '<(OBJ_DIR)/node',
- 'OBJ_GEN_PATH': '<(OBJ_DIR)/node',
- 'OBJ_TRACING_PATH': '<(OBJ_DIR)/node',
+ 'OBJ_PATH': '<(OBJ_DIR)/<(node_lib_target_name)',
+ 'OBJ_GEN_PATH': '<(OBJ_DIR)/<(node_lib_target_name)',
+ 'OBJ_TRACING_PATH': '<(OBJ_DIR)/<(node_lib_target_name)',
}],
['OS=="aix"', {
- 'OBJ_PATH': '<(OBJ_DIR)/node_base/src',
- 'OBJ_GEN_PATH': '<(OBJ_DIR)/node_base/gen',
- 'OBJ_TRACING_PATH': '<(OBJ_DIR)/node_base/src/tracing',
+ 'OBJ_PATH': '<(OBJ_DIR)/<(node_lib_target_name)/src',
+ 'OBJ_GEN_PATH': '<(OBJ_DIR)/<(node_lib_target_name)/gen',
+ 'OBJ_TRACING_PATH':
+ '<(OBJ_DIR)/<(node_lib_target_name)/src/tracing',
}],
]}
]
@@ -826,62 +960,73 @@
'test/cctest/test_url.cc'
],
- 'sources!': [
- 'src/node_main.cc'
+ 'libraries': [
+ '<(OBJ_PATH)<(OBJ_SEPARATOR)async_wrap.<(OBJ_SUFFIX)',
+ '<(OBJ_PATH)<(OBJ_SEPARATOR)env.<(OBJ_SUFFIX)',
+ '<(OBJ_PATH)<(OBJ_SEPARATOR)node.<(OBJ_SUFFIX)',
+ '<(OBJ_PATH)<(OBJ_SEPARATOR)node_buffer.<(OBJ_SUFFIX)',
+ '<(OBJ_PATH)<(OBJ_SEPARATOR)node_debug_options.<(OBJ_SUFFIX)',
+ '<(OBJ_PATH)<(OBJ_SEPARATOR)node_i18n.<(OBJ_SUFFIX)',
+ '<(OBJ_PATH)<(OBJ_SEPARATOR)node_perf.<(OBJ_SUFFIX)',
+ '<(OBJ_PATH)<(OBJ_SEPARATOR)node_platform.<(OBJ_SUFFIX)',
+ '<(OBJ_PATH)<(OBJ_SEPARATOR)node_url.<(OBJ_SUFFIX)',
+ '<(OBJ_PATH)<(OBJ_SEPARATOR)util.<(OBJ_SUFFIX)',
+ '<(OBJ_PATH)<(OBJ_SEPARATOR)string_bytes.<(OBJ_SUFFIX)',
+ '<(OBJ_PATH)<(OBJ_SEPARATOR)string_search.<(OBJ_SUFFIX)',
+ '<(OBJ_PATH)<(OBJ_SEPARATOR)stream_base.<(OBJ_SUFFIX)',
+ '<(OBJ_PATH)<(OBJ_SEPARATOR)node_constants.<(OBJ_SUFFIX)',
+ '<(OBJ_TRACING_PATH)<(OBJ_SEPARATOR)agent.<(OBJ_SUFFIX)',
+ '<(OBJ_TRACING_PATH)<(OBJ_SEPARATOR)node_trace_buffer.<(OBJ_SUFFIX)',
+ '<(OBJ_TRACING_PATH)<(OBJ_SEPARATOR)node_trace_writer.<(OBJ_SUFFIX)',
+ '<(OBJ_TRACING_PATH)<(OBJ_SEPARATOR)trace_event.<(OBJ_SUFFIX)',
+ '<(OBJ_GEN_PATH)<(OBJ_SEPARATOR)node_javascript.<(OBJ_SUFFIX)',
],
'conditions': [
- ['node_target_type!="static_library"', {
- 'libraries': [
- '<(OBJ_PATH)<(OBJ_SEPARATOR)async_wrap.<(OBJ_SUFFIX)',
- '<(OBJ_PATH)<(OBJ_SEPARATOR)env.<(OBJ_SUFFIX)',
- '<(OBJ_PATH)<(OBJ_SEPARATOR)node.<(OBJ_SUFFIX)',
- '<(OBJ_PATH)<(OBJ_SEPARATOR)node_buffer.<(OBJ_SUFFIX)',
- '<(OBJ_PATH)<(OBJ_SEPARATOR)node_debug_options.<(OBJ_SUFFIX)',
- '<(OBJ_PATH)<(OBJ_SEPARATOR)node_i18n.<(OBJ_SUFFIX)',
- '<(OBJ_PATH)<(OBJ_SEPARATOR)node_perf.<(OBJ_SUFFIX)',
- '<(OBJ_PATH)<(OBJ_SEPARATOR)node_platform.<(OBJ_SUFFIX)',
- '<(OBJ_PATH)<(OBJ_SEPARATOR)node_url.<(OBJ_SUFFIX)',
- '<(OBJ_PATH)<(OBJ_SEPARATOR)util.<(OBJ_SUFFIX)',
- '<(OBJ_PATH)<(OBJ_SEPARATOR)string_bytes.<(OBJ_SUFFIX)',
- '<(OBJ_PATH)<(OBJ_SEPARATOR)string_search.<(OBJ_SUFFIX)',
- '<(OBJ_PATH)<(OBJ_SEPARATOR)stream_base.<(OBJ_SUFFIX)',
- '<(OBJ_PATH)<(OBJ_SEPARATOR)node_constants.<(OBJ_SUFFIX)',
- '<(OBJ_TRACING_PATH)<(OBJ_SEPARATOR)agent.<(OBJ_SUFFIX)',
- '<(OBJ_TRACING_PATH)<(OBJ_SEPARATOR)node_trace_buffer.<(OBJ_SUFFIX)',
- '<(OBJ_TRACING_PATH)<(OBJ_SEPARATOR)node_trace_writer.<(OBJ_SUFFIX)',
- '<(OBJ_TRACING_PATH)<(OBJ_SEPARATOR)trace_event.<(OBJ_SUFFIX)',
- '<(OBJ_GEN_PATH)<(OBJ_SEPARATOR)node_javascript.<(OBJ_SUFFIX)',
- ],
- }],
[ 'node_use_openssl=="true"', {
- 'libraries': [
- '<(OBJ_PATH)<(OBJ_SEPARATOR)node_crypto.<(OBJ_SUFFIX)',
- '<(OBJ_PATH)<(OBJ_SEPARATOR)node_crypto_bio.<(OBJ_SUFFIX)',
- '<(OBJ_PATH)<(OBJ_SEPARATOR)node_crypto_clienthello.<(OBJ_SUFFIX)',
- '<(OBJ_PATH)<(OBJ_SEPARATOR)tls_wrap.<(OBJ_SUFFIX)',
+ 'conditions': [
+ ['node_target_type!="static_library"', {
+ 'libraries': [
+ '<(OBJ_PATH)<(OBJ_SEPARATOR)node_crypto.<(OBJ_SUFFIX)',
+ '<(OBJ_PATH)<(OBJ_SEPARATOR)node_crypto_bio.<(OBJ_SUFFIX)',
+ '<(OBJ_PATH)<(OBJ_SEPARATOR)node_crypto_clienthello.<(OBJ_SUFFIX)',
+ '<(OBJ_PATH)<(OBJ_SEPARATOR)tls_wrap.<(OBJ_SUFFIX)',
+ ],
+ }],
],
'defines': [
'HAVE_OPENSSL=1',
],
}],
+ [ 'node_use_perfctr=="true"', {
+ 'defines': [ 'HAVE_PERFCTR=1' ],
+ 'libraries': [
+ '<(OBJ_PATH)<(OBJ_SEPARATOR)node_counters.<(OBJ_SUFFIX)',
+ '<(OBJ_PATH)<(OBJ_SEPARATOR)'
+ 'node_win32_perfctr_provider.<(OBJ_SUFFIX)',
+ ],
+ }],
['v8_enable_inspector==1', {
'sources': [
'test/cctest/test_inspector_socket.cc',
'test/cctest/test_inspector_socket_server.cc'
],
- 'libraries': [
- '<(OBJ_PATH)<(OBJ_SEPARATOR)inspector_agent.<(OBJ_SUFFIX)',
- '<(OBJ_PATH)<(OBJ_SEPARATOR)inspector_io.<(OBJ_SUFFIX)',
- '<(OBJ_PATH)<(OBJ_SEPARATOR)inspector_js_api.<(OBJ_SUFFIX)',
- '<(OBJ_PATH)<(OBJ_SEPARATOR)inspector_socket.<(OBJ_SUFFIX)',
- '<(OBJ_PATH)<(OBJ_SEPARATOR)inspector_socket_server.<(OBJ_SUFFIX)',
+ 'conditions': [
+ ['node_target_type!="static_library"', {
+ 'libraries': [
+ '<(OBJ_PATH)<(OBJ_SEPARATOR)inspector_agent.<(OBJ_SUFFIX)',
+ '<(OBJ_PATH)<(OBJ_SEPARATOR)inspector_io.<(OBJ_SUFFIX)',
+ '<(OBJ_PATH)<(OBJ_SEPARATOR)inspector_js_api.<(OBJ_SUFFIX)',
+ '<(OBJ_PATH)<(OBJ_SEPARATOR)inspector_socket.<(OBJ_SUFFIX)',
+ '<(OBJ_PATH)<(OBJ_SEPARATOR)inspector_socket_server.<(OBJ_SUFFIX)',
+ ],
+ }],
],
'defines': [
'HAVE_INSPECTOR=1',
],
}],
- [ 'node_use_dtrace=="true"', {
+ [ 'node_use_dtrace=="true" and node_target_type!="static_library"', {
'libraries': [
'<(OBJ_PATH)<(OBJ_SEPARATOR)node_dtrace.<(OBJ_SUFFIX)',
],
@@ -894,143 +1039,56 @@
}],
['OS=="linux"', {
'libraries': [
- '<(SHARED_INTERMEDIATE_DIR)/node_dtrace_provider.o',
+ '<(SHARED_INTERMEDIATE_DIR)<(OBJ_SEPARATOR)'
+ 'node_dtrace_provider.<(OBJ_SUFFIX)',
]
}],
],
- }],
- [ 'OS=="win"', {
- 'libraries': [
- '<(OBJ_PATH)<(OBJ_SEPARATOR)backtrace_win32.<(OBJ_SUFFIX)',
- ],
}, {
- 'libraries': [
- '<(OBJ_PATH)<(OBJ_SEPARATOR)backtrace_posix.<(OBJ_SUFFIX)',
- ],
- }],
- [ 'node_shared_zlib=="false"', {
- 'dependencies': [
- 'deps/zlib/zlib.gyp:zlib',
- ]
- }],
- [ 'node_shared_openssl=="false" and node_shared=="false"', {
- 'dependencies': [
- 'deps/openssl/openssl.gyp:openssl'
- ]
- }],
- [ 'node_shared_http_parser=="false"', {
- 'dependencies': [
- 'deps/http_parser/http_parser.gyp:http_parser'
- ]
- }],
- [ 'node_shared_libuv=="false"', {
- 'dependencies': [
- 'deps/uv/uv.gyp:libuv'
+ 'conditions': [
+ [ 'node_use_etw=="true" and OS=="win"', {
+ 'libraries': [
+ '<(OBJ_PATH)<(OBJ_SEPARATOR)node_dtrace.<(OBJ_SUFFIX)',
+ '<(OBJ_PATH)<(OBJ_SEPARATOR)'
+ 'node_win32_etw_provider.<(OBJ_SUFFIX)',
+ ],
+ }]
]
}],
- [ 'node_shared_nghttp2=="false"', {
- 'dependencies': [
- 'deps/nghttp2/nghttp2.gyp:nghttp2'
+ [ 'OS=="win" and node_target_type!="static_library"', {
+ 'libraries': [
+ '<(OBJ_PATH)<(OBJ_SEPARATOR)backtrace_win32.<(OBJ_SUFFIX)',
],
- 'include_dirs': [
- 'deps/nghttp2/lib/includes'
- ]
- }],
- [ 'node_use_v8_platform=="true"', {
- 'dependencies': [
- 'deps/v8/src/v8.gyp:v8_libplatform',
+ }, {
+ 'conditions': [
+ ['node_target_type!="static_library"', {
+ 'libraries': [
+ '<(OBJ_PATH)<(OBJ_SEPARATOR)backtrace_posix.<(OBJ_SUFFIX)',
+ ],
+ }],
],
}],
['OS=="solaris"', {
'ldflags': [ '-I<(SHARED_INTERMEDIATE_DIR)' ]
}],
- [ 'node_use_openssl=="true"', {
- 'conditions': [
- [ 'node_shared_openssl=="false"', {
- 'conditions': [
- # -force_load or --whole-archive are not applicable for
- # the static library
- [ 'node_target_type!="static_library"', {
- 'xcode_settings': {
- 'OTHER_LDFLAGS': [
- '-Wl,-force_load,<(PRODUCT_DIR)/<(OPENSSL_PRODUCT)',
- ],
- },
- 'conditions': [
- ['OS in "linux freebsd" and node_shared=="false"', {
- 'ldflags': [
- '-Wl,--whole-archive,'
- '<(OBJ_DIR)/deps/openssl/'
- '<(OPENSSL_PRODUCT)',
- '-Wl,--no-whole-archive',
- ],
- }],
- ],
- }],
- ],
- }]]
- }],
]
}
], # end targets
'conditions': [
- [ 'node_target_type=="static_library"', {
+ [ 'OS=="aix" and node_shared=="true"', {
'targets': [
{
- 'target_name': 'static_node',
- 'type': 'executable',
+ 'target_name': 'node_aix_shared',
+ 'type': 'shared_library',
'product_name': '<(node_core_target_name)',
- 'dependencies': [
- '<(node_core_target_name)',
- ],
- 'sources+': [
- 'src/node_main.cc',
- ],
- 'include_dirs': [
- 'deps/v8/include',
- ],
- 'xcode_settings': {
- 'OTHER_LDFLAGS': [
- '-Wl,-force_load,<(PRODUCT_DIR)/<(STATIC_LIB_PREFIX)'
- '<(node_core_target_name)<(STATIC_LIB_SUFFIX)',
- ],
- },
- 'msvs_settings': {
- 'VCLinkerTool': {
- 'AdditionalOptions': [
- '/WHOLEARCHIVE:<(PRODUCT_DIR)/lib/'
- '<(node_core_target_name)<(STATIC_LIB_SUFFIX)',
- ],
- },
- },
- 'conditions': [
- ['OS in "linux freebsd openbsd solaris android"', {
- 'ldflags': [
- '-Wl,--whole-archive,<(OBJ_DIR)/<(STATIC_LIB_PREFIX)'
- '<(node_core_target_name)<(STATIC_LIB_SUFFIX)',
- '-Wl,--no-whole-archive',
- ],
- }],
- ],
- },
- ],
- }],
- ['OS=="aix"', {
- 'targets': [
- {
- 'target_name': 'node',
+ 'ldflags': [ '--shared' ],
+ 'product_extension': '<(shlib_suffix)',
'conditions': [
- ['node_shared=="true"', {
- 'type': 'shared_library',
- 'ldflags': ['--shared'],
- 'product_extension': '<(shlib_suffix)',
- }, {
- 'type': 'executable',
- }],
['target_arch=="ppc64"', {
'ldflags': [
- '-Wl,-blibpath:/usr/lib:/lib:/opt/freeware/lib/pthread/ppc64'
+ '-Wl,-blibpath:/usr/lib:/lib:'
+ '/opt/freeware/lib/pthread/ppc64'
],
}],
['target_arch=="ppc"', {
@@ -1039,45 +1097,20 @@
],
}]
],
- 'dependencies': ['<(node_core_target_name)', 'node_exp'],
-
+ 'includes': [
+ 'node.gypi'
+ ],
+ 'dependencies': [ '<(node_lib_target_name)' ],
'include_dirs': [
'src',
'deps/v8/include',
],
-
'sources': [
- 'src/node_main.cc',
'<@(library_files)',
- # node.gyp is added to the project by default.
'common.gypi',
],
-
- 'ldflags': ['-Wl,-bE:<(PRODUCT_DIR)/node.exp', '-Wl,-brtl'],
},
- {
- 'target_name': 'node_exp',
- 'type': 'none',
- 'dependencies': [
- '<(node_core_target_name)',
- ],
- 'actions': [
- {
- 'action_name': 'expfile',
- 'inputs': [
- '<(OBJ_DIR)'
- ],
- 'outputs': [
- '<(PRODUCT_DIR)/node.exp'
- ],
- 'action': [
- 'sh', 'tools/create_expfile.sh',
- '<@(_inputs)', '<@(_outputs)'
- ],
- }
- ]
- }
- ], # end targets
+ ]
}], # end aix section
], # end conditions block
}
diff --git a/node.gypi b/node.gypi
index 3990c59ef98851..386601906fbe4a 100644
--- a/node.gypi
+++ b/node.gypi
@@ -1,4 +1,29 @@
{
+ # 'force_load' means to include the static libs into the shared lib or
+ # executable. Therefore, it is enabled when building:
+ # 1. The executable and it uses static lib (cctest and node)
+ # 2. The shared lib
+ # Linker optimizes out functions that are not used. When force_load=true,
+ # --whole-archive,force_load and /WHOLEARCHIVE are used to include
+ # all obj files in static libs into the executable or shared lib.
+ 'variables': {
+ 'variables': {
+ 'variables': {
+ 'force_load%': 'true',
+ 'current_type%': '<(_type)',
+ },
+ 'force_load%': '<(force_load)',
+ 'conditions': [
+ ['current_type=="static_library"', {
+ 'force_load': 'false',
+ }],
+ [ 'current_type=="executable" and node_target_type=="shared_library"', {
+ 'force_load': 'false',
+ }]
+ ],
+ },
+ 'force_load%': '<(force_load)',
+ },
'conditions': [
[ 'node_shared=="false"', {
'msvs_settings': {
@@ -36,12 +61,6 @@
[ 'node_v8_options!=""', {
'defines': [ 'NODE_V8_OPTIONS="<(node_v8_options)"'],
}],
- # No node_main.cc for anything except executable
- [ 'node_target_type!="executable"', {
- 'sources!': [
- 'src/node_main.cc',
- ],
- }],
[ 'node_release_urlbase!=""', {
'defines': [
'NODE_RELEASE_URLBASE="<(node_release_urlbase)"',
@@ -70,37 +89,6 @@
'deps/v8/src/third_party/vtune/v8vtune.gyp:v8_vtune'
],
}],
- [ 'node_use_lttng=="true"', {
- 'defines': [ 'HAVE_LTTNG=1' ],
- 'include_dirs': [ '<(SHARED_INTERMEDIATE_DIR)' ],
- 'libraries': [ '-llttng-ust' ],
- 'sources': [
- 'src/node_lttng.cc'
- ],
- } ],
- [ 'node_use_etw=="true" and node_target_type!="static_library"', {
- 'defines': [ 'HAVE_ETW=1' ],
- 'dependencies': [ 'node_etw' ],
- 'sources': [
- 'src/node_win32_etw_provider.h',
- 'src/node_win32_etw_provider-inl.h',
- 'src/node_win32_etw_provider.cc',
- 'src/node_dtrace.cc',
- 'tools/msvs/genfiles/node_etw_provider.h',
- 'tools/msvs/genfiles/node_etw_provider.rc',
- ]
- } ],
- [ 'node_use_perfctr=="true" and node_target_type!="static_library"', {
- 'defines': [ 'HAVE_PERFCTR=1' ],
- 'dependencies': [ 'node_perfctr' ],
- 'sources': [
- 'src/node_win32_perfctr_provider.h',
- 'src/node_win32_perfctr_provider.cc',
- 'src/node_counters.cc',
- 'src/node_counters.h',
- 'tools/msvs/genfiles/node_perfctr_provider.rc',
- ]
- } ],
[ 'node_no_browser_globals=="true"', {
'defines': [ 'NODE_NO_BROWSER_GLOBALS' ],
} ],
@@ -108,7 +96,7 @@
'dependencies': [ 'deps/v8/src/v8.gyp:postmortem-metadata' ],
'conditions': [
# -force_load is not applicable for the static library
- [ 'node_target_type!="static_library"', {
+ [ 'force_load=="true"', {
'xcode_settings': {
'OTHER_LDFLAGS': [
'-Wl,-force_load,<(V8_BASE)',
@@ -159,6 +147,27 @@
'defines': [
'_LINUX_SOURCE_COMPAT',
],
+ 'conditions': [
+ [ 'force_load=="true"', {
+
+ 'actions': [
+ {
+ 'action_name': 'expfile',
+ 'inputs': [
+ '<(OBJ_DIR)'
+ ],
+ 'outputs': [
+ '<(PRODUCT_DIR)/node.exp'
+ ],
+ 'action': [
+ 'sh', 'tools/create_expfile.sh',
+ '<@(_inputs)', '<@(_outputs)'
+ ],
+ }
+ ],
+ 'ldflags': ['-Wl,-bE:<(PRODUCT_DIR)/node.exp', '-Wl,-brtl'],
+ }],
+ ],
}],
[ 'OS=="solaris"', {
'libraries': [
@@ -174,12 +183,14 @@
'NODE_PLATFORM="sunos"',
],
}],
- [ '(OS=="freebsd" or OS=="linux") and node_shared=="false" and coverage=="false"', {
+ [ '(OS=="freebsd" or OS=="linux") and node_shared=="false"'
+ ' and coverage=="false" and force_load=="true"', {
'ldflags': [ '-Wl,-z,noexecstack',
'-Wl,--whole-archive <(V8_BASE)',
'-Wl,--no-whole-archive' ]
}],
- [ '(OS=="freebsd" or OS=="linux") and node_shared=="false" and coverage=="true"', {
+ [ '(OS=="freebsd" or OS=="linux") and node_shared=="false"'
+ ' and coverage=="true" and force_load=="true"', {
'ldflags': [ '-Wl,-z,noexecstack',
'-Wl,--whole-archive <(V8_BASE)',
'-Wl,--no-whole-archive',
@@ -206,5 +217,54 @@
[ 'OS=="sunos"', {
'ldflags': [ '-Wl,-M,/usr/lib/ld/map.noexstk' ],
}],
+
+ [ 'node_use_openssl=="true"', {
+ 'defines': [ 'HAVE_OPENSSL=1' ],
+ 'conditions': [
+ ['openssl_fips != ""', {
+ 'defines': [ 'NODE_FIPS_MODE' ],
+ }],
+ [ 'node_shared_openssl=="false"', {
+ 'dependencies': [
+ './deps/openssl/openssl.gyp:openssl',
+
+ # For tests
+ './deps/openssl/openssl.gyp:openssl-cli',
+ ],
+ 'conditions': [
+ # -force_load or --whole-archive are not applicable for
+ # the static library
+ [ 'force_load=="true"', {
+ 'xcode_settings': {
+ 'OTHER_LDFLAGS': [
+ '-Wl,-force_load,<(PRODUCT_DIR)/<(OPENSSL_PRODUCT)',
+ ],
+ },
+ 'conditions': [
+ ['OS in "linux freebsd" and node_shared=="false"', {
+ 'ldflags': [
+ '-Wl,--whole-archive,'
+ '<(OBJ_DIR)/deps/openssl/'
+ '<(OPENSSL_PRODUCT)',
+ '-Wl,--no-whole-archive',
+ ],
+ }],
+ # openssl.def is based on zlib.def, zlib symbols
+ # are always exported.
+ ['use_openssl_def==1', {
+ 'sources': ['<(SHARED_INTERMEDIATE_DIR)/openssl.def'],
+ }],
+ ['OS=="win" and use_openssl_def==0', {
+ 'sources': ['deps/zlib/win32/zlib.def'],
+ }],
+ ],
+ }],
+ ],
+ }]]
+
+ }, {
+ 'defines': [ 'HAVE_OPENSSL=0' ]
+ }],
+
],
}
diff --git a/src/aliased_buffer.h b/src/aliased_buffer.h
index 21aaeb61141c59..b99b01f5d94ca2 100644
--- a/src/aliased_buffer.h
+++ b/src/aliased_buffer.h
@@ -95,6 +95,21 @@ class AliasedBuffer {
js_array_.Reset();
}
+ AliasedBuffer& operator=(AliasedBuffer&& that) {
+ this->~AliasedBuffer();
+ isolate_ = that.isolate_;
+ count_ = that.count_;
+ byte_offset_ = that.byte_offset_;
+ buffer_ = that.buffer_;
+ free_buffer_ = that.free_buffer_;
+
+ js_array_.Reset(isolate_, that.js_array_.Get(isolate_));
+
+ that.buffer_ = nullptr;
+ that.js_array_.Reset();
+ return *this;
+ }
+
/**
* Helper class that is returned from operator[] to support assignment into
* a specified location.
@@ -111,11 +126,17 @@ class AliasedBuffer {
index_(that.index_) {
}
- inline Reference& operator=(const NativeT &val) {
+ template
+ inline Reference& operator=(const T& val) {
aliased_buffer_->SetValue(index_, val);
return *this;
}
+ // This is not caught by the template operator= above.
+ inline Reference& operator=(const Reference& val) {
+ return *this = static_cast(val);
+ }
+
operator NativeT() const {
return aliased_buffer_->GetValue(index_);
}
@@ -186,8 +207,12 @@ class AliasedBuffer {
return GetValue(index);
}
+ size_t Length() const {
+ return count_;
+ }
+
private:
- v8::Isolate* const isolate_;
+ v8::Isolate* isolate_;
size_t count_;
size_t byte_offset_;
NativeT* buffer_;
diff --git a/src/async_wrap.cc b/src/async_wrap.cc
index 236c5670472980..f770348b9c9183 100644
--- a/src/async_wrap.cc
+++ b/src/async_wrap.cc
@@ -308,12 +308,13 @@ static void PromiseHook(PromiseHookType type, Local promise,
if (parent_wrap == nullptr) {
parent_wrap = PromiseWrap::New(env, parent_promise, nullptr, true);
}
- // get id from parentWrap
- double trigger_async_id = parent_wrap->get_async_id();
- env->set_init_trigger_async_id(trigger_async_id);
- }
- wrap = PromiseWrap::New(env, promise, parent_wrap, silent);
+ AsyncHooks::DefaultTriggerAsyncIdScope trigger_scope(
+ env, parent_wrap->get_async_id());
+ wrap = PromiseWrap::New(env, promise, parent_wrap, silent);
+ } else {
+ wrap = PromiseWrap::New(env, promise, nullptr, silent);
+ }
}
CHECK_NE(wrap, nullptr);
@@ -466,13 +467,6 @@ void AsyncWrap::PopAsyncIds(const FunctionCallbackInfo& args) {
}
-void AsyncWrap::AsyncIdStackSize(const FunctionCallbackInfo& args) {
- Environment* env = Environment::GetCurrent(args);
- args.GetReturnValue().Set(
- static_cast(env->async_hooks()->stack_size()));
-}
-
-
void AsyncWrap::ClearAsyncIdStack(const FunctionCallbackInfo& args) {
Environment* env = Environment::GetCurrent(args);
env->async_hooks()->clear_async_id_stack();
@@ -511,7 +505,6 @@ void AsyncWrap::Initialize(Local target,
env->SetMethod(target, "setupHooks", SetupHooks);
env->SetMethod(target, "pushAsyncIds", PushAsyncIds);
env->SetMethod(target, "popAsyncIds", PopAsyncIds);
- env->SetMethod(target, "asyncIdStackSize", AsyncIdStackSize);
env->SetMethod(target, "clearAsyncIdStack", ClearAsyncIdStack);
env->SetMethod(target, "queueDestroyAsyncId", QueueDestroyAsyncId);
env->SetMethod(target, "enablePromiseHook", EnablePromiseHook);
@@ -541,13 +534,18 @@ void AsyncWrap::Initialize(Local target,
//
// kAsyncUid: Maintains the state of the next unique id to be assigned.
//
- // kInitTriggerAsyncId: Write the id of the resource responsible for a
+ // kDefaultTriggerAsyncId: Write the id of the resource responsible for a
// handle's creation just before calling the new handle's constructor.
- // After the new handle is constructed kInitTriggerAsyncId is set back to 0.
+ // After the new handle is constructed kDefaultTriggerAsyncId is set back
+ // to 0.
FORCE_SET_TARGET_FIELD(target,
"async_id_fields",
env->async_hooks()->async_id_fields().GetJSArray());
+ target->Set(context,
+ env->async_ids_stack_string(),
+ env->async_hooks()->async_ids_stack().GetJSArray()).FromJust();
+
Local constants = Object::New(isolate);
#define SET_HOOKS_CONSTANT(name) \
FORCE_SET_TARGET_FIELD( \
@@ -563,7 +561,8 @@ void AsyncWrap::Initialize(Local target,
SET_HOOKS_CONSTANT(kExecutionAsyncId);
SET_HOOKS_CONSTANT(kTriggerAsyncId);
SET_HOOKS_CONSTANT(kAsyncIdCounter);
- SET_HOOKS_CONSTANT(kInitTriggerAsyncId);
+ SET_HOOKS_CONSTANT(kDefaultTriggerAsyncId);
+ SET_HOOKS_CONSTANT(kStackLength);
#undef SET_HOOKS_CONSTANT
FORCE_SET_TARGET_FIELD(target, "constants", constants);
@@ -593,6 +592,7 @@ void AsyncWrap::Initialize(Local target,
env->set_async_hooks_after_function(Local());
env->set_async_hooks_destroy_function(Local());
env->set_async_hooks_promise_resolve_function(Local());
+ env->set_async_hooks_binding(target);
}
@@ -676,7 +676,7 @@ void AsyncWrap::EmitDestroy(Environment* env, double async_id) {
void AsyncWrap::AsyncReset(double execution_async_id, bool silent) {
async_id_ =
execution_async_id == -1 ? env()->new_async_id() : execution_async_id;
- trigger_async_id_ = env()->get_init_trigger_async_id();
+ trigger_async_id_ = env()->get_default_trigger_async_id();
switch (provider_type()) {
#define V(PROVIDER) \
@@ -777,7 +777,7 @@ async_context EmitAsyncInit(Isolate* isolate,
// Initialize async context struct
if (trigger_async_id == -1)
- trigger_async_id = env->get_init_trigger_async_id();
+ trigger_async_id = env->get_default_trigger_async_id();
async_context context = {
env->new_async_id(), // async_id_
diff --git a/src/async_wrap.h b/src/async_wrap.h
index c5dd4506886984..9b5632598bcc0b 100644
--- a/src/async_wrap.h
+++ b/src/async_wrap.h
@@ -123,7 +123,6 @@ class AsyncWrap : public BaseObject {
static void GetAsyncId(const v8::FunctionCallbackInfo& args);
static void PushAsyncIds(const v8::FunctionCallbackInfo& args);
static void PopAsyncIds(const v8::FunctionCallbackInfo& args);
- static void AsyncIdStackSize(const v8::FunctionCallbackInfo& args);
static void ClearAsyncIdStack(
const v8::FunctionCallbackInfo& args);
static void AsyncReset(const v8::FunctionCallbackInfo& args);
diff --git a/src/cares_wrap.cc b/src/cares_wrap.cc
index de3cb8f89c1ea2..165a8cda20618b 100644
--- a/src/cares_wrap.cc
+++ b/src/cares_wrap.cc
@@ -1876,60 +1876,42 @@ void AfterGetNameInfo(uv_getnameinfo_t* req,
delete req_wrap;
}
+using ParseIPResult = decltype(static_cast(0)->addr);
+
+int ParseIP(const char* ip, ParseIPResult* result = nullptr) {
+ ParseIPResult tmp;
+ if (result == nullptr) result = &tmp;
+ if (0 == uv_inet_pton(AF_INET, ip, result)) return 4;
+ if (0 == uv_inet_pton(AF_INET6, ip, result)) return 6;
+ return 0;
+}
void IsIP(const FunctionCallbackInfo& args) {
node::Utf8Value ip(args.GetIsolate(), args[0]);
- char address_buffer[sizeof(struct in6_addr)];
-
- int rc = 0;
- if (uv_inet_pton(AF_INET, *ip, &address_buffer) == 0)
- rc = 4;
- else if (uv_inet_pton(AF_INET6, *ip, &address_buffer) == 0)
- rc = 6;
-
- args.GetReturnValue().Set(rc);
+ args.GetReturnValue().Set(ParseIP(*ip));
}
void IsIPv4(const FunctionCallbackInfo& args) {
node::Utf8Value ip(args.GetIsolate(), args[0]);
- char address_buffer[sizeof(struct in_addr)];
-
- if (uv_inet_pton(AF_INET, *ip, &address_buffer) == 0) {
- args.GetReturnValue().Set(true);
- } else {
- args.GetReturnValue().Set(false);
- }
+ args.GetReturnValue().Set(4 == ParseIP(*ip));
}
void IsIPv6(const FunctionCallbackInfo& args) {
node::Utf8Value ip(args.GetIsolate(), args[0]);
- char address_buffer[sizeof(struct in6_addr)];
-
- if (uv_inet_pton(AF_INET6, *ip, &address_buffer) == 0) {
- args.GetReturnValue().Set(true);
- } else {
- args.GetReturnValue().Set(false);
- }
+ args.GetReturnValue().Set(6 == ParseIP(*ip));
}
void CanonicalizeIP(const FunctionCallbackInfo& args) {
v8::Isolate* isolate = args.GetIsolate();
node::Utf8Value ip(isolate, args[0]);
- char address_buffer[sizeof(struct in6_addr)];
- char canonical_ip[INET6_ADDRSTRLEN];
- int af;
- if (uv_inet_pton(AF_INET, *ip, &address_buffer) == 0)
- af = AF_INET;
- else if (uv_inet_pton(AF_INET6, *ip, &address_buffer) == 0)
- af = AF_INET6;
- else
- return;
-
- int err = uv_inet_ntop(af, address_buffer, canonical_ip,
- sizeof(canonical_ip));
- CHECK_EQ(err, 0);
+ ParseIPResult result;
+ const int rc = ParseIP(*ip, &result);
+ if (rc == 0) return;
+ char canonical_ip[INET6_ADDRSTRLEN];
+ const int af = (rc == 4 ? AF_INET : AF_INET6);
+ CHECK_EQ(0, uv_inet_ntop(af, &result, canonical_ip, sizeof(canonical_ip)));
args.GetReturnValue().Set(String::NewFromUtf8(isolate, canonical_ip));
}
diff --git a/src/connection_wrap.cc b/src/connection_wrap.cc
index d82e7195d76579..8de77f361dcde4 100644
--- a/src/connection_wrap.cc
+++ b/src/connection_wrap.cc
@@ -49,7 +49,6 @@ void ConnectionWrap::OnConnection(uv_stream_t* handle,
};
if (status == 0) {
- env->set_init_trigger_async_id(wrap_data->get_async_id());
// Instantiate the client javascript object and handle.
Local client_obj = WrapType::Instantiate(env,
wrap_data,
diff --git a/src/env-inl.h b/src/env-inl.h
index 955cf2688f88fa..7cdd9cecd378b9 100644
--- a/src/env-inl.h
+++ b/src/env-inl.h
@@ -53,11 +53,11 @@ inline MultiIsolatePlatform* IsolateData::platform() const {
return platform_;
}
-inline Environment::AsyncHooks::AsyncHooks(v8::Isolate* isolate)
- : isolate_(isolate),
- fields_(isolate, kFieldsCount),
- async_id_fields_(isolate, kUidFieldsCount) {
- v8::HandleScope handle_scope(isolate_);
+inline Environment::AsyncHooks::AsyncHooks()
+ : async_ids_stack_(env()->isolate(), 16 * 2),
+ fields_(env()->isolate(), kFieldsCount),
+ async_id_fields_(env()->isolate(), kUidFieldsCount) {
+ v8::HandleScope handle_scope(env()->isolate());
// Always perform async_hooks checks, not just when async_hooks is enabled.
// TODO(AndreasMadsen): Consider removing this for LTS releases.
@@ -66,6 +66,12 @@ inline Environment::AsyncHooks::AsyncHooks(v8::Isolate* isolate)
// and flag changes won't be included.
fields_[kCheck] = 1;
+ // kDefaultTriggerAsyncId should be -1, this indicates that there is no
+ // specified default value and it should fallback to the executionAsyncId.
+ // 0 is not used as the magic value, because that indicates a missing context
+ // which is different from a default context.
+ async_id_fields_[AsyncHooks::kDefaultTriggerAsyncId] = -1;
+
// kAsyncIdCounter should start at 1 because that'll be the id the execution
// context during bootstrap (code that runs before entering uv_run()).
async_id_fields_[AsyncHooks::kAsyncIdCounter] = 1;
@@ -75,9 +81,9 @@ inline Environment::AsyncHooks::AsyncHooks(v8::Isolate* isolate)
// strings can be retrieved quickly.
#define V(Provider) \
providers_[AsyncWrap::PROVIDER_ ## Provider].Set( \
- isolate_, \
+ env()->isolate(), \
v8::String::NewFromOneByte( \
- isolate_, \
+ env()->isolate(), \
reinterpret_cast(#Provider), \
v8::NewStringType::kInternalized, \
sizeof(#Provider) - 1).ToLocalChecked());
@@ -95,8 +101,13 @@ Environment::AsyncHooks::async_id_fields() {
return async_id_fields_;
}
+inline AliasedBuffer&
+Environment::AsyncHooks::async_ids_stack() {
+ return async_ids_stack_;
+}
+
inline v8::Local Environment::AsyncHooks::provider_string(int idx) {
- return providers_[idx].Get(isolate_);
+ return providers_[idx].Get(env()->isolate());
}
inline void Environment::AsyncHooks::no_force_checks() {
@@ -104,6 +115,11 @@ inline void Environment::AsyncHooks::no_force_checks() {
fields_[kCheck] = fields_[kCheck] - 1;
}
+inline Environment* Environment::AsyncHooks::env() {
+ return Environment::ForAsyncHooks(this);
+}
+
+// Remember to keep this code aligned with pushAsyncIds() in JS.
inline void Environment::AsyncHooks::push_async_ids(double async_id,
double trigger_async_id) {
// Since async_hooks is experimental, do only perform the check
@@ -113,16 +129,21 @@ inline void Environment::AsyncHooks::push_async_ids(double async_id,
CHECK_GE(trigger_async_id, -1);
}
- async_ids_stack_.push({ async_id_fields_[kExecutionAsyncId],
- async_id_fields_[kTriggerAsyncId] });
+ uint32_t offset = fields_[kStackLength];
+ if (offset * 2 >= async_ids_stack_.Length())
+ grow_async_ids_stack();
+ async_ids_stack_[2 * offset] = async_id_fields_[kExecutionAsyncId];
+ async_ids_stack_[2 * offset + 1] = async_id_fields_[kTriggerAsyncId];
+ fields_[kStackLength] = fields_[kStackLength] + 1;
async_id_fields_[kExecutionAsyncId] = async_id;
async_id_fields_[kTriggerAsyncId] = trigger_async_id;
}
+// Remember to keep this code aligned with popAsyncIds() in JS.
inline bool Environment::AsyncHooks::pop_async_id(double async_id) {
// In case of an exception then this may have already been reset, if the
// stack was multiple MakeCallback()'s deep.
- if (async_ids_stack_.empty()) return false;
+ if (fields_[kStackLength] == 0) return false;
// Ask for the async_id to be restored as a check that the stack
// hasn't been corrupted.
@@ -134,51 +155,55 @@ inline bool Environment::AsyncHooks::pop_async_id(double async_id) {
"actual: %.f, expected: %.f)\n",
async_id_fields_.GetValue(kExecutionAsyncId),
async_id);
- Environment* env = Environment::GetCurrent(isolate_);
DumpBacktrace(stderr);
fflush(stderr);
- if (!env->abort_on_uncaught_exception())
+ if (!env()->abort_on_uncaught_exception())
exit(1);
fprintf(stderr, "\n");
fflush(stderr);
ABORT_NO_BACKTRACE();
}
- auto async_ids = async_ids_stack_.top();
- async_ids_stack_.pop();
- async_id_fields_[kExecutionAsyncId] = async_ids.async_id;
- async_id_fields_[kTriggerAsyncId] = async_ids.trigger_async_id;
- return !async_ids_stack_.empty();
-}
+ uint32_t offset = fields_[kStackLength] - 1;
+ async_id_fields_[kExecutionAsyncId] = async_ids_stack_[2 * offset];
+ async_id_fields_[kTriggerAsyncId] = async_ids_stack_[2 * offset + 1];
+ fields_[kStackLength] = offset;
-inline size_t Environment::AsyncHooks::stack_size() {
- return async_ids_stack_.size();
+ return fields_[kStackLength] > 0;
}
inline void Environment::AsyncHooks::clear_async_id_stack() {
- while (!async_ids_stack_.empty())
- async_ids_stack_.pop();
async_id_fields_[kExecutionAsyncId] = 0;
async_id_fields_[kTriggerAsyncId] = 0;
+ fields_[kStackLength] = 0;
}
-inline Environment::AsyncHooks::InitScope::InitScope(
- Environment* env, double init_trigger_async_id)
- : env_(env),
- async_id_fields_ref_(env->async_hooks()->async_id_fields()) {
- if (env_->async_hooks()->fields()[AsyncHooks::kCheck] > 0) {
- CHECK_GE(init_trigger_async_id, -1);
+inline Environment::AsyncHooks::DefaultTriggerAsyncIdScope
+ ::DefaultTriggerAsyncIdScope(Environment* env,
+ double default_trigger_async_id)
+ : async_id_fields_ref_(env->async_hooks()->async_id_fields()) {
+ if (env->async_hooks()->fields()[AsyncHooks::kCheck] > 0) {
+ CHECK_GE(default_trigger_async_id, 0);
}
- env->async_hooks()->push_async_ids(
- async_id_fields_ref_[AsyncHooks::kExecutionAsyncId],
- init_trigger_async_id);
+
+ old_default_trigger_async_id_ =
+ async_id_fields_ref_[AsyncHooks::kDefaultTriggerAsyncId];
+ async_id_fields_ref_[AsyncHooks::kDefaultTriggerAsyncId] =
+ default_trigger_async_id;
}
-inline Environment::AsyncHooks::InitScope::~InitScope() {
- env_->async_hooks()->pop_async_id(
- async_id_fields_ref_[AsyncHooks::kExecutionAsyncId]);
+inline Environment::AsyncHooks::DefaultTriggerAsyncIdScope
+ ::~DefaultTriggerAsyncIdScope() {
+ async_id_fields_ref_[AsyncHooks::kDefaultTriggerAsyncId] =
+ old_default_trigger_async_id_;
}
+
+Environment* Environment::ForAsyncHooks(AsyncHooks* hooks) {
+ return ContainerOf(&Environment::async_hooks_, hooks);
+}
+
+
inline Environment::AsyncCallbackScope::AsyncCallbackScope(Environment* env)
: env_(env) {
env_->makecallback_cntr_++;
@@ -252,7 +277,6 @@ inline Environment::Environment(IsolateData* isolate_data,
v8::Local context)
: isolate_(context->GetIsolate()),
isolate_data_(isolate_data),
- async_hooks_(context->GetIsolate()),
timer_base_(uv_now(isolate_data->event_loop())),
using_domains_(false),
printed_error_(false),
@@ -274,22 +298,10 @@ inline Environment::Environment(IsolateData* isolate_data,
v8::Context::Scope context_scope(context);
set_as_external(v8::External::New(isolate(), this));
- v8::Local null = v8::Null(isolate());
- v8::Local binding_cache_object = v8::Object::New(isolate());
- CHECK(binding_cache_object->SetPrototype(context, null).FromJust());
- set_binding_cache_object(binding_cache_object);
-
- v8::Local internal_binding_cache_object =
- v8::Object::New(isolate());
- CHECK(internal_binding_cache_object->SetPrototype(context, null).FromJust());
- set_internal_binding_cache_object(internal_binding_cache_object);
-
- set_module_load_list_array(v8::Array::New(isolate()));
-
AssignToContext(context, ContextInfo(""));
destroy_async_id_list_.reserve(512);
- performance_state_ = Calloc(1);
+ performance_state_.reset(new performance::performance_state(isolate()));
performance_state_->milestones[
performance::NODE_PERFORMANCE_MILESTONE_ENVIRONMENT] =
PERFORMANCE_NOW();
@@ -322,7 +334,6 @@ inline Environment::~Environment() {
delete[] heap_statistics_buffer_;
delete[] heap_space_statistics_buffer_;
delete[] http_parser_buffer_;
- free(performance_state_);
}
inline v8::Isolate* Environment::isolate() const {
@@ -440,17 +451,13 @@ inline double Environment::trigger_async_id() {
return async_hooks()->async_id_fields()[AsyncHooks::kTriggerAsyncId];
}
-inline double Environment::get_init_trigger_async_id() {
- AliasedBuffer& async_id_fields =
- async_hooks()->async_id_fields();
- double tid = async_id_fields[AsyncHooks::kInitTriggerAsyncId];
- async_id_fields[AsyncHooks::kInitTriggerAsyncId] = 0;
- if (tid <= 0) tid = execution_async_id();
- return tid;
-}
-
-inline void Environment::set_init_trigger_async_id(const double id) {
- async_hooks()->async_id_fields()[AsyncHooks::kInitTriggerAsyncId] = id;
+inline double Environment::get_default_trigger_async_id() {
+ double default_trigger_async_id =
+ async_hooks()->async_id_fields()[AsyncHooks::kDefaultTriggerAsyncId];
+ // If defaultTriggerAsyncId isn't set, use the executionAsyncId
+ if (default_trigger_async_id < 0)
+ default_trigger_async_id = execution_async_id();
+ return default_trigger_async_id;
}
inline double* Environment::heap_statistics_buffer() const {
@@ -521,7 +528,7 @@ void Environment::SetImmediate(native_immediate_callback cb,
}
inline performance::performance_state* Environment::performance_state() {
- return performance_state_;
+ return performance_state_.get();
}
inline std::map* Environment::performance_marks() {
diff --git a/src/env.cc b/src/env.cc
index 64fc2dea04e8d1..902429e18a7e74 100644
--- a/src/env.cc
+++ b/src/env.cc
@@ -323,4 +323,20 @@ void Environment::ActivateImmediateCheck() {
uv_idle_start(&immediate_idle_handle_, [](uv_idle_t*){ });
}
+void Environment::AsyncHooks::grow_async_ids_stack() {
+ const uint32_t old_capacity = async_ids_stack_.Length() / 2;
+ const uint32_t new_capacity = old_capacity * 1.5;
+ AliasedBuffer new_buffer(
+ env()->isolate(), new_capacity * 2);
+
+ for (uint32_t i = 0; i < old_capacity * 2; ++i)
+ new_buffer[i] = async_ids_stack_[i];
+ async_ids_stack_ = std::move(new_buffer);
+
+ env()->async_hooks_binding()->Set(
+ env()->context(),
+ env()->async_ids_stack_string(),
+ async_ids_stack_.GetJSArray()).FromJust();
+}
+
} // namespace node
diff --git a/src/env.h b/src/env.h
index 79ef42a5c0159e..748c15f8af78f1 100644
--- a/src/env.h
+++ b/src/env.h
@@ -41,7 +41,6 @@
#include
#include
#include
-#include
#include
struct nghttp2_rcbuf;
@@ -49,7 +48,7 @@ struct nghttp2_rcbuf;
namespace node {
namespace performance {
-struct performance_state;
+class performance_state;
}
namespace loader {
@@ -100,6 +99,7 @@ class ModuleWrap;
V(address_string, "address") \
V(args_string, "args") \
V(async, "async") \
+ V(async_ids_stack_string, "async_ids_stack") \
V(buffer_string, "buffer") \
V(bytes_string, "bytes") \
V(bytes_parsed_string, "bytesParsed") \
@@ -165,7 +165,6 @@ class ModuleWrap;
V(internal_string, "internal") \
V(ipv4_string, "IPv4") \
V(ipv6_string, "IPv6") \
- V(isalive_string, "isAlive") \
V(isclosing_string, "isClosing") \
V(issuer_string, "issuer") \
V(issuercert_string, "issuerCertificate") \
@@ -250,7 +249,6 @@ class ModuleWrap;
V(subject_string, "subject") \
V(subjectaltname_string, "subjectaltname") \
V(syscall_string, "syscall") \
- V(tick_domain_cb_string, "_tickDomainCallback") \
V(ticketkeycallback_string, "onticketkeycallback") \
V(timeout_string, "timeout") \
V(tls_ticket_string, "tlsTicket") \
@@ -282,15 +280,13 @@ class ModuleWrap;
V(async_hooks_before_function, v8::Function) \
V(async_hooks_after_function, v8::Function) \
V(async_hooks_promise_resolve_function, v8::Function) \
- V(binding_cache_object, v8::Object) \
- V(internal_binding_cache_object, v8::Object) \
+ V(async_hooks_binding, v8::Object) \
V(buffer_prototype_object, v8::Object) \
V(context, v8::Context) \
V(http2ping_constructor_template, v8::ObjectTemplate) \
V(http2stream_constructor_template, v8::ObjectTemplate) \
V(http2settings_constructor_template, v8::ObjectTemplate) \
V(inspector_console_api_object, v8::Object) \
- V(module_load_list_array, v8::Array) \
V(pbkdf2_constructor_template, v8::ObjectTemplate) \
V(pipe_constructor_template, v8::FunctionTemplate) \
V(performance_entry_callback, v8::Function) \
@@ -314,11 +310,6 @@ class ModuleWrap;
class Environment;
-struct node_async_ids {
- double async_id;
- double trigger_async_id;
-};
-
class IsolateData {
public:
IsolateData(v8::Isolate* isolate, uv_loop_t* event_loop,
@@ -382,6 +373,7 @@ class Environment {
kPromiseResolve,
kTotals,
kCheck,
+ kStackLength,
kFieldsCount,
};
@@ -389,55 +381,57 @@ class Environment {
kExecutionAsyncId,
kTriggerAsyncId,
kAsyncIdCounter,
- kInitTriggerAsyncId,
+ kDefaultTriggerAsyncId,
kUidFieldsCount,
};
- AsyncHooks() = delete;
-
inline AliasedBuffer& fields();
inline AliasedBuffer& async_id_fields();
+ inline AliasedBuffer& async_ids_stack();
inline v8::Local provider_string(int idx);
inline void no_force_checks();
+ inline Environment* env();
inline void push_async_ids(double async_id, double trigger_async_id);
inline bool pop_async_id(double async_id);
- inline size_t stack_size();
inline void clear_async_id_stack(); // Used in fatal exceptions.
- // Used to propagate the trigger_async_id to the constructor of any newly
- // created resources using RAII. Instead of needing to pass the
- // trigger_async_id along with other constructor arguments.
- class InitScope {
+ // Used to set the kDefaultTriggerAsyncId in a scope. This is instead of
+ // passing the trigger_async_id along with other constructor arguments.
+ class DefaultTriggerAsyncIdScope {
public:
- InitScope() = delete;
- explicit InitScope(Environment* env, double init_trigger_async_id);
- ~InitScope();
+ DefaultTriggerAsyncIdScope() = delete;
+ explicit DefaultTriggerAsyncIdScope(Environment* env,
+ double init_trigger_async_id);
+ ~DefaultTriggerAsyncIdScope();
private:
- Environment* env_;
AliasedBuffer async_id_fields_ref_;
+ double old_default_trigger_async_id_;
- DISALLOW_COPY_AND_ASSIGN(InitScope);
+ DISALLOW_COPY_AND_ASSIGN(DefaultTriggerAsyncIdScope);
};
+
private:
friend class Environment; // So we can call the constructor.
- inline explicit AsyncHooks(v8::Isolate* isolate);
+ inline AsyncHooks();
// Keep a list of all Persistent strings used for Provider types.
v8::Eternal providers_[AsyncWrap::PROVIDERS_LENGTH];
- // Used by provider_string().
- v8::Isolate* isolate_;
+ // Keep track of the environment copy itself.
+ Environment* env_;
// Stores the ids of the current execution context stack.
- std::stack async_ids_stack_;
+ AliasedBuffer async_ids_stack_;
// Attached to a Uint32Array that tracks the number of active hooks for
// each type.
AliasedBuffer fields_;
// Attached to a Float64Array that tracks the state of async resources.
AliasedBuffer async_id_fields_;
+ void grow_async_ids_stack();
+
DISALLOW_COPY_AND_ASSIGN(AsyncHooks);
};
@@ -565,8 +559,7 @@ class Environment {
inline double new_async_id();
inline double execution_async_id();
inline double trigger_async_id();
- inline double get_init_trigger_async_id();
- inline void set_init_trigger_async_id(const double id);
+ inline double get_default_trigger_async_id();
// List of id's that have been destroyed and need the destroy() cb called.
inline std::vector* destroy_async_id_list();
@@ -683,6 +676,8 @@ class Environment {
inline bool inside_should_not_abort_on_uncaught_scope() const;
+ static inline Environment* ForAsyncHooks(AsyncHooks* hooks);
+
private:
inline void ThrowError(v8::Local (*fun)(v8::Local),
const char* errmsg);
@@ -710,7 +705,7 @@ class Environment {
int should_not_abort_scope_counter_ = 0;
- performance::performance_state* performance_state_ = nullptr;
+ std::unique_ptr performance_state_;
std::map performance_marks_;
#if HAVE_INSPECTOR
diff --git a/src/js_stream.cc b/src/js_stream.cc
index c4e32feeba0f7d..7d1115f12ac3e2 100644
--- a/src/js_stream.cc
+++ b/src/js_stream.cc
@@ -14,9 +14,9 @@ using v8::FunctionCallbackInfo;
using v8::FunctionTemplate;
using v8::HandleScope;
using v8::Local;
-using v8::MaybeLocal;
using v8::Object;
using v8::String;
+using v8::TryCatch;
using v8::Value;
@@ -80,37 +80,48 @@ AsyncWrap* JSStream::GetAsyncWrap() {
bool JSStream::IsAlive() {
- HandleScope scope(env()->isolate());
- Context::Scope context_scope(env()->context());
- v8::Local fn = object()->Get(env()->isalive_string());
- if (!fn->IsFunction())
- return false;
- return MakeCallback(fn.As(), 0, nullptr)
- .ToLocalChecked()->IsTrue();
+ return true;
}
bool JSStream::IsClosing() {
HandleScope scope(env()->isolate());
Context::Scope context_scope(env()->context());
- return MakeCallback(env()->isclosing_string(), 0, nullptr)
- .ToLocalChecked()->IsTrue();
+ TryCatch try_catch(env()->isolate());
+ Local value;
+ if (!MakeCallback(env()->isclosing_string(), 0, nullptr).ToLocal(&value)) {
+ FatalException(env()->isolate(), try_catch);
+ return true;
+ }
+ return value->IsTrue();
}
int JSStream::ReadStart() {
HandleScope scope(env()->isolate());
Context::Scope context_scope(env()->context());
- return MakeCallback(env()->onreadstart_string(), 0, nullptr)
- .ToLocalChecked()->Int32Value();
+ TryCatch try_catch(env()->isolate());
+ Local value;
+ int value_int = UV_EPROTO;
+ if (!MakeCallback(env()->onreadstart_string(), 0, nullptr).ToLocal(&value) ||
+ !value->Int32Value(env()->context()).To(&value_int)) {
+ FatalException(env()->isolate(), try_catch);
+ }
+ return value_int;
}
int JSStream::ReadStop() {
HandleScope scope(env()->isolate());
Context::Scope context_scope(env()->context());
- return MakeCallback(env()->onreadstop_string(), 0, nullptr)
- .ToLocalChecked()->Int32Value();
+ TryCatch try_catch(env()->isolate());
+ Local value;
+ int value_int = UV_EPROTO;
+ if (!MakeCallback(env()->onreadstop_string(), 0, nullptr).ToLocal(&value) ||
+ !value->Int32Value(env()->context()).To(&value_int)) {
+ FatalException(env()->isolate(), try_catch);
+ }
+ return value_int;
}
@@ -123,10 +134,17 @@ int JSStream::DoShutdown(ShutdownWrap* req_wrap) {
};
req_wrap->Dispatched();
- MaybeLocal res =
- MakeCallback(env()->onshutdown_string(), arraysize(argv), argv);
- return res.ToLocalChecked()->Int32Value();
+ TryCatch try_catch(env()->isolate());
+ Local value;
+ int value_int = UV_EPROTO;
+ if (!MakeCallback(env()->onshutdown_string(),
+ arraysize(argv),
+ argv).ToLocal(&value) ||
+ !value->Int32Value(env()->context()).To(&value_int)) {
+ FatalException(env()->isolate(), try_catch);
+ }
+ return value_int;
}
@@ -152,10 +170,17 @@ int JSStream::DoWrite(WriteWrap* w,
};
w->Dispatched();
- MaybeLocal res =
- MakeCallback(env()->onwrite_string(), arraysize(argv), argv);
- return res.ToLocalChecked()->Int32Value();
+ TryCatch try_catch(env()->isolate());
+ Local value;
+ int value_int = UV_EPROTO;
+ if (!MakeCallback(env()->onwrite_string(),
+ arraysize(argv),
+ argv).ToLocal(&value) ||
+ !value->Int32Value(env()->context()).To(&value_int)) {
+ FatalException(env()->isolate(), try_catch);
+ }
+ return value_int;
}
diff --git a/src/node.cc b/src/node.cc
index 55e5ad3dd71a75..cc2bdefbb41f66 100644
--- a/src/node.cc
+++ b/src/node.cc
@@ -1379,17 +1379,13 @@ MaybeLocal InternalMakeCallback(Environment* env,
return Undefined(env->isolate());
}
- MaybeLocal ret;
+ MaybeLocal ret = callback->Call(env->context(), recv, argc, argv);
- {
- ret = callback->Call(env->context(), recv, argc, argv);
-
- if (ret.IsEmpty()) {
- // NOTE: For backwards compatibility with public API we return Undefined()
- // if the top level call threw.
- scope.MarkAsFailed();
- return scope.IsInnerMakeCallback() ? ret : Undefined(env->isolate());
- }
+ if (ret.IsEmpty()) {
+ // NOTE: For backwards compatibility with public API we return Undefined()
+ // if the top level call threw.
+ scope.MarkAsFailed();
+ return scope.IsInnerMakeCallback() ? ret : Undefined(env->isolate());
}
scope.Close();
@@ -2839,22 +2835,6 @@ Maybe ProcessEmitDeprecationWarning(Environment* env,
}
-static bool PullFromCache(Environment* env,
- const FunctionCallbackInfo& args,
- Local module,
- Local cache) {
- Local context = env->context();
- Local exports_v;
- Local exports;
- if (cache->Get(context, module).ToLocal(&exports_v) &&
- exports_v->IsObject() &&
- exports_v->ToObject(context).ToLocal(&exports)) {
- args.GetReturnValue().Set(exports);
- return true;
- }
- return false;
-}
-
static Local InitModule(Environment* env,
node_module* mod,
Local module) {
@@ -2882,22 +2862,10 @@ static void ThrowIfNoSuchModule(Environment* env, const char* module_v) {
static void Binding(const FunctionCallbackInfo& args) {
Environment* env = Environment::GetCurrent(args);
- Local module;
- if (!args[0]->ToString(env->context()).ToLocal(&module)) return;
-
- Local cache = env->binding_cache_object();
-
- if (PullFromCache(env, args, module, cache))
- return;
+ CHECK(args[0]->IsString());
- // Append a string to process.moduleLoadList
- char buf[1024];
+ Local module = args[0].As();
node::Utf8Value module_v(env->isolate(), module);
- snprintf(buf, sizeof(buf), "Binding %s", *module_v);
-
- Local modules = env->module_load_list_array();
- uint32_t l = modules->Length();
- modules->Set(l, OneByteString(env->isolate(), buf));
node_module* mod = get_builtin_module(*module_v);
Local exports;
@@ -2914,7 +2882,6 @@ static void Binding(const FunctionCallbackInfo& args) {
} else {
return ThrowIfNoSuchModule(env, *module_v);
}
- cache->Set(module, exports);
args.GetReturnValue().Set(exports);
}
@@ -2922,27 +2889,14 @@ static void Binding(const FunctionCallbackInfo& args) {
static void InternalBinding(const FunctionCallbackInfo& args) {
Environment* env = Environment::GetCurrent(args);
- Local module;
- if (!args[0]->ToString(env->context()).ToLocal(&module)) return;
-
- Local cache = env->internal_binding_cache_object();
-
- if (PullFromCache(env, args, module, cache))
- return;
+ CHECK(args[0]->IsString());
- // Append a string to process.moduleLoadList
- char buf[1024];
+ Local module = args[0].As();
node::Utf8Value module_v(env->isolate(), module);
- snprintf(buf, sizeof(buf), "Internal Binding %s", *module_v);
-
- Local modules = env->module_load_list_array();
- uint32_t l = modules->Length();
- modules->Set(l, OneByteString(env->isolate(), buf));
node_module* mod = get_internal_module(*module_v);
if (mod == nullptr) return ThrowIfNoSuchModule(env, *module_v);
Local exports = InitModule(env, mod, module);
- cache->Set(module, exports);
args.GetReturnValue().Set(exports);
}
@@ -2950,14 +2904,9 @@ static void InternalBinding(const FunctionCallbackInfo& args) {
static void LinkedBinding(const FunctionCallbackInfo& args) {
Environment* env = Environment::GetCurrent(args.GetIsolate());
- Local module_name;
- if (!args[0]->ToString(env->context()).ToLocal(&module_name)) return;
-
- Local cache = env->binding_cache_object();
- Local exports_v = cache->Get(module_name);
+ CHECK(args[0]->IsString());
- if (exports_v->IsObject())
- return args.GetReturnValue().Set(exports_v.As());
+ Local module_name = args[0].As();
node::Utf8Value module_name_v(env->isolate(), module_name);
node_module* mod = get_linked_module(*module_name_v);
@@ -2988,7 +2937,6 @@ static void LinkedBinding(const FunctionCallbackInfo& args) {
}
auto effective_exports = module->Get(exports_prop);
- cache->Set(module_name, effective_exports);
args.GetReturnValue().Set(effective_exports);
}
@@ -3323,11 +3271,6 @@ void SetupProcessObject(Environment* env,
"version",
FIXED_ONE_BYTE_STRING(env->isolate(), NODE_VERSION));
- // process.moduleLoadList
- READONLY_PROPERTY(process,
- "moduleLoadList",
- env->module_load_list_array());
-
// process.versions
Local versions = Object::New(env->isolate());
READONLY_PROPERTY(process, "versions", versions);
@@ -3368,6 +3311,12 @@ void SetupProcessObject(Environment* env,
"nghttp2",
FIXED_ONE_BYTE_STRING(env->isolate(), NGHTTP2_VERSION));
+ const char node_napi_version[] = NODE_STRINGIFY(NAPI_VERSION);
+ READONLY_PROPERTY(
+ versions,
+ "napi",
+ FIXED_ONE_BYTE_STRING(env->isolate(), node_napi_version));
+
// process._promiseRejectEvent
Local promiseRejectEvent = Object::New(env->isolate());
READONLY_DONT_ENUM_PROPERTY(process,
diff --git a/src/node_api.cc b/src/node_api.cc
index ac0b0959b599d6..27ab6707de7a6f 100644
--- a/src/node_api.cc
+++ b/src/node_api.cc
@@ -18,8 +18,6 @@
#include "node_api.h"
#include "node_internals.h"
-#define NAPI_VERSION 2
-
static
napi_status napi_set_last_error(napi_env env, napi_status error_code,
uint32_t engine_error_code = 0,
@@ -144,6 +142,30 @@ struct napi_env__ {
(!try_catch.HasCaught() ? napi_ok \
: napi_set_last_error((env), napi_pending_exception))
+#define THROW_RANGE_ERROR_IF_FALSE(env, condition, error, message) \
+ do { \
+ if (!(condition)) { \
+ napi_throw_range_error((env), (error), (message)); \
+ return napi_set_last_error((env), napi_generic_failure); \
+ } \
+ } while (0)
+
+#define CREATE_TYPED_ARRAY( \
+ env, type, size_of_element, buffer, byte_offset, length, out) \
+ do { \
+ if ((size_of_element) > 1) { \
+ THROW_RANGE_ERROR_IF_FALSE( \
+ (env), (byte_offset) % (size_of_element) == 0, \
+ "ERR_NAPI_INVALID_TYPEDARRAY_ALIGNMENT", \
+ "start offset of "#type" should be a multiple of "#size_of_element); \
+ } \
+ THROW_RANGE_ERROR_IF_FALSE((env), (length) * (size_of_element) + \
+ (byte_offset) <= buffer->ByteLength(), \
+ "ERR_NAPI_INVALID_TYPEDARRAY_LENGTH", \
+ "Invalid typed array length"); \
+ (out) = v8::type::New((buffer), (byte_offset), (length)); \
+ } while (0)
+
namespace {
namespace v8impl {
@@ -3065,31 +3087,40 @@ napi_status napi_create_typedarray(napi_env env,
switch (type) {
case napi_int8_array:
- typedArray = v8::Int8Array::New(buffer, byte_offset, length);
+ CREATE_TYPED_ARRAY(
+ env, Int8Array, 1, buffer, byte_offset, length, typedArray);
break;
case napi_uint8_array:
- typedArray = v8::Uint8Array::New(buffer, byte_offset, length);
+ CREATE_TYPED_ARRAY(
+ env, Uint8Array, 1, buffer, byte_offset, length, typedArray);
break;
case napi_uint8_clamped_array:
- typedArray = v8::Uint8ClampedArray::New(buffer, byte_offset, length);
+ CREATE_TYPED_ARRAY(
+ env, Uint8ClampedArray, 1, buffer, byte_offset, length, typedArray);
break;
case napi_int16_array:
- typedArray = v8::Int16Array::New(buffer, byte_offset, length);
+ CREATE_TYPED_ARRAY(
+ env, Int16Array, 2, buffer, byte_offset, length, typedArray);
break;
case napi_uint16_array:
- typedArray = v8::Uint16Array::New(buffer, byte_offset, length);
+ CREATE_TYPED_ARRAY(
+ env, Uint16Array, 2, buffer, byte_offset, length, typedArray);
break;
case napi_int32_array:
- typedArray = v8::Int32Array::New(buffer, byte_offset, length);
+ CREATE_TYPED_ARRAY(
+ env, Int32Array, 4, buffer, byte_offset, length, typedArray);
break;
case napi_uint32_array:
- typedArray = v8::Uint32Array::New(buffer, byte_offset, length);
+ CREATE_TYPED_ARRAY(
+ env, Uint32Array, 4, buffer, byte_offset, length, typedArray);
break;
case napi_float32_array:
- typedArray = v8::Float32Array::New(buffer, byte_offset, length);
+ CREATE_TYPED_ARRAY(
+ env, Float32Array, 4, buffer, byte_offset, length, typedArray);
break;
case napi_float64_array:
- typedArray = v8::Float64Array::New(buffer, byte_offset, length);
+ CREATE_TYPED_ARRAY(
+ env, Float64Array, 8, buffer, byte_offset, length, typedArray);
break;
default:
return napi_set_last_error(env, napi_invalid_arg);
@@ -3170,6 +3201,14 @@ napi_status napi_create_dataview(napi_env env,
RETURN_STATUS_IF_FALSE(env, value->IsArrayBuffer(), napi_invalid_arg);
v8::Local buffer = value.As();
+ if (byte_length + byte_offset > buffer->ByteLength()) {
+ napi_throw_range_error(
+ env,
+ "ERR_NAPI_INVALID_DATAVIEW_ARGS",
+ "byte_offset + byte_length should be less than or "
+ "equal to the size in bytes of the array passed in");
+ return napi_set_last_error(env, napi_pending_exception);
+ }
v8::Local DataView = v8::DataView::New(buffer, byte_offset,
byte_length);
diff --git a/src/node_buffer.cc b/src/node_buffer.cc
index 7008395ce46392..dff9d3c0995e02 100644
--- a/src/node_buffer.cc
+++ b/src/node_buffer.cc
@@ -303,15 +303,14 @@ MaybeLocal New(Environment* env, size_t length) {
data,
length,
ArrayBufferCreationMode::kInternalized);
- Local ui = Uint8Array::New(ab, 0, length);
- Maybe mb =
- ui->SetPrototype(env->context(), env->buffer_prototype_object());
- if (mb.FromMaybe(false))
- return scope.Escape(ui);
+ MaybeLocal ui = Buffer::New(env, ab, 0, length);
- // Object failed to be created. Clean up resources.
- free(data);
- return Local();
+ if (ui.IsEmpty()) {
+ // Object failed to be created. Clean up resources.
+ free(data);
+ }
+
+ return scope.Escape(ui.FromMaybe(Local()));
}
@@ -349,15 +348,14 @@ MaybeLocal Copy(Environment* env, const char* data, size_t length) {
new_data,
length,
ArrayBufferCreationMode::kInternalized);
- Local ui = Uint8Array::New(ab, 0, length);
- Maybe mb =
- ui->SetPrototype(env->context(), env->buffer_prototype_object());
- if (mb.FromMaybe(false))
- return scope.Escape(ui);
+ MaybeLocal ui = Buffer::New(env, ab, 0, length);
- // Object failed to be created. Clean up resources.
- free(new_data);
- return Local();
+ if (ui.IsEmpty()) {
+ // Object failed to be created. Clean up resources.
+ free(new_data);
+ }
+
+ return scope.Escape(ui.FromMaybe(Local()));
}
@@ -392,15 +390,14 @@ MaybeLocal New(Environment* env,
// correct.
if (data == nullptr)
ab->Neuter();
- Local ui = Uint8Array::New(ab, 0, length);
- Maybe mb =
- ui->SetPrototype(env->context(), env->buffer_prototype_object());
+ MaybeLocal ui = Buffer::New(env, ab, 0, length);
- if (!mb.FromMaybe(false))
+ if (ui.IsEmpty()) {
return Local();
+ }
CallbackInfo::New(env->isolate(), ab, callback, data, hint);
- return scope.Escape(ui);
+ return scope.Escape(ui.ToLocalChecked());
}
@@ -415,8 +412,6 @@ MaybeLocal New(Isolate* isolate, char* data, size_t length) {
MaybeLocal New(Environment* env, char* data, size_t length) {
- EscapableHandleScope scope(env->isolate());
-
if (length > 0) {
CHECK_NE(data, nullptr);
CHECK(length <= kMaxLength);
@@ -427,12 +422,7 @@ MaybeLocal New(Environment* env, char* data, size_t length) {
data,
length,
ArrayBufferCreationMode::kInternalized);
- Local ui = Uint8Array::New(ab, 0, length);
- Maybe mb =
- ui->SetPrototype(env->context(), env->buffer_prototype_object());
- if (mb.FromMaybe(false))
- return scope.Escape(ui);
- return Local();
+ return Buffer::New(env, ab, 0, length).FromMaybe(Local());
}
namespace {
diff --git a/src/node_http2.cc b/src/node_http2.cc
index 238e2afc1deb71..bd7eeee8655e52 100644
--- a/src/node_http2.cc
+++ b/src/node_http2.cc
@@ -9,6 +9,7 @@
namespace node {
+using v8::ArrayBuffer;
using v8::Boolean;
using v8::Context;
using v8::Float64Array;
@@ -553,7 +554,8 @@ Http2Session::~Http2Session() {
}
inline bool HasHttp2Observer(Environment* env) {
- uint32_t* observers = env->performance_state()->observers;
+ AliasedBuffer& observers =
+ env->performance_state()->observers;
return observers[performance::NODE_PERFORMANCE_ENTRY_TYPE_HTTP2] != 0;
}
@@ -831,7 +833,7 @@ inline int Http2Session::OnBeginHeadersCallback(nghttp2_session* handle,
}
// Called by nghttp2 for each header name/value pair in a HEADERS block.
-// This had to have been preceeded by a call to OnBeginHeadersCallback so
+// This had to have been preceded by a call to OnBeginHeadersCallback so
// the Http2Stream is guaranteed to already exist.
inline int Http2Session::OnHeaderCallback(nghttp2_session* handle,
const nghttp2_frame* frame,
@@ -978,7 +980,6 @@ inline int Http2Session::OnStreamClose(nghttp2_session* handle,
// Intentionally ignore the callback if the stream does not exist or has
// already been destroyed
if (stream != nullptr && !stream->IsDestroyed()) {
- stream->AddChunk(nullptr, 0);
stream->Close(code);
// It is possible for the stream close to occur before the stream is
// ever passed on to the javascript side. If that happens, skip straight
@@ -989,9 +990,8 @@ inline int Http2Session::OnStreamClose(nghttp2_session* handle,
stream->object()->Get(context, env->onstreamclose_string())
.ToLocalChecked();
if (fn->IsFunction()) {
- Local argv[2] = {
- Integer::NewFromUnsigned(isolate, code),
- Boolean::New(isolate, stream->HasDataChunks(true))
+ Local argv[] = {
+ Integer::NewFromUnsigned(isolate, code)
};
stream->MakeCallback(fn.As(), arraysize(argv), argv);
} else {
@@ -1028,6 +1028,8 @@ inline int Http2Session::OnDataChunkReceived(nghttp2_session* handle,
Http2Session* session = static_cast(user_data);
DEBUG_HTTP2SESSION2(session, "buffering data chunk for stream %d, size: "
"%d, flags: %d", id, len, flags);
+ Environment* env = session->env();
+ HandleScope scope(env->isolate());
// We should never actually get a 0-length chunk so this check is
// only a precaution at this point.
if (len > 0) {
@@ -1039,8 +1041,25 @@ inline int Http2Session::OnDataChunkReceived(nghttp2_session* handle,
// If the stream has been destroyed, ignore this chunk
if (stream->IsDestroyed())
return 0;
+
stream->statistics_.received_bytes += len;
- stream->AddChunk(data, len);
+
+ // There is a single large array buffer for the entire data read from the
+ // network; create a slice of that array buffer and emit it as the
+ // received data buffer.
+ CHECK(!session->stream_buf_ab_.IsEmpty());
+ size_t offset = reinterpret_cast(data) - session->stream_buf_;
+ // Verify that the data offset is inside the current read buffer.
+ CHECK_LE(offset, session->stream_buf_size_);
+
+ Local buf =
+ Buffer::New(env, session->stream_buf_ab_, offset, len).ToLocalChecked();
+
+ stream->EmitData(len, buf, Local());
+ if (!stream->IsReading())
+ stream->inbound_consumed_data_while_paused_ += len;
+ else
+ nghttp2_session_consume_stream(handle, id, len);
}
return 0;
}
@@ -1226,9 +1245,8 @@ inline void Http2Session::HandlePriorityFrame(const nghttp2_frame* frame) {
// Called by OnFrameReceived when a complete DATA frame has been received.
-// If we know that this is the last DATA frame (because the END_STREAM flag
-// is set), then we'll terminate the readable side of the StreamBase. If
-// the StreamBase is flowing, we'll push the chunks of data out to JS land.
+// If we know that this was the last DATA frame (because the END_STREAM flag
+// is set), then we'll terminate the readable side of the StreamBase.
inline void Http2Session::HandleDataFrame(const nghttp2_frame* frame) {
int32_t id = GetFrameID(frame);
DEBUG_HTTP2SESSION2(this, "handling data frame for stream %d", id);
@@ -1239,11 +1257,8 @@ inline void Http2Session::HandleDataFrame(const nghttp2_frame* frame) {
return;
if (frame->hd.flags & NGHTTP2_FLAG_END_STREAM) {
- stream->AddChunk(nullptr, 0);
+ stream->EmitData(UV_EOF, Local(), Local());
}
-
- if (stream->IsReading())
- stream->FlushDataChunks();
}
@@ -1618,45 +1633,67 @@ void Http2Session::OnStreamAllocImpl(size_t suggested_size,
uv_buf_t* buf,
void* ctx) {
Http2Session* session = static_cast(ctx);
- buf->base = session->stream_alloc();
- buf->len = kAllocBufferSize;
+ CHECK_EQ(session->stream_buf_, nullptr);
+ CHECK_EQ(session->stream_buf_size_, 0);
+ buf->base = session->stream_buf_ = Malloc(suggested_size);
+ buf->len = session->stream_buf_size_ = suggested_size;
+ session->IncrementCurrentSessionMemory(suggested_size);
}
// Callback used to receive inbound data from the i/o stream
void Http2Session::OnStreamReadImpl(ssize_t nread,
- const uv_buf_t* bufs,
+ const uv_buf_t* buf,
uv_handle_type pending,
void* ctx) {
Http2Session* session = static_cast(ctx);
Http2Scope h2scope(session);
CHECK_NE(session->stream_, nullptr);
DEBUG_HTTP2SESSION2(session, "receiving %d bytes", nread);
- if (nread < 0) {
- uv_buf_t tmp_buf;
- tmp_buf.base = nullptr;
- tmp_buf.len = 0;
- session->prev_read_cb_.fn(nread,
- &tmp_buf,
- pending,
- session->prev_read_cb_.ctx);
- return;
- }
- if (bufs->len > 0) {
+ if (nread <= 0) {
+ free(session->stream_buf_);
+ if (nread < 0) {
+ uv_buf_t tmp_buf = uv_buf_init(nullptr, 0);
+ session->prev_read_cb_.fn(nread,
+ &tmp_buf,
+ pending,
+ session->prev_read_cb_.ctx);
+ }
+ } else {
// Only pass data on if nread > 0
- uv_buf_t buf[] { uv_buf_init((*bufs).base, nread) };
+
+ // Verify that currently: There is memory allocated into which
+ // the data has been read, and that memory buffer is at least as large
+ // as the amount of data we have read, but we have not yet made an
+ // ArrayBuffer out of it.
+ CHECK_NE(session->stream_buf_, nullptr);
+ CHECK_EQ(session->stream_buf_, buf->base);
+ CHECK_EQ(session->stream_buf_size_, buf->len);
+ CHECK_GE(session->stream_buf_size_, static_cast(nread));
+ CHECK(session->stream_buf_ab_.IsEmpty());
+
+ Environment* env = session->env();
+ Isolate* isolate = env->isolate();
+ HandleScope scope(isolate);
+ Local context = env->context();
+ Context::Scope context_scope(context);
+
+ // Create an array buffer for the read data. DATA frames will be emitted
+ // as slices of this array buffer to avoid having to copy memory.
+ session->stream_buf_ab_ =
+ ArrayBuffer::New(isolate,
+ session->stream_buf_,
+ session->stream_buf_size_,
+ v8::ArrayBufferCreationMode::kInternalized);
+
+ uv_buf_t buf_ = uv_buf_init(buf->base, nread);
session->statistics_.data_received += nread;
- ssize_t ret = session->Write(buf, 1);
+ ssize_t ret = session->Write(&buf_, 1);
// Note: if ssize_t is not defined (e.g. on Win32), nghttp2 will typedef
// ssize_t to int. Cast here so that the < 0 check actually works on
// Windows.
if (static_cast(ret) < 0) {
DEBUG_HTTP2SESSION2(session, "fatal error receiving data: %d", ret);
- Environment* env = session->env();
- Isolate* isolate = env->isolate();
- HandleScope scope(isolate);
- Local context = env->context();
- Context::Scope context_scope(context);
Local argv[1] = {
Integer::New(isolate, ret),
@@ -1667,6 +1704,13 @@ void Http2Session::OnStreamReadImpl(ssize_t nread,
nghttp2_session_want_read(**session));
}
}
+
+ // Since we are finished handling this write, reset the stream buffer.
+ // The memory has either been free()d or was handed over to V8.
+ session->DecrementCurrentSessionMemory(session->stream_buf_size_);
+ session->stream_buf_ = nullptr;
+ session->stream_buf_size_ = 0;
+ session->stream_buf_ab_ = Local();
}
void Http2Session::OnStreamDestructImpl(void* ctx) {
@@ -1781,30 +1825,6 @@ void Http2Stream::OnTrailers(const SubmitTrailers& submit_trailers) {
}
}
-inline bool Http2Stream::HasDataChunks(bool ignore_eos) {
- return data_chunks_.size() > (ignore_eos ? 1 : 0);
-}
-
-// Appends a chunk of received DATA frame data to this Http2Streams internal
-// queue. Note that we must memcpy each chunk because of the way that nghttp2
-// handles it's internal memory`.
-inline void Http2Stream::AddChunk(const uint8_t* data, size_t len) {
- CHECK(!this->IsDestroyed());
- if (this->statistics_.first_byte == 0)
- this->statistics_.first_byte = uv_hrtime();
- if (flags_ & NGHTTP2_STREAM_FLAG_EOS)
- return;
- char* buf = nullptr;
- if (len > 0 && data != nullptr) {
- buf = Malloc(len);
- memcpy(buf, data, len);
- } else if (data == nullptr) {
- flags_ |= NGHTTP2_STREAM_FLAG_EOS;
- }
- data_chunks_.emplace(uv_buf_init(buf, len));
-}
-
-
inline void Http2Stream::Close(int32_t code) {
CHECK(!this->IsDestroyed());
flags_ |= NGHTTP2_STREAM_FLAG_CLOSED;
@@ -1841,13 +1861,6 @@ inline void Http2Stream::Destroy() {
DEBUG_HTTP2STREAM(this, "destroying stream");
- // Free any remaining incoming data chunks.
- while (!data_chunks_.empty()) {
- uv_buf_t buf = data_chunks_.front();
- free(buf.base);
- data_chunks_.pop();
- }
-
// Wait until the start of the next loop to delete because there
// may still be some pending operations queued for this stream.
env()->SetImmediate([](Environment* env, void* data) {
@@ -1873,39 +1886,6 @@ inline void Http2Stream::Destroy() {
}
-// Uses the StreamBase API to push a single chunk of queued inbound DATA
-// to JS land.
-void Http2Stream::OnDataChunk(uv_buf_t* chunk) {
- CHECK(!this->IsDestroyed());
- Isolate* isolate = env()->isolate();
- HandleScope scope(isolate);
- ssize_t len = -1;
- Local buf;
- if (chunk != nullptr) {
- len = chunk->len;
- buf = Buffer::New(isolate, chunk->base, len).ToLocalChecked();
- }
- EmitData(len, buf, this->object());
-}
-
-
-inline void Http2Stream::FlushDataChunks() {
- CHECK(!this->IsDestroyed());
- Http2Scope h2scope(this);
- if (!data_chunks_.empty()) {
- uv_buf_t buf = data_chunks_.front();
- data_chunks_.pop();
- if (buf.len > 0) {
- CHECK_EQ(nghttp2_session_consume_stream(session_->session(),
- id_, buf.len), 0);
- OnDataChunk(&buf);
- } else {
- OnDataChunk(nullptr);
- }
- }
-}
-
-
// Initiates a response on the Http2Stream using data provided via the
// StreamBase Streams API.
inline int Http2Stream::SubmitResponse(nghttp2_nv* nva,
@@ -2012,13 +1992,20 @@ inline Http2Stream* Http2Stream::SubmitPushPromise(nghttp2_nv* nva,
// Switch the StreamBase into flowing mode to begin pushing chunks of data
// out to JS land.
inline int Http2Stream::ReadStart() {
+ Http2Scope h2scope(this);
CHECK(!this->IsDestroyed());
flags_ |= NGHTTP2_STREAM_FLAG_READ_START;
flags_ &= ~NGHTTP2_STREAM_FLAG_READ_PAUSED;
- // Flush any queued data chunks immediately out to the JS layer
- FlushDataChunks();
DEBUG_HTTP2STREAM(this, "reading starting");
+
+ // Tell nghttp2 about our consumption of the data that was handed
+ // off to JS land.
+ nghttp2_session_consume_stream(session_->session(),
+ id_,
+ inbound_consumed_data_while_paused_);
+ inbound_consumed_data_while_paused_ = 0;
+
return 0;
}
@@ -2731,7 +2718,7 @@ void Http2Session::Ping(const FunctionCallbackInfo& args) {
return args.GetReturnValue().Set(false);
}
- // The Ping itself is an Async resource. When the acknowledgement is recieved,
+ // The Ping itself is an Async resource. When the acknowledgement is received,
// the callback will be invoked and a notification sent out to JS land. The
// notification will include the duration of the ping, allowing the round
// trip to be measured.
diff --git a/src/node_http2.h b/src/node_http2.h
index 765f7294768489..9027ed7feb7dad 100644
--- a/src/node_http2.h
+++ b/src/node_http2.h
@@ -550,12 +550,6 @@ class Http2Stream : public AsyncWrap,
inline void EmitStatistics();
- inline bool HasDataChunks(bool ignore_eos = false);
-
- inline void AddChunk(const uint8_t* data, size_t len);
-
- inline void FlushDataChunks();
-
// Process a Data Chunk
void OnDataChunk(uv_buf_t* chunk);
@@ -740,8 +734,11 @@ class Http2Stream : public AsyncWrap,
uint32_t current_headers_length_ = 0; // total number of octets
std::vector current_headers_;
- // Inbound Data... This is the data received via DATA frames for this stream.
- std::queue data_chunks_;
+ // This keeps track of the amount of data read from the socket while the
+ // socket was in paused mode. When `ReadStart()` is called (and not before
+ // then), we tell nghttp2 that we consumed that data to get proper
+ // backpressure handling.
+ size_t inbound_consumed_data_while_paused_ = 0;
// Outbound Data... This is the data written by the JS layer that is
// waiting to be written out to the socket.
@@ -1048,24 +1045,6 @@ class Http2Session : public AsyncWrap {
int lib_error_code,
void* user_data);
-
- static inline ssize_t OnStreamReadFD(
- nghttp2_session* session,
- int32_t id,
- uint8_t* buf,
- size_t length,
- uint32_t* flags,
- nghttp2_data_source* source,
- void* user_data);
- static inline ssize_t OnStreamRead(
- nghttp2_session* session,
- int32_t id,
- uint8_t* buf,
- size_t length,
- uint32_t* flags,
- nghttp2_data_source* source,
- void* user_data);
-
struct Callbacks {
inline explicit Callbacks(bool kHasGetPaddingCallback);
inline ~Callbacks();
@@ -1103,8 +1082,9 @@ class Http2Session : public AsyncWrap {
// use this to allow timeout tracking during long-lasting writes
uint32_t chunks_sent_since_last_write_ = 0;
- uv_prepare_t* prep_ = nullptr;
- char stream_buf_[kAllocBufferSize];
+ char* stream_buf_ = nullptr;
+ size_t stream_buf_size_ = 0;
+ v8::Local stream_buf_ab_;
size_t max_outstanding_pings_ = DEFAULT_MAX_PINGS;
std::queue outstanding_pings_;
diff --git a/src/node_http_parser.cc b/src/node_http_parser.cc
index f378a0475a65c0..9debb8a205ef1c 100644
--- a/src/node_http_parser.cc
+++ b/src/node_http_parser.cc
@@ -72,22 +72,6 @@ const uint32_t kOnMessageComplete = 3;
const uint32_t kOnExecute = 4;
-#define HTTP_CB(name) \
- static int name(http_parser* p_) { \
- Parser* self = ContainerOf(&Parser::parser_, p_); \
- return self->name##_(); \
- } \
- int name##_()
-
-
-#define HTTP_DATA_CB(name) \
- static int name(http_parser* p_, const char* at, size_t length) { \
- Parser* self = ContainerOf(&Parser::parser_, p_); \
- return self->name##_(at, length); \
- } \
- int name##_(const char* at, size_t length)
-
-
// helper class for the Parser
struct StringPtr {
StringPtr() {
@@ -182,7 +166,7 @@ class Parser : public AsyncWrap {
}
- HTTP_CB(on_message_begin) {
+ int on_message_begin() {
num_fields_ = num_values_ = 0;
url_.Reset();
status_message_.Reset();
@@ -190,19 +174,19 @@ class Parser : public AsyncWrap {
}
- HTTP_DATA_CB(on_url) {
+ int on_url(const char* at, size_t length) {
url_.Update(at, length);
return 0;
}
- HTTP_DATA_CB(on_status) {
+ int on_status(const char* at, size_t length) {
status_message_.Update(at, length);
return 0;
}
- HTTP_DATA_CB(on_header_field) {
+ int on_header_field(const char* at, size_t length) {
if (num_fields_ == num_values_) {
// start of new field name
num_fields_++;
@@ -224,7 +208,7 @@ class Parser : public AsyncWrap {
}
- HTTP_DATA_CB(on_header_value) {
+ int on_header_value(const char* at, size_t length) {
if (num_values_ != num_fields_) {
// start of new header value
num_values_++;
@@ -240,7 +224,7 @@ class Parser : public AsyncWrap {
}
- HTTP_CB(on_headers_complete) {
+ int on_headers_complete() {
// Arguments for the on-headers-complete javascript callback. This
// list needs to be kept in sync with the actual argument list for
// `parserOnHeadersComplete` in lib/_http_common.js.
@@ -317,7 +301,7 @@ class Parser : public AsyncWrap {
}
- HTTP_DATA_CB(on_body) {
+ int on_body(const char* at, size_t length) {
EscapableHandleScope scope(env()->isolate());
Local obj = object();
@@ -354,7 +338,7 @@ class Parser : public AsyncWrap {
}
- HTTP_CB(on_message_complete) {
+ int on_message_complete() {
HandleScope scope(env()->isolate());
if (num_fields_)
@@ -392,8 +376,7 @@ class Parser : public AsyncWrap {
Parser* parser;
ASSIGN_OR_RETURN_UNWRAP(&parser, args.Holder());
- if (--parser->refcount_ == 0)
- delete parser;
+ delete parser;
}
@@ -559,22 +542,6 @@ class Parser : public AsyncWrap {
}
protected:
- class ScopedRetainParser {
- public:
- explicit ScopedRetainParser(Parser* p) : p_(p) {
- CHECK_GT(p_->refcount_, 0);
- p_->refcount_++;
- }
-
- ~ScopedRetainParser() {
- if (0 == --p_->refcount_)
- delete p_;
- }
-
- private:
- Parser* const p_;
- };
-
static const size_t kAllocBufferSize = 64 * 1024;
static void OnAllocImpl(size_t suggested_size, uv_buf_t* buf, void* ctx) {
@@ -611,8 +578,6 @@ class Parser : public AsyncWrap {
if (nread == 0)
return;
- ScopedRetainParser retain(parser);
-
parser->current_buffer_.Clear();
Local ret = parser->Execute(buf->base, nread);
@@ -750,22 +715,33 @@ class Parser : public AsyncWrap {
char* current_buffer_data_;
StreamResource::Callback prev_alloc_cb_;
StreamResource::Callback prev_read_cb_;
- int refcount_ = 1;
- static const struct http_parser_settings settings;
- friend class ScopedRetainParser;
-};
+ // These are helper functions for filling `http_parser_settings`, which turn
+ // a member function of Parser into a C-style HTTP parser callback.
+ template struct Proxy;
+ template
+ struct Proxy {
+ static int Raw(http_parser* p, Args ... args) {
+ Parser* parser = ContainerOf(&Parser::parser_, p);
+ return (parser->*Member)(std::forward(args)...);
+ }
+ };
+
+ typedef int (Parser::*Call)();
+ typedef int (Parser::*DataCall)(const char* at, size_t length);
+ static const struct http_parser_settings settings;
+};
const struct http_parser_settings Parser::settings = {
- Parser::on_message_begin,
- Parser::on_url,
- Parser::on_status,
- Parser::on_header_field,
- Parser::on_header_value,
- Parser::on_headers_complete,
- Parser::on_body,
- Parser::on_message_complete,
+ Proxy::Raw,
+ Proxy::Raw,
+ Proxy::Raw,
+ Proxy::Raw,
+ Proxy::Raw,
+ Proxy::Raw,
+ Proxy::Raw,
+ Proxy::Raw,
nullptr, // on_chunk_header
nullptr // on_chunk_complete
};
diff --git a/src/node_i18n.cc b/src/node_i18n.cc
index 041eda94f3bdd5..71ae6a000336e2 100644
--- a/src/node_i18n.cc
+++ b/src/node_i18n.cc
@@ -788,7 +788,8 @@ static int GetColumnWidth(UChar32 codepoint,
if (ambiguous_as_full_width) {
return 2;
}
- // Fall through if ambiguous_as_full_width if false.
+ // If ambiguous_as_full_width is false:
+ // Fall through
case U_EA_NEUTRAL:
if (u_hasBinaryProperty(codepoint, UCHAR_EMOJI_PRESENTATION)) {
return 2;
diff --git a/src/node_internals.h b/src/node_internals.h
index 758df736d35545..06f7b6bdeb23ed 100644
--- a/src/node_internals.h
+++ b/src/node_internals.h
@@ -357,6 +357,19 @@ v8::MaybeLocal New(Environment* env,
// Mixing operator new and free() is undefined behavior so don't do that.
v8::MaybeLocal New(Environment* env, char* data, size_t length);
+inline
+v8::MaybeLocal New(Environment* env,
+ v8::Local ab,
+ size_t byte_offset,
+ size_t length) {
+ v8::Local ui = v8::Uint8Array::New(ab, byte_offset, length);
+ v8::Maybe mb =
+ ui->SetPrototype(env->context(), env->buffer_prototype_object());
+ if (mb.IsNothing())
+ return v8::MaybeLocal();
+ return ui;
+}
+
// Construct a Buffer from a MaybeStackBuffer (and also its subclasses like
// Utf8Value and TwoByteValue).
// If |buf| is invalidated, an empty MaybeLocal is returned, and nothing is
diff --git a/src/node_perf.cc b/src/node_perf.cc
index 97d3a2d99522fe..38dbdaca5adce1 100644
--- a/src/node_perf.cc
+++ b/src/node_perf.cc
@@ -7,7 +7,6 @@ namespace node {
namespace performance {
using v8::Array;
-using v8::ArrayBuffer;
using v8::Context;
using v8::Function;
using v8::FunctionCallbackInfo;
@@ -85,9 +84,9 @@ void PerformanceEntry::Notify(Environment* env,
PerformanceEntryType type,
Local object) {
Context::Scope scope(env->context());
- uint32_t* observers = env->performance_state()->observers;
- if (observers != nullptr &&
- type != NODE_PERFORMANCE_ENTRY_TYPE_INVALID &&
+ AliasedBuffer& observers =
+ env->performance_state()->observers;
+ if (type != NODE_PERFORMANCE_ENTRY_TYPE_INVALID &&
observers[type]) {
node::MakeCallback(env->isolate(),
env->process_object(),
@@ -130,7 +129,8 @@ void Measure(const FunctionCallbackInfo& args) {
Utf8Value startMark(env->isolate(), args[1]);
Utf8Value endMark(env->isolate(), args[2]);
- double* milestones = env->performance_state()->milestones;
+ AliasedBuffer& milestones =
+ env->performance_state()->milestones;
uint64_t startTimestamp = timeOrigin;
uint64_t start = GetPerformanceMark(env, *startMark);
@@ -165,7 +165,8 @@ void Measure(const FunctionCallbackInfo& args) {
void MarkMilestone(const FunctionCallbackInfo& args) {
Environment* env = Environment::GetCurrent(args);
Local context = env->context();
- double* milestones = env->performance_state()->milestones;
+ AliasedBuffer& milestones =
+ env->performance_state()->milestones;
PerformanceMilestone milestone =
static_cast(
args[0]->Int32Value(context).ToChecked());
@@ -188,7 +189,8 @@ void PerformanceGCCallback(uv_async_t* handle) {
HandleScope scope(env->isolate());
Local context = env->context();
- uint32_t* observers = env->performance_state()->observers;
+ AliasedBuffer& observers =
+ env->performance_state()->observers;
if (observers[NODE_PERFORMANCE_ENTRY_TYPE_GC]) {
Local obj = entry->ToObject();
v8::PropertyAttribute attr =
@@ -297,8 +299,8 @@ void TimerFunctionCall(const FunctionCallbackInfo& args) {
args.GetReturnValue().Set(ret.ToLocalChecked());
}
-
- uint32_t* observers = env->performance_state()->observers;
+ AliasedBuffer& observers =
+ env->performance_state()->observers;
if (!observers[NODE_PERFORMANCE_ENTRY_TYPE_FUNCTION])
return;
@@ -329,18 +331,13 @@ void Init(Local target,
Environment* env = Environment::GetCurrent(context);
Isolate* isolate = env->isolate();
performance_state* state = env->performance_state();
- auto state_ab = ArrayBuffer::New(isolate, state, sizeof(*state));
-
- #define SET_STATE_TYPEDARRAY(name, type, field) \
- target->Set(context, \
- FIXED_ONE_BYTE_STRING(isolate, (name)), \
- type::New(state_ab, \
- offsetof(performance_state, field), \
- arraysize(state->field))) \
- .FromJust()
- SET_STATE_TYPEDARRAY("observerCounts", v8::Uint32Array, observers);
- SET_STATE_TYPEDARRAY("milestones", v8::Float64Array, milestones);
- #undef SET_STATE_TYPEDARRAY
+
+ target->Set(context,
+ FIXED_ONE_BYTE_STRING(isolate, "observerCounts"),
+ state->observers.GetJSArray()).FromJust();
+ target->Set(context,
+ FIXED_ONE_BYTE_STRING(isolate, "milestones"),
+ state->milestones.GetJSArray()).FromJust();
Local performanceEntryString =
FIXED_ONE_BYTE_STRING(isolate, "PerformanceEntry");
diff --git a/src/node_perf_common.h b/src/node_perf_common.h
index 713f126d7f70d9..435a4cffe5a753 100644
--- a/src/node_perf_common.h
+++ b/src/node_perf_common.h
@@ -61,10 +61,33 @@ enum PerformanceEntryType {
node::performance::NODE_PERFORMANCE_MILESTONE_##n); \
} while (0);
-struct performance_state {
- // doubles first so that they are always sizeof(double)-aligned
- double milestones[NODE_PERFORMANCE_MILESTONE_INVALID];
- uint32_t observers[NODE_PERFORMANCE_ENTRY_TYPE_INVALID];
+class performance_state {
+ public:
+ explicit performance_state(v8::Isolate* isolate) :
+ root(
+ isolate,
+ sizeof(performance_state_internal)),
+ milestones(
+ isolate,
+ offsetof(performance_state_internal, milestones),
+ NODE_PERFORMANCE_MILESTONE_INVALID,
+ root),
+ observers(
+ isolate,
+ offsetof(performance_state_internal, observers),
+ NODE_PERFORMANCE_ENTRY_TYPE_INVALID,
+ root) {}
+
+ AliasedBuffer root;
+ AliasedBuffer milestones;
+ AliasedBuffer observers;
+
+ private:
+ struct performance_state_internal {
+ // doubles first so that they are always sizeof(double)-aligned
+ double milestones[NODE_PERFORMANCE_MILESTONE_INVALID];
+ uint32_t observers[NODE_PERFORMANCE_ENTRY_TYPE_INVALID];
+ };
};
} // namespace performance
diff --git a/src/node_version.h b/src/node_version.h
index aa271549247d15..35d856fd6ee1e7 100644
--- a/src/node_version.h
+++ b/src/node_version.h
@@ -23,7 +23,7 @@
#define SRC_NODE_VERSION_H_
#define NODE_MAJOR_VERSION 9
-#define NODE_MINOR_VERSION 4
+#define NODE_MINOR_VERSION 5
#define NODE_PATCH_VERSION 1
#define NODE_VERSION_IS_LTS 0
@@ -108,4 +108,7 @@
*/
#define NODE_MODULE_VERSION 59
+// the NAPI_VERSION provided by this version of the runtime
+#define NAPI_VERSION 2
+
#endif // SRC_NODE_VERSION_H_
diff --git a/src/pipe_wrap.cc b/src/pipe_wrap.cc
index 76280f0ce77e86..465cbf4d16dbfe 100644
--- a/src/pipe_wrap.cc
+++ b/src/pipe_wrap.cc
@@ -53,7 +53,8 @@ Local PipeWrap::Instantiate(Environment* env,
AsyncWrap* parent,
PipeWrap::SocketType type) {
EscapableHandleScope handle_scope(env->isolate());
- AsyncHooks::InitScope init_scope(env, parent->get_async_id());
+ AsyncHooks::DefaultTriggerAsyncIdScope trigger_scope(env,
+ parent->get_async_id());
CHECK_EQ(false, env->pipe_constructor_template().IsEmpty());
Local constructor = env->pipe_constructor_template()->GetFunction();
CHECK_EQ(false, constructor.IsEmpty());
diff --git a/src/stream_base-inl.h b/src/stream_base-inl.h
index cc89a11bac249c..cdcff67cc55e66 100644
--- a/src/stream_base-inl.h
+++ b/src/stream_base-inl.h
@@ -143,7 +143,8 @@ void StreamBase::JSMethod(const FunctionCallbackInfo& args) {
if (!wrap->IsAlive())
return args.GetReturnValue().Set(UV_EINVAL);
- AsyncHooks::InitScope init_scope(handle->env(), handle->get_async_id());
+ AsyncHooks::DefaultTriggerAsyncIdScope trigger_scope(
+ handle->env(), handle->get_async_id());
args.GetReturnValue().Set((wrap->*Method)(args));
}
diff --git a/src/stream_base.cc b/src/stream_base.cc
index bb25fc1cff0e9c..ecb5f3dd1b954e 100644
--- a/src/stream_base.cc
+++ b/src/stream_base.cc
@@ -52,7 +52,7 @@ int StreamBase::Shutdown(const FunctionCallbackInfo& args) {
AsyncWrap* wrap = GetAsyncWrap();
CHECK_NE(wrap, nullptr);
- env->set_init_trigger_async_id(wrap->get_async_id());
+ AsyncHooks::DefaultTriggerAsyncIdScope(env, wrap->get_async_id());
ShutdownWrap* req_wrap = new ShutdownWrap(env,
req_wrap_obj,
this);
@@ -109,7 +109,6 @@ int StreamBase::Writev(const FunctionCallbackInfo& args) {
size_t storage_size = 0;
uint32_t bytes = 0;
size_t offset;
- AsyncWrap* wrap;
WriteWrap* req_wrap;
int err;
@@ -153,10 +152,13 @@ int StreamBase::Writev(const FunctionCallbackInfo& args) {
goto done;
}
- wrap = GetAsyncWrap();
- CHECK_NE(wrap, nullptr);
- env->set_init_trigger_async_id(wrap->get_async_id());
- req_wrap = WriteWrap::New(env, req_wrap_obj, this, storage_size);
+ {
+ AsyncWrap* wrap = GetAsyncWrap();
+ CHECK_NE(wrap, nullptr);
+ AsyncHooks::DefaultTriggerAsyncIdScope trigger_scope(env,
+ wrap->get_async_id());
+ req_wrap = WriteWrap::New(env, req_wrap_obj, this, storage_size);
+ }
offset = 0;
if (!all_buffers) {
@@ -226,7 +228,6 @@ int StreamBase::WriteBuffer(const FunctionCallbackInfo& args) {
const char* data = Buffer::Data(args[1]);
size_t length = Buffer::Length(args[1]);
- AsyncWrap* wrap;
WriteWrap* req_wrap;
uv_buf_t buf;
buf.base = const_cast(data);
@@ -242,11 +243,14 @@ int StreamBase::WriteBuffer(const FunctionCallbackInfo& args) {
goto done;
CHECK_EQ(count, 1);
- wrap = GetAsyncWrap();
- if (wrap != nullptr)
- env->set_init_trigger_async_id(wrap->get_async_id());
// Allocate, or write rest
- req_wrap = WriteWrap::New(env, req_wrap_obj, this);
+ {
+ AsyncWrap* wrap = GetAsyncWrap();
+ CHECK_NE(wrap, nullptr);
+ AsyncHooks::DefaultTriggerAsyncIdScope trigger_scope(env,
+ wrap->get_async_id());
+ req_wrap = WriteWrap::New(env, req_wrap_obj, this);
+ }
err = DoWrite(req_wrap, bufs, count, nullptr);
req_wrap_obj->Set(env->async(), True(env->isolate()));
@@ -276,7 +280,6 @@ int StreamBase::WriteString(const FunctionCallbackInfo& args) {
Local req_wrap_obj = args[0].As();
Local string = args[1].As();
Local