Import Upstream version 12.18.1
Commit: 557f7faa75
@ -0,0 +1,111 @@
---
Language: Cpp
# BasedOnStyle: Google
AccessModifierOffset: -1
AlignAfterOpenBracket: Align
AlignConsecutiveAssignments: false
AlignConsecutiveDeclarations: false
AlignEscapedNewlines: Right
AlignOperands: true
AlignTrailingComments: true
AllowAllParametersOfDeclarationOnNextLine: true
AllowShortBlocksOnASingleLine: false
AllowShortCaseLabelsOnASingleLine: false
AllowShortFunctionsOnASingleLine: Inline
AllowShortIfStatementsOnASingleLine: true
AllowShortLoopsOnASingleLine: true
AlwaysBreakAfterDefinitionReturnType: None
AlwaysBreakAfterReturnType: None
AlwaysBreakBeforeMultilineStrings: false
AlwaysBreakTemplateDeclarations: true
BinPackArguments: false
BinPackParameters: false
BraceWrapping:
  AfterClass: false
  AfterControlStatement: false
  AfterEnum: false
  AfterFunction: false
  AfterNamespace: false
  AfterObjCDeclaration: false
  AfterStruct: false
  AfterUnion: false
  AfterExternBlock: false
  BeforeCatch: false
  BeforeElse: false
  IndentBraces: false
  SplitEmptyFunction: true
  SplitEmptyRecord: true
  SplitEmptyNamespace: true
BreakBeforeBinaryOperators: None
BreakBeforeBraces: Attach
BreakBeforeInheritanceComma: false
BreakBeforeTernaryOperators: true
BreakConstructorInitializersBeforeComma: false
BreakConstructorInitializers: BeforeColon
BreakAfterJavaFieldAnnotations: false
BreakStringLiterals: true
ColumnLimit: 80
CommentPragmas: '^ IWYU pragma:'
CompactNamespaces: false
ConstructorInitializerAllOnOneLineOrOnePerLine: true
ConstructorInitializerIndentWidth: 4
ContinuationIndentWidth: 4
Cpp11BracedListStyle: true
DerivePointerAlignment: false
DisableFormat: false
ExperimentalAutoDetectBinPacking: false
FixNamespaceComments: true
ForEachMacros:
  - foreach
  - Q_FOREACH
  - BOOST_FOREACH
IncludeBlocks: Preserve
IncludeCategories:
  - Regex: '^<ext/.*\.h>'
    Priority: 2
  - Regex: '^<.*\.h>'
    Priority: 1
  - Regex: '^<.*'
    Priority: 2
  - Regex: '.*'
    Priority: 3
IncludeIsMainRegex: '([-_](test|unittest))?$'
IndentCaseLabels: true
IndentPPDirectives: None
IndentWidth: 2
IndentWrappedFunctionNames: false
JavaScriptQuotes: Leave
JavaScriptWrapImports: true
KeepEmptyLinesAtTheStartOfBlocks: false
MacroBlockBegin: ''
MacroBlockEnd: ''
MaxEmptyLinesToKeep: 1
NamespaceIndentation: None
ObjCBlockIndentWidth: 2
ObjCSpaceAfterProperty: false
ObjCSpaceBeforeProtocolList: false
PenaltyBreakAssignment: 2
PenaltyBreakBeforeFirstCallParameter: 1
PenaltyBreakComment: 300
PenaltyBreakFirstLessLess: 120
PenaltyBreakString: 1000
PenaltyExcessCharacter: 1000000
PenaltyReturnTypeOnItsOwnLine: 200
PointerAlignment: Left
ReflowComments: true
SortIncludes: true
SortUsingDeclarations: true
SpaceAfterCStyleCast: false
SpaceAfterTemplateKeyword: true
SpaceBeforeAssignmentOperators: true
SpaceBeforeParens: ControlStatements
SpaceInEmptyParentheses: false
SpacesBeforeTrailingComments: 2
SpacesInAngles: false
SpacesInContainerLiterals: true
SpacesInCStyleCastParentheses: false
SpacesInParentheses: false
SpacesInSquareBrackets: false
Standard: Auto
TabWidth: 8
UseTab: Never
@ -0,0 +1,3 @@
set noparent
filter=-build/include_alpha,-build/include_subdir,-build/include_what_you_use,-legal/copyright,-readability/nolint
linelength=80
@ -0,0 +1,3 @@
[flake8]
exclude=.git,deps,lib,src,tools/gyp,tools/inspector_protocol,tools/pip,tools/v8_gypfiles/broken
ignore=E1,E2,E3,E4,E5,E7,W5,W6
@ -0,0 +1,7 @@
{
  "exclude": [
    "**/internal/process/write-coverage.js"
  ],
  "compact": false,
  "reporter": ["html", "text"]
}
@ -0,0 +1,11 @@
# pmake might add -J (private)
FLAGS=${.MAKEFLAGS:C/\-J ([0-9]+,?)+//W}

all: .DEFAULT
.DEFAULT:
	@which gmake > /dev/null 2>&1 ||\
		(echo "GMake is required for node.js to build.\
		Install and try again" && exit 1)
	@gmake ${.FLAGS} ${.TARGETS}

.PHONY: test
@ -0,0 +1,772 @@
|
|||
# Building Node.js
|
||||
|
||||
Depending on what platform or features you need, the build process may
|
||||
differ. After you've built a binary, running the
|
||||
test suite to confirm that the binary works as intended is a good next step.
|
||||
|
||||
If you can reproduce a test failure, search for it in the
|
||||
[Node.js issue tracker](https://github.com/nodejs/node/issues) or
|
||||
file a new issue.
|
||||
|
||||
## Table of Contents
|
||||
|
||||
* [Supported platforms](#supported-platforms)
|
||||
* [Input](#input)
|
||||
* [Strategy](#strategy)
|
||||
* [Platform list](#platform-list)
|
||||
* [Supported toolchains](#supported-toolchains)
|
||||
* [Official binary platforms and toolchains](#official-binary-platforms-and-toolchains)
|
||||
* [OpenSSL asm support](#openssl-asm-support)
|
||||
* [Previous versions of this document](#previous-versions-of-this-document)
|
||||
* [Building Node.js on supported platforms](#building-nodejs-on-supported-platforms)
|
||||
* [Note about Python 2 and Python 3](#note-about-python-2-and-python-3)
|
||||
* [Unix and macOS](#unix-and-macos)
|
||||
* [Unix prerequisites](#unix-prerequisites)
|
||||
* [macOS prerequisites](#macos-prerequisites)
|
||||
* [Building Node.js](#building-nodejs-1)
|
||||
* [Running Tests](#running-tests)
|
||||
* [Running Coverage](#running-coverage)
|
||||
* [Building the documentation](#building-the-documentation)
|
||||
* [Building a debug build](#building-a-debug-build)
|
||||
* [Building an ASAN build](#building-an-asan-build)
|
||||
* [Troubleshooting Unix and macOS builds](#troubleshooting-unix-and-macos-builds)
|
||||
* [Windows](#windows)
|
||||
* [Prerequisites](#prerequisites)
|
||||
* [Option 1: Manual install](#option-1-manual-install)
|
||||
* [Option 2: Automated install with Boxstarter](#option-2-automated-install-with-boxstarter)
|
||||
* [Building Node.js](#building-nodejs-2)
|
||||
* [Android/Android-based devices (e.g. Firefox OS)](#androidandroid-based-devices-eg-firefox-os)
|
||||
* [`Intl` (ECMA-402) support](#intl-ecma-402-support)
|
||||
* [Default: `small-icu` (English only) support](#default-small-icu-english-only-support)
|
||||
* [Build with full ICU support (all locales supported by ICU)](#build-with-full-icu-support-all-locales-supported-by-icu)
|
||||
* [Unix/macOS](#unixmacos)
|
||||
* [Windows](#windows-1)
|
||||
* [Building without Intl support](#building-without-intl-support)
|
||||
* [Unix/macOS](#unixmacos-1)
|
||||
* [Windows](#windows-2)
|
||||
* [Use existing installed ICU (Unix/macOS only)](#use-existing-installed-icu-unixmacos-only)
|
||||
* [Build with a specific ICU](#build-with-a-specific-icu)
|
||||
* [Unix/macOS](#unixmacos-2)
|
||||
* [Windows](#windows-3)
|
||||
* [Building Node.js with FIPS-compliant OpenSSL](#building-nodejs-with-fips-compliant-openssl)
|
||||
* [Building Node.js with external core modules](#building-nodejs-with-external-core-modules)
|
||||
* [Unix/macOS](#unixmacos-3)
|
||||
* [Windows](#windows-4)
|
||||
* [Note for downstream distributors of Node.js](#note-for-downstream-distributors-of-nodejs)
|
||||
|
||||
## Supported platforms
|
||||
|
||||
This list of supported platforms is current as of the branch/release to
|
||||
which it belongs.
|
||||
|
||||
### Input
|
||||
|
||||
Node.js relies on V8 and libuv. We adopt a subset of their supported platforms.
|
||||
|
||||
### Strategy
|
||||
|
||||
There are three support tiers:
|
||||
|
||||
* **Tier 1**: These platforms represent the majority of Node.js users. The
|
||||
Node.js Build Working Group maintains infrastructure for full test coverage.
|
||||
Test failures on tier 1 platforms will block releases.
|
||||
* **Tier 2**: These platforms represent smaller segments of the Node.js user
|
||||
base. The Node.js Build Working Group maintains infrastructure for full test
|
||||
coverage. Test failures on tier 2 platforms will block releases.
|
||||
Infrastructure issues may delay the release of binaries for these platforms.
|
||||
* **Experimental**: May not compile or test suite may not pass. The core team
|
||||
does not create releases for these platforms. Test failures on experimental
|
||||
platforms do not block releases. Contributions to improve support for these
|
||||
platforms are welcome.
|
||||
|
||||
Platforms may move between tiers between major release lines. The table below
|
||||
will reflect those changes.
|
||||
|
||||
### Platform list
|
||||
|
||||
Node.js compilation/execution support depends on operating system, architecture,
|
||||
and libc version. The table below lists the support tier for each supported
|
||||
combination. A list of [supported compile toolchains](#supported-toolchains) is
|
||||
also supplied for tier 1 platforms.
|
||||
|
||||
**For production applications, run Node.js on supported platforms only.**
|
||||
|
||||
Node.js does not support a platform version if a vendor has expired support
|
||||
for it. In other words, Node.js does not support running on End-of-Life (EoL)
|
||||
platforms. This is true regardless of entries in the table below.
|
||||
|
||||
| Operating System | Architectures | Versions | Support Type | Notes |
|
||||
| ---------------- | ---------------- | ------------------------------- | ------------ | --------------------------------- |
|
||||
| GNU/Linux | x64 | kernel >= 3.10, glibc >= 2.17 | Tier 1 | e.g. Ubuntu 16.04 <sup>[1](#fn1)</sup>, Debian 9, EL 7 <sup>[2](#fn2)</sup> |
|
||||
| GNU/Linux | x64 | kernel >= 3.10, musl >= 1.1.19 | Experimental | e.g. Alpine 3.8 |
|
||||
| GNU/Linux | x86 | kernel >= 3.10, glibc >= 2.17 | Experimental | Downgraded as of Node.js 10 |
|
||||
| GNU/Linux | arm64 | kernel >= 4.5, glibc >= 2.17 | Tier 1 | e.g. Ubuntu 16.04, Debian 9, EL 7 <sup>[3](#fn3)</sup> |
|
||||
| GNU/Linux | armv7 | kernel >= 4.14, glibc >= 2.24 | Tier 1 | e.g. Ubuntu 18.04, Debian 9 |
|
||||
| GNU/Linux | armv6 | kernel >= 4.14, glibc >= 2.24 | Experimental | Downgraded as of Node.js 12 |
|
||||
| GNU/Linux | ppc64le >=power8 | kernel >= 3.10.0, glibc >= 2.17 | Tier 2 | e.g. Ubuntu 16.04 <sup>[1](#fn1)</sup>, EL 7 <sup>[2](#fn2)</sup> |
|
||||
| GNU/Linux | s390x | kernel >= 3.10.0, glibc >= 2.17 | Tier 2 | e.g. EL 7 <sup>[2](#fn2)</sup> |
|
||||
| Windows | x64, x86 (WoW64) | >= Windows 7/2008 R2/2012 R2 | Tier 1 | <sup>[4](#fn4),[5](#fn5)</sup> |
|
||||
| Windows | x86 (native) | >= Windows 7/2008 R2/2012 R2 | Tier 1 (running) / Experimental (compiling) <sup>[6](#fn6)</sup> | |
|
||||
| Windows | arm64 | >= Windows 10 | Experimental | |
|
||||
| macOS | x64 | >= 10.11 | Tier 1 | |
|
||||
| SmartOS | x64 | >= 18 | Tier 2 | |
|
||||
| AIX | ppc64be >=power7 | >= 7.2 TL02 | Tier 2 | |
|
||||
| FreeBSD | x64 | >= 11 | Experimental | Downgraded as of Node.js 12 <sup>[7](#fn7)</sup> |
|
||||
|
||||
<em id="fn1">1</em>: GCC 6 is not provided on the base platform. Users will
|
||||
need the
|
||||
[Toolchain test builds PPA](https://launchpad.net/~ubuntu-toolchain-r/+archive/ubuntu/test?field.series_filter=xenial)
|
||||
or similar to source a newer compiler.
|
||||
|
||||
<em id="fn2">2</em>: GCC 6 is not provided on the base platform. Users will
|
||||
need the
|
||||
[devtoolset-6](https://www.softwarecollections.org/en/scls/rhscl/devtoolset-6/)
|
||||
or later to source a newer compiler.
|
||||
|
||||
<em id="fn3">3</em>: Older kernel versions may work for ARM64. However the
|
||||
Node.js test infrastructure only tests >= 4.5.
|
||||
|
||||
<em id="fn4">4</em>: On Windows, running Node.js in Windows terminal emulators
|
||||
like `mintty` requires the usage of [winpty](https://github.com/rprichard/winpty)
|
||||
for the tty channels to work (e.g. `winpty node.exe script.js`).
|
||||
In "Git bash" if you call the node shell alias (`node` without the `.exe`
|
||||
extension), `winpty` is used automatically.
|
||||
|
||||
<em id="fn5">5</em>: The Windows Subsystem for Linux (WSL) is not
|
||||
supported, but the GNU/Linux build process and binaries should work. The
|
||||
community will only address issues that reproduce on native GNU/Linux
|
||||
systems. Issues that only reproduce on WSL should be reported in the
|
||||
[WSL issue tracker](https://github.com/Microsoft/WSL/issues). Running the
|
||||
Windows binary (`node.exe`) in WSL is not recommended. It will not work
|
||||
without workarounds such as stdio redirection.
|
||||
|
||||
<em id="fn6">6</em>: Running Node.js on x86 Windows should work and binaries
|
||||
are provided. However, tests in our infrastructure only run on WoW64.
|
||||
Furthermore, compiling on x86 Windows is Experimental and
|
||||
may not be possible.
|
||||
|
||||
<em id="fn7">7</em>: The default FreeBSD 12.0 compiler is Clang 6.0.1, but
|
||||
FreeBSD 12.1 upgrades to 8.0.1. Other Clang/LLVM versions are available
|
||||
via the system's package manager, including Clang 9.0.
|
||||
|
||||
### Supported toolchains
|
||||
|
||||
Depending on the host platform, the selection of toolchains may vary.
|
||||
|
||||
| Operating System | Compiler Versions |
|
||||
| ---------------- | -------------------------------------------------------------- |
|
||||
| Linux | GCC >= 6.3 |
|
||||
| Windows | Visual Studio >= 2017 with the Windows 10 SDK on a 64-bit host |
|
||||
| macOS | Xcode >= 8 (Apple LLVM >= 8) |
|
||||
|
||||
### Official binary platforms and toolchains
|
||||
|
||||
Binaries at <https://nodejs.org/download/release/> are produced on:
|
||||
|
||||
| Binary package | Platform and Toolchain |
|
||||
| --------------------- | ------------------------------------------------------------------------ |
|
||||
| aix-ppc64 | AIX 7.1 TL05 on PPC64BE with GCC 6 |
|
||||
| darwin-x64 (and .pkg) | macOS 10.15, Xcode Command Line Tools 11 with -mmacosx-version-min=10.10 |
|
||||
| linux-arm64 | CentOS 7 with devtoolset-6 / GCC 6 |
|
||||
| linux-armv7l | Cross-compiled on Ubuntu 16.04 x64 with [custom GCC toolchain](https://github.com/rvagg/rpi-newer-crosstools) |
|
||||
| linux-ppc64le | CentOS 7 with devtoolset-6 / GCC 6 <sup>[7](#fn7)</sup> |
|
||||
| linux-s390x | RHEL 7 with devtoolset-6 / GCC 6 <sup>[7](#fn7)</sup> |
|
||||
| linux-x64 | CentOS 7 with devtoolset-6 / GCC 6 <sup>[7](#fn7)</sup> |
|
||||
| sunos-x64 | SmartOS 18 with GCC 7 |
|
||||
| win-x64 and win-x86 | Windows 2012 R2 (x64) with Visual Studio 2017 |
|
||||
|
||||
<em id="fn7">7</em>: The Enterprise Linux devtoolset-6 allows us to compile
|
||||
binaries with GCC 6 but linked to the glibc and libstdc++ versions of the host
|
||||
platforms (CentOS 7 / RHEL 7). Therefore, binaries produced on these systems
|
||||
are compatible with glibc >= 2.17 and libstdc++ >= 6.0.20 (`GLIBCXX_3.4.20`).
|
||||
These are available on distributions natively supporting GCC 4.9, such as
|
||||
Ubuntu 14.04 and Debian 8.
|
||||
|
||||
#### OpenSSL asm support
|
||||
|
||||
OpenSSL-1.1.1 requires the following assembler versions for asm
support on x86_64 and ia32.
|
||||
|
||||
For use of AVX-512,
|
||||
|
||||
* gas (GNU assembler) version 2.26 or higher
|
||||
* nasm version 2.11.8 or higher in Windows
|
||||
|
||||
AVX-512 is disabled for Skylake-X by OpenSSL-1.1.1.
|
||||
|
||||
For use of AVX2,
|
||||
|
||||
* gas (GNU assembler) version 2.23 or higher
|
||||
* Xcode version 5.0 or higher
|
||||
* llvm version 3.3 or higher
|
||||
* nasm version 2.10 or higher in Windows
|
||||
|
||||
Please refer to
|
||||
<https://www.openssl.org/docs/man1.1.1/man3/OPENSSL_ia32cap.html> for details.
|
||||
|
||||
If compiling without one of the above, use `configure` with the
|
||||
`--openssl-no-asm` flag. Otherwise, `configure` will fail.
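As a quick reference (a sketch, not an additional requirement), you can check
your assembler versions and, if they are too old, pass the flag mentioned above
to `configure`:

```console
$ as --version    # gas (GNU assembler)
$ nasm -v         # nasm, where applicable
$ ./configure --openssl-no-asm
```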
|
||||
|
||||
### Previous versions of this document
|
||||
|
||||
Supported platforms and toolchains change with each major version of Node.js.
|
||||
This document is only valid for the current major version of Node.js.
|
||||
Consult previous versions of this document for older versions of Node.js:
|
||||
|
||||
* [Node.js 13](https://github.com/nodejs/node/blob/v13.x/BUILDING.md)
|
||||
* [Node.js 12](https://github.com/nodejs/node/blob/v12.x/BUILDING.md)
|
||||
* [Node.js 10](https://github.com/nodejs/node/blob/v10.x/BUILDING.md)
|
||||
* [Node.js 8](https://github.com/nodejs/node/blob/v8.x/BUILDING.md)
|
||||
|
||||
## Building Node.js on supported platforms
|
||||
|
||||
### Note about Python 2 and Python 3
|
||||
|
||||
The Node.js project uses Python as part of its build process and has
|
||||
historically only been Python 2 compatible.
|
||||
|
||||
Python 2 will reach its _end-of-life_ at the end of 2019 at which point the
|
||||
interpreter will cease receiving updates. See <https://python3statement.org/>
|
||||
for more information.
|
||||
|
||||
The Node.js project is in the process of transitioning its Python code to
|
||||
Python 3 compatibility. Installing both versions of Python while building
|
||||
and testing Node.js allows developers and end users to test, benchmark,
|
||||
and debug Node.js running on both versions to ensure a smooth and complete
|
||||
transition before the year-end deadline.
|
||||
|
||||
### Unix and macOS
|
||||
|
||||
#### Unix prerequisites
|
||||
|
||||
* `gcc` and `g++` 6.3 or newer
* GNU Make 3.81 or newer
* Python (see note above)
  * Python 2.7
  * Python 3.5, 3.6, and 3.7 are experimental.
|
||||
|
||||
Installation via Linux package manager can be achieved with:
|
||||
|
||||
* Ubuntu, Debian: `sudo apt-get install python g++ make`
|
||||
* Fedora: `sudo dnf install python gcc-c++ make`
|
||||
* CentOS and RHEL: `sudo yum install python gcc-c++ make`
|
||||
* OpenSUSE: `sudo zypper install python gcc-c++ make`
|
||||
* Arch Linux, Manjaro: `sudo pacman -S python gcc make`
|
||||
|
||||
FreeBSD and OpenBSD users may also need to install `libexecinfo`.
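As an illustration only, assuming FreeBSD's `pkg` package manager and the stock
package name, that could look like:

```console
$ sudo pkg install libexecinfo
```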
|
||||
|
||||
Python 3 users may also need to install `python3-distutils`.
|
||||
|
||||
#### macOS prerequisites
|
||||
|
||||
* Xcode Command Line Tools >= 8 for macOS
* Python (see note above)
  * Python 2.7
  * Python 3.5, 3.6, and 3.7 are experimental.
|
||||
|
||||
macOS users can install the `Xcode Command Line Tools` by running
|
||||
`xcode-select --install`. Alternatively, if you already have the full Xcode
|
||||
installed, you can find them under the menu `Xcode -> Open Developer Tool ->
|
||||
More Developer Tools...`. This step will install `clang`, `clang++`, and
|
||||
`make`.
|
||||
|
||||
#### Building Node.js
|
||||
|
||||
If the path to your build directory contains a space, the build will likely
|
||||
fail.
|
||||
|
||||
To build Node.js:
|
||||
|
||||
```console
|
||||
$ ./configure
|
||||
$ make -j4
|
||||
```
|
||||
|
||||
If you run into a `No module named 'distutils.spawn'` error when executing
|
||||
`./configure`, please try `python3 -m pip install --upgrade setuptools` or
|
||||
`sudo apt install python3-distutils -y`.
|
||||
For more information, see <https://github.com/nodejs/node/issues/30189>.
|
||||
|
||||
The `-j4` option will cause `make` to run 4 simultaneous compilation jobs which
|
||||
may reduce build time. For more information, see the
|
||||
[GNU Make Documentation](https://www.gnu.org/software/make/manual/html_node/Parallel.html).
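For example, to match the job count to the number of available CPU cores (a
common convention rather than anything `make` does automatically):

```console
$ make -j"$(getconf _NPROCESSORS_ONLN)"
```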
|
||||
|
||||
The above requires that `python` resolves to a supported version of
|
||||
Python. See [Prerequisites](#prerequisites).
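A quick sanity check, which is not an official build step, is to confirm which
interpreter `python` resolves to before configuring:

```console
$ command -v python
$ python --version
```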
|
||||
|
||||
After building, setting up [firewall rules](tools/macos-firewall.sh) can avoid
|
||||
popups asking to accept incoming network connections when running tests.
|
||||
|
||||
Running the following script on macOS will add the firewall rules for the
|
||||
executable `node` in the `out` directory and the symbolic `node` link in the
|
||||
project's root directory.
|
||||
|
||||
```console
|
||||
$ sudo ./tools/macos-firewall.sh
|
||||
```
|
||||
|
||||
#### Running Tests
|
||||
|
||||
To verify the build:
|
||||
|
||||
```console
|
||||
$ make test-only
|
||||
```
|
||||
|
||||
At this point, you are ready to make code changes and re-run the tests.
|
||||
|
||||
If you are running tests before submitting a Pull Request, the recommended
|
||||
command is:
|
||||
|
||||
```console
|
||||
$ make -j4 test
|
||||
```
|
||||
|
||||
`make -j4 test` does a full check on the codebase, including running linters and
|
||||
documentation tests.
|
||||
|
||||
Make sure the linter does not report any issues and that all tests pass. Please
|
||||
do not submit patches that fail either check.
|
||||
|
||||
If you want to run the linter without running tests, use
|
||||
`make lint`/`vcbuild lint`. It will lint JavaScript, C++, and Markdown files.
|
||||
|
||||
If you are updating tests and want to run tests in a single test file
|
||||
(e.g. `test/parallel/test-stream2-transform.js`):
|
||||
|
||||
```text
|
||||
$ python tools/test.py test/parallel/test-stream2-transform.js
|
||||
```
|
||||
|
||||
You can execute the entire suite of tests for a given subsystem
|
||||
by providing the name of a subsystem:
|
||||
|
||||
```text
|
||||
$ python tools/test.py -J --mode=release child-process
|
||||
```
|
||||
|
||||
If you want to check the other options, please refer to the help by using
|
||||
the `--help` option:
|
||||
|
||||
```text
|
||||
$ python tools/test.py --help
|
||||
```
|
||||
|
||||
You can usually run tests directly with node:
|
||||
|
||||
```text
|
||||
$ ./node ./test/parallel/test-stream2-transform.js
|
||||
```
|
||||
|
||||
Remember to recompile with `make -j4` in between test runs if you change code in
|
||||
the `lib` or `src` directories.
|
||||
|
||||
The tests attempt to detect support for IPv6 and exclude IPv6 tests if
|
||||
appropriate. If your main interface has IPv6 addresses, then your
|
||||
loopback interface must also have '::1' enabled. For some default installations
|
||||
on Ubuntu that does not seem to be the case. To enable '::1' on the
|
||||
loopback interface on Ubuntu:
|
||||
|
||||
```bash
|
||||
sudo sysctl -w net.ipv6.conf.lo.disable_ipv6=0
|
||||
```
|
||||
|
||||
You can use
|
||||
[node-code-ide-configs](https://github.com/nodejs/node-code-ide-configs)
|
||||
to run/debug tests, if your IDE configs are present.
|
||||
|
||||
#### Running Coverage
|
||||
|
||||
It's good practice to ensure any code you add or change is covered by tests.
|
||||
You can do so by running the test suite with coverage enabled:
|
||||
|
||||
```console
|
||||
$ ./configure --coverage
|
||||
$ make coverage
|
||||
```
|
||||
|
||||
A detailed coverage report will be written to `coverage/index.html` for
|
||||
JavaScript coverage and to `coverage/cxxcoverage.html` for C++ coverage
|
||||
(if you only want to run the JavaScript tests then you do not need to run
|
||||
the first command `./configure --coverage`).
|
||||
|
||||
_Generating a test coverage report can take several minutes._
|
||||
|
||||
To collect coverage for a subset of tests you can set the `CI_JS_SUITES` and
|
||||
`CI_NATIVE_SUITES` variables (to run specific suites, e.g., `child-process`, in
|
||||
isolation, unset the opposing `_SUITES` variable):
|
||||
|
||||
```text
|
||||
$ CI_JS_SUITES=child-process CI_NATIVE_SUITES= make coverage
|
||||
```
|
||||
|
||||
The above command executes tests for the `child-process` subsystem and
|
||||
outputs the resulting coverage report.
|
||||
|
||||
Alternatively, you can run `make coverage-run-js` to execute JavaScript tests
independently of the C++ test suite:
|
||||
|
||||
```text
|
||||
$ CI_JS_SUITES=fs CI_NATIVE_SUITES= make coverage-run-js
|
||||
```
|
||||
|
||||
The `make coverage` command downloads some tools to the project root directory.
|
||||
To clean up after generating the coverage reports:
|
||||
|
||||
```console
|
||||
$ make coverage-clean
|
||||
```
|
||||
|
||||
#### Building the documentation
|
||||
|
||||
To build the documentation, run the following command. It will build Node.js
first (if necessary) and then use it to build the docs:
|
||||
|
||||
```console
|
||||
$ make doc
|
||||
```
|
||||
|
||||
If you have an existing Node.js build, you can build just the docs with:
|
||||
|
||||
```console
|
||||
$ NODE=/path/to/node make doc-only
|
||||
```
|
||||
|
||||
To read the documentation:
|
||||
|
||||
```console
|
||||
$ man doc/node.1
|
||||
```
|
||||
|
||||
If you prefer to read the documentation in a browser,
|
||||
run the following after `make doc` is finished:
|
||||
|
||||
```console
|
||||
$ make docopen
|
||||
```
|
||||
|
||||
This will open a browser with the documentation.
|
||||
|
||||
To test if Node.js was built correctly:
|
||||
|
||||
```console
|
||||
$ ./node -e "console.log('Hello from Node.js ' + process.version)"
|
||||
```
|
||||
|
||||
To install this version of Node.js into a system directory:
|
||||
|
||||
```console
|
||||
$ [sudo] make install
|
||||
```
|
||||
|
||||
#### Building a debug build
|
||||
|
||||
If you run into an issue where the information provided by the JS stack trace
|
||||
is not enough, or if you suspect the error happens outside of the JS VM, you
|
||||
can try to build a debug enabled binary:
|
||||
|
||||
```console
|
||||
$ ./configure --debug
|
||||
$ make -j4
|
||||
```
|
||||
|
||||
`make` with `./configure --debug` generates two binaries: the regular release
one in `out/Release/node` and a debug binary in `out/Debug/node`. Only the
release version is actually installed when you run `make install`.

To use the debug build with all the normal dependencies, overwrite the release
version in the install directory:
|
||||
|
||||
``` console
|
||||
$ make install PREFIX=/opt/node-debug/
|
||||
$ cp -a -f out/Debug/node /opt/node-debug/node
|
||||
```
|
||||
|
||||
When using the debug binary, core dumps will be generated in case of crashes.
|
||||
These core dumps are useful for debugging when provided with the
|
||||
corresponding original debug binary and system information.
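On many Linux systems core dumps are disabled by default. As a hedged example
(the exact mechanism varies by distribution, and `crashing-script.js` is only a
placeholder), you may need to raise the core file size limit in the shell that
runs the process:

```console
$ ulimit -c unlimited
$ /opt/node-debug/node crashing-script.js
```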
|
||||
|
||||
Reading the core dump requires `gdb` built on the same platform the core dump
|
||||
was captured on (i.e. 64-bit `gdb` for `node` built on a 64-bit system, Linux
|
||||
`gdb` for `node` built on Linux) otherwise you will get errors like
|
||||
`not in executable format: File format not recognized`.
|
||||
|
||||
Example of generating a backtrace from the core dump:
|
||||
|
||||
``` console
$ gdb /opt/node-debug/node core.node.8.1535359906
(gdb) backtrace
```
|
||||
|
||||
#### Building an ASAN build
|
||||
|
||||
[ASAN](https://github.com/google/sanitizers) can help detect various
memory-related bugs. ASAN builds are currently only supported on Linux.
If you want to check it on Windows or macOS, or you want a consistent toolchain
on Linux, you can try [Docker](https://www.docker.com/products/docker-desktop)
(using an image like `gengjiawen/node-build:2020-02-14`).

The `--debug` flag is not necessary and will slow down the build and testing,
but it can produce a clearer stack trace if ASAN hits an issue.
|
||||
|
||||
``` console
|
||||
$ ./configure --debug --enable-asan && make -j4
|
||||
$ make test-only
|
||||
```
|
||||
|
||||
#### Troubleshooting Unix and macOS builds
|
||||
|
||||
Stale builds can sometimes result in `file not found` errors while building.
|
||||
This and some other problems can be resolved with `make distclean`. The
|
||||
`distclean` recipe aggressively removes build artifacts. You will need to
|
||||
build again (`make -j4`). Since all build artifacts have been removed, this
|
||||
rebuild may take a lot more time than previous builds. Additionally,
|
||||
`distclean` removes the file that stores the results of `./configure`. If you
|
||||
ran `./configure` with non-default options (such as `--debug`), you will need
|
||||
to run it again before invoking `make -j4`.
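Putting those steps together, a typical recovery sequence looks like this
(re-add any non-default `./configure` options you were using):

```console
$ make distclean
$ ./configure
$ make -j4
```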
|
||||
|
||||
### Windows
|
||||
|
||||
#### Prerequisites
|
||||
|
||||
##### Option 1: Manual install
|
||||
|
||||
* [Python 2.7](https://www.python.org/downloads/)
|
||||
* The "Desktop development with C++" workload from
|
||||
[Visual Studio 2017](https://www.visualstudio.com/downloads/) or the
|
||||
"Visual C++ build tools" workload from the
|
||||
[Build Tools](https://www.visualstudio.com/downloads/#build-tools-for-visual-studio-2017),
|
||||
with the default optional components.
|
||||
* Basic Unix tools required for some tests,
|
||||
[Git for Windows](https://git-scm.com/download/win) includes Git Bash
|
||||
and tools which can be included in the global `PATH`.
|
||||
* The [NetWide Assembler](https://www.nasm.us/), for OpenSSL assembler modules.
|
||||
If not installed in the default location, it needs to be manually added
|
||||
to `PATH`. A build with the `openssl-no-asm` option does not need this, nor
|
||||
does a build targeting ARM64 Windows.
|
||||
|
||||
Optional requirements to build the MSI installer package:
|
||||
|
||||
* The [WiX Toolset v3.11](https://wixtoolset.org/releases/) and the
|
||||
[Wix Toolset Visual Studio 2017 Extension](https://marketplace.visualstudio.com/items?itemName=RobMensching.WixToolsetVisualStudio2017Extension).
|
||||
|
||||
Optional requirements for compiling for Windows 10 on ARM (ARM64):
|
||||
|
||||
* ARM64 Windows build machine
|
||||
* Due to a GYP limitation, this is required to run compiled code
|
||||
generation tools (like V8's builtins and mksnapshot tools)
|
||||
* Visual Studio 15.9.0 or newer
|
||||
* Visual Studio optional components
|
||||
* Visual C++ compilers and libraries for ARM64
|
||||
* Visual C++ ATL for ARM64
|
||||
* Windows 10 SDK 10.0.17763.0 or newer
|
||||
|
||||
##### Option 2: Automated install with Boxstarter
|
||||
|
||||
A [Boxstarter](https://boxstarter.org/) script can be used for easy setup of
|
||||
Windows systems with all the required prerequisites for Node.js development.
|
||||
This script will install the following [Chocolatey](https://chocolatey.org/)
|
||||
packages:
|
||||
|
||||
* [Git for Windows](https://chocolatey.org/packages/git) with the `git` and
|
||||
Unix tools added to the `PATH`.
|
||||
* [Python 3.x](https://chocolatey.org/packages/python) and
|
||||
[legacy Python](https://chocolatey.org/packages/python2)
|
||||
* [Visual Studio 2019 Build Tools](https://chocolatey.org/packages/visualstudio2019buildtools)
|
||||
with [Visual C++ workload](https://chocolatey.org/packages/visualstudio2017-workload-vctools)
|
||||
* [NetWide Assembler](https://chocolatey.org/packages/nasm)
|
||||
|
||||
To install Node.js prerequisites using
|
||||
[Boxstarter WebLauncher](https://boxstarter.org/WebLauncher), open
|
||||
<https://boxstarter.org/package/nr/url?https://raw.githubusercontent.com/nodejs/node/master/tools/bootstrap/windows_boxstarter>
|
||||
with Internet Explorer or Edge browser on the target machine.
|
||||
|
||||
Alternatively, you can use PowerShell. Run those commands from an elevated
|
||||
PowerShell terminal:
|
||||
|
||||
```powershell
|
||||
Set-ExecutionPolicy Unrestricted -Force
|
||||
iex ((New-Object System.Net.WebClient).DownloadString('https://boxstarter.org/bootstrapper.ps1'))
|
||||
get-boxstarter -Force
|
||||
Install-BoxstarterPackage https://raw.githubusercontent.com/nodejs/node/master/tools/bootstrap/windows_boxstarter -DisableReboots
|
||||
```
|
||||
|
||||
The entire installation using Boxstarter will take up approximately 10 GB of
|
||||
disk space.
|
||||
|
||||
#### Building Node.js
|
||||
|
||||
If the path to your build directory contains a space or a non-ASCII character,
|
||||
the build will likely fail.
|
||||
|
||||
```console
|
||||
> .\vcbuild
|
||||
```
|
||||
|
||||
To run the tests:
|
||||
|
||||
```console
|
||||
> .\vcbuild test
|
||||
```
|
||||
|
||||
To test if Node.js was built correctly:
|
||||
|
||||
```console
|
||||
> Release\node -e "console.log('Hello from Node.js', process.version)"
|
||||
```
|
||||
|
||||
### Android/Android-based devices (e.g. Firefox OS)
|
||||
|
||||
Android is not a supported platform. Patches to improve the Android build are
|
||||
welcome. There is no testing on Android in the current continuous integration
|
||||
environment. The participation of people dedicated and determined to improve
|
||||
Android building, testing, and support is encouraged.
|
||||
|
||||
Be sure you have downloaded and extracted the
[Android NDK](https://developer.android.com/tools/sdk/ndk/index.html) into a
folder before proceeding. Then run:
|
||||
|
||||
```console
|
||||
$ ./android-configure /path/to/your/android-ndk
|
||||
$ make
|
||||
```
|
||||
|
||||
## `Intl` (ECMA-402) support
|
||||
|
||||
[Intl](https://github.com/nodejs/node/blob/master/doc/api/intl.md) support is
|
||||
enabled by default, with English data only.
|
||||
|
||||
### Default: `small-icu` (English only) support
|
||||
|
||||
By default, only English data is included, but
the full `Intl` (ECMA-402) APIs are available. This build does not need to
download any dependencies to function. You can add full
ICU data at runtime.
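For example, assuming you have obtained a full ICU data directory (for instance
via the `full-icu` npm package; the path below is a placeholder), you can point
Node.js at it with the `--icu-data-dir` flag or the `NODE_ICU_DATA` environment
variable:

```console
$ node --icu-data-dir=/path/to/icu-data app.js
$ NODE_ICU_DATA=/path/to/icu-data node app.js
```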
|
||||
|
||||
### Build with full ICU support (all locales supported by ICU)
|
||||
|
||||
With the `--download=all` flag, `configure` may download ICU if you don't have
an ICU in `deps/icu`. (The embedded `small-icu` included in the default
Node.js source does not include all locales.)
|
||||
|
||||
#### Unix/macOS
|
||||
|
||||
```console
|
||||
$ ./configure --with-intl=full-icu --download=all
|
||||
```
|
||||
|
||||
#### Windows
|
||||
|
||||
```console
|
||||
> .\vcbuild full-icu download-all
|
||||
```
|
||||
|
||||
### Building without Intl support
|
||||
|
||||
The `Intl` object will not be available, nor will some other APIs such as
`String.prototype.normalize`.
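A quick, illustrative way to check whether a given binary was built without
`Intl` support:

```console
$ ./node -p "typeof Intl"
```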
|
||||
|
||||
#### Unix/macOS
|
||||
|
||||
```console
|
||||
$ ./configure --without-intl
|
||||
```
|
||||
|
||||
#### Windows
|
||||
|
||||
```console
|
||||
> .\vcbuild without-intl
|
||||
```
|
||||
|
||||
### Use existing installed ICU (Unix/macOS only)
|
||||
|
||||
```console
|
||||
$ pkg-config --modversion icu-i18n && ./configure --with-intl=system-icu
|
||||
```
|
||||
|
||||
If you are cross-compiling, your `pkg-config` must be able to supply a path
|
||||
that works for both your host and target environments.
|
||||
|
||||
### Build with a specific ICU
|
||||
|
||||
You can find other ICU releases at
|
||||
[the ICU homepage](http://icu-project.org/download).
|
||||
Download the file named something like `icu4c-**##.#**-src.tgz` (or
|
||||
`.zip`).
|
||||
|
||||
To check the minimum recommended ICU, run `./configure --help` and see
|
||||
the help for the `--with-icu-source` option. A warning will be printed
|
||||
during configuration if the ICU version is too old.
|
||||
|
||||
#### Unix/macOS
|
||||
|
||||
From an already-unpacked ICU:
|
||||
|
||||
```console
|
||||
$ ./configure --with-intl=[small-icu,full-icu] --with-icu-source=/path/to/icu
|
||||
```
|
||||
|
||||
From a local ICU tarball:
|
||||
|
||||
```console
|
||||
$ ./configure --with-intl=[small-icu,full-icu] --with-icu-source=/path/to/icu.tgz
|
||||
```
|
||||
|
||||
From a tarball URL:
|
||||
|
||||
```console
|
||||
$ ./configure --with-intl=full-icu --with-icu-source=http://url/to/icu.tgz
|
||||
```
|
||||
|
||||
#### Windows
|
||||
|
||||
First, unpack the latest ICU,
[icu4c-**##.#**-src.tgz](http://icu-project.org/download) (or `.zip`),
into `deps/icu` (you'll end up with `deps/icu/source/...`):
|
||||
|
||||
```console
|
||||
> .\vcbuild full-icu
|
||||
```
|
||||
|
||||
## Building Node.js with FIPS-compliant OpenSSL
|
||||
|
||||
The current version of Node.js does not support FIPS.
|
||||
|
||||
## Building Node.js with external core modules
|
||||
|
||||
It is possible to specify one or more JavaScript text files to be bundled in
|
||||
the binary as built-in modules when building Node.js.
|
||||
|
||||
### Unix/macOS
|
||||
|
||||
This command will make `/root/myModule.js` available via
|
||||
`require('/root/myModule')` and `./myModule2.js` available via
|
||||
`require('myModule2')`.
|
||||
|
||||
```console
|
||||
$ ./configure --link-module '/root/myModule.js' --link-module './myModule2.js'
|
||||
```
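After the build completes, the bundled modules load exactly as described above;
for example (illustrative only):

```console
$ ./node -e "console.log(require('myModule2'))"
```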
|
||||
|
||||
### Windows
|
||||
|
||||
To make `./myModule.js` available via `require('myModule')` and
|
||||
`./myModule2.js` available via `require('myModule2')`:
|
||||
|
||||
```console
|
||||
> .\vcbuild link-module './myModule.js' link-module './myModule2.js'
|
||||
```
|
||||
|
||||
## Note for downstream distributors of Node.js
|
||||
|
||||
The Node.js ecosystem is reliant on ABI compatibility within a major release.
|
||||
To maintain ABI compatibility it is required that distributed builds of Node.js
|
||||
be built against the same version of dependencies, or similar versions that do
|
||||
not break their ABI compatibility, as those released by Node.js for any given
|
||||
`NODE_MODULE_VERSION` (located in `src/node_version.h`).
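You can inspect the `NODE_MODULE_VERSION` of a running binary through
`process.versions.modules`, for example:

```console
$ node -p "process.versions.modules"
```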
|
||||
|
||||
When Node.js is built (with an intention to distribute) with an ABI
incompatible with the official Node.js builds (e.g. using an ABI-incompatible
version of a dependency), please reserve and use a custom `NODE_MODULE_VERSION`
|
||||
by opening a pull request against the registry available at
|
||||
<https://github.com/nodejs/node/blob/master/doc/abi_version_registry.json>.
|
File diff suppressed because it is too large
@ -0,0 +1,4 @@
|
|||
# Code of Conduct
|
||||
|
||||
* [Node.js Code of Conduct](https://github.com/nodejs/admin/blob/master/CODE_OF_CONDUCT.md)
|
||||
* [Node.js Moderation Policy](https://github.com/nodejs/admin/blob/master/Moderation-Policy.md)
|
|
@ -0,0 +1,58 @@
|
|||
# Contributing to Node.js
|
||||
|
||||
* [Code of Conduct](#code-of-conduct)
|
||||
* [Issues](#issues)
|
||||
* [Pull Requests](#pull-requests)
|
||||
* [Developer's Certificate of Origin 1.1](#developers-certificate-of-origin)
|
||||
|
||||
## [Code of Conduct](./doc/guides/contributing/code-of-conduct.md)
|
||||
|
||||
The Node.js project has a
|
||||
[Code of Conduct](https://github.com/nodejs/admin/blob/master/CODE_OF_CONDUCT.md)
|
||||
to which all contributors must adhere.
|
||||
|
||||
See [details on our policy on Code of Conduct](./doc/guides/contributing/code-of-conduct.md).
|
||||
|
||||
## [Issues](./doc/guides/contributing/issues.md)
|
||||
|
||||
* [How to Contribute in Issues](./doc/guides/contributing/issues.md#how-to-contribute-in-issues)
|
||||
* [Asking for General Help](./doc/guides/contributing/issues.md#asking-for-general-help)
|
||||
* [Discussing non-technical topics](./doc/guides/contributing/issues.md#discussing-non-technical-topics)
|
||||
* [Submitting a Bug Report](./doc/guides/contributing/issues.md#submitting-a-bug-report)
|
||||
* [Triaging a Bug Report](./doc/guides/contributing/issues.md#triaging-a-bug-report)
|
||||
* [Resolving a Bug Report](./doc/guides/contributing/issues.md#resolving-a-bug-report)
|
||||
|
||||
## [Pull Requests](./doc/guides/contributing/pull-requests.md)
|
||||
|
||||
* [Dependencies](./doc/guides/contributing/pull-requests.md#dependencies)
|
||||
* [Setting up your local environment](./doc/guides/contributing/pull-requests.md#setting-up-your-local-environment)
|
||||
* [The Process of Making Changes](./doc/guides/contributing/pull-requests.md#the-process-of-making-changes)
|
||||
* [Reviewing Pull Requests](./doc/guides/contributing/pull-requests.md#reviewing-pull-requests)
|
||||
* [Notes](./doc/guides/contributing/pull-requests.md#notes)
|
||||
|
||||
<a id="developers-certificate-of-origin"></a>
|
||||
## Developer's Certificate of Origin 1.1
|
||||
|
||||
By making a contribution to this project, I certify that:
|
||||
|
||||
* (a) The contribution was created in whole or in part by me and I
|
||||
have the right to submit it under the open source license
|
||||
indicated in the file; or
|
||||
|
||||
* (b) The contribution is based upon previous work that, to the best
|
||||
of my knowledge, is covered under an appropriate open source
|
||||
license and I have the right under that license to submit that
|
||||
work with modifications, whether created in whole or in part
|
||||
by me, under the same open source license (unless I am
|
||||
permitted to submit under a different license), as indicated
|
||||
in the file; or
|
||||
|
||||
* (c) The contribution was provided directly to me by some other
|
||||
person who certified (a), (b) or (c) and I have not modified
|
||||
it.
|
||||
|
||||
* (d) I understand and agree that this project and the contribution
|
||||
are public and that a record of the contribution (including all
|
||||
personal information I submit with it, including my sign-off) is
|
||||
maintained indefinitely and may be redistributed consistent with
|
||||
this project or the open source license(s) involved.
|
|
@ -0,0 +1,162 @@
|
|||
# Node.js Project Governance
|
||||
|
||||
<!-- TOC -->
|
||||
|
||||
* [Collaborators](#collaborators)
|
||||
* [Collaborator Activities](#collaborator-activities)
|
||||
* [Technical Steering Committee](#technical-steering-committee)
|
||||
* [TSC Meetings](#tsc-meetings)
|
||||
* [Collaborator Nominations](#collaborator-nominations)
|
||||
* [Onboarding](#onboarding)
|
||||
* [Consensus Seeking Process](#consensus-seeking-process)
|
||||
|
||||
<!-- /TOC -->
|
||||
|
||||
## Collaborators
|
||||
|
||||
Node.js Core Collaborators maintain the [nodejs/node][] GitHub repository.
|
||||
The GitHub team for Node.js Core Collaborators is @nodejs/collaborators.
|
||||
Collaborators have:
|
||||
|
||||
* Commit access to the [nodejs/node][] repository
|
||||
* Access to the Node.js continuous integration (CI) jobs
|
||||
|
||||
Both Collaborators and non-Collaborators may propose changes to the Node.js
|
||||
source code. The mechanism to propose such a change is a GitHub pull request.
|
||||
Collaborators review and merge (_land_) pull requests.
|
||||
|
||||
Two Collaborators must approve a pull request before the pull request can land.
|
||||
(One Collaborator approval is enough if the pull request has been open for more
|
||||
than 7 days.) Approving a pull request indicates that the Collaborator accepts
|
||||
responsibility for the change. Approval must be from Collaborators who are not
|
||||
authors of the change.
|
||||
|
||||
If a Collaborator opposes a proposed change, then the change cannot land. The
|
||||
exception is if the TSC votes to approve the change despite the opposition.
|
||||
Usually, involving the TSC is unnecessary. Often, discussions or further changes
|
||||
result in Collaborators removing their opposition.
|
||||
|
||||
See:
|
||||
|
||||
* [List of Collaborators](./README.md#current-project-team-members)
|
||||
* [A guide for Collaborators](./doc/guides/collaborator-guide.md)
|
||||
|
||||
### Collaborator Activities
|
||||
|
||||
* Helping users and novice contributors
|
||||
* Contributing code and documentation changes that improve the project
|
||||
* Reviewing and commenting on issues and pull requests
|
||||
* Participation in working groups
|
||||
* Merging pull requests
|
||||
|
||||
The TSC can remove inactive Collaborators or provide them with _Emeritus_
|
||||
status. Emeriti may request that the TSC restore them to active status.
|
||||
|
||||
## Technical Steering Committee
|
||||
|
||||
A subset of the Collaborators forms the Technical Steering Committee (TSC).
|
||||
The TSC has final authority over this project, including:
|
||||
|
||||
* Technical direction
|
||||
* Project governance and process (including this policy)
|
||||
* Contribution policy
|
||||
* GitHub repository hosting
|
||||
* Conduct guidelines
|
||||
* Maintaining the list of Collaborators
|
||||
|
||||
The current list of TSC members is in
|
||||
[the project README](./README.md#current-project-team-members).
|
||||
|
||||
The [TSC Charter][] governs the operations of the TSC. All changes to the
|
||||
Charter need approval by the OpenJS Foundation Board of Directors.
|
||||
|
||||
### TSC Meetings
|
||||
|
||||
The TSC meets in a voice conference call. Each year, the TSC elects a chair to
|
||||
run the meetings. The TSC streams its meetings for public viewing on YouTube or
|
||||
a similar service.
|
||||
|
||||
The TSC agenda includes issues that are at an impasse. The intention of the
|
||||
agenda is not to review or approve all patches. Collaborators review and approve
|
||||
patches on GitHub.
|
||||
|
||||
Any community member can create a GitHub issue asking that the TSC review
|
||||
something. If consensus-seeking fails for an issue, a Collaborator may apply the
|
||||
`tsc-agenda` label. That will add it to the TSC meeting agenda.
|
||||
|
||||
Before each TSC meeting, the meeting chair will share the agenda with members of
|
||||
the TSC. TSC members can also add items to the agenda at the beginning of each
|
||||
meeting. The meeting chair and the TSC cannot veto or remove items.
|
||||
|
||||
The TSC may invite people to take part in a non-voting capacity.
|
||||
|
||||
During the meeting, the TSC chair ensures that someone takes minutes. After the
|
||||
meeting, the TSC chair ensures that someone opens a pull request with the
|
||||
minutes.
|
||||
|
||||
The TSC seeks to resolve as many issues as possible outside meetings using
|
||||
[the TSC issue tracker](https://github.com/nodejs/TSC/issues). The process in
|
||||
the issue tracker is:
|
||||
|
||||
* A TSC member opens an issue explaining the proposal/issue and @-mentions
|
||||
@nodejs/tsc.
|
||||
* The proposal passes if, after 72 hours, there are two or more TSC approvals
|
||||
and no TSC opposition.
|
||||
* If there is an extended impasse, a TSC member may make a motion for a vote.
|
||||
|
||||
## Collaborator Nominations
|
||||
|
||||
Existing Collaborators can nominate someone to become a Collaborator. Nominees
|
||||
should have significant and valuable contributions across the Node.js
|
||||
organization.
|
||||
|
||||
To nominate a new Collaborator, open an issue in the [nodejs/node][] repository.
|
||||
Provide a summary of the nominee's contributions. For example:
|
||||
|
||||
* Commits in the [nodejs/node][] repository.
|
||||
* Use the link `https://github.com/nodejs/node/commits?author=GITHUB_ID`
|
||||
* Pull requests and issues opened in the [nodejs/node][] repository.
|
||||
* Use the link `https://github.com/nodejs/node/issues?q=author:GITHUB_ID`
|
||||
* Comments on pull requests and issues in the [nodejs/node][] repository
|
||||
* Use the link `https://github.com/nodejs/node/issues?q=commenter:GITHUB_ID`
|
||||
* Reviews on pull requests in the [nodejs/node][] repository
|
||||
* Use the link `https://github.com/nodejs/node/pulls?q=reviewed-by:GITHUB_ID`
|
||||
* Help provided to end-users and novice contributors
|
||||
* Pull requests and issues opened throughout the Node.js organization
|
||||
* Use the link `https://github.com/search?q=author:GITHUB_ID+org:nodejs`
|
||||
* Comments on pull requests and issues throughout the Node.js organization
|
||||
* Use the link `https://github.com/search?q=commenter:GITHUB_ID+org:nodejs`
|
||||
* Participation in other projects, teams, and working groups of the Node.js
|
||||
organization
|
||||
* Other participation in the wider Node.js community
|
||||
|
||||
Mention @nodejs/collaborators in the issue to notify other Collaborators about
|
||||
the nomination.
|
||||
|
||||
The nomination passes if no Collaborators oppose it after one week. Otherwise,
|
||||
the nomination fails.
|
||||
|
||||
There are steps a nominator can take in advance to make a nomination as
|
||||
frictionless as possible. Use the [Collaborators discussion page][] to request
|
||||
feedback from other Collaborators in private. A nominator may also work with the
|
||||
nominee to improve their contribution profile.
|
||||
|
||||
Collaborators might overlook someone with valuable contributions. In that case,
|
||||
the contributor may open an issue or contact a Collaborator to request a
|
||||
nomination.
|
||||
|
||||
### Onboarding
|
||||
|
||||
After the nomination passes, a TSC member onboards the new Collaborator. See
|
||||
[the onboarding guide](./onboarding.md) for details of the onboarding
|
||||
process.
|
||||
|
||||
## Consensus Seeking Process
|
||||
|
||||
The TSC follows a [Consensus Seeking][] decision-making model per the
|
||||
[TSC Charter][].
|
||||
|
||||
[Collaborators discussion page]: https://github.com/orgs/nodejs/teams/collaborators/discussions
|
||||
[Consensus Seeking]: https://en.wikipedia.org/wiki/Consensus-seeking_decision-making
|
||||
[TSC Charter]: https://github.com/nodejs/TSC/blob/master/TSC-Charter.md
|
||||
[nodejs/node]: https://github.com/nodejs/node
|
|
@ -0,0 +1,613 @@
|
|||
<!--lint disable no-literal-urls-->
|
||||
<p align="center">
|
||||
<a href="https://nodejs.org/">
|
||||
<img
|
||||
alt="Node.js"
|
||||
src="https://nodejs.org/static/images/logo-light.svg"
|
||||
width="400"
|
||||
/>
|
||||
</a>
|
||||
</p>
|
||||
|
||||
Node.js is an open-source, cross-platform, JavaScript runtime environment. It
|
||||
executes JavaScript code outside of a browser. For more information on using
|
||||
Node.js, see the [Node.js Website][].
|
||||
|
||||
The Node.js project uses an [open governance model](./GOVERNANCE.md). The
|
||||
[OpenJS Foundation][] provides support for the project.
|
||||
|
||||
**This project is bound by a [Code of Conduct][].**
|
||||
|
||||
# Table of Contents
|
||||
|
||||
* [Support](#support)
|
||||
* [Release Types](#release-types)
|
||||
* [Download](#download)
|
||||
* [Current and LTS Releases](#current-and-lts-releases)
|
||||
* [Nightly Releases](#nightly-releases)
|
||||
* [API Documentation](#api-documentation)
|
||||
* [Verifying Binaries](#verifying-binaries)
|
||||
* [Building Node.js](#building-nodejs)
|
||||
* [Security](#security)
|
||||
* [Contributing to Node.js](#contributing-to-nodejs)
|
||||
* [Current Project Team Members](#current-project-team-members)
|
||||
* [TSC (Technical Steering Committee)](#tsc-technical-steering-committee)
|
||||
* [Collaborators](#collaborators)
|
||||
* [Release Keys](#release-keys)
|
||||
|
||||
## Support
|
||||
|
||||
Looking for help? Check out the
|
||||
[instructions for getting support](.github/SUPPORT.md).
|
||||
|
||||
## Release Types
|
||||
|
||||
* **Current**: Under active development. Code for the Current release is in the
|
||||
branch for its major version number (for example,
|
||||
[v10.x](https://github.com/nodejs/node/tree/v10.x)). Node.js releases a new
|
||||
major version every 6 months, allowing for breaking changes. This happens in
|
||||
April and October every year. Releases appearing each October have a support
|
||||
life of 8 months. Releases appearing each April convert to LTS (see below)
|
||||
each October.
|
||||
* **LTS**: Releases that receive Long-term Support, with a focus on stability
|
||||
and security. Every even-numbered major version will become an LTS release.
|
||||
LTS releases receive 12 months of _Active LTS_ support and a further 18 months
|
||||
of _Maintenance_. LTS release lines have alphabetically-ordered codenames,
|
||||
beginning with v4 Argon. There are no breaking changes or feature additions,
|
||||
except in some special circumstances.
|
||||
* **Nightly**: Code from the Current branch built every 24 hours when there are
  changes. Use with caution.
|
||||
|
||||
Current and LTS releases follow [Semantic Versioning](https://semver.org). A
|
||||
member of the Release Team [signs](#release-keys) each Current and LTS release.
|
||||
For more information, see the
|
||||
[Release README](https://github.com/nodejs/Release#readme).
|
||||
|
||||
### Download
|
||||
|
||||
Binaries, installers, and source tarballs are available at
|
||||
<https://nodejs.org/en/download/>.
|
||||
|
||||
#### Current and LTS Releases
|
||||
<https://nodejs.org/download/release/>
|
||||
|
||||
The [latest](https://nodejs.org/download/release/latest/) directory is an
|
||||
alias for the latest Current release. The latest-_codename_ directory is an
|
||||
alias for the latest release from an LTS line. For example, the
|
||||
[latest-carbon](https://nodejs.org/download/release/latest-carbon/) directory
|
||||
contains the latest Carbon (Node.js 8) release.
|
||||
|
||||
#### Nightly Releases
|
||||
<https://nodejs.org/download/nightly/>
|
||||
|
||||
Each directory name and filename contains a date (in UTC) and the commit
|
||||
SHA at the HEAD of the release.
|
||||
|
||||
#### API Documentation
|
||||
|
||||
Documentation for the latest Current release is at <https://nodejs.org/api/>.
|
||||
Version-specific documentation is available in each release directory in the
|
||||
_docs_ subdirectory. Version-specific documentation is also at
|
||||
<https://nodejs.org/download/docs/>.
|
||||
|
||||
### Verifying Binaries
|
||||
|
||||
Download directories contain a `SHASUMS256.txt` file with SHA checksums for the
|
||||
files.
|
||||
|
||||
To download `SHASUMS256.txt` using `curl`:
|
||||
|
||||
```console
|
||||
$ curl -O https://nodejs.org/dist/vx.y.z/SHASUMS256.txt
|
||||
```
|
||||
|
||||
To check that a downloaded file matches the checksum, run
|
||||
it through `sha256sum` with a command such as:
|
||||
|
||||
```console
|
||||
$ grep node-vx.y.z.tar.gz SHASUMS256.txt | sha256sum -c -
|
||||
```
|
||||
|
||||
For Current and LTS, the GPG detached signature of `SHASUMS256.txt` is in
|
||||
`SHASUMS256.txt.sig`. You can use it with `gpg` to verify the integrity of
|
||||
`SHASUMS256.txt`. You will first need to import
|
||||
[the GPG keys of individuals authorized to create releases](#release-keys). To
|
||||
import the keys:
|
||||
|
||||
```console
|
||||
$ gpg --keyserver pool.sks-keyservers.net --recv-keys DD8F2338BAE7501E3DD5AC78C273792F7D83545D
|
||||
```
|
||||
|
||||
See the bottom of this README for a full script to import active release keys.
|
||||
|
||||
Next, download the `SHASUMS256.txt.sig` for the release:
|
||||
|
||||
```console
|
||||
$ curl -O https://nodejs.org/dist/vx.y.z/SHASUMS256.txt.sig
|
||||
```
|
||||
|
||||
Then use `gpg --verify SHASUMS256.txt.sig SHASUMS256.txt` to verify
|
||||
the file's signature.
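For example:

```console
$ gpg --verify SHASUMS256.txt.sig SHASUMS256.txt
```

A "Good signature" line in the output indicates that the checksum file was
signed by one of the imported release keys.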
|
||||
|
||||
## Building Node.js
|
||||
|
||||
See [BUILDING.md](BUILDING.md) for instructions on how to build Node.js from
|
||||
source and a list of supported platforms.
|
||||
|
||||
## Security
|
||||
|
||||
For information on reporting security vulnerabilities in Node.js, see
|
||||
[SECURITY.md](./SECURITY.md).
|
||||
|
||||
## Contributing to Node.js
|
||||
|
||||
* [Contributing to the project][]
|
||||
* [Working Groups][]
|
||||
* [Strategic Initiatives][]
|
||||
|
||||
## Current Project Team Members
|
||||
|
||||
For information about the governance of the Node.js project, see
|
||||
[GOVERNANCE.md](./GOVERNANCE.md).
|
||||
|
||||
### TSC (Technical Steering Committee)
|
||||
|
||||
* [addaleax](https://github.com/addaleax) -
|
||||
**Anna Henningsen** <anna@addaleax.net> (she/her)
|
||||
* [apapirovski](https://github.com/apapirovski) -
|
||||
**Anatoli Papirovski** <apapirovski@mac.com> (he/him)
|
||||
* [BethGriggs](https://github.com/BethGriggs) -
|
||||
**Beth Griggs** <Bethany.Griggs@uk.ibm.com> (she/her)
|
||||
* [BridgeAR](https://github.com/BridgeAR) -
|
||||
**Ruben Bridgewater** <ruben@bridgewater.de> (he/him)
|
||||
* [ChALkeR](https://github.com/ChALkeR) -
|
||||
**Сковорода Никита Андреевич** <chalkerx@gmail.com> (he/him)
|
||||
* [cjihrig](https://github.com/cjihrig) -
|
||||
**Colin Ihrig** <cjihrig@gmail.com> (he/him)
|
||||
* [codebytere](https://github.com/codebytere) -
|
||||
**Shelley Vohr** <codebytere@gmail.com> (she/her)
|
||||
* [danbev](https://github.com/danbev) -
|
||||
**Daniel Bevenius** <daniel.bevenius@gmail.com> (he/him)
|
||||
* [fhinkel](https://github.com/fhinkel) -
|
||||
**Franziska Hinkelmann** <franziska.hinkelmann@gmail.com> (she/her)
|
||||
* [gabrielschulhof](https://github.com/gabrielschulhof) -
|
||||
**Gabriel Schulhof** <gabriel.schulhof@intel.com>
|
||||
* [gireeshpunathil](https://github.com/gireeshpunathil) -
|
||||
**Gireesh Punathil** <gpunathi@in.ibm.com> (he/him)
|
||||
* [jasnell](https://github.com/jasnell) -
|
||||
**James M Snell** <jasnell@gmail.com> (he/him)
|
||||
* [joyeecheung](https://github.com/joyeecheung) -
|
||||
**Joyee Cheung** <joyeec9h3@gmail.com> (she/her)
|
||||
* [mcollina](https://github.com/mcollina) -
|
||||
**Matteo Collina** <matteo.collina@gmail.com> (he/him)
|
||||
* [mhdawson](https://github.com/mhdawson) -
|
||||
**Michael Dawson** <michael_dawson@ca.ibm.com> (he/him)
|
||||
* [mmarchini](https://github.com/mmarchini) -
|
||||
**Matheus Marchini** <mat@mmarchini.me>
|
||||
* [MylesBorins](https://github.com/MylesBorins) -
|
||||
**Myles Borins** <myles.borins@gmail.com> (he/him)
|
||||
* [sam-github](https://github.com/sam-github) -
|
||||
**Sam Roberts** <vieuxtech@gmail.com>
|
||||
* [targos](https://github.com/targos) -
|
||||
**Michaël Zasso** <targos@protonmail.com> (he/him)
|
||||
* [tniessen](https://github.com/tniessen) -
|
||||
**Tobias Nießen** <tniessen@tnie.de>
|
||||
* [Trott](https://github.com/Trott) -
|
||||
**Rich Trott** <rtrott@gmail.com> (he/him)
|
||||
|
||||
### TSC Emeriti
|
||||
|
||||
* [bnoordhuis](https://github.com/bnoordhuis) -
|
||||
**Ben Noordhuis** <info@bnoordhuis.nl>
|
||||
* [chrisdickinson](https://github.com/chrisdickinson) -
|
||||
**Chris Dickinson** <christopher.s.dickinson@gmail.com>
|
||||
* [evanlucas](https://github.com/evanlucas) -
|
||||
**Evan Lucas** <evanlucas@me.com> (he/him)
|
||||
* [Fishrock123](https://github.com/Fishrock123) -
|
||||
**Jeremiah Senkpiel** <fishrock123@rocketmail.com> (he/they)
|
||||
* [gibfahn](https://github.com/gibfahn) -
|
||||
**Gibson Fahnestock** <gibfahn@gmail.com> (he/him)
|
||||
* [indutny](https://github.com/indutny) -
|
||||
**Fedor Indutny** <fedor.indutny@gmail.com>
|
||||
* [isaacs](https://github.com/isaacs) -
|
||||
**Isaac Z. Schlueter** <i@izs.me>
|
||||
* [joshgav](https://github.com/joshgav) -
|
||||
**Josh Gavant** <josh.gavant@outlook.com>
|
||||
* [mscdex](https://github.com/mscdex) -
|
||||
**Brian White** <mscdex@mscdex.net>
|
||||
* [nebrius](https://github.com/nebrius) -
|
||||
**Bryan Hughes** <bryan@nebri.us>
|
||||
* [ofrobots](https://github.com/ofrobots) -
|
||||
**Ali Ijaz Sheikh** <ofrobots@google.com> (he/him)
|
||||
* [orangemocha](https://github.com/orangemocha) -
|
||||
**Alexis Campailla** <orangemocha@nodejs.org>
|
||||
* [piscisaureus](https://github.com/piscisaureus) -
|
||||
**Bert Belder** <bertbelder@gmail.com>
|
||||
* [rvagg](https://github.com/rvagg) -
|
||||
**Rod Vagg** <r@va.gg>
|
||||
* [shigeki](https://github.com/shigeki) -
|
||||
**Shigeki Ohtsu** <ohtsu@ohtsu.org> (he/him)
|
||||
* [thefourtheye](https://github.com/thefourtheye) -
|
||||
**Sakthipriyan Vairamani** <thechargingvolcano@gmail.com> (he/him)
|
||||
* [TimothyGu](https://github.com/TimothyGu) -
|
||||
**Tiancheng "Timothy" Gu** <timothygu99@gmail.com> (he/him)
|
||||
* [trevnorris](https://github.com/trevnorris) -
|
||||
**Trevor Norris** <trev.norris@gmail.com>
|
||||
|
||||
### Collaborators
|
||||
|
||||
* [addaleax](https://github.com/addaleax) -
|
||||
**Anna Henningsen** <anna@addaleax.net> (she/her)
|
||||
* [ak239](https://github.com/ak239) -
|
||||
**Aleksei Koziatinskii** <ak239spb@gmail.com>
|
||||
* [AndreasMadsen](https://github.com/AndreasMadsen) -
|
||||
**Andreas Madsen** <amwebdk@gmail.com> (he/him)
|
||||
* [antsmartian](https://github.com/antsmartian) -
|
||||
**Anto Aravinth** <anto.aravinth.cse@gmail.com> (he/him)
|
||||
* [apapirovski](https://github.com/apapirovski) -
|
||||
**Anatoli Papirovski** <apapirovski@mac.com> (he/him)
|
||||
* [bcoe](https://github.com/bcoe) -
|
||||
**Ben Coe** <bencoe@gmail.com> (he/him)
|
||||
* [bengl](https://github.com/bengl) -
|
||||
**Bryan English** <bryan@bryanenglish.com> (he/him)
|
||||
* [benjamingr](https://github.com/benjamingr) -
|
||||
**Benjamin Gruenbaum** <benjamingr@gmail.com>
|
||||
* [BethGriggs](https://github.com/BethGriggs) -
|
||||
**Beth Griggs** <Bethany.Griggs@uk.ibm.com> (she/her)
|
||||
* [bmeck](https://github.com/bmeck) -
|
||||
**Bradley Farias** <bradley.meck@gmail.com>
|
||||
* [bmeurer](https://github.com/bmeurer) -
|
||||
**Benedikt Meurer** <benedikt.meurer@gmail.com>
|
||||
* [bnoordhuis](https://github.com/bnoordhuis) -
|
||||
**Ben Noordhuis** <info@bnoordhuis.nl>
|
||||
* [boneskull](https://github.com/boneskull) -
|
||||
**Christopher Hiller** <boneskull@boneskull.com> (he/him)
|
||||
* [BridgeAR](https://github.com/BridgeAR) -
|
||||
**Ruben Bridgewater** <ruben@bridgewater.de> (he/him)
|
||||
* [bzoz](https://github.com/bzoz) -
|
||||
**Bartosz Sosnowski** <bartosz@janeasystems.com>
|
||||
* [cclauss](https://github.com/cclauss) -
|
||||
**Christian Clauss** <cclauss@me.com> (he/him)
|
||||
* [ChALkeR](https://github.com/ChALkeR) -
|
||||
**Сковорода Никита Андреевич** <chalkerx@gmail.com> (he/him)
|
||||
* [cjihrig](https://github.com/cjihrig) -
|
||||
**Colin Ihrig** <cjihrig@gmail.com> (he/him)
|
||||
* [codebytere](https://github.com/codebytere) -
|
||||
**Shelley Vohr** <codebytere@gmail.com> (she/her)
|
||||
* [danbev](https://github.com/danbev) -
|
||||
**Daniel Bevenius** <daniel.bevenius@gmail.com> (he/him)
|
||||
* [davisjam](https://github.com/davisjam) -
|
||||
**Jamie Davis** <davisjam@vt.edu> (he/him)
|
||||
* [devnexen](https://github.com/devnexen) -
|
||||
**David Carlier** <devnexen@gmail.com>
|
||||
* [devsnek](https://github.com/devsnek) -
|
||||
**Gus Caplan** <me@gus.host> (he/him)
|
||||
* [digitalinfinity](https://github.com/digitalinfinity) -
|
||||
**Hitesh Kanwathirtha** <digitalinfinity@gmail.com> (he/him)
|
||||
* [edsadr](https://github.com/edsadr) -
|
||||
**Adrian Estrada** <edsadr@gmail.com> (he/him)
|
||||
* [eugeneo](https://github.com/eugeneo) -
|
||||
**Eugene Ostroukhov** <eostroukhov@google.com>
|
||||
* [evanlucas](https://github.com/evanlucas) -
|
||||
**Evan Lucas** <evanlucas@me.com> (he/him)
|
||||
* [fhinkel](https://github.com/fhinkel) -
|
||||
**Franziska Hinkelmann** <franziska.hinkelmann@gmail.com> (she/her)
|
||||
* [Fishrock123](https://github.com/Fishrock123) -
|
||||
**Jeremiah Senkpiel** <fishrock123@rocketmail.com> (he/they)
|
||||
* [Flarna](https://github.com/Flarna) -
|
||||
**Gerhard Stöbich** <deb2001-github@yahoo.de> (he/they)
|
||||
* [gabrielschulhof](https://github.com/gabrielschulhof) -
|
||||
**Gabriel Schulhof** <gabriel.schulhof@intel.com>
|
||||
* [gdams](https://github.com/gdams) -
|
||||
**George Adams** <george.adams@uk.ibm.com> (he/him)
|
||||
* [geek](https://github.com/geek) -
|
||||
**Wyatt Preul** <wpreul@gmail.com>
|
||||
* [gengjiawen](https://github.com/gengjiawen) -
|
||||
**Jiawen Geng** <technicalcute@gmail.com>
|
||||
* [GeoffreyBooth](https://github.com/geoffreybooth) -
|
||||
**Geoffrey Booth** <webmaster@geoffreybooth.com> (he/him)
|
||||
* [gibfahn](https://github.com/gibfahn) -
|
||||
**Gibson Fahnestock** <gibfahn@gmail.com> (he/him)
|
||||
* [gireeshpunathil](https://github.com/gireeshpunathil) -
|
||||
**Gireesh Punathil** <gpunathi@in.ibm.com> (he/him)
|
||||
* [guybedford](https://github.com/guybedford) -
|
||||
**Guy Bedford** <guybedford@gmail.com> (he/him)
|
||||
* [hashseed](https://github.com/hashseed) -
|
||||
**Yang Guo** <yangguo@chromium.org> (he/him)
|
||||
* [himself65](https://github.com/himself65) -
|
||||
**Zeyu Yang** <himself65@outlook.com> (he/him)
|
||||
* [hiroppy](https://github.com/hiroppy) -
|
||||
**Yuta Hiroto** <hello@hiroppy.me> (he/him)
|
||||
* [indutny](https://github.com/indutny) -
|
||||
**Fedor Indutny** <fedor.indutny@gmail.com>
|
||||
* [JacksonTian](https://github.com/JacksonTian) -
|
||||
**Jackson Tian** <shyvo1987@gmail.com>
|
||||
* [jasnell](https://github.com/jasnell) -
|
||||
**James M Snell** <jasnell@gmail.com> (he/him)
|
||||
* [jdalton](https://github.com/jdalton) -
|
||||
**John-David Dalton** <john.david.dalton@gmail.com>
|
||||
* [jkrems](https://github.com/jkrems) -
|
||||
**Jan Krems** <jan.krems@gmail.com> (he/him)
|
||||
* [joaocgreis](https://github.com/joaocgreis) -
|
||||
**João Reis** <reis@janeasystems.com>
|
||||
* [joyeecheung](https://github.com/joyeecheung) -
|
||||
**Joyee Cheung** <joyeec9h3@gmail.com> (she/her)
|
||||
* [juanarbol](https://github.com/juanarbol) -
|
||||
**Juan José Arboleda** <soyjuanarbol@gmail.com> (he/him)
|
||||
* [JungMinu](https://github.com/JungMinu) -
|
||||
**Minwoo Jung** <nodecorelab@gmail.com> (he/him)
|
||||
* [kfarnung](https://github.com/kfarnung) -
|
||||
**Kyle Farnung** <kfarnung@microsoft.com> (he/him)
|
||||
* [lance](https://github.com/lance) -
|
||||
**Lance Ball** <lball@redhat.com> (he/him)
|
||||
* [legendecas](https://github.com/legendecas) -
|
||||
**Chengzhong Wu** <legendecas@gmail.com> (he/him)
|
||||
* [Leko](https://github.com/Leko) -
|
||||
**Shingo Inoue** <leko.noor@gmail.com> (he/him)
|
||||
* [lpinca](https://github.com/lpinca) -
|
||||
**Luigi Pinca** <luigipinca@gmail.com> (he/him)
|
||||
* [lundibundi](https://github.com/lundibundi) -
|
||||
**Denys Otrishko** <shishugi@gmail.com> (he/him)
|
||||
* [mafintosh](https://github.com/mafintosh) -
|
||||
**Mathias Buus** <mathiasbuus@gmail.com> (he/him)
|
||||
* [mcollina](https://github.com/mcollina) -
|
||||
**Matteo Collina** <matteo.collina@gmail.com> (he/him)
|
||||
* [mhdawson](https://github.com/mhdawson) -
|
||||
**Michael Dawson** <michael_dawson@ca.ibm.com> (he/him)
|
||||
* [mildsunrise](https://github.com/mildsunrise) -
|
||||
**Alba Mendez** <me@alba.sh> (she/her)
|
||||
* [misterdjules](https://github.com/misterdjules) -
|
||||
**Julien Gilli** <jgilli@nodejs.org>
|
||||
* [mmarchini](https://github.com/mmarchini) -
|
||||
**Matheus Marchini** <mat@mmarchini.me>
|
||||
* [mscdex](https://github.com/mscdex) -
|
||||
**Brian White** <mscdex@mscdex.net>
|
||||
* [MylesBorins](https://github.com/MylesBorins) -
|
||||
**Myles Borins** <myles.borins@gmail.com> (he/him)
|
||||
* [ofrobots](https://github.com/ofrobots) -
|
||||
**Ali Ijaz Sheikh** <ofrobots@google.com> (he/him)
|
||||
* [oyyd](https://github.com/oyyd) -
|
||||
**Ouyang Yadong** <oyydoibh@gmail.com> (he/him)
|
||||
* [psmarshall](https://github.com/psmarshall) -
|
||||
**Peter Marshall** <petermarshall@chromium.org> (he/him)
|
||||
* [puzpuzpuz](https://github.com/puzpuzpuz) -
|
||||
**Andrey Pechkurov** <apechkurov@gmail.com> (he/him)
|
||||
* [Qard](https://github.com/Qard) -
|
||||
**Stephen Belanger** <admin@stephenbelanger.com> (he/him)
|
||||
* [refack](https://github.com/refack) -
|
||||
**Refael Ackermann (רפאל פלחי)** <refack@gmail.com> (he/him/הוא/אתה)
|
||||
* [richardlau](https://github.com/richardlau) -
|
||||
**Richard Lau** <riclau@uk.ibm.com>
|
||||
* [ronag](https://github.com/ronag) -
|
||||
**Robert Nagy** <ronagy@icloud.com>
|
||||
* [ronkorving](https://github.com/ronkorving) -
|
||||
**Ron Korving** <ron@ronkorving.nl>
|
||||
* [rubys](https://github.com/rubys) -
|
||||
**Sam Ruby** <rubys@intertwingly.net>
|
||||
* [rvagg](https://github.com/rvagg) -
|
||||
**Rod Vagg** <rod@vagg.org>
|
||||
* [ryzokuken](https://github.com/ryzokuken) -
|
||||
**Ujjwal Sharma** <ryzokuken@disroot.org> (he/him)
|
||||
* [saghul](https://github.com/saghul) -
|
||||
**Saúl Ibarra Corretgé** <saghul@gmail.com>
|
||||
* [sam-github](https://github.com/sam-github) -
|
||||
**Sam Roberts** <vieuxtech@gmail.com>
|
||||
* [santigimeno](https://github.com/santigimeno) -
|
||||
**Santiago Gimeno** <santiago.gimeno@gmail.com>
|
||||
* [sebdeckers](https://github.com/sebdeckers) -
|
||||
**Sebastiaan Deckers** <sebdeckers83@gmail.com>
|
||||
* [seishun](https://github.com/seishun) -
|
||||
**Nikolai Vavilov** <vvnicholas@gmail.com>
|
||||
* [shigeki](https://github.com/shigeki) -
|
||||
**Shigeki Ohtsu** <ohtsu@ohtsu.org> (he/him)
|
||||
* [shisama](https://github.com/shisama) -
|
||||
**Masashi Hirano** <shisama07@gmail.com> (he/him)
|
||||
* [silverwind](https://github.com/silverwind) -
|
||||
**Roman Reiss** <me@silverwind.io>
|
||||
* [srl295](https://github.com/srl295) -
|
||||
**Steven R Loomis** <srloomis@us.ibm.com>
|
||||
* [starkwang](https://github.com/starkwang) -
|
||||
**Weijia Wang** <starkwang@126.com>
|
||||
* [targos](https://github.com/targos) -
|
||||
**Michaël Zasso** <targos@protonmail.com> (he/him)
|
||||
* [thefourtheye](https://github.com/thefourtheye) -
|
||||
**Sakthipriyan Vairamani** <thechargingvolcano@gmail.com> (he/him)
|
||||
* [TimothyGu](https://github.com/TimothyGu) -
|
||||
**Tiancheng "Timothy" Gu** <timothygu99@gmail.com> (he/him)
|
||||
* [tniessen](https://github.com/tniessen) -
|
||||
**Tobias Nießen** <tniessen@tnie.de>
|
||||
* [trivikr](https://github.com/trivikr) -
|
||||
**Trivikram Kamat** <trivikr.dev@gmail.com>
|
||||
* [Trott](https://github.com/Trott) -
|
||||
**Rich Trott** <rtrott@gmail.com> (he/him)
|
||||
* [vdeturckheim](https://github.com/vdeturckheim) -
|
||||
**Vladimir de Turckheim** <vlad2t@hotmail.com> (he/him)
|
||||
* [watilde](https://github.com/watilde) -
|
||||
**Daijiro Wachi** <daijiro.wachi@gmail.com> (he/him)
|
||||
* [watson](https://github.com/watson) -
|
||||
**Thomas Watson** <w@tson.dk>
|
||||
* [XadillaX](https://github.com/XadillaX) -
|
||||
**Khaidi Chu** <i@2333.moe> (he/him)
|
||||
* [yhwang](https://github.com/yhwang) -
|
||||
**Yihong Wang** <yh.wang@ibm.com>
|
||||
* [yorkie](https://github.com/yorkie) -
|
||||
**Yorkie Liu** <yorkiefixer@gmail.com>
|
||||
* [yosuke-furukawa](https://github.com/yosuke-furukawa) -
|
||||
**Yosuke Furukawa** <yosuke.furukawa@gmail.com>
|
||||
* [ZYSzys](https://github.com/ZYSzys) -
|
||||
**Yongsheng Zhang** <zyszys98@gmail.com> (he/him)
|
||||
|
||||
### Collaborator Emeriti
|
||||
|
||||
* [andrasq](https://github.com/andrasq) -
|
||||
**Andras** <andras@kinvey.com>
|
||||
* [AnnaMag](https://github.com/AnnaMag) -
|
||||
**Anna M. Kedzierska** <anna.m.kedzierska@gmail.com>
|
||||
* [aqrln](https://github.com/aqrln) -
|
||||
**Alexey Orlenko** <eaglexrlnk@gmail.com> (he/him)
|
||||
* [brendanashworth](https://github.com/brendanashworth) -
|
||||
**Brendan Ashworth** <brendan.ashworth@me.com>
|
||||
* [calvinmetcalf](https://github.com/calvinmetcalf) -
|
||||
**Calvin Metcalf** <calvin.metcalf@gmail.com>
|
||||
* [chrisdickinson](https://github.com/chrisdickinson) -
|
||||
**Chris Dickinson** <christopher.s.dickinson@gmail.com>
|
||||
* [claudiorodriguez](https://github.com/claudiorodriguez) -
|
||||
**Claudio Rodriguez** <cjrodr@yahoo.com>
|
||||
* [DavidCai1993](https://github.com/DavidCai1993) -
|
||||
**David Cai** <davidcai1993@yahoo.com> (he/him)
|
||||
* [eljefedelrodeodeljefe](https://github.com/eljefedelrodeodeljefe) -
|
||||
**Robert Jefe Lindstaedt** <robert.lindstaedt@gmail.com>
|
||||
* [estliberitas](https://github.com/estliberitas) -
|
||||
**Alexander Makarenko** <estliberitas@gmail.com>
|
||||
* [firedfox](https://github.com/firedfox) -
|
||||
**Daniel Wang** <wangyang0123@gmail.com>
|
||||
* [glentiki](https://github.com/glentiki) -
|
||||
**Glen Keane** <glenkeane.94@gmail.com> (he/him)
|
||||
* [iarna](https://github.com/iarna) -
|
||||
**Rebecca Turner** <me@re-becca.org>
|
||||
* [imran-iq](https://github.com/imran-iq) -
|
||||
**Imran Iqbal** <imran@imraniqbal.org>
|
||||
* [imyller](https://github.com/imyller) -
|
||||
**Ilkka Myller** <ilkka.myller@nodefield.com>
|
||||
* [isaacs](https://github.com/isaacs) -
|
||||
**Isaac Z. Schlueter** <i@izs.me>
|
||||
* [italoacasas](https://github.com/italoacasas) -
|
||||
**Italo A. Casas** <me@italoacasas.com> (he/him)
|
||||
* [jasongin](https://github.com/jasongin) -
|
||||
**Jason Ginchereau** <jasongin@microsoft.com>
|
||||
* [jbergstroem](https://github.com/jbergstroem) -
|
||||
**Johan Bergström** <bugs@bergstroem.nu>
|
||||
* [jhamhader](https://github.com/jhamhader) -
|
||||
**Yuval Brik** <yuval@brik.org.il>
|
||||
* [joshgav](https://github.com/joshgav) -
|
||||
**Josh Gavant** <josh.gavant@outlook.com>
|
||||
* [julianduque](https://github.com/julianduque) -
|
||||
**Julian Duque** <julianduquej@gmail.com> (he/him)
|
||||
* [kunalspathak](https://github.com/kunalspathak) -
|
||||
**Kunal Pathak** <kunal.pathak@microsoft.com>
|
||||
* [lucamaraschi](https://github.com/lucamaraschi) -
|
||||
**Luca Maraschi** <luca.maraschi@gmail.com> (he/him)
|
||||
* [lxe](https://github.com/lxe) -
|
||||
**Aleksey Smolenchuk** <lxe@lxe.co>
|
||||
* [maclover7](https://github.com/maclover7) -
|
||||
**Jon Moss** <me@jonathanmoss.me> (he/him)
|
||||
* [matthewloring](https://github.com/matthewloring) -
|
||||
**Matthew Loring** <mattloring@google.com>
|
||||
* [micnic](https://github.com/micnic) -
|
||||
**Nicu Micleușanu** <micnic90@gmail.com> (he/him)
|
||||
* [mikeal](https://github.com/mikeal) -
|
||||
**Mikeal Rogers** <mikeal.rogers@gmail.com>
|
||||
* [monsanto](https://github.com/monsanto) -
|
||||
**Christopher Monsanto** <chris@monsan.to>
|
||||
* [MoonBall](https://github.com/MoonBall) -
|
||||
**Chen Gang** <gangc.cxy@foxmail.com>
|
||||
* [not-an-aardvark](https://github.com/not-an-aardvark) -
|
||||
**Teddy Katz** <teddy.katz@gmail.com> (he/him)
|
||||
* [Olegas](https://github.com/Olegas) -
|
||||
**Oleg Elifantiev** <oleg@elifantiev.ru>
|
||||
* [orangemocha](https://github.com/orangemocha) -
|
||||
**Alexis Campailla** <orangemocha@nodejs.org>
|
||||
* [othiym23](https://github.com/othiym23) -
|
||||
**Forrest L Norvell** <ogd@aoaioxxysz.net> (he/him)
|
||||
* [petkaantonov](https://github.com/petkaantonov) -
|
||||
**Petka Antonov** <petka_antonov@hotmail.com>
|
||||
* [phillipj](https://github.com/phillipj) -
|
||||
**Phillip Johnsen** <johphi@gmail.com>
|
||||
* [piscisaureus](https://github.com/piscisaureus) -
|
||||
**Bert Belder** <bertbelder@gmail.com>
|
||||
* [pmq20](https://github.com/pmq20) -
|
||||
**Minqi Pan** <pmq2001@gmail.com>
|
||||
* [princejwesley](https://github.com/princejwesley) -
|
||||
**Prince John Wesley** <princejohnwesley@gmail.com>
|
||||
* [rlidwka](https://github.com/rlidwka) -
|
||||
**Alex Kocharin** <alex@kocharin.ru>
|
||||
* [rmg](https://github.com/rmg) -
|
||||
**Ryan Graham** <r.m.graham@gmail.com>
|
||||
* [robertkowalski](https://github.com/robertkowalski) -
|
||||
**Robert Kowalski** <rok@kowalski.gd>
|
||||
* [romankl](https://github.com/romankl) -
|
||||
**Roman Klauke** <romaaan.git@gmail.com>
|
||||
* [RReverser](https://github.com/RReverser) -
|
||||
**Ingvar Stepanyan** <me@rreverser.com>
|
||||
* [stefanmb](https://github.com/stefanmb) -
|
||||
**Stefan Budeanu** <stefan@budeanu.com>
|
||||
* [tellnes](https://github.com/tellnes) -
|
||||
**Christian Tellnes** <christian@tellnes.no>
|
||||
* [thlorenz](https://github.com/thlorenz) -
|
||||
**Thorsten Lorenz** <thlorenz@gmx.de>
|
||||
* [trevnorris](https://github.com/trevnorris) -
|
||||
**Trevor Norris** <trev.norris@gmail.com>
|
||||
* [tunniclm](https://github.com/tunniclm) -
|
||||
**Mike Tunnicliffe** <m.j.tunnicliffe@gmail.com>
|
||||
* [vkurchatkin](https://github.com/vkurchatkin) -
|
||||
**Vladimir Kurchatkin** <vladimir.kurchatkin@gmail.com>
|
||||
* [vsemozhetbyt](https://github.com/vsemozhetbyt) -
|
||||
**Vse Mozhet Byt** <vsemozhetbyt@gmail.com> (he/him)
|
||||
* [whitlockjc](https://github.com/whitlockjc) -
|
||||
**Jeremy Whitlock** <jwhitlock@apache.org>
|
||||
|
||||
Collaborators follow the [Collaborator Guide](./doc/guides/collaborator-guide.md) in
|
||||
maintaining the Node.js project.
|
||||
|
||||
### Release Keys
|
||||
|
||||
Primary GPG keys for Node.js Releasers (some Releasers sign with subkeys):
|
||||
|
||||
* **Beth Griggs** <bethany.griggs@uk.ibm.com>
|
||||
`4ED778F539E3634C779C87C6D7062848A1AB005C`
|
||||
* **Colin Ihrig** <cjihrig@gmail.com>
|
||||
`94AE36675C464D64BAFA68DD7434390BDBE9B9C5`
|
||||
* **James M Snell** <jasnell@keybase.io>
|
||||
`71DCFD284A79C3B38668286BC97EC7A07EDE3FC1`
|
||||
* **Michaël Zasso** <targos@protonmail.com>
|
||||
`8FCCA13FEF1D0C2E91008E09770F7A9A5AE15600`
|
||||
* **Myles Borins** <myles.borins@gmail.com>
|
||||
`C4F0DFFF4E8C1A8236409D08E73BC641CC11F4C8`
|
||||
* **Rod Vagg** <rod@vagg.org>
|
||||
`DD8F2338BAE7501E3DD5AC78C273792F7D83545D`
|
||||
* **Ruben Bridgewater** <ruben@bridgewater.de>
|
||||
`A48C2BEE680E841632CD4E44F07496B3EB3C1762`
|
||||
* **Shelley Vohr** <shelley.vohr@gmail.com>
|
||||
`B9E2F5981AA6E0CD28160D9FF13993A75599653C`
|
||||
|
||||
To import the full set of trusted release keys:
|
||||
|
||||
```shell
|
||||
gpg --keyserver pool.sks-keyservers.net --recv-keys 4ED778F539E3634C779C87C6D7062848A1AB005C
|
||||
gpg --keyserver pool.sks-keyservers.net --recv-keys 94AE36675C464D64BAFA68DD7434390BDBE9B9C5
|
||||
gpg --keyserver pool.sks-keyservers.net --recv-keys 71DCFD284A79C3B38668286BC97EC7A07EDE3FC1
|
||||
gpg --keyserver pool.sks-keyservers.net --recv-keys 8FCCA13FEF1D0C2E91008E09770F7A9A5AE15600
|
||||
gpg --keyserver pool.sks-keyservers.net --recv-keys C4F0DFFF4E8C1A8236409D08E73BC641CC11F4C8
|
||||
gpg --keyserver pool.sks-keyservers.net --recv-keys DD8F2338BAE7501E3DD5AC78C273792F7D83545D
|
||||
gpg --keyserver pool.sks-keyservers.net --recv-keys A48C2BEE680E841632CD4E44F07496B3EB3C1762
|
||||
gpg --keyserver pool.sks-keyservers.net --recv-keys B9E2F5981AA6E0CD28160D9FF13993A75599653C
|
||||
```
|
||||
|
||||
See the section above on [Verifying Binaries](#verifying-binaries) for how to
|
||||
use these keys to verify a downloaded file.
|
||||
|
||||
Other keys used to sign some previous releases:
|
||||
|
||||
* **Chris Dickinson** <christopher.s.dickinson@gmail.com>
|
||||
`9554F04D7259F04124DE6B476D5A82AC7E37093B`
|
||||
* **Evan Lucas** <evanlucas@me.com>
|
||||
`B9AE9905FFD7803F25714661B63B535A4C206CA9`
|
||||
* **Gibson Fahnestock** <gibfahn@gmail.com>
|
||||
`77984A986EBC2AA786BC0F66B01FBB92821C587A`
|
||||
* **Isaac Z. Schlueter** <i@izs.me>
|
||||
`93C7E9E91B49E432C2F75674B0A78B0A6C481CF6`
|
||||
* **Italo A. Casas** <me@italoacasas.com>
|
||||
`56730D5401028683275BD23C23EFEFE93C4CFFFE`
|
||||
* **Jeremiah Senkpiel** <fishrock@keybase.io>
|
||||
`FD3A5288F042B6850C66B31F09FE44734EB7990E`
|
||||
* **Julien Gilli** <jgilli@fastmail.fm>
|
||||
`114F43EE0176B71C7BC219DD50A3051F888C628D`
|
||||
* **Timothy J Fontaine** <tjfontaine@gmail.com>
|
||||
`7937DFD2AB06298B2293C3187D33FF9D0246406D`
|
||||
|
||||
[Code of Conduct]: https://github.com/nodejs/admin/blob/master/CODE_OF_CONDUCT.md
|
||||
[Contributing to the project]: CONTRIBUTING.md
|
||||
[Node.js Website]: https://nodejs.org/
|
||||
[OpenJS Foundation]: http://openjs.foundation/
|
||||
[Working Groups]: https://github.com/nodejs/TSC/blob/master/WORKING_GROUPS.md
|
||||
[Strategic Initiatives]: https://github.com/nodejs/TSC/blob/master/Strategic-Initiatives.md
|
|
@ -0,0 +1,74 @@
# Security

## Reporting a Bug in Node.js

Report security bugs in Node.js via [HackerOne](https://hackerone.com/nodejs).

Your report will be acknowledged within 24 hours, and you’ll receive a more
detailed response to your report within 48 hours indicating the next steps in
handling your submission.

After the initial reply to your report, the security team will endeavor to keep
you informed of the progress being made towards a fix and full announcement,
and may ask for additional information or guidance surrounding the reported
issue.

### Node.js Bug Bounty Program

The Node.js project engages in an official bug bounty program for security
researchers and responsible public disclosures. The program is managed through
the HackerOne platform. See <https://hackerone.com/nodejs> for further details.

## Reporting a Bug in a third party module

Security bugs in third party modules should be reported to their respective
maintainers and should also be coordinated through the Node.js Ecosystem
Security Team via [HackerOne](https://hackerone.com/nodejs-ecosystem).

Details regarding this process can be found in the
[Security Working Group repository](https://github.com/nodejs/security-wg/blob/master/processes/third_party_vuln_process.md).

Thank you for improving the security of Node.js and its ecosystem. Your efforts
and responsible disclosure are greatly appreciated and will be acknowledged.

## Disclosure Policy

Here is the security disclosure policy for Node.js:

* The security report is received and is assigned a primary handler. This
  person will coordinate the fix and release process. The problem is confirmed
  and a list of all affected versions is determined. Code is audited to find
  any potential similar problems. Fixes are prepared for all releases which are
  still under maintenance. These fixes are not committed to the public
  repository but rather held locally pending the announcement.

* A suggested embargo date is chosen and a CVE (Common Vulnerabilities and
  Exposures) identifier is requested for the vulnerability.

* On the embargo date, the Node.js security mailing list is sent a copy of the
  announcement. The changes are pushed to the public repository and new builds
  are deployed to nodejs.org. Within 6 hours of the mailing list being
  notified, a copy of the advisory will be published on the Node.js blog.

* Typically the embargo date will be set 72 hours from the time the CVE is
  issued. However, this may vary depending on the severity of the bug or
  difficulty in applying a fix.

* This process can take some time, especially when coordination is required
  with maintainers of other projects. Every effort will be made to handle the
  bug in as timely a manner as possible; however, it’s important that we follow
  the release process above to ensure that the disclosure is handled in a
  consistent manner.

## Receiving Security Updates

Security notifications will be distributed via the following methods.

* <https://groups.google.com/group/nodejs-sec>
* <https://nodejs.org/en/blog/>

## Comments on this Policy

If you have suggestions on how this process could be improved, please submit a
[pull request](https://github.com/nodejs/nodejs.org) or
[file an issue](https://github.com/nodejs/security-wg/issues/new) to discuss.

@ -0,0 +1,85 @@
#!/bin/bash

# In order to cross-compile node for Android using the NDK, run:
#   source android-configure <path_to_ndk> <arch> <sdk_version>
#
# Running android-configure with `source` keeps the exported environment
# variables in the current shell session, which is useful when installing
# native node modules with npm afterwards. Also, don't forget to set the arch
# in the npm config using 'npm config set arch=<arch>'.

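# Example invocation (NDK path and versions are illustrative only):
#   source ./android-configure ~/android-ndk-r20b arm64 23
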
if [ $# -ne 3 ]; then
|
||||
echo "$0 should have 3 parameters: ndk_path, target_arch and sdk_version"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
NDK_PATH=$1
|
||||
ARCH="$2"
|
||||
ANDROID_SDK_VERSION=$3
|
||||
|
||||
if [ $ANDROID_SDK_VERSION -lt 23 ]; then
  echo "Android SDK version $ANDROID_SDK_VERSION is too old; use 23 (Android 6.0) or newer"
fi
|
||||
|
||||
CC_VER="4.9"
|
||||
|
||||
case $ARCH in
|
||||
arm)
|
||||
DEST_CPU="arm"
|
||||
TOOLCHAIN_NAME="armv7-linux-androideabi"
|
||||
;;
|
||||
x86)
|
||||
DEST_CPU="ia32"
|
||||
TOOLCHAIN_NAME="i686-linux-android"
|
||||
;;
|
||||
x86_64)
|
||||
DEST_CPU="x64"
|
||||
TOOLCHAIN_NAME="x86_64-linux-android"
|
||||
ARCH="x64"
|
||||
;;
|
||||
arm64|aarch64)
|
||||
DEST_CPU="arm64"
|
||||
TOOLCHAIN_NAME="aarch64-linux-android"
|
||||
ARCH="arm64"
|
||||
;;
|
||||
*)
|
||||
echo "Unsupported architecture provided: $ARCH"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
HOST_OS="linux"
|
||||
HOST_ARCH="x86_64"
|
||||
export CC_host=$(which gcc)
|
||||
export CXX_host=$(which g++)
|
||||
|
||||
host_gcc_version=$($CC_host --version | grep gcc | awk '{print $NF}')
|
||||
major=$(echo $host_gcc_version | awk -F . '{print $1}')
|
||||
minor=$(echo $host_gcc_version | awk -F . '{print $2}')
|
||||
if [ -z $major ] || [ -z $minor ] || [ $major -lt 6 ] || [ $major -eq 6 -a $minor -lt 3 ]; then
|
||||
echo "host gcc $host_gcc_version is too old, need gcc 6.3.0"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
SUFFIX="$TOOLCHAIN_NAME$ANDROID_SDK_VERSION"
|
||||
TOOLCHAIN=$NDK_PATH/toolchains/llvm/prebuilt/$HOST_OS-$HOST_ARCH
|
||||
|
||||
export PATH=$TOOLCHAIN/bin:$PATH
|
||||
export CC=$TOOLCHAIN/bin/$SUFFIX-clang
|
||||
export CXX=$TOOLCHAIN/bin/$SUFFIX-clang++
|
||||
|
||||
|
||||
GYP_DEFINES="target_arch=$ARCH"
|
||||
GYP_DEFINES+=" v8_target_arch=$ARCH"
|
||||
GYP_DEFINES+=" android_target_arch=$ARCH"
|
||||
GYP_DEFINES+=" host_os=$HOST_OS OS=android"
|
||||
export GYP_DEFINES
|
||||
|
||||
if [ -f "configure" ]; then
|
||||
./configure \
|
||||
--dest-cpu=$DEST_CPU \
|
||||
--dest-os=android \
|
||||
--without-snapshot \
|
||||
--openssl-no-asm \
|
||||
--cross-compiling
|
||||
fi
|
|
@ -0,0 +1,96 @@
# Node.js Core Benchmarks

This folder contains code and data used to measure performance
of different Node.js implementations and different ways of
writing JavaScript run by the built-in JavaScript engine.

For a detailed guide on how to write and run benchmarks in this
directory, see [the guide on benchmarks](../doc/guides/writing-and-running-benchmarks.md).

## Table of Contents

* [Benchmark directories](#benchmark-directories)
* [Common API](#common-api)

## Benchmark Directories

| Directory | Purpose |
| --------------- | ------------------------------------------------------------------------------- |
| assert | Benchmarks for the `assert` subsystem. |
| buffers | Benchmarks for the `buffer` subsystem. |
| child\_process | Benchmarks for the `child_process` subsystem. |
| crypto | Benchmarks for the `crypto` subsystem. |
| dgram | Benchmarks for the `dgram` subsystem. |
| domain | Benchmarks for the `domain` subsystem. |
| es | Benchmarks for various new ECMAScript features and their pre-ES2015 counterparts. |
| events | Benchmarks for the `events` subsystem. |
| fixtures | Benchmark fixtures used in various benchmarks throughout the benchmark suite. |
| fs | Benchmarks for the `fs` subsystem. |
| http | Benchmarks for the `http` subsystem. |
| http2 | Benchmarks for the `http2` subsystem. |
| misc | Miscellaneous benchmarks and benchmarks for shared internal modules. |
| module | Benchmarks for the `module` subsystem. |
| net | Benchmarks for the `net` subsystem. |
| path | Benchmarks for the `path` subsystem. |
| process | Benchmarks for the `process` subsystem. |
| querystring | Benchmarks for the `querystring` subsystem. |
| streams | Benchmarks for the `streams` subsystem. |
| string\_decoder | Benchmarks for the `string_decoder` subsystem. |
| timers | Benchmarks for the `timers` subsystem, including `setTimeout`, `setInterval`, etc. |
| tls | Benchmarks for the `tls` subsystem. |
| url | Benchmarks for the `url` subsystem, including the legacy `url` implementation and the WHATWG URL implementation. |
| util | Benchmarks for the `util` subsystem. |
| vm | Benchmarks for the `vm` subsystem. |

### Other Top-level files

The top-level files include common dependencies of the benchmarks
and the tools for launching benchmarks and visualizing their output.
The actual benchmark scripts should be placed in their corresponding
directories.

* `_benchmark_progress.js`: implements the progress bar displayed
  when running `compare.js`
* `_cli.js`: parses the command line arguments passed to `compare.js`,
  `run.js` and `scatter.js`
* `_cli.R`: parses the command line arguments passed to `compare.R`
* `_http-benchmarkers.js`: selects and runs external tools for benchmarking
  the `http` subsystem.
* `common.js`: see [Common API](#common-api).
* `compare.js`: command line tool for comparing performance between different
  Node.js binaries.
* `compare.R`: R script for statistically analyzing the output of
  `compare.js`
* `run.js`: command line tool for running individual benchmark suite(s); see
  the invocation example after this list.
* `scatter.js`: command line tool for comparing the performance
  between different parameters in benchmark configurations,
  for example to analyze the time complexity.
* `scatter.R`: R script for visualizing the output of `scatter.js` with
  scatter plots.
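
A typical local invocation, assuming commands are run from the repository root
and a build of Node.js is already available (the category and filter values
below are examples only; the `--filter` option corresponds to the filtering
logic in `_cli.js`):

```console
$ node benchmark/run.js assert
$ node benchmark/run.js --filter deepequal assert
```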

## Common API

The common.js module is used by benchmarks for consistency across repeated
tasks. It has a number of helpful functions and properties to help with
writing benchmarks.

### `createBenchmark(fn, configs[, options])`

See [the guide on writing benchmarks](../doc/guides/writing-and-running-benchmarks.md#basics-of-a-benchmark).

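A minimal sketch of the pattern (the same one the `assert` benchmarks later in
this commit use); the file name and configuration values are illustrative only:

```js
'use strict';

// Hypothetical file: benchmark/buffers/buffer-equals-example.js
const common = require('../common.js');

// Each combination of the configuration values below is benchmarked
// separately; `main` receives one concrete combination per run.
const bench = common.createBenchmark(main, {
  n: [1e5],          // iterations per run
  size: [16, 1024],  // buffer size in bytes
});

function main({ n, size }) {
  const a = Buffer.alloc(size, 'a');
  const b = Buffer.alloc(size, 'a');

  bench.start();
  for (let i = 0; i < n; ++i) {
    a.equals(b);
  }
  bench.end(n);
}
```
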
### `default_http_benchmarker`

The default benchmarker used to run HTTP benchmarks.
See [the guide on writing HTTP benchmarks](../doc/guides/writing-and-running-benchmarks.md#creating-an-http-benchmark).

### `PORT`

The default port used to run HTTP benchmarks.
See [the guide on writing HTTP benchmarks](../doc/guides/writing-and-running-benchmarks.md#creating-an-http-benchmark).

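Because `_http-benchmarkers.js` (later in this commit) initializes the port as
`Number(process.env.PORT) || 12346`, the port can be overridden from the
environment if the default clashes with a local service; the value below is
just an example:

```console
$ PORT=12400 node benchmark/run.js http
```
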
### `sendResult(data)`

Used by special benchmarks that cannot use `createBenchmark` and the object it
returns to accomplish what they need. This function reports timing data to the
parent process (usually created by running `compare.js`, `run.js` or
`scatter.js`).

@ -0,0 +1,120 @@
|
|||
'use strict';
|
||||
|
||||
const readline = require('readline');
|
||||
|
||||
function pad(input, minLength, fill) {
|
||||
const result = String(input);
|
||||
const padding = fill.repeat(Math.max(0, minLength - result.length));
|
||||
return `${padding}${result}`;
|
||||
}
|
||||
|
||||
function fraction(numerator, denominator) {
|
||||
const fdenominator = String(denominator);
|
||||
const fnumerator = pad(numerator, fdenominator.length, ' ');
|
||||
return `${fnumerator}/${fdenominator}`;
|
||||
}
|
||||
|
||||
function getTime(diff) {
|
||||
const time = Math.ceil(diff[0] + diff[1] / 1e9);
|
||||
const hours = pad(Math.floor(time / 3600), 2, '0');
|
||||
const minutes = pad(Math.floor((time % 3600) / 60), 2, '0');
|
||||
const seconds = pad((time % 3600) % 60, 2, '0');
|
||||
return `${hours}:${minutes}:${seconds}`;
|
||||
}
|
||||
|
||||
// A run is an item in the job queue: { binary, filename, iter }
|
||||
// A config is an item in the subqueue: { binary, filename, iter, configs }
|
||||
class BenchmarkProgress {
|
||||
constructor(queue, benchmarks) {
|
||||
this.queue = queue; // Scheduled runs.
|
||||
this.benchmarks = benchmarks; // Filenames of scheduled benchmarks.
|
||||
this.completedRuns = 0; // Number of completed runs.
|
||||
this.scheduledRuns = queue.length; // Number of scheduled runs.
|
||||
// Time when starting to run benchmarks.
|
||||
this.startTime = process.hrtime();
|
||||
// Number of times each file will be run (roughly).
|
||||
this.runsPerFile = queue.length / benchmarks.length;
|
||||
this.currentFile = ''; // Filename of current benchmark.
|
||||
// Number of configurations already run for the current file.
|
||||
this.completedConfig = 0;
|
||||
// Total number of configurations for the current file
|
||||
this.scheduledConfig = 0;
|
||||
this.interval; // Updates the elapsed time.
|
||||
}
|
||||
|
||||
startQueue(index) {
|
||||
this.kStartOfQueue = index;
|
||||
this.currentFile = this.queue[index].filename;
|
||||
this.interval = setInterval(() => {
|
||||
if (this.completedRuns === this.scheduledRuns) {
|
||||
clearInterval(this.interval);
|
||||
} else {
|
||||
this.updateProgress();
|
||||
}
|
||||
}, 1000);
|
||||
}
|
||||
|
||||
startSubqueue(data, index) {
|
||||
// This subqueue is generated by a new benchmark
|
||||
if (data.name !== this.currentFile || index === this.kStartOfQueue) {
|
||||
this.currentFile = data.name;
|
||||
this.scheduledConfig = data.queueLength;
|
||||
}
|
||||
this.completedConfig = 0;
|
||||
this.updateProgress();
|
||||
}
|
||||
|
||||
completeConfig() {
|
||||
this.completedConfig++;
|
||||
this.updateProgress();
|
||||
}
|
||||
|
||||
completeRun() {
|
||||
this.completedRuns++;
|
||||
this.updateProgress();
|
||||
}
|
||||
|
||||
getProgress() {
|
||||
// Get time as soon as possible.
|
||||
const diff = process.hrtime(this.startTime);
|
||||
|
||||
const completedRuns = this.completedRuns;
|
||||
const scheduledRuns = this.scheduledRuns;
|
||||
const finished = completedRuns === scheduledRuns;
|
||||
|
||||
// Calculate numbers for fractions.
|
||||
const runsPerFile = this.runsPerFile;
|
||||
const completedFiles = Math.floor(completedRuns / runsPerFile);
|
||||
const scheduledFiles = this.benchmarks.length;
|
||||
const completedRunsForFile =
|
||||
finished ? runsPerFile : completedRuns % runsPerFile;
|
||||
const completedConfig = this.completedConfig;
|
||||
const scheduledConfig = this.scheduledConfig;
|
||||
|
||||
// Calculate the percentage.
|
||||
let runRate = 0; // Rate of current incomplete run.
|
||||
if (completedConfig !== scheduledConfig) {
|
||||
runRate = completedConfig / scheduledConfig;
|
||||
}
|
||||
const completedRate = ((completedRuns + runRate) / scheduledRuns);
|
||||
const percent = pad(Math.floor(completedRate * 100), 3, ' ');
|
||||
|
||||
const caption = finished ? 'Done\n' : this.currentFile;
|
||||
return `[${getTime(diff)}|% ${percent}| ` +
|
||||
`${fraction(completedFiles, scheduledFiles)} files | ` +
|
||||
`${fraction(completedRunsForFile, runsPerFile)} runs | ` +
|
||||
`${fraction(completedConfig, scheduledConfig)} configs]: ` +
|
||||
`${caption} `;
|
||||
}
|
||||
|
||||
updateProgress() {
|
||||
if (!process.stderr.isTTY || process.stdout.isTTY) {
|
||||
return;
|
||||
}
|
||||
readline.clearLine(process.stderr);
|
||||
readline.cursorTo(process.stderr, 0);
|
||||
process.stderr.write(this.getProgress());
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = BenchmarkProgress;
|
|
@ -0,0 +1,24 @@
|
|||
|
||||
args = commandArgs(TRUE);
|
||||
|
||||
args.options = list();
|
||||
|
||||
temp.option.key = NULL;
|
||||
|
||||
for (arg in args) {
|
||||
# Optional arguments declaration
|
||||
if (substring(arg, 1, 1) == '-') {
|
||||
temp.option.key = substring(arg, 2);
|
||||
if (substring(arg, 2, 2) == '-') {
|
||||
temp.option.key = substring(arg, 3);
|
||||
}
|
||||
|
||||
args.options[[temp.option.key]] = TRUE;
|
||||
}
|
||||
# Optional arguments value
|
||||
else if (!is.null(temp.option.key)) {
|
||||
args.options[[temp.option.key]] = arg;
|
||||
|
||||
temp.option.key = NULL;
|
||||
}
|
||||
}
|
|
@ -0,0 +1,127 @@
|
|||
'use strict';
|
||||
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
|
||||
// Create an object of all benchmark scripts
|
||||
const benchmarks = {};
|
||||
fs.readdirSync(__dirname)
|
||||
.filter((name) => {
|
||||
return name !== 'fixtures' &&
|
||||
fs.statSync(path.resolve(__dirname, name)).isDirectory();
|
||||
})
|
||||
.forEach((category) => {
|
||||
benchmarks[category] = fs.readdirSync(path.resolve(__dirname, category))
|
||||
.filter((filename) => filename[0] !== '.' && filename[0] !== '_');
|
||||
});
|
||||
|
||||
function CLI(usage, settings) {
|
||||
if (process.argv.length < 3) {
|
||||
this.abort(usage); // Abort will exit the process
|
||||
}
|
||||
|
||||
this.usage = usage;
|
||||
this.optional = {};
|
||||
this.items = [];
|
||||
this.test = false;
|
||||
|
||||
for (const argName of settings.arrayArgs) {
|
||||
this.optional[argName] = [];
|
||||
}
|
||||
|
||||
let currentOptional = null;
|
||||
let mode = 'both'; // Possible states are: [both, option, item]
|
||||
|
||||
for (const arg of process.argv.slice(2)) {
|
||||
if (arg === '--') {
|
||||
// Only items can follow --
|
||||
mode = 'item';
|
||||
} else if (mode === 'both' && arg[0] === '-') {
|
||||
// Optional arguments declaration
|
||||
|
||||
if (arg[1] === '-') {
|
||||
currentOptional = arg.slice(2);
|
||||
} else {
|
||||
currentOptional = arg.slice(1);
|
||||
}
|
||||
|
||||
if (settings.boolArgs && settings.boolArgs.includes(currentOptional)) {
|
||||
this.optional[currentOptional] = true;
|
||||
mode = 'both';
|
||||
} else {
|
||||
// Expect the next value to be option related (either -- or the value)
|
||||
mode = 'option';
|
||||
}
|
||||
} else if (mode === 'option') {
|
||||
// Optional arguments value
|
||||
|
||||
if (settings.arrayArgs.includes(currentOptional)) {
|
||||
this.optional[currentOptional].push(arg);
|
||||
} else {
|
||||
this.optional[currentOptional] = arg;
|
||||
}
|
||||
|
||||
// The next value can be either an option or an item
|
||||
mode = 'both';
|
||||
} else if (arg === 'test') {
|
||||
this.test = true;
|
||||
} else if (['both', 'item'].includes(mode)) {
|
||||
// item arguments
|
||||
this.items.push(arg);
|
||||
|
||||
// The next value must be an item
|
||||
mode = 'item';
|
||||
} else {
|
||||
// Bad case, abort
|
||||
this.abort(usage);
|
||||
}
|
||||
}
|
||||
}
|
||||
module.exports = CLI;
|
||||
|
||||
CLI.prototype.abort = function(msg) {
|
||||
console.error(msg);
|
||||
process.exit(1);
|
||||
};
|
||||
|
||||
CLI.prototype.benchmarks = function() {
|
||||
const paths = [];
|
||||
|
||||
if (this.items.includes('all')) {
|
||||
this.items = Object.keys(benchmarks);
|
||||
}
|
||||
|
||||
for (const category of this.items) {
|
||||
if (benchmarks[category] === undefined) {
|
||||
console.error(`The "${category}" category does not exist.`);
|
||||
process.exit(1);
|
||||
}
|
||||
for (const scripts of benchmarks[category]) {
|
||||
if (this.shouldSkip(scripts)) continue;
|
||||
|
||||
paths.push(path.join(category, scripts));
|
||||
}
|
||||
}
|
||||
|
||||
return paths;
|
||||
};
|
||||
|
||||
CLI.prototype.shouldSkip = function(scripts) {
|
||||
const filters = this.optional.filter || [];
|
||||
const excludes = this.optional.exclude || [];
|
||||
let skip = filters.length > 0;
|
||||
|
||||
for (const filter of filters) {
|
||||
if (scripts.lastIndexOf(filter) !== -1) {
|
||||
skip = false;
|
||||
}
|
||||
}
|
||||
|
||||
for (const exclude of excludes) {
|
||||
if (scripts.lastIndexOf(exclude) !== -1) {
|
||||
skip = true;
|
||||
}
|
||||
}
|
||||
|
||||
return skip;
|
||||
};
|
|
@ -0,0 +1,252 @@
|
|||
'use strict';
|
||||
|
||||
const child_process = require('child_process');
|
||||
const path = require('path');
|
||||
const fs = require('fs');
|
||||
|
||||
const requirementsURL =
|
||||
'https://github.com/nodejs/node/blob/master/benchmark/writing-and-running-benchmarks.md#http-benchmark-requirements';
|
||||
|
||||
// The port used by servers and wrk
|
||||
exports.PORT = Number(process.env.PORT) || 12346;
|
||||
|
||||
class AutocannonBenchmarker {
|
||||
constructor() {
|
||||
this.name = 'autocannon';
|
||||
this.executable =
|
||||
process.platform === 'win32' ? 'autocannon.cmd' : 'autocannon';
|
||||
const result = child_process.spawnSync(this.executable, ['-h']);
|
||||
this.present = !(result.error && result.error.code === 'ENOENT');
|
||||
}
|
||||
|
||||
create(options) {
|
||||
const args = [
|
||||
'-d', options.duration,
|
||||
'-c', options.connections,
|
||||
'-j',
|
||||
'-n',
|
||||
];
|
||||
for (const field in options.headers) {
|
||||
args.push('-H', `${field}=${options.headers[field]}`);
|
||||
}
|
||||
args.push(`http://127.0.0.1:${options.port}${options.path}`);
|
||||
const child = child_process.spawn(this.executable, args);
|
||||
return child;
|
||||
}
|
||||
|
||||
processResults(output) {
|
||||
let result;
|
||||
try {
|
||||
result = JSON.parse(output);
|
||||
} catch {
|
||||
return undefined;
|
||||
}
|
||||
if (!result || !result.requests || !result.requests.average) {
|
||||
return undefined;
|
||||
}
|
||||
return result.requests.average;
|
||||
}
|
||||
}
|
||||
|
||||
class WrkBenchmarker {
|
||||
constructor() {
|
||||
this.name = 'wrk';
|
||||
this.executable = 'wrk';
|
||||
const result = child_process.spawnSync(this.executable, ['-h']);
|
||||
this.present = !(result.error && result.error.code === 'ENOENT');
|
||||
}
|
||||
|
||||
create(options) {
|
||||
const duration = typeof options.duration === 'number' ?
|
||||
Math.max(options.duration, 1) :
|
||||
options.duration;
|
||||
const args = [
|
||||
'-d', duration,
|
||||
'-c', options.connections,
|
||||
'-t', Math.min(options.connections, require('os').cpus().length || 8),
|
||||
`http://127.0.0.1:${options.port}${options.path}`,
|
||||
];
|
||||
for (const field in options.headers) {
|
||||
args.push('-H', `${field}: ${options.headers[field]}`);
|
||||
}
|
||||
const child = child_process.spawn(this.executable, args);
|
||||
return child;
|
||||
}
|
||||
|
||||
processResults(output) {
|
||||
const throughputRe = /Requests\/sec:[ \t]+([0-9.]+)/;
|
||||
const match = output.match(throughputRe);
|
||||
const throughput = match && +match[1];
|
||||
if (!isFinite(throughput)) {
|
||||
return undefined;
|
||||
}
|
||||
return throughput;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Simple, single-threaded benchmarker for testing if the benchmark
|
||||
* works
|
||||
*/
|
||||
class TestDoubleBenchmarker {
|
||||
constructor(type) {
|
||||
// `type` is the type of benchmarker. Possible values are 'http' and
|
||||
// 'http2'.
|
||||
this.name = `test-double-${type}`;
|
||||
this.executable = path.resolve(__dirname, '_test-double-benchmarker.js');
|
||||
this.present = fs.existsSync(this.executable);
|
||||
this.type = type;
|
||||
}
|
||||
|
||||
create(options) {
|
||||
process.env.duration = process.env.duration || options.duration || 5;
|
||||
|
||||
const env = {
|
||||
test_url: `http://127.0.0.1:${options.port}${options.path}`,
|
||||
...process.env
|
||||
};
|
||||
|
||||
const child = child_process.fork(this.executable,
|
||||
[this.type],
|
||||
{ silent: true, env });
|
||||
return child;
|
||||
}
|
||||
|
||||
processResults(output) {
|
||||
let result;
|
||||
try {
|
||||
result = JSON.parse(output);
|
||||
} catch {
|
||||
return undefined;
|
||||
}
|
||||
return result.throughput;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* HTTP/2 Benchmarker
|
||||
*/
|
||||
class H2LoadBenchmarker {
|
||||
constructor() {
|
||||
this.name = 'h2load';
|
||||
this.executable = 'h2load';
|
||||
const result = child_process.spawnSync(this.executable, ['-h']);
|
||||
this.present = !(result.error && result.error.code === 'ENOENT');
|
||||
}
|
||||
|
||||
create(options) {
|
||||
const args = [];
|
||||
if (typeof options.requests === 'number')
|
||||
args.push('-n', options.requests);
|
||||
if (typeof options.clients === 'number')
|
||||
args.push('-c', options.clients);
|
||||
if (typeof options.threads === 'number')
|
||||
args.push('-t', options.threads);
|
||||
if (typeof options.maxConcurrentStreams === 'number')
|
||||
args.push('-m', options.maxConcurrentStreams);
|
||||
if (typeof options.initialWindowSize === 'number')
|
||||
args.push('-w', options.initialWindowSize);
|
||||
if (typeof options.sessionInitialWindowSize === 'number')
|
||||
args.push('-W', options.sessionInitialWindowSize);
|
||||
if (typeof options.rate === 'number')
|
||||
args.push('-r', options.rate);
|
||||
if (typeof options.ratePeriod === 'number')
|
||||
args.push(`--rate-period=${options.ratePeriod}`);
|
||||
if (typeof options.duration === 'number')
|
||||
args.push('-T', options.duration);
|
||||
if (typeof options.timeout === 'number')
|
||||
args.push('-N', options.timeout);
|
||||
if (typeof options.headerTableSize === 'number')
|
||||
args.push(`--header-table-size=${options.headerTableSize}`);
|
||||
if (typeof options.encoderHeaderTableSize === 'number') {
|
||||
args.push(
|
||||
`--encoder-header-table-size=${options.encoderHeaderTableSize}`);
|
||||
}
|
||||
const scheme = options.scheme || 'http';
|
||||
const host = options.host || '127.0.0.1';
|
||||
args.push(`${scheme}://${host}:${options.port}${options.path}`);
|
||||
const child = child_process.spawn(this.executable, args);
|
||||
return child;
|
||||
}
|
||||
|
||||
processResults(output) {
|
||||
const rex = /(\d+(?:\.\d+)) req\/s/;
|
||||
return rex.exec(output)[1];
|
||||
}
|
||||
}
|
||||
|
||||
const http_benchmarkers = [
|
||||
new WrkBenchmarker(),
|
||||
new AutocannonBenchmarker(),
|
||||
new TestDoubleBenchmarker('http'),
|
||||
new TestDoubleBenchmarker('http2'),
|
||||
new H2LoadBenchmarker(),
|
||||
];
|
||||
|
||||
const benchmarkers = {};
|
||||
|
||||
http_benchmarkers.forEach((benchmarker) => {
|
||||
benchmarkers[benchmarker.name] = benchmarker;
|
||||
if (!exports.default_http_benchmarker && benchmarker.present) {
|
||||
exports.default_http_benchmarker = benchmarker.name;
|
||||
}
|
||||
});
|
||||
|
||||
exports.run = function(options, callback) {
|
||||
options = {
|
||||
port: exports.PORT,
|
||||
path: '/',
|
||||
connections: 100,
|
||||
duration: 5,
|
||||
benchmarker: exports.default_http_benchmarker,
|
||||
...options
|
||||
};
|
||||
if (!options.benchmarker) {
|
||||
callback(new Error('Could not locate required http benchmarker. See ' +
|
||||
`${requirementsURL} for further instructions.`));
|
||||
return;
|
||||
}
|
||||
const benchmarker = benchmarkers[options.benchmarker];
|
||||
if (!benchmarker) {
|
||||
callback(new Error(`Requested benchmarker '${options.benchmarker}' ` +
|
||||
'is not supported'));
|
||||
return;
|
||||
}
|
||||
if (!benchmarker.present) {
|
||||
callback(new Error(`Requested benchmarker '${options.benchmarker}' ` +
|
||||
'is not installed'));
|
||||
return;
|
||||
}
|
||||
|
||||
const benchmarker_start = process.hrtime();
|
||||
|
||||
const child = benchmarker.create(options);
|
||||
|
||||
child.stderr.pipe(process.stderr);
|
||||
|
||||
let stdout = '';
|
||||
child.stdout.setEncoding('utf8');
|
||||
child.stdout.on('data', (chunk) => stdout += chunk);
|
||||
|
||||
child.once('close', (code) => {
|
||||
const elapsed = process.hrtime(benchmarker_start);
|
||||
if (code) {
|
||||
let error_message = `${options.benchmarker} failed with ${code}.`;
|
||||
if (stdout !== '') {
|
||||
error_message += ` Output: ${stdout}`;
|
||||
}
|
||||
callback(new Error(error_message), code);
|
||||
return;
|
||||
}
|
||||
|
||||
const result = benchmarker.processResults(stdout);
|
||||
if (result === undefined) {
|
||||
callback(new Error(
|
||||
`${options.benchmarker} produced strange output: ${stdout}`), code);
|
||||
return;
|
||||
}
|
||||
|
||||
callback(null, code, options.benchmarker, result, elapsed);
|
||||
});
|
||||
|
||||
};
|
|
@ -0,0 +1,45 @@
|
|||
'use strict';
|
||||
|
||||
const myModule = process.argv[2];
|
||||
if (!['http', 'http2'].includes(myModule)) {
|
||||
throw new Error(`Invalid module for benchmark test double: ${myModule}`);
|
||||
}
|
||||
|
||||
const http = require(myModule);
|
||||
|
||||
const duration = +process.env.duration;
|
||||
const url = process.env.test_url;
|
||||
|
||||
const start = process.hrtime();
|
||||
let throughput = 0;
|
||||
|
||||
function request(res, client) {
|
||||
res.resume();
|
||||
res.on('error', () => {});
|
||||
res.on('end', () => {
|
||||
throughput++;
|
||||
const [sec, nanosec] = process.hrtime(start);
|
||||
const ms = sec * 1000 + nanosec / 1e6;
|
||||
if (ms < duration * 1000) {
|
||||
run();
|
||||
} else {
|
||||
console.log(JSON.stringify({ throughput }));
|
||||
if (client) {
|
||||
client.destroy();
|
||||
process.exit(0);
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
function run() {
|
||||
if (http.get) { // HTTP
|
||||
http.get(url, request);
|
||||
} else { // HTTP/2
|
||||
const client = http.connect(url);
|
||||
client.on('error', (e) => { throw e; });
|
||||
request(client.request(), client);
|
||||
}
|
||||
}
|
||||
|
||||
run();
|
|
@ -0,0 +1,32 @@
|
|||
'use strict';
|
||||
const common = require('../common.js');
|
||||
const assert = require('assert');
|
||||
|
||||
const bench = common.createBenchmark(main, {
|
||||
n: [2e4],
|
||||
len: [1e2, 1e3],
|
||||
strict: [0, 1],
|
||||
method: ['deepEqual', 'notDeepEqual'],
|
||||
});
|
||||
|
||||
function main({ len, n, method, strict }) {
|
||||
const data = Buffer.allocUnsafe(len + 1);
|
||||
const actual = Buffer.alloc(len);
|
||||
const expected = Buffer.alloc(len);
|
||||
const expectedWrong = Buffer.alloc(len + 1);
|
||||
data.copy(actual);
|
||||
data.copy(expected);
|
||||
data.copy(expectedWrong);
|
||||
|
||||
if (strict) {
|
||||
method = method.replace('eep', 'eepStrict');
|
||||
}
|
||||
const fn = assert[method];
|
||||
const value2 = method.includes('not') ? expectedWrong : expected;
|
||||
|
||||
bench.start();
|
||||
for (let i = 0; i < n; ++i) {
|
||||
fn(actual, value2);
|
||||
}
|
||||
bench.end(n);
|
||||
}
|
|
@ -0,0 +1,80 @@
|
|||
'use strict';
|
||||
|
||||
const common = require('../common.js');
|
||||
const { deepEqual, deepStrictEqual, notDeepEqual, notDeepStrictEqual } =
|
||||
require('assert');
|
||||
|
||||
const bench = common.createBenchmark(main, {
|
||||
n: [5e2],
|
||||
len: [5e2],
|
||||
strict: [0, 1],
|
||||
method: [
|
||||
'deepEqual_primitiveOnly',
|
||||
'deepEqual_objectOnly',
|
||||
'deepEqual_mixed',
|
||||
'notDeepEqual_primitiveOnly',
|
||||
'notDeepEqual_objectOnly',
|
||||
'notDeepEqual_mixed',
|
||||
],
|
||||
});
|
||||
|
||||
function benchmark(method, n, values, values2) {
|
||||
const actual = new Map(values);
|
||||
// Prevent reference equal elements
|
||||
const deepCopy = JSON.parse(JSON.stringify(values2 ? values2 : values));
|
||||
const expected = new Map(deepCopy);
|
||||
bench.start();
|
||||
for (let i = 0; i < n; ++i) {
|
||||
method(actual, expected);
|
||||
}
|
||||
bench.end(n);
|
||||
}
|
||||
|
||||
function main({ n, len, method, strict }) {
|
||||
const array = Array(len).fill(1);
|
||||
|
||||
switch (method) {
|
||||
case 'deepEqual_primitiveOnly': {
|
||||
const values = array.map((_, i) => [`str_${i}`, 123]);
|
||||
benchmark(strict ? deepStrictEqual : deepEqual, n, values);
|
||||
break;
|
||||
}
|
||||
case 'deepEqual_objectOnly': {
|
||||
const values = array.map((_, i) => [[`str_${i}`, 1], 123]);
|
||||
benchmark(strict ? deepStrictEqual : deepEqual, n, values);
|
||||
break;
|
||||
}
|
||||
case 'deepEqual_mixed': {
|
||||
const values = array.map(
|
||||
(_, i) => [i % 2 ? [`str_${i}`, 1] : `str_${i}`, 123]
|
||||
);
|
||||
benchmark(strict ? deepStrictEqual : deepEqual, n, values);
|
||||
break;
|
||||
}
|
||||
case 'notDeepEqual_primitiveOnly': {
|
||||
const values = array.map((_, i) => [`str_${i}`, 123]);
|
||||
const values2 = values.slice(0);
|
||||
values2[Math.floor(len / 2)] = ['w00t', 123];
|
||||
benchmark(strict ? notDeepStrictEqual : notDeepEqual, n, values, values2);
|
||||
break;
|
||||
}
|
||||
case 'notDeepEqual_objectOnly': {
|
||||
const values = array.map((_, i) => [[`str_${i}`, 1], 123]);
|
||||
const values2 = values.slice(0);
|
||||
values2[Math.floor(len / 2)] = [['w00t'], 123];
|
||||
benchmark(strict ? notDeepStrictEqual : notDeepEqual, n, values, values2);
|
||||
break;
|
||||
}
|
||||
case 'notDeepEqual_mixed': {
|
||||
const values = array.map(
|
||||
(_, i) => [i % 2 ? [`str_${i}`, 1] : `str_${i}`, 123]
|
||||
);
|
||||
const values2 = values.slice(0);
|
||||
values2[0] = ['w00t', 123];
|
||||
benchmark(strict ? notDeepStrictEqual : notDeepEqual, n, values, values2);
|
||||
break;
|
||||
}
|
||||
default:
|
||||
throw new Error(`Unsupported method ${method}`);
|
||||
}
|
||||
}
|
|
@ -0,0 +1,46 @@
|
|||
'use strict';
|
||||
|
||||
const common = require('../common.js');
|
||||
const assert = require('assert');
|
||||
|
||||
const bench = common.createBenchmark(main, {
|
||||
n: [5e3],
|
||||
size: [1e2, 1e3, 5e4],
|
||||
strict: [0, 1],
|
||||
method: ['deepEqual', 'notDeepEqual'],
|
||||
});
|
||||
|
||||
function createObj(source, add = '') {
|
||||
return source.map((n) => ({
|
||||
foo: 'yarp',
|
||||
nope: {
|
||||
bar: `123${add}`,
|
||||
a: [1, 2, 3],
|
||||
baz: n,
|
||||
c: {},
|
||||
b: [],
|
||||
},
|
||||
}));
|
||||
}
|
||||
|
||||
function main({ size, n, method, strict }) {
|
||||
// TODO: Fix this "hack". `n` should not be manipulated.
|
||||
n = Math.min(Math.ceil(n / size), 20);
|
||||
|
||||
const source = Array.apply(null, Array(size));
|
||||
const actual = createObj(source);
|
||||
const expected = createObj(source);
|
||||
const expectedWrong = createObj(source, '4');
|
||||
|
||||
if (strict) {
|
||||
method = method.replace('eep', 'eepStrict');
|
||||
}
|
||||
const fn = assert[method];
|
||||
const value2 = method.includes('not') ? expectedWrong : expected;
|
||||
|
||||
bench.start();
|
||||
for (let i = 0; i < n; ++i) {
|
||||
fn(actual, value2);
|
||||
}
|
||||
bench.end(n);
|
||||
}
|
|
@ -0,0 +1,71 @@
|
|||
'use strict';
|
||||
|
||||
const common = require('../common.js');
|
||||
const { deepEqual, deepStrictEqual, notDeepEqual, notDeepStrictEqual } =
|
||||
require('assert');
|
||||
|
||||
const primValues = {
|
||||
'string': 'a',
|
||||
'number': 1,
|
||||
'object': { 0: 'a' },
|
||||
'array': [1, 2, 3],
|
||||
};
|
||||
|
||||
const bench = common.createBenchmark(main, {
|
||||
primitive: Object.keys(primValues),
|
||||
n: [25],
|
||||
len: [2e4],
|
||||
strict: [0, 1],
|
||||
method: [
|
||||
'deepEqual_Array',
|
||||
'notDeepEqual_Array',
|
||||
'deepEqual_Set',
|
||||
'notDeepEqual_Set',
|
||||
],
|
||||
});
|
||||
|
||||
function run(fn, n, actual, expected) {
|
||||
bench.start();
|
||||
for (let i = 0; i < n; ++i) {
|
||||
fn(actual, expected);
|
||||
}
|
||||
bench.end(n);
|
||||
}
|
||||
|
||||
function main({ n, len, primitive, method, strict }) {
|
||||
const prim = primValues[primitive];
|
||||
const actual = [];
|
||||
const expected = [];
|
||||
const expectedWrong = [];
|
||||
|
||||
for (let x = 0; x < len; x++) {
|
||||
actual.push(prim);
|
||||
expected.push(prim);
|
||||
expectedWrong.push(prim);
|
||||
}
|
||||
expectedWrong.pop();
|
||||
expectedWrong.push('b');
|
||||
|
||||
// Note: identical primitives are only added to a Set once
|
||||
const actualSet = new Set(actual);
|
||||
const expectedSet = new Set(expected);
|
||||
const expectedWrongSet = new Set(expectedWrong);
|
||||
|
||||
switch (method) {
|
||||
case 'deepEqual_Array':
|
||||
run(strict ? deepStrictEqual : deepEqual, n, actual, expected);
|
||||
break;
|
||||
case 'notDeepEqual_Array':
|
||||
run(strict ? notDeepStrictEqual : notDeepEqual, n, actual, expectedWrong);
|
||||
break;
|
||||
case 'deepEqual_Set':
|
||||
run(strict ? deepStrictEqual : deepEqual, n, actualSet, expectedSet);
|
||||
break;
|
||||
case 'notDeepEqual_Set':
|
||||
run(strict ? notDeepStrictEqual : notDeepEqual,
|
||||
n, actualSet, expectedWrongSet);
|
||||
break;
|
||||
default:
|
||||
throw new Error(`Unsupported method "${method}"`);
|
||||
}
|
||||
}
|
|
@ -0,0 +1,36 @@
|
|||
'use strict';
|
||||
const common = require('../common.js');
|
||||
const assert = require('assert');
|
||||
|
||||
const primValues = {
|
||||
'string': 'a',
|
||||
'number': 1,
|
||||
'object': { 0: 'a' },
|
||||
'array': [1, 2, 3],
|
||||
};
|
||||
|
||||
const bench = common.createBenchmark(main, {
|
||||
primitive: Object.keys(primValues),
|
||||
n: [2e4],
|
||||
strict: [0, 1],
|
||||
method: ['deepEqual', 'notDeepEqual'],
|
||||
});
|
||||
|
||||
function main({ n, primitive, method, strict }) {
|
||||
const prim = primValues[primitive];
|
||||
const actual = prim;
|
||||
const expected = prim;
|
||||
const expectedWrong = 'b';
|
||||
|
||||
if (strict) {
|
||||
method = method.replace('eep', 'eepStrict');
|
||||
}
|
||||
const fn = assert[method];
|
||||
const value2 = method.includes('not') ? expectedWrong : expected;
|
||||
|
||||
bench.start();
|
||||
for (let i = 0; i < n; ++i) {
|
||||
fn([actual], [value2]);
|
||||
}
|
||||
bench.end(n);
|
||||
}
|
|
@ -0,0 +1,80 @@
|
|||
'use strict';
|
||||
|
||||
const common = require('../common.js');
|
||||
const { deepEqual, deepStrictEqual, notDeepEqual, notDeepStrictEqual } =
|
||||
require('assert');
|
||||
|
||||
const bench = common.createBenchmark(main, {
|
||||
n: [5e2],
|
||||
len: [5e2],
|
||||
strict: [0, 1],
|
||||
method: [
|
||||
'deepEqual_primitiveOnly',
|
||||
'deepEqual_objectOnly',
|
||||
'deepEqual_mixed',
|
||||
'notDeepEqual_primitiveOnly',
|
||||
'notDeepEqual_objectOnly',
|
||||
'notDeepEqual_mixed',
|
||||
],
|
||||
});
|
||||
|
||||
function benchmark(method, n, values, values2) {
|
||||
const actual = new Set(values);
|
||||
// Prevent reference-equal elements
|
||||
const deepCopy = JSON.parse(JSON.stringify(values2 ? values2 : values));
|
||||
const expected = new Set(deepCopy);
|
||||
bench.start();
|
||||
for (let i = 0; i < n; ++i) {
|
||||
method(actual, expected);
|
||||
}
|
||||
bench.end(n);
|
||||
}
|
||||
|
||||
function main({ n, len, method, strict }) {
|
||||
const array = Array(len).fill(1);
|
||||
|
||||
switch (method) {
|
||||
case 'deepEqual_primitiveOnly': {
|
||||
const values = array.map((_, i) => `str_${i}`);
|
||||
benchmark(strict ? deepStrictEqual : deepEqual, n, values);
|
||||
break;
|
||||
}
|
||||
case 'deepEqual_objectOnly': {
|
||||
const values = array.map((_, i) => [`str_${i}`, null]);
|
||||
benchmark(strict ? deepStrictEqual : deepEqual, n, values);
|
||||
break;
|
||||
}
|
||||
case 'deepEqual_mixed': {
|
||||
const values = array.map((_, i) => {
|
||||
return i % 2 ? [`str_${i}`, null] : `str_${i}`;
|
||||
});
|
||||
benchmark(strict ? deepStrictEqual : deepEqual, n, values);
|
||||
break;
|
||||
}
|
||||
case 'notDeepEqual_primitiveOnly': {
|
||||
const values = array.map((_, i) => `str_${i}`);
|
||||
const values2 = values.slice(0);
|
||||
values2[Math.floor(len / 2)] = 'w00t';
|
||||
benchmark(strict ? notDeepStrictEqual : notDeepEqual, n, values, values2);
|
||||
break;
|
||||
}
|
||||
case 'notDeepEqual_objectOnly': {
|
||||
const values = array.map((_, i) => [`str_${i}`, null]);
|
||||
const values2 = values.slice(0);
|
||||
values2[Math.floor(len / 2)] = ['w00t'];
|
||||
benchmark(strict ? notDeepStrictEqual : notDeepEqual, n, values, values2);
|
||||
break;
|
||||
}
|
||||
case 'notDeepEqual_mixed': {
|
||||
const values = array.map((_, i) => {
|
||||
return i % 2 ? [`str_${i}`, null] : `str_${i}`;
|
||||
});
|
||||
const values2 = values.slice();
|
||||
values2[0] = 'w00t';
|
||||
benchmark(strict ? notDeepStrictEqual : notDeepEqual, n, values, values2);
|
||||
break;
|
||||
}
|
||||
default:
|
||||
throw new Error(`Unsupported method "${method}"`);
|
||||
}
|
||||
}
|
|
@ -0,0 +1,43 @@
|
|||
'use strict';
|
||||
const common = require('../common.js');
|
||||
const assert = require('assert');
|
||||
|
||||
const bench = common.createBenchmark(main, {
|
||||
type: [
|
||||
'Int8Array',
|
||||
'Uint8Array',
|
||||
'Float32Array',
|
||||
'Float64Array',
|
||||
'Uint8ClampedArray',
|
||||
],
|
||||
n: [5e2],
|
||||
strict: [0, 1],
|
||||
method: [
|
||||
'deepEqual',
|
||||
'notDeepEqual',
|
||||
],
|
||||
len: [1e2, 5e3],
|
||||
});
|
||||
|
||||
function main({ type, n, len, method, strict }) {
|
||||
const clazz = global[type];
|
||||
const actual = new clazz(len);
|
||||
const expected = new clazz(len);
|
||||
const expectedWrong = new clazz(len);
|
||||
const wrongIndex = Math.floor(len / 2);
|
||||
expectedWrong[wrongIndex] = 123;
|
||||
|
||||
if (strict) {
|
||||
method = method.replace('eep', 'eepStrict');
|
||||
}
|
||||
const fn = assert[method];
|
||||
const value2 = method.includes('not') ? expectedWrong : expected;
|
||||
|
||||
bench.start();
|
||||
for (let i = 0; i < n; ++i) {
|
||||
actual[0] = i;
|
||||
value2[0] = i;
|
||||
fn(actual, value2);
|
||||
}
|
||||
bench.end(n);
|
||||
}
|
|
@ -0,0 +1,17 @@
|
|||
'use strict';
|
||||
|
||||
const common = require('../common.js');
|
||||
const assert = require('assert');
|
||||
|
||||
const bench = common.createBenchmark(main, { n: [1e5] });
|
||||
|
||||
function main({ n }) {
|
||||
bench.start();
|
||||
for (let i = 0; i < n; ++i) {
|
||||
if (i % 2 === 0)
|
||||
assert(true);
|
||||
else
|
||||
assert(true, 'foo bar baz');
|
||||
}
|
||||
bench.end(n);
|
||||
}
|
|
@ -0,0 +1,42 @@
|
|||
'use strict';
|
||||
|
||||
const common = require('../common.js');
|
||||
const { throws, doesNotThrow } = require('assert');
|
||||
|
||||
const bench = common.createBenchmark(main, {
|
||||
n: [1e4],
|
||||
method: [ 'doesNotThrow', 'throws_TypeError', 'throws_RegExp' ],
|
||||
});
|
||||
|
||||
function main({ n, method }) {
|
||||
const throwError = () => { throw new TypeError('foobar'); };
|
||||
const doNotThrowError = () => { return 'foobar'; };
|
||||
const regExp = /foobar/;
|
||||
const message = 'failure';
|
||||
|
||||
switch (method) {
|
||||
case 'doesNotThrow':
|
||||
bench.start();
|
||||
for (let i = 0; i < n; ++i) {
|
||||
doesNotThrow(doNotThrowError);
|
||||
}
|
||||
bench.end(n);
|
||||
break;
|
||||
case 'throws_TypeError':
|
||||
bench.start();
|
||||
for (let i = 0; i < n; ++i) {
|
||||
throws(throwError, TypeError, message);
|
||||
}
|
||||
bench.end(n);
|
||||
break;
|
||||
case 'throws_RegExp':
|
||||
bench.start();
|
||||
for (let i = 0; i < n; ++i) {
|
||||
throws(throwError, regExp, message);
|
||||
}
|
||||
bench.end(n);
|
||||
break;
|
||||
default:
|
||||
throw new Error(`Unsupported method ${method}`);
|
||||
}
|
||||
}
|
|
@ -0,0 +1,184 @@
|
|||
'use strict';
|
||||
|
||||
const { promisify } = require('util');
|
||||
const { readFile } = require('fs');
|
||||
const sleep = promisify(setTimeout);
|
||||
const read = promisify(readFile);
|
||||
const common = require('../common.js');
|
||||
const {
|
||||
createHook,
|
||||
executionAsyncResource,
|
||||
executionAsyncId,
|
||||
AsyncLocalStorage
|
||||
} = require('async_hooks');
|
||||
const { createServer } = require('http');
|
||||
|
||||
const bench = common.createBenchmark(main, {
|
||||
type: ['async-resource', 'destroy', 'async-local-storage'],
|
||||
asyncMethod: ['callbacks', 'async'],
|
||||
path: '/',
|
||||
connections: 500,
|
||||
duration: 5,
|
||||
n: [1e6]
|
||||
});
|
||||
|
||||
function buildCurrentResource(getServe) {
|
||||
const server = createServer(getServe(getCLS, setCLS));
|
||||
const hook = createHook({ init });
|
||||
const cls = Symbol('cls');
|
||||
hook.enable();
|
||||
|
||||
return {
|
||||
server,
|
||||
close
|
||||
};
|
||||
|
||||
function getCLS() {
|
||||
const resource = executionAsyncResource();
|
||||
if (!resource[cls]) {
|
||||
return null;
|
||||
}
|
||||
return resource[cls].state;
|
||||
}
|
||||
|
||||
function setCLS(state) {
|
||||
const resource = executionAsyncResource();
|
||||
if (!resource[cls]) {
|
||||
resource[cls] = { state };
|
||||
} else {
|
||||
resource[cls].state = state;
|
||||
}
|
||||
}
|
||||
|
||||
function init(asyncId, type, triggerAsyncId, resource) {
|
||||
const cr = executionAsyncResource();
|
||||
if (cr !== null) {
|
||||
resource[cls] = cr[cls];
|
||||
}
|
||||
}
|
||||
|
||||
function close() {
|
||||
hook.disable();
|
||||
server.close();
|
||||
}
|
||||
}
|
||||
|
||||
function buildDestroy(getServe) {
|
||||
const transactions = new Map();
|
||||
const server = createServer(getServe(getCLS, setCLS));
|
||||
const hook = createHook({ init, destroy });
|
||||
hook.enable();
|
||||
|
||||
return {
|
||||
server,
|
||||
close
|
||||
};
|
||||
|
||||
function getCLS() {
|
||||
const asyncId = executionAsyncId();
|
||||
return transactions.has(asyncId) ? transactions.get(asyncId) : null;
|
||||
}
|
||||
|
||||
function setCLS(value) {
|
||||
const asyncId = executionAsyncId();
|
||||
transactions.set(asyncId, value);
|
||||
}
|
||||
|
||||
function init(asyncId, type, triggerAsyncId, resource) {
|
||||
transactions.set(asyncId, getCLS());
|
||||
}
|
||||
|
||||
function destroy(asyncId) {
|
||||
transactions.delete(asyncId);
|
||||
}
|
||||
|
||||
function close() {
|
||||
hook.disable();
|
||||
server.close();
|
||||
}
|
||||
}
|
||||
|
||||
function buildAsyncLocalStorage(getServe) {
|
||||
const asyncLocalStorage = new AsyncLocalStorage();
|
||||
const server = createServer((req, res) => {
|
||||
asyncLocalStorage.run({}, () => {
|
||||
getServe(getCLS, setCLS)(req, res);
|
||||
});
|
||||
});
|
||||
|
||||
return {
|
||||
server,
|
||||
close
|
||||
};
|
||||
|
||||
function getCLS() {
|
||||
const store = asyncLocalStorage.getStore();
|
||||
if (store === undefined) {
|
||||
return null;
|
||||
}
|
||||
return store.state;
|
||||
}
|
||||
|
||||
function setCLS(state) {
|
||||
const store = asyncLocalStorage.getStore();
|
||||
if (store === undefined) {
|
||||
return;
|
||||
}
|
||||
store.state = state;
|
||||
}
|
||||
|
||||
function close() {
|
||||
asyncLocalStorage.disable();
|
||||
server.close();
|
||||
}
|
||||
}
|
||||
|
||||
function getServeAwait(getCLS, setCLS) {
|
||||
return async function serve(req, res) {
|
||||
setCLS(Math.random());
|
||||
await sleep(10);
|
||||
await read(__filename);
|
||||
res.setHeader('content-type', 'application/json');
|
||||
res.end(JSON.stringify({ cls: getCLS() }));
|
||||
};
|
||||
}
|
||||
|
||||
function getServeCallbacks(getCLS, setCLS) {
|
||||
return function serve(req, res) {
|
||||
setCLS(Math.random());
|
||||
setTimeout(() => {
|
||||
readFile(__filename, () => {
|
||||
res.setHeader('content-type', 'application/json');
|
||||
res.end(JSON.stringify({ cls: getCLS() }));
|
||||
});
|
||||
}, 10);
|
||||
};
|
||||
}
|
||||
|
||||
const types = {
|
||||
'async-resource': buildCurrentResource,
|
||||
'destroy': buildDestroy,
|
||||
'async-local-storage': buildAsyncLocalStorage
|
||||
};
|
||||
|
||||
const asyncMethods = {
|
||||
'callbacks': getServeCallbacks,
|
||||
'async': getServeAwait
|
||||
};
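// All three strategies implement the same continuation-local storage (CLS)
// getter/setter pair:
//  - 'async-resource': state is attached to executionAsyncResource() and
//    copied onto new resources from an init hook.
//  - 'destroy': state lives in a Map keyed by executionAsyncId(), with a
//    destroy hook evicting entries for finished ids.
//  - 'async-local-storage': AsyncLocalStorage carries the state directly.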
|
||||
|
||||
function main({ type, asyncMethod, connections, duration, path }) {
|
||||
const { server, close } = types[type](asyncMethods[asyncMethod]);
|
||||
|
||||
server
|
||||
.listen(common.PORT)
|
||||
.on('listening', () => {
|
||||
|
||||
bench.http({
|
||||
path,
|
||||
connections,
|
||||
duration
|
||||
}, () => {
|
||||
close();
|
||||
});
|
||||
});
|
||||
}
|
|
@ -0,0 +1,52 @@
|
|||
'use strict';
|
||||
const common = require('../common.js');
|
||||
const { createHook, AsyncResource } = require('async_hooks');
|
||||
|
||||
const bench = common.createBenchmark(main, {
|
||||
n: [1e6],
|
||||
method: [
|
||||
'trackingEnabled',
|
||||
'trackingEnabledWithDestroyHook',
|
||||
'trackingDisabled',
|
||||
]
|
||||
}, {
|
||||
flags: ['--expose-gc']
|
||||
});
|
||||
|
||||
function endAfterGC(n) {
|
||||
setImmediate(() => {
|
||||
global.gc();
|
||||
setImmediate(() => {
|
||||
bench.end(n);
|
||||
});
|
||||
});
|
||||
}
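// bench.end() is deferred until after an explicit global.gc() (made
// available by the --expose-gc flag above) so that the timing also covers
// collecting the AsyncResource instances created in the loop.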
|
||||
|
||||
function main({ n, method }) {
|
||||
switch (method) {
|
||||
case 'trackingEnabled':
|
||||
bench.start();
|
||||
for (let i = 0; i < n; i++) {
|
||||
new AsyncResource('foobar');
|
||||
}
|
||||
endAfterGC(n);
|
||||
break;
|
||||
case 'trackingEnabledWithDestroyHook':
|
||||
createHook({ destroy: () => {} }).enable();
|
||||
bench.start();
|
||||
for (let i = 0; i < n; i++) {
|
||||
new AsyncResource('foobar');
|
||||
}
|
||||
endAfterGC(n);
|
||||
break;
|
||||
case 'trackingDisabled':
|
||||
bench.start();
|
||||
for (let i = 0; i < n; i++) {
|
||||
new AsyncResource('foobar', { requireManualDestroy: true });
|
||||
}
|
||||
endAfterGC(n);
|
||||
break;
|
||||
default:
|
||||
throw new Error(`Unsupported method "${method}"`);
|
||||
}
|
||||
}
|
|
@ -0,0 +1,42 @@
|
|||
'use strict';
|
||||
const common = require('../common.js');
|
||||
|
||||
const bench = common.createBenchmark(main, {
|
||||
asyncHooks: ['init', 'before', 'after', 'all', 'disabled', 'none'],
|
||||
connections: [50, 500],
|
||||
duration: 5
|
||||
});
|
||||
|
||||
function main({ asyncHooks, connections, duration }) {
|
||||
if (asyncHooks !== 'none') {
|
||||
let hooks = {
|
||||
init() {},
|
||||
before() {},
|
||||
after() {},
|
||||
destroy() {},
|
||||
promiseResolve() {}
|
||||
};
|
||||
if (asyncHooks !== 'all' && asyncHooks !== 'disabled') {
|
||||
hooks = {
|
||||
[asyncHooks]: () => {}
|
||||
};
|
||||
}
|
||||
const hook = require('async_hooks').createHook(hooks);
|
||||
if (asyncHooks !== 'disabled') {
|
||||
hook.enable();
|
||||
}
|
||||
}
|
||||
const server = require('../fixtures/simple-http-server.js')
|
||||
.listen(common.PORT)
|
||||
.on('listening', () => {
|
||||
const path = '/buffer/4/4/normal/1';
|
||||
|
||||
bench.http({
|
||||
connections,
|
||||
path,
|
||||
duration
|
||||
}, () => {
|
||||
server.close();
|
||||
});
|
||||
});
|
||||
}
|
|
@ -0,0 +1,30 @@
|
|||
'use strict';
|
||||
const common = require('../common.js');
|
||||
const { createHook } = require('async_hooks');
|
||||
|
||||
const bench = common.createBenchmark(main, {
|
||||
n: [1e6],
|
||||
asyncHooks: [
|
||||
'enabled',
|
||||
'disabled',
|
||||
]
|
||||
});
|
||||
|
||||
async function run(n) {
|
||||
for (let i = 0; i < n; i++) {
|
||||
await new Promise((resolve) => resolve())
|
||||
.then(() => { throw new Error('foobar'); })
|
||||
.catch((e) => e);
|
||||
}
|
||||
}
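// Each iteration builds a small promise chain: a resolved promise, a
// .then() that throws, and a .catch() that swallows the error. With the
// hook enabled this exercises the promiseResolve callback registered in
// main(); the 'disabled' case runs the identical chain without it.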
|
||||
|
||||
function main({ n, asyncHooks }) {
|
||||
const hook = createHook({ promiseResolve() {} });
|
||||
if (asyncHooks !== 'disabled') {
|
||||
hook.enable();
|
||||
}
|
||||
bench.start();
|
||||
run(n).then(() => {
|
||||
bench.end(n);
|
||||
});
|
||||
}
|
|
@ -0,0 +1,25 @@
|
|||
'use strict';
|
||||
|
||||
const common = require('../common.js');
|
||||
|
||||
const bench = common.createBenchmark(main, {
|
||||
charsPerLine: [76],
|
||||
linesCount: [8 << 16],
|
||||
n: [32],
|
||||
});
|
||||
|
||||
function main({ charsPerLine, linesCount, n }) {
|
||||
const bytesCount = charsPerLine * linesCount / 4 * 3;
|
||||
|
||||
const line = `${'abcd'.repeat(charsPerLine / 4)}\n`;
|
||||
const data = line.repeat(linesCount);
|
||||
// eslint-disable-next-line node-core/no-unescaped-regexp-dot
|
||||
data.match(/./); // Flatten the string
|
||||
const buffer = Buffer.alloc(bytesCount, line, 'base64');
|
||||
|
||||
bench.start();
|
||||
for (let i = 0; i < n; i++) {
|
||||
buffer.base64Write(data, 0, bytesCount);
|
||||
}
|
||||
bench.end(n);
|
||||
}
|
|
@ -0,0 +1,21 @@
|
|||
'use strict';
|
||||
const assert = require('assert');
|
||||
const common = require('../common.js');
|
||||
|
||||
const bench = common.createBenchmark(main, {
|
||||
n: [32],
|
||||
size: [8 << 20]
|
||||
});
|
||||
|
||||
function main({ n, size }) {
|
||||
const s = 'abcd'.repeat(size);
|
||||
const encodedSize = s.length * 3 / 4;
|
||||
// eslint-disable-next-line node-core/no-unescaped-regexp-dot
|
||||
s.match(/./); // Flatten string.
|
||||
assert.strictEqual(s.length % 4, 0);
|
||||
const b = Buffer.allocUnsafe(encodedSize);
|
||||
b.write(s, 0, encodedSize, 'base64');
|
||||
bench.start();
|
||||
for (let i = 0; i < n; i += 1) b.base64Write(s, 0, s.length);
|
||||
bench.end(n);
|
||||
}
|
|
@ -0,0 +1,41 @@
|
|||
// Copyright Joyent, Inc. and other Node contributors.
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a
|
||||
// copy of this software and associated documentation files (the
|
||||
// "Software"), to deal in the Software without restriction, including
|
||||
// without limitation the rights to use, copy, modify, merge, publish,
|
||||
// distribute, sublicense, and/or sell copies of the Software, and to permit
|
||||
// persons to whom the Software is furnished to do so, subject to the
|
||||
// following conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be included
|
||||
// in all copies or substantial portions of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
||||
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
|
||||
// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
|
||||
// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||
// USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
|
||||
'use strict';
|
||||
const common = require('../common.js');
|
||||
|
||||
const bench = common.createBenchmark(main, {
|
||||
len: [64 * 1024 * 1024],
|
||||
n: [32]
|
||||
}, {
|
||||
test: { len: 256 }
|
||||
});
|
||||
|
||||
function main({ n, len }) {
|
||||
const b = Buffer.allocUnsafe(len);
|
||||
let s = '';
|
||||
let i;
|
||||
for (i = 0; i < 256; ++i) s += String.fromCharCode(i);
|
||||
for (i = 0; i < len; i += 256) b.write(s, i, 256, 'ascii');
|
||||
bench.start();
|
||||
for (i = 0; i < n; ++i) b.toString('base64');
|
||||
bench.end(n);
|
||||
}
|
|
@ -0,0 +1,49 @@
|
|||
'use strict';
|
||||
const common = require('../common');
|
||||
|
||||
const bench = common.createBenchmark(main, {
|
||||
encoding: ['utf8', 'base64', 'buffer'],
|
||||
len: [2, 16, 256], // x16
|
||||
n: [4e6]
|
||||
});
|
||||
|
||||
// 16 chars each
|
||||
const chars = [
|
||||
'hello brendan!!!', // 1 byte
|
||||
'ΰαβγδεζηθικλμνξο', // 2 bytes
|
||||
'挰挱挲挳挴挵挶挷挸挹挺挻挼挽挾挿', // 3 bytes
|
||||
'𠜎𠜱𠝹𠱓𠱸𠲖𠳏𠳕𠴕𠵼𠵿𠸎𠸏𠹷𠺝𠺢', // 4 bytes
|
||||
];
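// Every entry above is 16 characters long but uses code points of a
// different UTF-8 width (1 to 4 bytes per character), so repeating them
// exercises Buffer.byteLength() across the interesting encoding cases.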
|
||||
|
||||
function main({ n, len, encoding }) {
|
||||
let strings = [];
|
||||
let results = [len * 16];
|
||||
if (encoding === 'buffer') {
|
||||
strings = [Buffer.alloc(len * 16, 'a')];
|
||||
} else {
|
||||
for (const string of chars) {
|
||||
// Strings must be built differently, depending on encoding
|
||||
const data = string.repeat(len);
|
||||
if (encoding === 'utf8') {
|
||||
strings.push(data);
|
||||
} else if (encoding === 'base64') {
|
||||
// Base64 strings will be much longer than their UTF8 counterparts
|
||||
strings.push(Buffer.from(data, 'utf8').toString('base64'));
|
||||
}
|
||||
}
|
||||
|
||||
// Precompute expected results so the loop below can verify each return value
|
||||
results = strings.map((val) => Buffer.byteLength(val, encoding));
|
||||
}
|
||||
|
||||
bench.start();
|
||||
for (let i = 0; i < n; i++) {
|
||||
const index = i % strings.length;  // cycle through the prepared strings
|
||||
// Go!
|
||||
const r = Buffer.byteLength(strings[index], encoding);
|
||||
|
||||
if (r !== results[index])
|
||||
throw new Error('incorrect return value');
|
||||
}
|
||||
bench.end(n);
|
||||
}
|
|
@ -0,0 +1,59 @@
|
|||
'use strict';
|
||||
const common = require('../common.js');
|
||||
|
||||
const bench = common.createBenchmark(main, {
|
||||
size: [16, 512, 4096, 16386],
|
||||
args: [1, 2, 5],
|
||||
n: [1e6]
|
||||
});
|
||||
|
||||
function main({ n, size, args }) {
|
||||
const b0 = Buffer.alloc(size, 'a');
|
||||
const b1 = Buffer.alloc(size, 'a');
|
||||
const b0Len = b0.length;
|
||||
const b1Len = b1.length;
|
||||
|
||||
b1[size - 1] = 'b'.charCodeAt(0);
|
||||
|
||||
switch (args) {
|
||||
case 2:
|
||||
b0.compare(b1, 0);
|
||||
bench.start();
|
||||
for (let i = 0; i < n; i++) {
|
||||
b0.compare(b1, 0);
|
||||
}
|
||||
bench.end(n);
|
||||
break;
|
||||
case 3:
|
||||
b0.compare(b1, 0, b1Len);
|
||||
bench.start();
|
||||
for (let i = 0; i < n; i++) {
|
||||
b0.compare(b1, 0, b1Len);
|
||||
}
|
||||
bench.end(n);
|
||||
break;
|
||||
case 4:
|
||||
b0.compare(b1, 0, b1Len, 0);
|
||||
bench.start();
|
||||
for (let i = 0; i < n; i++) {
|
||||
b0.compare(b1, 0, b1Len, 0);
|
||||
}
|
||||
bench.end(n);
|
||||
break;
|
||||
case 5:
|
||||
b0.compare(b1, 0, b1Len, 0, b0Len);
|
||||
bench.start();
|
||||
for (let i = 0; i < n; i++) {
|
||||
b0.compare(b1, 0, b1Len, 0, b0Len);
|
||||
}
|
||||
bench.end(n);
|
||||
break;
|
||||
default:
|
||||
b0.compare(b1);
|
||||
bench.start();
|
||||
for (let i = 0; i < n; i++) {
|
||||
b0.compare(b1);
|
||||
}
|
||||
bench.end(n);
|
||||
}
|
||||
}
|
|
@ -0,0 +1,28 @@
|
|||
'use strict';
|
||||
const common = require('../common.js');
|
||||
|
||||
const bench = common.createBenchmark(main, {
|
||||
method: ['offset', 'slice'],
|
||||
size: [16, 512, 4096, 16386],
|
||||
n: [1e6]
|
||||
});
|
||||
|
||||
function compareUsingSlice(b0, b1, len, iter) {
|
||||
for (let i = 0; i < iter; i++)
|
||||
Buffer.compare(b0.slice(1, len), b1.slice(1, len));
|
||||
}
|
||||
|
||||
function compareUsingOffset(b0, b1, len, iter) {
|
||||
for (let i = 0; i < iter; i++)
|
||||
b0.compare(b1, 1, len, 1, len);
|
||||
}
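// compareUsingSlice creates two sub-Buffer views of the same memory on
// every call (no copy, but two extra objects), while compareUsingOffset
// passes start/end offsets straight to Buffer#compare; the benchmark
// contrasts the two approaches over the same logical range.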
|
||||
|
||||
function main({ n, size, method }) {
|
||||
const fn = method === 'slice' ? compareUsingSlice : compareUsingOffset;
|
||||
bench.start();
|
||||
fn(Buffer.alloc(size, 'a'),
|
||||
Buffer.alloc(size, 'b'),
|
||||
size >> 1,
|
||||
n);
|
||||
bench.end(n);
|
||||
}
|
|
@ -0,0 +1,41 @@
|
|||
// Copyright Joyent, Inc. and other Node contributors.
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a
|
||||
// copy of this software and associated documentation files (the
|
||||
// "Software"), to deal in the Software without restriction, including
|
||||
// without limitation the rights to use, copy, modify, merge, publish,
|
||||
// distribute, sublicense, and/or sell copies of the Software, and to permit
|
||||
// persons to whom the Software is furnished to do so, subject to the
|
||||
// following conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be included
|
||||
// in all copies or substantial portions of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
||||
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
|
||||
// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
|
||||
// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||
// USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
|
||||
'use strict';
|
||||
const common = require('../common.js');
|
||||
|
||||
const bench = common.createBenchmark(main, {
|
||||
size: [16, 512, 4096, 16386],
|
||||
n: [1e6]
|
||||
});
|
||||
|
||||
function main({ n, size }) {
|
||||
const b0 = Buffer.alloc(size, 'a');
|
||||
const b1 = Buffer.alloc(size, 'a');
|
||||
|
||||
b1[size - 1] = 'b'.charCodeAt(0);
|
||||
|
||||
bench.start();
|
||||
for (let i = 0; i < n; i++) {
|
||||
Buffer.compare(b0, b1);
|
||||
}
|
||||
bench.end(n);
|
||||
}
|
|
@ -0,0 +1,23 @@
|
|||
'use strict';
|
||||
const common = require('../common.js');
|
||||
|
||||
const bench = common.createBenchmark(main, {
|
||||
extraSize: [1, 256, 4 * 256],
|
||||
n: [8e5]
|
||||
});
|
||||
|
||||
function main({ n, extraSize }) {
|
||||
const pieces = 4;
|
||||
const pieceSize = 256;
|
||||
|
||||
const list = Array.from({ length: pieces })
|
||||
.fill(Buffer.allocUnsafe(pieceSize));
|
||||
|
||||
const totalLength = (pieces * pieceSize) + extraSize;
|
||||
|
||||
bench.start();
|
||||
for (let i = 0; i < n; i++) {
|
||||
Buffer.concat(list, totalLength);
|
||||
}
|
||||
bench.end(n);
|
||||
}
|
|
@ -0,0 +1,22 @@
|
|||
'use strict';
|
||||
const common = require('../common.js');
|
||||
|
||||
const bench = common.createBenchmark(main, {
|
||||
pieces: [4, 16],
|
||||
pieceSize: [1, 16, 256],
|
||||
withTotalLength: [0, 1],
|
||||
n: [8e5]
|
||||
});
|
||||
|
||||
function main({ n, pieces, pieceSize, withTotalLength }) {
|
||||
const list = Array.from({ length: pieces })
|
||||
.fill(Buffer.allocUnsafe(pieceSize));
|
||||
|
||||
const totalLength = withTotalLength ? pieces * pieceSize : undefined;
|
||||
|
||||
bench.start();
|
||||
for (let i = 0; i < n; i++) {
|
||||
Buffer.concat(list, totalLength);
|
||||
}
|
||||
bench.end(n);
|
||||
}
|
|
@ -0,0 +1,19 @@
|
|||
'use strict';
|
||||
const common = require('../common.js');
|
||||
|
||||
const bench = common.createBenchmark(main, {
|
||||
bytes: [0, 8, 128, 32 * 1024],
|
||||
partial: ['true', 'false'],
|
||||
n: [6e6]
|
||||
});
|
||||
|
||||
function main({ n, bytes, partial }) {
|
||||
const source = Buffer.allocUnsafe(bytes);
|
||||
const target = Buffer.allocUnsafe(bytes);
|
||||
const sourceStart = (partial === 'true' ? Math.floor(bytes / 2) : 0);
|
||||
bench.start();
|
||||
for (let i = 0; i < n; i++) {
|
||||
source.copy(target, 0, sourceStart);
|
||||
}
|
||||
bench.end(n);
|
||||
}
|
|
@ -0,0 +1,44 @@
|
|||
'use strict';
|
||||
|
||||
const common = require('../common.js');
|
||||
const assert = require('assert');
|
||||
const bench = common.createBenchmark(main, {
|
||||
type: [
|
||||
'fast-alloc',
|
||||
'fast-alloc-fill',
|
||||
'fast-allocUnsafe',
|
||||
'slow-allocUnsafe',
|
||||
],
|
||||
len: [10, 1024, 4096, 8192],
|
||||
n: [6e5]
|
||||
});
|
||||
|
||||
function main({ len, n, type }) {
|
||||
let fn, i;
|
||||
switch (type) {
|
||||
case 'fast-alloc':
|
||||
fn = Buffer.alloc;
|
||||
break;
|
||||
case 'fast-alloc-fill':
|
||||
bench.start();
|
||||
for (i = 0; i < n; i++) {
|
||||
Buffer.alloc(len, 0);
|
||||
}
|
||||
bench.end(n);
|
||||
return;
|
||||
case 'fast-allocUnsafe':
|
||||
fn = Buffer.allocUnsafe;
|
||||
break;
|
||||
case 'slow-allocUnsafe':
|
||||
fn = Buffer.allocUnsafeSlow;
|
||||
break;
|
||||
default:
|
||||
assert.fail('Should not get here');
|
||||
}
|
||||
|
||||
bench.start();
|
||||
for (i = 0; i < n; i++) {
|
||||
fn(len);
|
||||
}
|
||||
bench.end(n);
|
||||
}
|
|
@ -0,0 +1,22 @@
|
|||
'use strict';
|
||||
const common = require('../common.js');
|
||||
|
||||
const bench = common.createBenchmark(main, {
|
||||
size: [0, 512, 16386],
|
||||
difflen: ['true', 'false'],
|
||||
n: [1e6]
|
||||
});
|
||||
|
||||
function main({ n, size, difflen }) {
|
||||
const b0 = Buffer.alloc(size, 'a');
|
||||
const b1 = Buffer.alloc(size + (difflen === 'true' ? 1 : 0), 'a');
|
||||
|
||||
if (b1.length > 0)
|
||||
b1[b1.length - 1] = 'b'.charCodeAt(0);
|
||||
|
||||
bench.start();
|
||||
for (let i = 0; i < n; i++) {
|
||||
b0.equals(b1);
|
||||
}
|
||||
bench.end(n);
|
||||
}
|
|
@ -0,0 +1,31 @@
|
|||
'use strict';
|
||||
const common = require('../common.js');
|
||||
|
||||
const bench = common.createBenchmark(main, {
|
||||
type: [
|
||||
'fill(0)',
|
||||
'fill("")',
|
||||
'fill(100)',
|
||||
'fill(400)',
|
||||
'fill("t")',
|
||||
'fill("test")',
|
||||
'fill("t", "utf8")',
|
||||
'fill("t", 0, "utf8")',
|
||||
'fill("t", 0)',
|
||||
'fill(Buffer.alloc(1), 0)',
|
||||
],
|
||||
size: [2 ** 13, 2 ** 16],
|
||||
n: [2e4]
|
||||
});
|
||||
|
||||
function main({ n, type, size }) {
|
||||
const buffer = Buffer.allocUnsafe(size);
|
||||
const testFunction = new Function('b', `
|
||||
for (var i = 0; i < ${n}; i++) {
|
||||
b.${type};
|
||||
}
|
||||
`);
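// The loop body is generated with new Function so each `type` string
// (e.g. 'fill(0)' or 'fill("t", "utf8")') is inlined verbatim as a call
// expression; the timed region then contains only the fill calls, with no
// per-iteration argument handling in the harness.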
|
||||
bench.start();
|
||||
testFunction(buffer);
|
||||
bench.end(n);
|
||||
}
|
|
@ -0,0 +1,121 @@
|
|||
'use strict';
|
||||
|
||||
const common = require('../common.js');
|
||||
const assert = require('assert');
|
||||
const bench = common.createBenchmark(main, {
|
||||
source: [
|
||||
'array',
|
||||
'arraybuffer',
|
||||
'arraybuffer-middle',
|
||||
'buffer',
|
||||
'string',
|
||||
'string-utf8',
|
||||
'string-base64',
|
||||
'object',
|
||||
'uint8array',
|
||||
'uint16array',
|
||||
],
|
||||
len: [100, 2048],
|
||||
n: [8e5]
|
||||
});
|
||||
|
||||
function main({ len, n, source }) {
|
||||
let i = 0;
|
||||
|
||||
switch (source) {
|
||||
case 'array': {
|
||||
const array = new Array(len).fill(42);
|
||||
bench.start();
|
||||
for (i = 0; i < n; i++) {
|
||||
Buffer.from(array);
|
||||
}
|
||||
bench.end(n);
|
||||
break;
|
||||
}
|
||||
case 'arraybuffer': {
|
||||
const arrayBuf = new ArrayBuffer(len);
|
||||
bench.start();
|
||||
for (i = 0; i < n; i++) {
|
||||
Buffer.from(arrayBuf);
|
||||
}
|
||||
bench.end(n);
|
||||
break;
|
||||
}
|
||||
case 'arraybuffer-middle': {
|
||||
const arrayBuf = new ArrayBuffer(len);
|
||||
const offset = ~~(len / 4);
|
||||
const length = ~~(len / 2);
|
||||
bench.start();
|
||||
for (i = 0; i < n; i++) {
|
||||
Buffer.from(arrayBuf, offset, length);
|
||||
}
|
||||
bench.end(n);
|
||||
break;
|
||||
}
|
||||
case 'buffer': {
|
||||
const buffer = Buffer.allocUnsafe(len);
|
||||
bench.start();
|
||||
for (i = 0; i < n; i++) {
|
||||
Buffer.from(buffer);
|
||||
}
|
||||
bench.end(n);
|
||||
break;
|
||||
}
|
||||
case 'uint8array': {
|
||||
const uint8array = new Uint8Array(len);
|
||||
bench.start();
|
||||
for (i = 0; i < n; i++) {
|
||||
Buffer.from(uint8array);
|
||||
}
|
||||
bench.end(n);
|
||||
break;
|
||||
}
|
||||
case 'uint16array': {
|
||||
const uint16array = new Uint16Array(len);
|
||||
bench.start();
|
||||
for (i = 0; i < n; i++) {
|
||||
Buffer.from(uint16array);
|
||||
}
|
||||
bench.end(n);
|
||||
break;
|
||||
}
|
||||
case 'string': {
|
||||
const str = 'a'.repeat(len);
|
||||
bench.start();
|
||||
for (i = 0; i < n; i++) {
|
||||
Buffer.from(str);
|
||||
}
|
||||
bench.end(n);
|
||||
break;
|
||||
}
|
||||
case 'string-utf8': {
|
||||
const str = 'a'.repeat(len);
|
||||
bench.start();
|
||||
for (i = 0; i < n; i++) {
|
||||
Buffer.from(str, 'utf8');
|
||||
}
|
||||
bench.end(n);
|
||||
break;
|
||||
}
|
||||
case 'string-base64': {
|
||||
const str = 'a'.repeat(len);
|
||||
bench.start();
|
||||
for (i = 0; i < n; i++) {
|
||||
Buffer.from(str, 'base64');
|
||||
}
|
||||
bench.end(n);
|
||||
break;
|
||||
}
|
||||
case 'object': {
|
||||
const obj = { length: null }; // Results in a new, empty Buffer
|
||||
bench.start();
|
||||
for (i = 0; i < n; i++) {
|
||||
Buffer.from(obj);
|
||||
}
|
||||
bench.end(n);
|
||||
break;
|
||||
}
|
||||
default:
|
||||
assert.fail('Should not get here');
|
||||
}
|
||||
}
|
|
@ -0,0 +1,24 @@
|
|||
'use strict';
|
||||
|
||||
const common = require('../common.js');
|
||||
|
||||
const bench = common.createBenchmark(main, {
|
||||
len: [64, 1024],
|
||||
n: [1e6]
|
||||
});
|
||||
|
||||
function main({ len, n }) {
|
||||
const buf = Buffer.alloc(len);
|
||||
|
||||
for (let i = 0; i < buf.length; i++)
|
||||
buf[i] = i & 0xff;
|
||||
|
||||
const hex = buf.toString('hex');
|
||||
|
||||
bench.start();
|
||||
|
||||
for (let i = 0; i < n; i += 1)
|
||||
Buffer.from(hex, 'hex');
|
||||
|
||||
bench.end(n);
|
||||
}
|
|
@ -0,0 +1,23 @@
|
|||
'use strict';
|
||||
const common = require('../common.js');
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
|
||||
const bench = common.createBenchmark(main, {
|
||||
value: ['@'.charCodeAt(0)],
|
||||
n: [1e6]
|
||||
});
|
||||
|
||||
function main({ n, value }) {
|
||||
const aliceBuffer = fs.readFileSync(
|
||||
path.resolve(__dirname, '../fixtures/alice.html')
|
||||
);
|
||||
|
||||
let count = 0;
|
||||
bench.start();
|
||||
for (let i = 0; i < n; i++) {
|
||||
count += aliceBuffer.indexOf(value, 0, undefined);
|
||||
}
|
||||
bench.end(n);
|
||||
return count;
|
||||
}
|
|
@ -0,0 +1,49 @@
|
|||
'use strict';
|
||||
const common = require('../common.js');
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
|
||||
const searchStrings = [
|
||||
'@',
|
||||
'SQ',
|
||||
'--l',
|
||||
'Alice',
|
||||
'Gryphon',
|
||||
'Ou est ma chatte?',
|
||||
'found it very',
|
||||
'neighbouring pool',
|
||||
'aaaaaaaaaaaaaaaaa',
|
||||
'venture to go near the house till she had brought herself down to',
|
||||
'</i> to the Caterpillar',
|
||||
];
|
||||
|
||||
const bench = common.createBenchmark(main, {
|
||||
search: searchStrings,
|
||||
encoding: ['utf8', 'ucs2'],
|
||||
type: ['buffer', 'string'],
|
||||
n: [5e4]
|
||||
});
|
||||
|
||||
function main({ n, search, encoding, type }) {
|
||||
let aliceBuffer = fs.readFileSync(
|
||||
path.resolve(__dirname, '../fixtures/alice.html')
|
||||
);
|
||||
|
||||
if (encoding === 'undefined') {
|
||||
encoding = undefined;
|
||||
}
|
||||
|
||||
if (encoding === 'ucs2') {
|
||||
aliceBuffer = Buffer.from(aliceBuffer.toString(), encoding);
|
||||
}
|
||||
|
||||
if (type === 'buffer') {
|
||||
search = Buffer.from(Buffer.from(search).toString(), encoding);
|
||||
}
|
||||
|
||||
bench.start();
|
||||
for (let i = 0; i < n; i++) {
|
||||
aliceBuffer.indexOf(search, 0, encoding);
|
||||
}
|
||||
bench.end(n);
|
||||
}
|
|
@ -0,0 +1,58 @@
|
|||
'use strict';
|
||||
const SlowBuffer = require('buffer').SlowBuffer;
|
||||
const common = require('../common.js');
|
||||
const assert = require('assert');
|
||||
|
||||
const bench = common.createBenchmark(main, {
|
||||
size: [512, 4096, 16386],
|
||||
type: ['fast'],
|
||||
method: ['for', 'forOf', 'iterator'],
|
||||
n: [1e3]
|
||||
});
|
||||
|
||||
const methods = {
|
||||
'for': benchFor,
|
||||
'forOf': benchForOf,
|
||||
'iterator': benchIterator
|
||||
};
|
||||
|
||||
function main({ size, type, method, n }) {
|
||||
const buffer = type === 'fast' ?
|
||||
Buffer.alloc(size) :
|
||||
SlowBuffer(size).fill(0);
|
||||
|
||||
const fn = methods[method];
|
||||
|
||||
bench.start();
|
||||
fn(buffer, n);
|
||||
bench.end(n);
|
||||
}
|
||||
|
||||
function benchFor(buffer, n) {
|
||||
for (let k = 0; k < n; k++) {
|
||||
for (let i = 0; i < buffer.length; i++) {
|
||||
assert(buffer[i] === 0);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function benchForOf(buffer, n) {
|
||||
for (let k = 0; k < n; k++) {
|
||||
for (const b of buffer) {
|
||||
assert(b === 0);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function benchIterator(buffer, n) {
|
||||
for (let k = 0; k < n; k++) {
|
||||
const iter = buffer[Symbol.iterator]();
|
||||
let cur = iter.next();
|
||||
|
||||
while (!cur.done) {
|
||||
assert(cur.value === 0);
|
||||
cur = iter.next();
|
||||
}
|
||||
|
||||
}
|
||||
}
|
|
@ -0,0 +1,38 @@
|
|||
'use strict';
|
||||
|
||||
const common = require('../common.js');
|
||||
|
||||
const bench = common.createBenchmark(main, {
|
||||
encoding: [
|
||||
'ascii',
|
||||
'base64',
|
||||
'BASE64',
|
||||
'binary',
|
||||
'hex',
|
||||
'HEX',
|
||||
'latin1',
|
||||
'LATIN1',
|
||||
'UCS-2',
|
||||
'UCS2',
|
||||
'utf-16le',
|
||||
'UTF-16LE',
|
||||
'utf-8',
|
||||
'utf16le',
|
||||
'UTF16LE',
|
||||
'utf8',
|
||||
'UTF8',
|
||||
],
|
||||
n: [1e6]
|
||||
}, {
|
||||
flags: ['--expose-internals']
|
||||
});
|
||||
|
||||
function main({ encoding, n }) {
|
||||
const { normalizeEncoding } = require('internal/util');
|
||||
|
||||
bench.start();
|
||||
for (let i = 0; i < n; i++) {
|
||||
normalizeEncoding(encoding);
|
||||
}
|
||||
bench.end(n);
|
||||
}
|
|
@ -0,0 +1,38 @@
|
|||
'use strict';
|
||||
const common = require('../common.js');
|
||||
|
||||
const bench = common.createBenchmark(main, {
|
||||
type: ['Double', 'Float'],
|
||||
endian: ['LE'],
|
||||
value: ['zero', 'big', 'small', 'inf', 'nan'],
|
||||
n: [1e6]
|
||||
});
|
||||
|
||||
function main({ n, type, endian, value }) {
|
||||
const buff = Buffer.alloc(8);
|
||||
const fn = `read${type}${endian}`;
|
||||
const values = {
|
||||
Double: {
|
||||
zero: 0,
|
||||
big: 2 ** 1023,
|
||||
small: 2 ** -1074,
|
||||
inf: Infinity,
|
||||
nan: NaN,
|
||||
},
|
||||
Float: {
|
||||
zero: 0,
|
||||
big: 2 ** 127,
|
||||
small: 2 ** -149,
|
||||
inf: Infinity,
|
||||
nan: NaN,
|
||||
},
|
||||
};
|
||||
|
||||
buff[`write${type}${endian}`](values[type][value], 0);
|
||||
|
||||
bench.start();
|
||||
for (let i = 0; i !== n; i++) {
|
||||
buff[fn](0);
|
||||
}
|
||||
bench.end(n);
|
||||
}
|
|
@ -0,0 +1,30 @@
|
|||
'use strict';
|
||||
const common = require('../common.js');
|
||||
|
||||
const types = [
|
||||
'IntBE',
|
||||
'IntLE',
|
||||
'UIntBE',
|
||||
'UIntLE',
|
||||
];
|
||||
|
||||
const bench = common.createBenchmark(main, {
|
||||
buffer: ['fast'],
|
||||
type: types,
|
||||
n: [1e6],
|
||||
byteLength: [1, 2, 3, 4, 5, 6]
|
||||
});
|
||||
|
||||
function main({ n, buffer, type, byteLength }) {
|
||||
const buff = buffer === 'fast' ?
|
||||
Buffer.alloc(8) :
|
||||
require('buffer').SlowBuffer(8);
|
||||
const fn = `read${type}`;
|
||||
|
||||
buff.writeDoubleLE(0, 0);
|
||||
bench.start();
|
||||
for (let i = 0; i !== n; i++) {
|
||||
buff[fn](0, byteLength);
|
||||
}
|
||||
bench.end(n);
|
||||
}
|
|
@ -0,0 +1,40 @@
|
|||
'use strict';
|
||||
const common = require('../common.js');
|
||||
|
||||
const types = [
|
||||
'BigUInt64LE',
|
||||
'BigUInt64BE',
|
||||
'BigInt64LE',
|
||||
'BigInt64BE',
|
||||
'UInt8',
|
||||
'UInt16LE',
|
||||
'UInt16BE',
|
||||
'UInt32LE',
|
||||
'UInt32BE',
|
||||
'Int8',
|
||||
'Int16LE',
|
||||
'Int16BE',
|
||||
'Int32LE',
|
||||
'Int32BE',
|
||||
];
|
||||
|
||||
const bench = common.createBenchmark(main, {
|
||||
buffer: ['fast'],
|
||||
type: types,
|
||||
n: [1e6]
|
||||
});
|
||||
|
||||
function main({ n, buffer, type }) {
|
||||
const buff = buffer === 'fast' ?
|
||||
Buffer.alloc(8) :
|
||||
require('buffer').SlowBuffer(8);
|
||||
const fn = `read${type}`;
|
||||
|
||||
buff.writeDoubleLE(0, 0);
|
||||
bench.start();
|
||||
|
||||
for (let i = 0; i !== n; i++) {
|
||||
buff[fn](0);
|
||||
}
|
||||
bench.end(n);
|
||||
}
|
|
@ -0,0 +1,20 @@
|
|||
'use strict';
|
||||
const common = require('../common.js');
|
||||
const SlowBuffer = require('buffer').SlowBuffer;
|
||||
|
||||
const bench = common.createBenchmark(main, {
|
||||
type: ['fast', 'slow'],
|
||||
n: [1e6]
|
||||
});
|
||||
|
||||
const buf = Buffer.allocUnsafe(1024);
|
||||
const slowBuf = new SlowBuffer(1024);
|
||||
|
||||
function main({ n, type }) {
|
||||
const b = type === 'fast' ? buf : slowBuf;
|
||||
bench.start();
|
||||
for (let i = 0; i < n; i++) {
|
||||
b.slice(10, 256);
|
||||
}
|
||||
bench.end(n);
|
||||
}
|
|
@ -0,0 +1,85 @@
|
|||
'use strict';
|
||||
|
||||
const common = require('../common.js');
|
||||
|
||||
const bench = common.createBenchmark(main, {
|
||||
aligned: ['true', 'false'],
|
||||
method: ['swap16', 'swap32', 'swap64'/* , 'htons', 'htonl', 'htonll' */],
|
||||
len: [64, 256, 768, 1024, 2056, 8192],
|
||||
n: [1e6]
|
||||
}, {
|
||||
test: { len: 16 }
|
||||
});
|
||||
|
||||
// The htons and htonl methods below are used to benchmark the
|
||||
// performance difference between doing the byteswap in pure
|
||||
// JavaScript regardless of Buffer size, as opposed to dropping
|
||||
// down to the native layer for larger Buffer sizes. Commented
|
||||
// out by default because they are slow for big buffers. If
|
||||
// re-evaluating the crossover point, uncomment those methods
|
||||
// and comment out their implementations in lib/buffer.js so
|
||||
// the C++ version will always be used.
|
||||
|
||||
function swap(b, n, m) {
|
||||
const i = b[n];
|
||||
b[n] = b[m];
|
||||
b[m] = i;
|
||||
}
|
||||
|
||||
Buffer.prototype.htons = function htons() {
|
||||
if (this.length % 2 !== 0)
|
||||
throw new RangeError();
|
||||
for (let i = 0; i < this.length; i += 2) {
|
||||
swap(this, i, i + 1);
|
||||
}
|
||||
return this;
|
||||
};
|
||||
|
||||
Buffer.prototype.htonl = function htonl() {
|
||||
if (this.length % 4 !== 0)
|
||||
throw new RangeError();
|
||||
for (let i = 0; i < this.length; i += 4) {
|
||||
swap(this, i, i + 3);
|
||||
swap(this, i + 1, i + 2);
|
||||
}
|
||||
return this;
|
||||
};
|
||||
|
||||
Buffer.prototype.htonll = function htonll() {
|
||||
if (this.length % 8 !== 0)
|
||||
throw new RangeError();
|
||||
for (let i = 0; i < this.length; i += 8) {
|
||||
swap(this, i, i + 7);
|
||||
swap(this, i + 1, i + 6);
|
||||
swap(this, i + 2, i + 5);
|
||||
swap(this, i + 3, i + 4);
|
||||
}
|
||||
return this;
|
||||
};
|
||||
|
||||
function createBuffer(len, aligned) {
|
||||
len += aligned ? 0 : 1;
|
||||
const buf = Buffer.allocUnsafe(len);
|
||||
for (let i = 1; i <= len; i++)
|
||||
buf[i - 1] = i;
|
||||
return aligned ? buf : buf.slice(1);
|
||||
}
|
||||
|
||||
function genMethod(method) {
|
||||
const fnString = `
|
||||
return function ${method}(n, buf) {
|
||||
for (let i = 0; i <= n; i++)
|
||||
buf.${method}();
|
||||
}`;
|
||||
return (new Function(fnString))();
|
||||
}
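// genMethod() builds a dedicated loop per swap method via new Function so
// the timed call site stays monomorphic; the first bufferSwap() call in
// main() below acts as a warm-up outside the timed region, before
// bench.start().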
|
||||
|
||||
function main({ method, len, n, aligned = 'true' }) {
|
||||
const buf = createBuffer(len, aligned === 'true');
|
||||
const bufferSwap = genMethod(method);
|
||||
|
||||
bufferSwap(n, buf);
|
||||
bench.start();
|
||||
bufferSwap(n, buf);
|
||||
bench.end(n);
|
||||
}
|
|
@ -0,0 +1,17 @@
|
|||
'use strict';
|
||||
|
||||
const common = require('../common.js');
|
||||
|
||||
const bench = common.createBenchmark(main, {
|
||||
n: [1e4],
|
||||
len: [0, 256, 4 * 1024]
|
||||
});
|
||||
|
||||
function main({ n, len }) {
|
||||
const buf = Buffer.allocUnsafe(len);
|
||||
|
||||
bench.start();
|
||||
for (let i = 0; i < n; ++i)
|
||||
buf.toJSON();
|
||||
bench.end(n);
|
||||
}
|
|
@ -0,0 +1,44 @@
|
|||
'use strict';
|
||||
|
||||
const common = require('../common.js');
|
||||
|
||||
const bench = common.createBenchmark(main, {
|
||||
encoding: ['utf8', 'ascii', 'latin1', 'hex', 'UCS-2'],
|
||||
args: [0, 1, 3],
|
||||
len: [1, 64, 1024],
|
||||
n: [1e6]
|
||||
});
|
||||
|
||||
function main({ encoding, args, len, n }) {
|
||||
const buf = Buffer.alloc(len, 42);
|
||||
|
||||
if (encoding.length === 0)
|
||||
encoding = undefined;
|
||||
|
||||
switch (args) {
|
||||
case 1:
|
||||
bench.start();
|
||||
for (let i = 0; i < n; i += 1)
|
||||
buf.toString(encoding);
|
||||
bench.end(n);
|
||||
break;
|
||||
case 2:
|
||||
bench.start();
|
||||
for (let i = 0; i < n; i += 1)
|
||||
buf.toString(encoding, 0);
|
||||
bench.end(n);
|
||||
break;
|
||||
case 3:
|
||||
bench.start();
|
||||
for (let i = 0; i < n; i += 1)
|
||||
buf.toString(encoding, 0, len);
|
||||
bench.end(n);
|
||||
break;
|
||||
default:
|
||||
bench.start();
|
||||
for (let i = 0; i < n; i += 1)
|
||||
buf.toString();
|
||||
bench.end(n);
|
||||
break;
|
||||
}
|
||||
}
|
|
@ -0,0 +1,68 @@
|
|||
'use strict';
|
||||
|
||||
const common = require('../common.js');
|
||||
const bench = common.createBenchmark(main, {
|
||||
encoding: [
|
||||
'', 'utf8', 'ascii', 'hex', 'utf16le', 'latin1',
|
||||
],
|
||||
args: [ '', 'offset', 'offset+length' ],
|
||||
len: [2048],
|
||||
n: [1e6]
|
||||
});
|
||||
|
||||
function main({ len, n, encoding, args }) {
|
||||
let string;
|
||||
let start = 0;
|
||||
const buf = Buffer.allocUnsafe(len);
|
||||
|
||||
switch (args) {
|
||||
case 'offset':
|
||||
string = 'a'.repeat(Math.floor(len / 2));
|
||||
start = len - string.length;
|
||||
if (encoding) {
|
||||
bench.start();
|
||||
for (let i = 0; i < n; ++i) {
|
||||
buf.write(string, start, encoding);
|
||||
}
|
||||
bench.end(n);
|
||||
} else {
|
||||
bench.start();
|
||||
for (let i = 0; i < n; ++i) {
|
||||
buf.write(string, start);
|
||||
}
|
||||
bench.end(n);
|
||||
}
|
||||
break;
|
||||
case 'offset+length':
|
||||
string = 'a'.repeat(len);
|
||||
if (encoding) {
|
||||
bench.start();
|
||||
for (let i = 0; i < n; ++i) {
|
||||
buf.write(string, 0, buf.length, encoding);
|
||||
}
|
||||
bench.end(n);
|
||||
} else {
|
||||
bench.start();
|
||||
for (let i = 0; i < n; ++i) {
|
||||
buf.write(string, 0, buf.length);
|
||||
}
|
||||
bench.end(n);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
string = 'a'.repeat(len);
|
||||
if (encoding) {
|
||||
bench.start();
|
||||
for (let i = 0; i < n; ++i) {
|
||||
buf.write(string, encoding);
|
||||
}
|
||||
bench.end(n);
|
||||
} else {
|
||||
bench.start();
|
||||
for (let i = 0; i < n; ++i) {
|
||||
buf.write(string);
|
||||
}
|
||||
bench.end(n);
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,123 @@
|
|||
'use strict';
|
||||
const common = require('../common.js');
|
||||
|
||||
const types = [
|
||||
'BigUInt64LE',
|
||||
'BigUInt64BE',
|
||||
'BigInt64LE',
|
||||
'BigInt64BE',
|
||||
'UInt8',
|
||||
'UInt16LE',
|
||||
'UInt16BE',
|
||||
'UInt32LE',
|
||||
'UInt32BE',
|
||||
'UIntLE',
|
||||
'UIntBE',
|
||||
'Int8',
|
||||
'Int16LE',
|
||||
'Int16BE',
|
||||
'Int32LE',
|
||||
'Int32BE',
|
||||
'IntLE',
|
||||
'IntBE',
|
||||
'FloatLE',
|
||||
'FloatBE',
|
||||
'DoubleLE',
|
||||
'DoubleBE',
|
||||
];
|
||||
|
||||
const bench = common.createBenchmark(main, {
|
||||
buffer: ['fast'],
|
||||
type: types,
|
||||
n: [1e6]
|
||||
});
|
||||
|
||||
const INT8 = 0x7f;
|
||||
const INT16 = 0x7fff;
|
||||
const INT32 = 0x7fffffff;
|
||||
const INT48 = 0x7fffffffffff;
|
||||
const INT64 = 0x7fffffffffffffffn;
|
||||
const UINT8 = 0xff;
|
||||
const UINT16 = 0xffff;
|
||||
const UINT32 = 0xffffffff;
|
||||
const UINT64 = 0xffffffffffffffffn;
|
||||
|
||||
const mod = {
|
||||
writeBigInt64BE: INT64,
|
||||
writeBigInt64LE: INT64,
|
||||
writeBigUInt64BE: UINT64,
|
||||
writeBigUInt64LE: UINT64,
|
||||
writeInt8: INT8,
|
||||
writeInt16BE: INT16,
|
||||
writeInt16LE: INT16,
|
||||
writeInt32BE: INT32,
|
||||
writeInt32LE: INT32,
|
||||
writeUInt8: UINT8,
|
||||
writeUInt16BE: UINT16,
|
||||
writeUInt16LE: UINT16,
|
||||
writeUInt32BE: UINT32,
|
||||
writeUInt32LE: UINT32,
|
||||
writeUIntLE: INT8,
|
||||
writeUIntBE: INT16,
|
||||
writeIntLE: INT32,
|
||||
writeIntBE: INT48
|
||||
};
|
||||
|
||||
const byteLength = {
|
||||
writeUIntLE: 1,
|
||||
writeUIntBE: 2,
|
||||
writeIntLE: 4,
|
||||
writeIntBE: 6
|
||||
};
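// main() below dispatches on the method name: writers without a digit in
// the name (writeUIntLE, writeIntBE, ...) take an explicit byteLength from
// the table above, the Big(U)Int64 variants need BigInt counters and masks,
// the other integer writers mask the counter with their maximum value, and
// the float/double writers take the counter as-is.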
|
||||
|
||||
function main({ n, buffer, type }) {
|
||||
const buff = buffer === 'fast' ?
|
||||
Buffer.alloc(8) :
|
||||
require('buffer').SlowBuffer(8);
|
||||
const fn = `write${type}`;
|
||||
|
||||
if (!/\d/.test(fn))
|
||||
benchSpecialInt(buff, fn, n);
|
||||
else if (/BigU?Int/.test(fn))
|
||||
benchBigInt(buff, fn, BigInt(n));
|
||||
else if (/Int/.test(fn))
|
||||
benchInt(buff, fn, n);
|
||||
else
|
||||
benchFloat(buff, fn, n);
|
||||
}
|
||||
|
||||
function benchBigInt(buff, fn, n) {
|
||||
const m = mod[fn];
|
||||
bench.start();
|
||||
for (let i = 0n; i !== n; i++) {
|
||||
buff[fn](i & m, 0);
|
||||
}
|
||||
bench.end(Number(n));
|
||||
}
|
||||
|
||||
function benchInt(buff, fn, n) {
|
||||
const m = mod[fn];
|
||||
bench.start();
|
||||
for (let i = 0; i !== n; i++) {
|
||||
buff[fn](i & m, 0);
|
||||
}
|
||||
bench.end(n);
|
||||
}
|
||||
|
||||
function benchSpecialInt(buff, fn, n) {
|
||||
const m = mod[fn];
|
||||
const byte = byteLength[fn];
|
||||
bench.start();
|
||||
for (let i = 0; i !== n; i++) {
|
||||
buff[fn](i & m, 0, byte);
|
||||
}
|
||||
bench.end(n);
|
||||
}
|
||||
|
||||
function benchFloat(buff, fn, n) {
|
||||
bench.start();
|
||||
for (let i = 0; i !== n; i++) {
|
||||
buff[fn](i, 0);
|
||||
}
|
||||
bench.end(n);
|
||||
}
|
|
@ -0,0 +1,19 @@
|
|||
'use strict';
|
||||
|
||||
const common = require('../common.js');
|
||||
|
||||
const bench = common.createBenchmark(main, {
|
||||
n: [1e6],
|
||||
type: ['buffer', 'string']
|
||||
});
|
||||
|
||||
const zeroBuffer = Buffer.alloc(0);
|
||||
const zeroString = '';
|
||||
|
||||
function main({ n, type }) {
|
||||
const data = type === 'buffer' ? zeroBuffer : zeroString;
|
||||
|
||||
bench.start();
|
||||
for (let i = 0; i < n; i++) Buffer.from(data);
|
||||
bench.end(n);
|
||||
}
|
|
@ -0,0 +1,71 @@
|
|||
'use strict';
|
||||
const common = require('../common.js');
|
||||
|
||||
const types = [
|
||||
'Uint8',
|
||||
'Uint16LE',
|
||||
'Uint16BE',
|
||||
'Uint32LE',
|
||||
'Uint32BE',
|
||||
'Int8',
|
||||
'Int16LE',
|
||||
'Int16BE',
|
||||
'Int32LE',
|
||||
'Int32BE',
|
||||
'Float32LE',
|
||||
'Float32BE',
|
||||
'Float64LE',
|
||||
'Float64BE',
|
||||
];
|
||||
|
||||
const bench = common.createBenchmark(main, {
|
||||
type: types,
|
||||
n: [1e6]
|
||||
});
|
||||
|
||||
const INT8 = 0x7f;
|
||||
const INT16 = 0x7fff;
|
||||
const INT32 = 0x7fffffff;
|
||||
const UINT8 = INT8 * 2;
|
||||
const UINT16 = INT16 * 2;
|
||||
const UINT32 = INT32 * 2;
|
||||
|
||||
const mod = {
|
||||
setInt8: INT8,
|
||||
setInt16: INT16,
|
||||
setInt32: INT32,
|
||||
setUint8: UINT8,
|
||||
setUint16: UINT16,
|
||||
setUint32: UINT32
|
||||
};
|
||||
|
||||
function main({ n, type }) {
|
||||
const ab = new ArrayBuffer(8);
|
||||
const dv = new DataView(ab, 0, 8);
|
||||
const le = /LE$/.test(type);
|
||||
const fn = `set${type.replace(/[LB]E$/, '')}`;
|
||||
|
||||
if (/int/i.test(fn))
|
||||
benchInt(dv, fn, n, le);
|
||||
else
|
||||
benchFloat(dv, fn, n, le);
|
||||
}
|
||||
|
||||
function benchInt(dv, fn, len, le) {
|
||||
const m = mod[fn];
|
||||
const method = dv[fn];
|
||||
bench.start();
|
||||
for (let i = 0; i < len; i++) {
|
||||
method.call(dv, 0, i % m, le);
|
||||
}
|
||||
bench.end(len);
|
||||
}
|
||||
|
||||
function benchFloat(dv, fn, len, le) {
|
||||
const method = dv[fn];
|
||||
bench.start();
|
||||
for (let i = 0; i < len; i++) {
|
||||
method.call(dv, 0, i * 0.1, le);
|
||||
}
|
||||
bench.end(len);
|
||||
}
|
|
@ -0,0 +1,40 @@
|
|||
'use strict';
|
||||
const common = require('../common.js');
|
||||
const { exec, execSync } = require('child_process');
|
||||
const isWindows = process.platform === 'win32';
|
||||
|
||||
const messagesLength = [64, 256, 1024, 4096];
|
||||
// Windows does not support command lines longer than 8191 characters
|
||||
if (!isWindows) messagesLength.push(32768);
|
||||
|
||||
const bench = common.createBenchmark(childProcessExecStdout, {
|
||||
len: messagesLength,
|
||||
dur: [5]
|
||||
});
|
||||
|
||||
function childProcessExecStdout({ dur, len }) {
|
||||
bench.start();
|
||||
|
||||
const maxDuration = dur * 1000;
|
||||
const cmd = `yes "${'.'.repeat(len)}"`;
|
||||
const child = exec(cmd, { 'stdio': ['ignore', 'pipe', 'ignore'] });
|
||||
|
||||
let bytes = 0;
|
||||
child.stdout.on('data', (msg) => {
|
||||
bytes += msg.length;
|
||||
});
|
||||
|
||||
setTimeout(() => {
|
||||
bench.end(bytes);
|
||||
if (isWindows) {
|
||||
// Sometimes there's a yes.exe process left hanging around on Windows.
|
||||
try {
|
||||
execSync(`taskkill /f /t /pid ${child.pid}`);
|
||||
} catch {
|
||||
// This is a best effort kill. stderr is piped to parent for tracing.
|
||||
}
|
||||
} else {
|
||||
child.kill();
|
||||
}
|
||||
}, maxDuration);
|
||||
}
|
|
@ -0,0 +1,142 @@
|
|||
'use strict';
|
||||
|
||||
const common = require('../common.js');
|
||||
const cp = require('child_process');
|
||||
|
||||
const command = 'echo';
|
||||
const args = ['hello'];
|
||||
const options = {};
|
||||
const cb = () => {};
|
||||
|
||||
const configs = {
|
||||
n: [1e3],
|
||||
methodName: [
|
||||
'exec', 'execSync',
|
||||
'execFile', 'execFileSync',
|
||||
'spawn', 'spawnSync',
|
||||
],
|
||||
params: [1, 2, 3, 4],
|
||||
};
|
||||
|
||||
const bench = common.createBenchmark(main, configs);
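// `params` controls how many of (command, args, options, cb) are passed to
// the selected child_process method, so the benchmark captures the cost of
// argument normalization for each call signature; the asynchronous variants
// are kill()ed immediately after being created.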
|
||||
|
||||
function main({ n, methodName, params }) {
|
||||
const method = cp[methodName];
|
||||
|
||||
switch (methodName) {
|
||||
case 'exec':
|
||||
switch (params) {
|
||||
case 1:
|
||||
bench.start();
|
||||
for (let i = 0; i < n; i++) method(command).kill();
|
||||
bench.end(n);
|
||||
break;
|
||||
case 2:
|
||||
bench.start();
|
||||
for (let i = 0; i < n; i++) method(command, options).kill();
|
||||
bench.end(n);
|
||||
break;
|
||||
case 3:
|
||||
bench.start();
|
||||
for (let i = 0; i < n; i++) method(command, options, cb).kill();
|
||||
bench.end(n);
|
||||
break;
|
||||
}
|
||||
break;
|
||||
case 'execSync':
|
||||
switch (params) {
|
||||
case 1:
|
||||
bench.start();
|
||||
for (let i = 0; i < n; i++) method(command);
|
||||
bench.end(n);
|
||||
break;
|
||||
case 2:
|
||||
bench.start();
|
||||
for (let i = 0; i < n; i++) method(command, options);
|
||||
bench.end(n);
|
||||
break;
|
||||
}
|
||||
break;
|
||||
case 'execFile':
|
||||
switch (params) {
|
||||
case 1:
|
||||
bench.start();
|
||||
for (let i = 0; i < n; i++) method(command).kill();
|
||||
bench.end(n);
|
||||
break;
|
||||
case 2:
|
||||
bench.start();
|
||||
for (let i = 0; i < n; i++) method(command, args).kill();
|
||||
bench.end(n);
|
||||
break;
|
||||
case 3:
|
||||
bench.start();
|
||||
for (let i = 0; i < n; i++) method(command, args, options).kill();
|
||||
bench.end(n);
|
||||
break;
|
||||
case 4:
|
||||
bench.start();
|
||||
for (let i = 0; i < n; i++) method(command, args, options, cb).kill();
|
||||
bench.end(n);
|
||||
break;
|
||||
}
|
||||
break;
|
||||
case 'execFileSync':
|
||||
switch (params) {
|
||||
case 1:
|
||||
bench.start();
|
||||
for (let i = 0; i < n; i++) method(command);
|
||||
bench.end(n);
|
||||
break;
|
||||
case 2:
|
||||
bench.start();
|
||||
for (let i = 0; i < n; i++) method(command, args);
|
||||
bench.end(n);
|
||||
break;
|
||||
case 3:
|
||||
bench.start();
|
||||
for (let i = 0; i < n; i++) method(command, args, options);
|
||||
bench.end(n);
|
||||
break;
|
||||
}
|
||||
break;
|
||||
case 'spawn':
|
||||
switch (params) {
|
||||
case 1:
|
||||
bench.start();
|
||||
for (let i = 0; i < n; i++) method(command).kill();
|
||||
bench.end(n);
|
||||
break;
|
||||
case 2:
|
||||
bench.start();
|
||||
for (let i = 0; i < n; i++) method(command, args).kill();
|
||||
bench.end(n);
|
||||
break;
|
||||
case 3:
|
||||
bench.start();
|
||||
for (let i = 0; i < n; i++) method(command, args, options).kill();
|
||||
bench.end(n);
|
||||
break;
|
||||
}
|
||||
break;
|
||||
case 'spawnSync':
|
||||
switch (params) {
|
||||
case 1:
|
||||
bench.start();
|
||||
for (let i = 0; i < n; i++) method(command);
|
||||
bench.end(n);
|
||||
break;
|
||||
case 2:
|
||||
bench.start();
|
||||
for (let i = 0; i < n; i++) method(command, args);
|
||||
bench.end(n);
|
||||
break;
|
||||
case 3:
|
||||
bench.start();
|
||||
for (let i = 0; i < n; i++) method(command, args, options);
|
||||
bench.end(n);
|
||||
break;
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
|
@ -0,0 +1,37 @@
|
|||
'use strict';
|
||||
if (process.argv[2] === 'child') {
|
||||
const len = +process.argv[3];
|
||||
const msg = '.'.repeat(len);
|
||||
const send = () => {
|
||||
while (process.send(msg));
|
||||
// The IPC backlog is full (send() returned false), so yield and retry
|
||||
setImmediate(send);
|
||||
};
|
||||
send();
|
||||
} else {
|
||||
const common = require('../common.js');
|
||||
const bench = common.createBenchmark(main, {
|
||||
len: [
|
||||
64, 256, 1024, 4096, 16384, 65536,
|
||||
65536 << 4, 65536 << 8,
|
||||
],
|
||||
dur: [5]
|
||||
});
|
||||
const spawn = require('child_process').spawn;
|
||||
|
||||
function main({ dur, len }) {
|
||||
bench.start();
|
||||
|
||||
const options = { 'stdio': ['ignore', 1, 2, 'ipc'] };
|
||||
const child = spawn(process.argv[0],
|
||||
[process.argv[1], 'child', len], options);
|
||||
|
||||
let bytes = 0;
|
||||
child.on('message', (msg) => { bytes += msg.length; });
|
||||
|
||||
setTimeout(() => {
|
||||
child.kill();
|
||||
bench.end(bytes);
|
||||
}, dur * 1000);
|
||||
}
|
||||
}
|
|
@ -0,0 +1,42 @@
'use strict';
const common = require('../common.js');

// This benchmark uses `yes` to create noisy child processes with varying
// output message lengths, and tries to read 8GB of output.

const os = require('os');
const child_process = require('child_process');

const messagesLength = [64, 256, 1024, 4096];
// Windows does not support arguments that long.
if (os.platform() !== 'win32')
  messagesLength.push(32768);

const bench = common.createBenchmark(main, {
  len: messagesLength,
  dur: [5]
});

function main({ dur, len }) {
  bench.start();

  const msg = `"${'.'.repeat(len)}"`;
  const options = { 'stdio': ['ignore', 'pipe', 'ignore'] };
  const child = child_process.spawn('yes', [msg], options);

  let bytes = 0;
  child.stdout.on('data', (msg) => {
    bytes += msg.length;
  });

  setTimeout(() => {
    if (process.platform === 'win32') {
      // Sometimes there's a yes.exe process left hanging around on Windows...
      child_process.execSync(`taskkill /f /t /pid ${child.pid}`);
    } else {
      child.kill();
    }
    const gbits = (bytes * 8) / (1024 * 1024 * 1024);
    bench.end(gbits);
  }, dur * 1000);
}

@ -0,0 +1,24 @@
'use strict';
const common = require('../common.js');
const bench = common.createBenchmark(main, {
  n: [1000]
});

const spawn = require('child_process').spawn;
function main({ n }) {
  bench.start();
  go(n, n);
}

function go(n, left) {
  if (--left === 0)
    return bench.end(n);

  const child = spawn('echo', ['hello']);
  child.on('exit', (code) => {
    if (code)
      process.exit(code);
    else
      go(n, left);
  });
}

@ -0,0 +1,75 @@
|
|||
'use strict';
|
||||
|
||||
const cluster = require('cluster');
|
||||
if (cluster.isMaster) {
|
||||
const common = require('../common.js');
|
||||
const bench = common.createBenchmark(main, {
|
||||
workers: [1],
|
||||
payload: ['string', 'object'],
|
||||
sendsPerBroadcast: [1, 10],
|
||||
serialization: ['json', 'advanced'],
|
||||
n: [1e5]
|
||||
});
|
||||
|
||||
function main({
|
||||
n,
|
||||
workers,
|
||||
sendsPerBroadcast,
|
||||
payload,
|
||||
serialization
|
||||
}) {
|
||||
const expectedPerBroadcast = sendsPerBroadcast * workers;
|
||||
let readies = 0;
|
||||
let broadcasts = 0;
|
||||
let msgCount = 0;
|
||||
let data;
|
||||
|
||||
cluster.settings.serialization = serialization;
|
||||
|
||||
switch (payload) {
|
||||
case 'string':
|
||||
data = 'hello world!';
|
||||
break;
|
||||
case 'object':
|
||||
data = { action: 'pewpewpew', powerLevel: 9001 };
|
||||
break;
|
||||
default:
|
||||
throw new Error('Unsupported payload type');
|
||||
}
|
||||
|
||||
for (let i = 0; i < workers; ++i)
|
||||
cluster.fork().on('online', onOnline).on('message', onMessage);
|
||||
|
||||
function onOnline() {
|
||||
if (++readies === workers) {
|
||||
bench.start();
|
||||
broadcast();
|
||||
}
|
||||
}
|
||||
|
||||
function broadcast() {
|
||||
if (broadcasts++ === n) {
|
||||
bench.end(n);
|
||||
for (const id in cluster.workers)
|
||||
cluster.workers[id].disconnect();
|
||||
return;
|
||||
}
|
||||
for (const id in cluster.workers) {
|
||||
const worker = cluster.workers[id];
|
||||
for (let i = 0; i < sendsPerBroadcast; ++i)
|
||||
worker.send(data);
|
||||
}
|
||||
}
|
||||
|
||||
function onMessage() {
|
||||
if (++msgCount === expectedPerBroadcast) {
|
||||
msgCount = 0;
|
||||
broadcast();
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
process.on('message', (msg) => {
|
||||
process.send(msg);
|
||||
});
|
||||
}
|
|
@ -0,0 +1,395 @@
|
|||
'use strict';
|
||||
|
||||
const child_process = require('child_process');
|
||||
const http_benchmarkers = require('./_http-benchmarkers.js');
|
||||
|
||||
class Benchmark {
|
||||
// Used to make sure a benchmark only starts a timer once
|
||||
#started = false;
|
||||
|
||||
// Indicate that the benchmark ended
|
||||
#ended = false;
|
||||
|
||||
// Holds process.hrtime value
|
||||
#time = [0, 0];
|
||||
|
||||
// Use the file name as the name of the benchmark
|
||||
name = require.main.filename.slice(__dirname.length + 1);
|
||||
|
||||
// Execution arguments i.e. flags used to run the jobs
|
||||
flags = process.env.NODE_BENCHMARK_FLAGS ?
|
||||
process.env.NODE_BENCHMARK_FLAGS.split(/\s+/) :
|
||||
[];
|
||||
|
||||
constructor(fn, configs, options = {}) {
|
||||
// Parse job-specific configuration from the command line arguments
|
||||
const argv = process.argv.slice(2);
|
||||
const parsed_args = this._parseArgs(argv, configs, options);
|
||||
this.options = parsed_args.cli;
|
||||
this.extra_options = parsed_args.extra;
|
||||
if (options.flags) {
|
||||
this.flags = this.flags.concat(options.flags);
|
||||
}
|
||||
|
||||
// The configuration list as a queue of jobs
|
||||
this.queue = this._queue(this.options);
|
||||
|
||||
// The configuration of the current job, head of the queue
|
||||
this.config = this.queue[0];
|
||||
|
||||
process.nextTick(() => {
|
||||
if (process.env.hasOwnProperty('NODE_RUN_BENCHMARK_FN')) {
|
||||
fn(this.config);
|
||||
} else {
|
||||
// _run will use fork() to create a new process for each configuration
|
||||
// combination.
|
||||
this._run();
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
_parseArgs(argv, configs, options) {
|
||||
const cliOptions = {};
|
||||
|
||||
// Check for the test mode first.
|
||||
const testIndex = argv.indexOf('--test');
|
||||
if (testIndex !== -1) {
|
||||
for (const [key, rawValue] of Object.entries(configs)) {
|
||||
let value = Array.isArray(rawValue) ? rawValue[0] : rawValue;
|
||||
// Set numbers to one by default to reduce the runtime.
|
||||
if (typeof value === 'number') {
|
||||
if (key === 'dur' || key === 'duration') {
|
||||
value = 0.05;
|
||||
} else if (value > 1) {
|
||||
value = 1;
|
||||
}
|
||||
}
|
||||
cliOptions[key] = [value];
|
||||
}
|
||||
// Override specific test options.
|
||||
if (options.test) {
|
||||
for (const [key, value] of Object.entries(options.test)) {
|
||||
cliOptions[key] = Array.isArray(value) ? value : [value];
|
||||
}
|
||||
}
|
||||
argv.splice(testIndex, 1);
|
||||
} else {
|
||||
// Accept single values instead of arrays.
|
||||
for (const [key, value] of Object.entries(configs)) {
|
||||
if (!Array.isArray(value))
|
||||
configs[key] = [value];
|
||||
}
|
||||
}
|
||||
|
||||
const extraOptions = {};
|
||||
const validArgRE = /^(.+?)=([\s\S]*)$/;
|
||||
// Parse configuration arguments
|
||||
for (const arg of argv) {
|
||||
const match = arg.match(validArgRE);
|
||||
if (!match) {
|
||||
console.error(`bad argument: ${arg}`);
|
||||
process.exit(1);
|
||||
}
|
||||
const [, key, value] = match;
|
||||
if (Object.prototype.hasOwnProperty.call(configs, key)) {
|
||||
if (!cliOptions[key])
|
||||
cliOptions[key] = [];
|
||||
cliOptions[key].push(
|
||||
// Infer the type from the config object and parse accordingly
|
||||
typeof configs[key][0] === 'number' ? +value : value
|
||||
);
|
||||
} else {
|
||||
extraOptions[key] = value;
|
||||
}
|
||||
}
|
||||
return { cli: { ...configs, ...cliOptions }, extra: extraOptions };
|
||||
}
|
||||
|
||||
_queue(options) {
|
||||
const queue = [];
|
||||
const keys = Object.keys(options);
|
||||
|
||||
// Perform a depth-first walk through all options to generate a
|
||||
// configuration list that contains all combinations.
|
||||
function recursive(keyIndex, prevConfig) {
|
||||
const key = keys[keyIndex];
|
||||
const values = options[key];
|
||||
|
||||
for (const value of values) {
|
||||
if (typeof value !== 'number' && typeof value !== 'string') {
|
||||
throw new TypeError(
|
||||
`configuration "${key}" had type ${typeof value}`);
|
||||
}
|
||||
if (typeof value !== typeof values[0]) {
|
||||
// This is a requirement for being able to consistently and
|
||||
// predictably parse CLI provided configuration values.
|
||||
throw new TypeError(`configuration "${key}" has mixed types`);
|
||||
}
|
||||
|
||||
const currConfig = { [key]: value, ...prevConfig };
|
||||
|
||||
if (keyIndex + 1 < keys.length) {
|
||||
recursive(keyIndex + 1, currConfig);
|
||||
} else {
|
||||
queue.push(currConfig);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (keys.length > 0) {
|
||||
recursive(0, {});
|
||||
} else {
|
||||
queue.push({});
|
||||
}
|
||||
|
||||
return queue;
|
||||
}
|
||||
|
||||
http(options, cb) {
|
||||
const http_options = { ...options };
|
||||
http_options.benchmarker = http_options.benchmarker ||
|
||||
this.config.benchmarker ||
|
||||
this.extra_options.benchmarker ||
|
||||
http_benchmarkers.default_http_benchmarker;
|
||||
http_benchmarkers.run(
|
||||
http_options, (error, code, used_benchmarker, result, elapsed) => {
|
||||
if (cb) {
|
||||
cb(code);
|
||||
}
|
||||
if (error) {
|
||||
console.error(error);
|
||||
process.exit(code || 1);
|
||||
}
|
||||
this.config.benchmarker = used_benchmarker;
|
||||
this.report(result, elapsed);
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
_run() {
|
||||
// If forked, report to the parent.
|
||||
if (process.send) {
|
||||
process.send({
|
||||
type: 'config',
|
||||
name: this.name,
|
||||
queueLength: this.queue.length,
|
||||
});
|
||||
}
|
||||
|
||||
const recursive = (queueIndex) => {
|
||||
const config = this.queue[queueIndex];
|
||||
|
||||
// Set NODE_RUN_BENCHMARK_FN to indicate that the child shouldn't
|
||||
// construct a configuration queue, but just execute the benchmark
|
||||
// function.
|
||||
const childEnv = { ...process.env };
|
||||
childEnv.NODE_RUN_BENCHMARK_FN = '';
|
||||
|
||||
// Create configuration arguments
|
||||
const childArgs = [];
|
||||
for (const [key, value] of Object.entries(config)) {
|
||||
childArgs.push(`${key}=${value}`);
|
||||
}
|
||||
for (const [key, value] of Object.entries(this.extra_options)) {
|
||||
childArgs.push(`${key}=${value}`);
|
||||
}
|
||||
|
||||
const child = child_process.fork(require.main.filename, childArgs, {
|
||||
env: childEnv,
|
||||
execArgv: this.flags.concat(process.execArgv),
|
||||
});
|
||||
child.on('message', sendResult);
|
||||
child.on('close', (code) => {
|
||||
if (code) {
|
||||
process.exit(code);
|
||||
}
|
||||
|
||||
if (queueIndex + 1 < this.queue.length) {
|
||||
recursive(queueIndex + 1);
|
||||
}
|
||||
});
|
||||
};
|
||||
|
||||
recursive(0);
|
||||
}
|
||||
|
||||
start() {
|
||||
if (this.#started) {
|
||||
throw new Error('Called start more than once in a single benchmark');
|
||||
}
|
||||
this.#started = true;
|
||||
this.#time = process.hrtime();
|
||||
}
|
||||
|
||||
end(operations) {
|
||||
// Get elapsed time now and do error checking later for accuracy.
|
||||
const elapsed = process.hrtime(this.#time);
|
||||
|
||||
if (!this.#started) {
|
||||
throw new Error('called end without start');
|
||||
}
|
||||
if (this.#ended) {
|
||||
throw new Error('called end multiple times');
|
||||
}
|
||||
if (typeof operations !== 'number') {
|
||||
throw new Error('called end() without specifying operation count');
|
||||
}
|
||||
if (!process.env.NODEJS_BENCHMARK_ZERO_ALLOWED && operations <= 0) {
|
||||
throw new Error('called end() with operation count <= 0');
|
||||
}
|
||||
if (elapsed[0] === 0 && elapsed[1] === 0) {
|
||||
if (!process.env.NODEJS_BENCHMARK_ZERO_ALLOWED)
|
||||
throw new Error('insufficient clock precision for short benchmark');
|
||||
// Avoid dividing by zero
|
||||
elapsed[1] = 1;
|
||||
}
|
||||
|
||||
this.#ended = true;
|
||||
const time = elapsed[0] + elapsed[1] / 1e9;
|
||||
const rate = operations / time;
|
||||
this.report(rate, elapsed);
|
||||
}
|
||||
|
||||
report(rate, elapsed) {
|
||||
sendResult({
|
||||
name: this.name,
|
||||
conf: this.config,
|
||||
rate,
|
||||
time: elapsed[0] + elapsed[1] / 1e9,
|
||||
type: 'report',
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
function formatResult(data) {
|
||||
// Construct configuration string, " A=a, B=b, ..."
|
||||
let conf = '';
|
||||
for (const key of Object.keys(data.conf)) {
|
||||
conf += ` ${key}=${JSON.stringify(data.conf[key])}`;
|
||||
}
|
||||
|
||||
let rate = data.rate.toString().split('.');
|
||||
rate[0] = rate[0].replace(/(\d)(?=(?:\d\d\d)+(?!\d))/g, '$1,');
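// Insert thousands separators into the integer part, e.g. '1234567' -> '1,234,567'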
|
||||
rate = (rate[1] ? rate.join('.') : rate[0]);
|
||||
return `${data.name}${conf}: ${rate}`;
|
||||
}
|
||||
|
||||
function sendResult(data) {
|
||||
if (process.send) {
|
||||
// If forked, report by process send
|
||||
process.send(data);
|
||||
} else {
|
||||
// Otherwise report by stdout
|
||||
console.log(formatResult(data));
|
||||
}
|
||||
}
|
||||
|
||||
const urls = {
|
||||
long: 'http://nodejs.org:89/docs/latest/api/foo/bar/qua/13949281/0f28b/' +
|
||||
'/5d49/b3020/url.html#test?payload1=true&payload2=false&test=1' +
|
||||
'&benchmark=3&foo=38.38.011.293&bar=1234834910480&test=19299&3992&' +
|
||||
'key=f5c65e1e98fe07e648249ad41e1cfdb0',
|
||||
short: 'https://nodejs.org/en/blog/',
|
||||
idn: 'http://你好你好.在线',
|
||||
auth: 'https://user:pass@example.com/path?search=1',
|
||||
file: 'file:///foo/bar/test/node.js',
|
||||
ws: 'ws://localhost:9229/f46db715-70df-43ad-a359-7f9949f39868',
|
||||
javascript: 'javascript:alert("node is awesome");',
|
||||
percent: 'https://%E4%BD%A0/foo',
|
||||
dot: 'https://example.org/./a/../b/./c',
|
||||
};
|
||||
|
||||
const searchParams = {
|
||||
noencode: 'foo=bar&baz=quux&xyzzy=thud',
|
||||
multicharsep: 'foo=bar&&&&&&&&&&baz=quux&&&&&&&&&&xyzzy=thud',
|
||||
encodefake: 'foo=%©ar&baz=%A©uux&xyzzy=%©ud',
|
||||
encodemany: '%66%6F%6F=bar&%62%61%7A=quux&xyzzy=%74h%75d',
|
||||
encodelast: 'foo=bar&baz=quux&xyzzy=thu%64',
|
||||
multivalue: 'foo=bar&foo=baz&foo=quux&quuy=quuz',
|
||||
multivaluemany: 'foo=bar&foo=baz&foo=quux&quuy=quuz&foo=abc&foo=def&' +
|
||||
'foo=ghi&foo=jkl&foo=mno&foo=pqr&foo=stu&foo=vwxyz',
|
||||
manypairs: 'a&b&c&d&e&f&g&h&i&j&k&l&m&n&o&p&q&r&s&t&u&v&w&x&y&z',
|
||||
manyblankpairs: '&&&&&&&&&&&&&&&&&&&&&&&&',
|
||||
altspaces: 'foo+bar=baz+quux&xyzzy+thud=quuy+quuz&abc=def+ghi',
|
||||
};
|
||||
|
||||
function getUrlData(withBase) {
|
||||
const data = require('../test/fixtures/wpt/url/resources/urltestdata.json');
|
||||
const result = [];
|
||||
for (const item of data) {
|
||||
if (item.failure || !item.input) continue;
|
||||
if (withBase) {
|
||||
result.push([item.input, item.base]);
|
||||
} else if (item.base !== 'about:blank') {
|
||||
result.push(item.base);
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* Generate an array of data for URL benchmarks to use.
|
||||
* The size of the resulting data set is the original data size * 2 ** `e`.
|
||||
* The 'wpt' type contains about 400 data points when `withBase` is true,
|
||||
* and 200 data points when `withBase` is false.
|
||||
* Other types contain 200 data points with or without base.
|
||||
*
|
||||
* @param {string} type Type of the data, 'wpt' or a key of `urls`
|
||||
* @param {number} e The repetition of the data, as exponent of 2
|
||||
* @param {boolean} withBase Whether to include a base URL
|
||||
* @param {boolean} asUrl Whether to return the results as URL objects
|
||||
* @return {string[] | string[][] | URL[]}
|
||||
*/
|
||||
function bakeUrlData(type, e = 0, withBase = false, asUrl = false) {
|
||||
let result = [];
|
||||
if (type === 'wpt') {
|
||||
result = getUrlData(withBase);
|
||||
} else if (urls[type]) {
|
||||
const input = urls[type];
|
||||
const item = withBase ? [input, 'about:blank'] : input;
|
||||
// Roughly the size of WPT URL test data
|
||||
result = new Array(200).fill(item);
|
||||
} else {
|
||||
throw new Error(`Unknown url data type ${type}`);
|
||||
}
|
||||
|
||||
if (typeof e !== 'number') {
|
||||
throw new Error(`e must be a number, received ${e}`);
|
||||
}
|
||||
|
||||
for (let i = 0; i < e; ++i) {
|
||||
result = result.concat(result);
|
||||
}
|
||||
|
||||
if (asUrl) {
|
||||
if (withBase) {
|
||||
result = result.map(([input, base]) => new URL(input, base));
|
||||
} else {
|
||||
result = result.map((input) => new URL(input));
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
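// Illustrative usage (not from the original file): bakeUrlData('short', 2)
// returns 200 * 2 ** 2 = 800 copies of the short URL string, while
// bakeUrlData('wpt', 0, true, true) returns the WPT [input, base] pairs parsed
// into URL objects.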
|
||||
|
||||
module.exports = {
|
||||
Benchmark,
|
||||
PORT: http_benchmarkers.PORT,
|
||||
bakeUrlData,
|
||||
binding(bindingName) {
|
||||
try {
|
||||
const { internalBinding } = require('internal/test/binding');
|
||||
|
||||
return internalBinding(bindingName);
|
||||
} catch {
|
||||
return process.binding(bindingName);
|
||||
}
|
||||
},
|
||||
buildType: process.features.debug ? 'Debug' : 'Release',
|
||||
createBenchmark(fn, configs, options) {
|
||||
return new Benchmark(fn, configs, options);
|
||||
},
|
||||
sendResult,
|
||||
searchParams,
|
||||
urlDataTypes: Object.keys(urls).concat(['wpt']),
|
||||
urls,
|
||||
};
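A minimal sketch of how a benchmark file consumes this module, following the
createBenchmark()/start()/end() pattern used by the benchmark files in this
import (the configuration key `n` and the loop body are illustrative, not taken
from any particular file):

'use strict';
const common = require('../common.js');

// Each combination of configuration values is run as its own forked child
// process; `main` receives the combination for the current run.
const bench = common.createBenchmark(main, { n: [1e6] });

function main({ n }) {
  bench.start();
  for (let i = 0; i < n; i++) {
    // ...code under test goes here (placeholder)...
  }
  bench.end(n); // reports the rate as operations per elapsed second
}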
|
|
@ -0,0 +1,120 @@
|
|||
#!/usr/bin/env Rscript
|
||||
library(ggplot2);
|
||||
library(plyr);
|
||||
|
||||
# get __dirname and load ./_cli.R
|
||||
args = commandArgs(trailingOnly = F);
|
||||
dirname = dirname(sub("--file=", "", args[grep("--file", args)]));
|
||||
source(paste0(dirname, '/_cli.R'), chdir=T);
|
||||
|
||||
if (!is.null(args.options$help) ||
|
||||
(!is.null(args.options$plot) && args.options$plot == TRUE)) {
|
||||
stop("usage: cat file.csv | Rscript compare.R
|
||||
--help show this message
|
||||
--plot filename save plot to filename");
|
||||
}
|
||||
|
||||
plot.filename = args.options$plot;
|
||||
|
||||
dat = read.csv(
|
||||
file('stdin'),
|
||||
colClasses=c('character', 'character', 'character', 'numeric', 'numeric')
|
||||
);
|
||||
dat = data.frame(dat);
|
||||
|
||||
dat$nameTwoLines = paste0(dat$filename, '\n', dat$configuration);
|
||||
dat$name = paste0(dat$filename, dat$configuration);
|
||||
|
||||
# Create a box plot
|
||||
if (!is.null(plot.filename)) {
|
||||
p = ggplot(data=dat);
|
||||
p = p + geom_boxplot(aes(x=nameTwoLines, y=rate, fill=binary));
|
||||
p = p + ylab("rate of operations (higher is better)");
|
||||
p = p + xlab("benchmark");
|
||||
p = p + theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5));
|
||||
ggsave(plot.filename, p);
|
||||
}
|
||||
|
||||
# computes the shared standard error, as used in the welch t-test
|
||||
welch.sd = function (old.rate, new.rate) {
|
||||
old.se.squared = var(old.rate) / length(old.rate)
|
||||
new.se.squared = var(new.rate) / length(new.rate)
|
||||
return(sqrt(old.se.squared + new.se.squared))
|
||||
}
|
||||
|
||||
# calculate the improvement confidence interval. The improvement is calculated
|
||||
# by dividing by old.mu and not new.mu, because old.mu is what the mean
|
||||
# improvement is calculated relative to.
|
||||
confidence.interval = function (shared.se, old.mu, w, risk) {
|
||||
interval = qt(1 - (risk / 2), w$parameter) * shared.se;
|
||||
return(sprintf("±%.2f%%", (interval / old.mu) * 100))
|
||||
}
|
||||
|
||||
# Print a table with results
|
||||
statistics = ddply(dat, "name", function(subdat) {
|
||||
old.rate = subset(subdat, binary == "old")$rate;
|
||||
new.rate = subset(subdat, binary == "new")$rate;
|
||||
|
||||
# Calculate improvement for the "new" binary compared with the "old" binary
|
||||
old.mu = mean(old.rate);
|
||||
new.mu = mean(new.rate);
|
||||
improvement = sprintf("%.2f %%", ((new.mu - old.mu) / old.mu * 100));
|
||||
|
||||
r = list(
|
||||
confidence = "NA",
|
||||
improvement = improvement,
|
||||
"accuracy (*)" = "NA",
|
||||
"(**)" = "NA",
|
||||
"(***)" = "NA"
|
||||
);
|
||||
|
||||
# Check if there is enough data to calculate the p-value
|
||||
if (length(old.rate) > 1 && length(new.rate) > 1) {
|
||||
# Perform a statistical test to see if there actually is a difference in
|
||||
# performance.
|
||||
w = t.test(rate ~ binary, data=subdat);
|
||||
shared.se = welch.sd(old.rate, new.rate)
|
||||
|
||||
# Add user-friendly stars to the table. There should be at least one star
|
||||
# before you can say that there is an improvement.
|
||||
confidence = '';
|
||||
if (w$p.value < 0.001) {
|
||||
confidence = '***';
|
||||
} else if (w$p.value < 0.01) {
|
||||
confidence = '**';
|
||||
} else if (w$p.value < 0.05) {
|
||||
confidence = '*';
|
||||
}
|
||||
|
||||
r = list(
|
||||
confidence = confidence,
|
||||
improvement = improvement,
|
||||
"accuracy (*)" = confidence.interval(shared.se, old.mu, w, 0.05),
|
||||
"(**)" = confidence.interval(shared.se, old.mu, w, 0.01),
|
||||
"(***)" = confidence.interval(shared.se, old.mu, w, 0.001)
|
||||
);
|
||||
}
|
||||
|
||||
return(data.frame(r, check.names=FALSE));
|
||||
});
|
||||
|
||||
|
||||
# Set the benchmark names as the row.names to left align them in the print
|
||||
row.names(statistics) = statistics$name;
|
||||
statistics$name = NULL;
|
||||
|
||||
options(width = 200);
|
||||
print(statistics);
|
||||
cat("\n")
|
||||
cat(sprintf(
|
||||
"Be aware that when doing many comparisons the risk of a false-positive
|
||||
result increases. In this case there are %d comparisons, you can thus
|
||||
expect the following amount of false-positive results:
|
||||
%.2f false positives, when considering a 5%% risk acceptance (*, **, ***),
|
||||
%.2f false positives, when considering a 1%% risk acceptance (**, ***),
|
||||
%.2f false positives, when considering a 0.1%% risk acceptance (***)
|
||||
",
|
||||
nrow(statistics),
|
||||
nrow(statistics) * 0.05,
|
||||
nrow(statistics) * 0.01,
|
||||
nrow(statistics) * 0.001))
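As a sketch of the statistics printed above (matching welch.sd() and
confidence.interval(); the symbols below are notation chosen here, not names
from the script), the shared standard error and the reported percentage
interval are

\[
  \mathrm{SE} = \sqrt{\frac{s_{\mathrm{old}}^2}{n_{\mathrm{old}}} +
                      \frac{s_{\mathrm{new}}^2}{n_{\mathrm{new}}}},
  \qquad
  \text{interval} = \pm\, t_{1-\alpha/2,\;\nu} \cdot
                    \frac{\mathrm{SE}}{\bar{x}_{\mathrm{old}}} \times 100\%
\]

where s^2 and n are the sample variance and sample count of each binary's
rates, \nu is the Welch degrees of freedom taken from w$parameter,
\bar{x}_{old} is the mean rate of the old binary, and \alpha is the risk level
(0.05, 0.01 or 0.001).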
|
|
@ -0,0 +1,113 @@
|
|||
'use strict';
|
||||
|
||||
const { fork } = require('child_process');
|
||||
const { inspect } = require('util');
|
||||
const path = require('path');
|
||||
const CLI = require('./_cli.js');
|
||||
const BenchmarkProgress = require('./_benchmark_progress.js');
|
||||
|
||||
//
|
||||
// Parse arguments
|
||||
//
|
||||
const cli = new CLI(`usage: ./node compare.js [options] [--] <category> ...
|
||||
Run each benchmark in the <category> directory many times using two different
|
||||
node versions. More than one <category> directory can be specified.
|
||||
The output is formatted as csv, which can be processed using for
|
||||
example 'compare.R'.
|
||||
|
||||
--new ./new-node-binary new node binary (required)
|
||||
--old ./old-node-binary old node binary (required)
|
||||
--runs 30 number of samples
|
||||
--filter pattern includes only benchmark scripts matching
|
||||
<pattern> (can be repeated)
|
||||
--exclude pattern excludes scripts matching <pattern> (can be
|
||||
repeated)
|
||||
--set variable=value set benchmark variable (can be repeated)
|
||||
--no-progress don't show benchmark progress indicator
|
||||
`, { arrayArgs: ['set', 'filter', 'exclude'], boolArgs: ['no-progress'] });
|
||||
|
||||
if (!cli.optional.new || !cli.optional.old) {
|
||||
cli.abort(cli.usage);
|
||||
}
|
||||
|
||||
const binaries = ['old', 'new'];
|
||||
const runs = cli.optional.runs ? parseInt(cli.optional.runs, 10) : 30;
|
||||
const benchmarks = cli.benchmarks();
|
||||
|
||||
if (benchmarks.length === 0) {
|
||||
console.error('No benchmarks found');
|
||||
process.exitCode = 1;
|
||||
return;
|
||||
}
|
||||
|
||||
// Create the queue from the benchmarks list such that both node versions are
// tested `runs` times each.
// Note: BenchmarkProgress relies on this order to estimate how many runs
// remain for a file. All benchmarks generated from the same file must be run
// consecutively.
|
||||
const queue = [];
|
||||
for (const filename of benchmarks) {
|
||||
for (let iter = 0; iter < runs; iter++) {
|
||||
for (const binary of binaries) {
|
||||
queue.push({ binary, filename, iter });
|
||||
}
|
||||
}
|
||||
}
|
||||
// queue.length = binaries.length * runs * benchmarks.length
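// Illustrative ordering (not from an actual run): with a single file f and
// runs = 2 the queue is [{old,f,0}, {new,f,0}, {old,f,1}, {new,f,1}], i.e.
// old and new alternate within a file and files never interleave.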
|
||||
|
||||
// Print csv header
|
||||
console.log('"binary", "filename", "configuration", "rate", "time"');
|
||||
|
||||
const kStartOfQueue = 0;
|
||||
|
||||
const showProgress = !cli.optional['no-progress'];
|
||||
let progress;
|
||||
if (showProgress) {
|
||||
progress = new BenchmarkProgress(queue, benchmarks);
|
||||
progress.startQueue(kStartOfQueue);
|
||||
}
|
||||
|
||||
(function recursive(i) {
|
||||
const job = queue[i];
|
||||
|
||||
const child = fork(path.resolve(__dirname, job.filename), cli.optional.set, {
|
||||
execPath: cli.optional[job.binary]
|
||||
});
|
||||
|
||||
child.on('message', (data) => {
|
||||
if (data.type === 'report') {
|
||||
// Construct configuration string, " A=a, B=b, ..."
|
||||
let conf = '';
|
||||
for (const key of Object.keys(data.conf)) {
|
||||
conf += ` ${key}=${inspect(data.conf[key])}`;
|
||||
}
|
||||
conf = conf.slice(1);
|
||||
// Escape quotes (") for correct csv formatting
|
||||
conf = conf.replace(/"/g, '""');
|
||||
|
||||
console.log(`"${job.binary}", "${job.filename}", "${conf}", ` +
|
||||
`${data.rate}, ${data.time}`);
|
||||
if (showProgress) {
|
||||
// One item in the subqueue has been completed.
|
||||
progress.completeConfig(data);
|
||||
}
|
||||
} else if (showProgress && data.type === 'config') {
|
||||
// The child has computed the configurations, ready to run subqueue.
|
||||
progress.startSubqueue(data, i);
|
||||
}
|
||||
});
|
||||
|
||||
child.once('close', (code) => {
|
||||
if (code) {
|
||||
process.exit(code);
|
||||
}
|
||||
if (showProgress) {
|
||||
progress.completeRun(job);
|
||||
}
|
||||
|
||||
// If there are more benchmarks execute the next
|
||||
if (i + 1 < queue.length) {
|
||||
recursive(i + 1);
|
||||
}
|
||||
});
|
||||
})(kStartOfQueue);
|
|
@ -0,0 +1,39 @@
|
|||
'use strict';
|
||||
const common = require('../common.js');
|
||||
const crypto = require('crypto');
|
||||
const keylen = { 'aes-128-gcm': 16, 'aes-192-gcm': 24, 'aes-256-gcm': 32 };
|
||||
const bench = common.createBenchmark(main, {
|
||||
n: [500],
|
||||
cipher: ['aes-128-gcm', 'aes-192-gcm', 'aes-256-gcm'],
|
||||
len: [1024, 4 * 1024, 16 * 1024, 64 * 1024, 256 * 1024, 1024 * 1024]
|
||||
});
|
||||
|
||||
function main({ n, len, cipher }) {
|
||||
const message = Buffer.alloc(len, 'b');
|
||||
const key = crypto.randomBytes(keylen[cipher]);
|
||||
const iv = crypto.randomBytes(12);
|
||||
const associate_data = Buffer.alloc(16, 'z');
|
||||
bench.start();
|
||||
AEAD_Bench(cipher, message, associate_data, key, iv, n, len);
|
||||
}
|
||||
|
||||
function AEAD_Bench(cipher, message, associate_data, key, iv, n, len) {
|
||||
const written = n * len;
|
||||
const bits = written * 8;
|
||||
const mbits = bits / (1024 * 1024);
|
||||
|
||||
for (let i = 0; i < n; i++) {
|
||||
const alice = crypto.createCipheriv(cipher, key, iv);
|
||||
alice.setAAD(associate_data);
|
||||
const enc = alice.update(message);
|
||||
alice.final();
|
||||
const tag = alice.getAuthTag();
|
||||
const bob = crypto.createDecipheriv(cipher, key, iv);
|
||||
bob.setAuthTag(tag);
|
||||
bob.setAAD(associate_data);
|
||||
bob.update(enc);
|
||||
bob.final();
|
||||
}
|
||||
|
||||
bench.end(mbits);
|
||||
}
|
|
@ -0,0 +1,101 @@
|
|||
'use strict';
|
||||
const common = require('../common.js');
|
||||
|
||||
const bench = common.createBenchmark(main, {
|
||||
writes: [500],
|
||||
cipher: ['AES192', 'AES256'],
|
||||
type: ['asc', 'utf', 'buf'],
|
||||
len: [2, 1024, 102400, 1024 * 1024],
|
||||
api: ['legacy', 'stream']
|
||||
}, {
|
||||
flags: ['--no-warnings']
|
||||
});
|
||||
|
||||
function main({ api, cipher, type, len, writes }) {
|
||||
if (api === 'stream' && /^v0\.[0-8]\./.test(process.version)) {
|
||||
console.error('Crypto streams not available until v0.10');
|
||||
// Use the legacy, just so that we can compare them.
|
||||
api = 'legacy';
|
||||
}
|
||||
|
||||
const crypto = require('crypto');
|
||||
const assert = require('assert');
|
||||
const alice = crypto.getDiffieHellman('modp5');
|
||||
const bob = crypto.getDiffieHellman('modp5');
|
||||
|
||||
alice.generateKeys();
|
||||
bob.generateKeys();
|
||||
|
||||
const pubEnc = /^v0\.[0-8]/.test(process.version) ? 'binary' : null;
|
||||
const alice_secret = alice.computeSecret(bob.getPublicKey(), pubEnc, 'hex');
|
||||
const bob_secret = bob.computeSecret(alice.getPublicKey(), pubEnc, 'hex');
|
||||
|
||||
// alice_secret and bob_secret should be the same
|
||||
assert(alice_secret === bob_secret);
|
||||
|
||||
const alice_cipher = crypto.createCipher(cipher, alice_secret);
|
||||
const bob_cipher = crypto.createDecipher(cipher, bob_secret);
|
||||
|
||||
let message;
|
||||
let encoding;
|
||||
switch (type) {
|
||||
case 'asc':
|
||||
message = 'a'.repeat(len);
|
||||
encoding = 'ascii';
|
||||
break;
|
||||
case 'utf':
|
||||
message = 'ü'.repeat(len / 2);
|
||||
encoding = 'utf8';
|
||||
break;
|
||||
case 'buf':
|
||||
message = Buffer.alloc(len, 'b');
|
||||
break;
|
||||
default:
|
||||
throw new Error(`unknown message type: ${type}`);
|
||||
}
|
||||
|
||||
const fn = api === 'stream' ? streamWrite : legacyWrite;
|
||||
|
||||
// Write data as fast as possible to alice, and have bob decrypt.
|
||||
// use old API for comparison to v0.8
|
||||
bench.start();
|
||||
fn(alice_cipher, bob_cipher, message, encoding, writes);
|
||||
}
|
||||
|
||||
function streamWrite(alice, bob, message, encoding, writes) {
|
||||
let written = 0;
|
||||
bob.on('data', (c) => {
|
||||
written += c.length;
|
||||
});
|
||||
|
||||
bob.on('end', () => {
|
||||
// Gbits
|
||||
const bits = written * 8;
|
||||
const gbits = bits / (1024 * 1024 * 1024);
|
||||
bench.end(gbits);
|
||||
});
|
||||
|
||||
alice.pipe(bob);
|
||||
|
||||
while (writes-- > 0)
|
||||
alice.write(message, encoding);
|
||||
|
||||
alice.end();
|
||||
}
|
||||
|
||||
function legacyWrite(alice, bob, message, encoding, writes) {
|
||||
let written = 0;
|
||||
let enc, dec;
|
||||
for (let i = 0; i < writes; i++) {
|
||||
enc = alice.update(message, encoding);
|
||||
dec = bob.update(enc);
|
||||
written += dec.length;
|
||||
}
|
||||
enc = alice.final();
|
||||
dec = bob.update(enc);
|
||||
written += dec.length;
|
||||
dec = bob.final();
|
||||
written += dec.length;
|
||||
const gbits = written / (1024 * 1024 * 1024);
|
||||
bench.end(gbits);
|
||||
}
|
|
@ -0,0 +1,21 @@
'use strict';

const common = require('../common.js');

const bench = common.createBenchmark(main, {
  n: [1, 5000],
  v: ['crypto', 'tls']
});

function main({ n, v }) {
  const method = require(v).getCiphers;
  let i = 0;
  // First call to getCiphers will dominate the results
  if (n > 1) {
    for (; i < n; i++)
      method();
  }
  bench.start();
  for (i = 0; i < n; i++) method();
  bench.end(n);
}

@ -0,0 +1,82 @@
|
|||
// Throughput benchmark
|
||||
// creates a single hasher, then pushes a bunch of data through it
|
||||
'use strict';
|
||||
const common = require('../common.js');
|
||||
const crypto = require('crypto');
|
||||
|
||||
const bench = common.createBenchmark(main, {
|
||||
writes: [500],
|
||||
algo: [ 'sha256', 'md5' ],
|
||||
type: ['asc', 'utf', 'buf'],
|
||||
out: ['hex', 'binary', 'buffer'],
|
||||
len: [2, 1024, 102400, 1024 * 1024],
|
||||
api: ['legacy', 'stream']
|
||||
});
|
||||
|
||||
function main({ api, type, len, out, writes, algo }) {
|
||||
if (api === 'stream' && /^v0\.[0-8]\./.test(process.version)) {
|
||||
console.error('Crypto streams not available until v0.10');
|
||||
// Use the legacy, just so that we can compare them.
|
||||
api = 'legacy';
|
||||
}
|
||||
|
||||
let message;
|
||||
let encoding;
|
||||
switch (type) {
|
||||
case 'asc':
|
||||
message = 'a'.repeat(len);
|
||||
encoding = 'ascii';
|
||||
break;
|
||||
case 'utf':
|
||||
message = 'ü'.repeat(len / 2);
|
||||
encoding = 'utf8';
|
||||
break;
|
||||
case 'buf':
|
||||
message = Buffer.alloc(len, 'b');
|
||||
break;
|
||||
default:
|
||||
throw new Error(`unknown message type: ${type}`);
|
||||
}
|
||||
|
||||
const fn = api === 'stream' ? streamWrite : legacyWrite;
|
||||
|
||||
bench.start();
|
||||
fn(algo, message, encoding, writes, len, out);
|
||||
}
|
||||
|
||||
function legacyWrite(algo, message, encoding, writes, len, outEnc) {
|
||||
const written = writes * len;
|
||||
const bits = written * 8;
|
||||
const gbits = bits / (1024 * 1024 * 1024);
|
||||
|
||||
while (writes-- > 0) {
|
||||
const h = crypto.createHash(algo);
|
||||
h.update(message, encoding);
|
||||
let res = h.digest(outEnc);
|
||||
|
||||
// Include buffer creation costs for older versions
|
||||
if (outEnc === 'buffer' && typeof res === 'string')
|
||||
res = Buffer.from(res, 'binary');
|
||||
}
|
||||
|
||||
bench.end(gbits);
|
||||
}
|
||||
|
||||
function streamWrite(algo, message, encoding, writes, len, outEnc) {
|
||||
const written = writes * len;
|
||||
const bits = written * 8;
|
||||
const gbits = bits / (1024 * 1024 * 1024);
|
||||
|
||||
while (writes-- > 0) {
|
||||
const h = crypto.createHash(algo);
|
||||
|
||||
if (outEnc !== 'buffer')
|
||||
h.setEncoding(outEnc);
|
||||
|
||||
h.write(message, encoding);
|
||||
h.end();
|
||||
h.read();
|
||||
}
|
||||
|
||||
bench.end(gbits);
|
||||
}
|
|
@ -0,0 +1,73 @@
|
|||
// Throughput benchmark
|
||||
// creates a single hasher, then pushes a bunch of data through it
|
||||
'use strict';
|
||||
const common = require('../common.js');
|
||||
const crypto = require('crypto');
|
||||
|
||||
const bench = common.createBenchmark(main, {
|
||||
writes: [500],
|
||||
algo: ['sha1', 'sha256', 'sha512'],
|
||||
type: ['asc', 'utf', 'buf'],
|
||||
len: [2, 1024, 102400, 1024 * 1024],
|
||||
api: ['legacy', 'stream']
|
||||
});
|
||||
|
||||
function main({ api, type, len, algo, writes }) {
|
||||
if (api === 'stream' && /^v0\.[0-8]\./.test(process.version)) {
|
||||
console.error('Crypto streams not available until v0.10');
|
||||
// Use the legacy, just so that we can compare them.
|
||||
api = 'legacy';
|
||||
}
|
||||
|
||||
let message;
|
||||
let encoding;
|
||||
switch (type) {
|
||||
case 'asc':
|
||||
message = 'a'.repeat(len);
|
||||
encoding = 'ascii';
|
||||
break;
|
||||
case 'utf':
|
||||
message = 'ü'.repeat(len / 2);
|
||||
encoding = 'utf8';
|
||||
break;
|
||||
case 'buf':
|
||||
message = Buffer.alloc(len, 'b');
|
||||
break;
|
||||
default:
|
||||
throw new Error(`unknown message type: ${type}`);
|
||||
}
|
||||
|
||||
const fn = api === 'stream' ? streamWrite : legacyWrite;
|
||||
|
||||
bench.start();
|
||||
fn(algo, message, encoding, writes, len);
|
||||
}
|
||||
|
||||
function legacyWrite(algo, message, encoding, writes, len) {
|
||||
const written = writes * len;
|
||||
const bits = written * 8;
|
||||
const gbits = bits / (1024 * 1024 * 1024);
|
||||
const h = crypto.createHash(algo);
|
||||
|
||||
while (writes-- > 0)
|
||||
h.update(message, encoding);
|
||||
|
||||
h.digest();
|
||||
|
||||
bench.end(gbits);
|
||||
}
|
||||
|
||||
function streamWrite(algo, message, encoding, writes, len) {
|
||||
const written = writes * len;
|
||||
const bits = written * 8;
|
||||
const gbits = bits / (1024 * 1024 * 1024);
|
||||
const h = crypto.createHash(algo);
|
||||
|
||||
while (writes-- > 0)
|
||||
h.write(message, encoding);
|
||||
|
||||
h.end();
|
||||
h.read();
|
||||
|
||||
bench.end(gbits);
|
||||
}
|
|
@ -0,0 +1,16 @@
'use strict';

const common = require('../common.js');
const { randomBytes } = require('crypto');

const bench = common.createBenchmark(main, {
  size: [64, 1024, 8192, 512 * 1024],
  n: [1e3],
});

function main({ n, size }) {
  bench.start();
  for (let i = 0; i < n; ++i)
    randomBytes(size);
  bench.end(n);
}

@ -0,0 +1,44 @@
|
|||
'use strict';
|
||||
// Throughput benchmark for RSA encrypt/decrypt (privateEncrypt / publicDecrypt)
|
||||
const common = require('../common.js');
|
||||
const crypto = require('crypto');
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
const fixtures_keydir = path.resolve(__dirname, '../../test/fixtures/keys/');
|
||||
const keylen_list = ['1024', '2048', '4096'];
|
||||
const RSA_PublicPem = {};
|
||||
const RSA_PrivatePem = {};
|
||||
|
||||
keylen_list.forEach((key) => {
|
||||
RSA_PublicPem[key] =
|
||||
fs.readFileSync(`${fixtures_keydir}/rsa_public_${key}.pem`);
|
||||
RSA_PrivatePem[key] =
|
||||
fs.readFileSync(`${fixtures_keydir}/rsa_private_${key}.pem`);
|
||||
});
|
||||
|
||||
const bench = common.createBenchmark(main, {
|
||||
n: [500],
|
||||
keylen: keylen_list,
|
||||
len: [16, 32, 64]
|
||||
});
|
||||
|
||||
function main({ len, algo, keylen, n }) {
|
||||
const message = Buffer.alloc(len, 'b');
|
||||
bench.start();
|
||||
StreamWrite(algo, keylen, message, n, len);
|
||||
}
|
||||
|
||||
function StreamWrite(algo, keylen, message, n, len) {
|
||||
const written = n * len;
|
||||
const bits = written * 8;
|
||||
const kbits = bits / (1024);
|
||||
|
||||
const privateKey = RSA_PrivatePem[keylen];
|
||||
const publicKey = RSA_PublicPem[keylen];
|
||||
for (let i = 0; i < n; i++) {
|
||||
const enc = crypto.privateEncrypt(privateKey, message);
|
||||
crypto.publicDecrypt(publicKey, enc);
|
||||
}
|
||||
|
||||
bench.end(kbits);
|
||||
}
|
|
@ -0,0 +1,51 @@
|
|||
'use strict';
|
||||
// Throughput benchmark in signing and verifying
|
||||
const common = require('../common.js');
|
||||
const crypto = require('crypto');
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
const fixtures_keydir = path.resolve(__dirname, '../../test/fixtures/keys/');
|
||||
const keylen_list = ['1024', '2048'];
|
||||
const RSA_PublicPem = {};
|
||||
const RSA_PrivatePem = {};
|
||||
|
||||
keylen_list.forEach((key) => {
|
||||
RSA_PublicPem[key] =
|
||||
fs.readFileSync(`${fixtures_keydir}/rsa_public_${key}.pem`);
|
||||
RSA_PrivatePem[key] =
|
||||
fs.readFileSync(`${fixtures_keydir}/rsa_private_${key}.pem`);
|
||||
});
|
||||
|
||||
const bench = common.createBenchmark(main, {
|
||||
writes: [500],
|
||||
algo: ['SHA1', 'SHA224', 'SHA256', 'SHA384', 'SHA512'],
|
||||
keylen: keylen_list,
|
||||
len: [1024, 102400, 2 * 102400, 3 * 102400, 1024 * 1024]
|
||||
});
|
||||
|
||||
function main({ len, algo, keylen, writes }) {
|
||||
const message = Buffer.alloc(len, 'b');
|
||||
bench.start();
|
||||
StreamWrite(algo, keylen, message, writes, len);
|
||||
}
|
||||
|
||||
function StreamWrite(algo, keylen, message, writes, len) {
|
||||
const written = writes * len;
|
||||
const bits = written * 8;
|
||||
const kbits = bits / (1024);
|
||||
|
||||
const privateKey = RSA_PrivatePem[keylen];
|
||||
const s = crypto.createSign(algo);
|
||||
const v = crypto.createVerify(algo);
|
||||
|
||||
while (writes-- > 0) {
|
||||
s.update(message);
|
||||
v.update(message);
|
||||
}
|
||||
|
||||
s.sign(privateKey, 'binary');
|
||||
s.end();
|
||||
v.end();
|
||||
|
||||
bench.end(kbits);
|
||||
}
|
|
@ -0,0 +1,67 @@
|
|||
// Test UDP send throughput with the multi buffer API against Buffer.concat
|
||||
'use strict';
|
||||
|
||||
const common = require('../common.js');
|
||||
const dgram = require('dgram');
|
||||
const PORT = common.PORT;
|
||||
|
||||
// `num` is the number of send requests to queue up each time.
|
||||
// Keep it reasonably high (>10) otherwise you're benchmarking the speed of
|
||||
// event loop cycles more than anything else.
|
||||
const bench = common.createBenchmark(main, {
|
||||
len: [64, 256, 512, 1024],
|
||||
num: [100],
|
||||
chunks: [1, 2, 4, 8],
|
||||
type: ['concat', 'multi'],
|
||||
dur: [5]
|
||||
});
|
||||
|
||||
function main({ dur, len, num, type, chunks }) {
|
||||
const chunk = [];
|
||||
for (let i = 0; i < chunks; i++) {
|
||||
chunk.push(Buffer.allocUnsafe(Math.round(len / chunks)));
|
||||
}
|
||||
|
||||
// Server
|
||||
let sent = 0;
|
||||
const socket = dgram.createSocket('udp4');
|
||||
const onsend = type === 'concat' ? onsendConcat : onsendMulti;
|
||||
|
||||
function onsendConcat() {
|
||||
if (sent++ % num === 0) {
|
||||
// The setImmediate() is necessary to have event loop progress on OSes
|
||||
// that only perform synchronous I/O on nonblocking UDP sockets.
|
||||
setImmediate(() => {
|
||||
for (let i = 0; i < num; i++) {
|
||||
socket.send(Buffer.concat(chunk), PORT, '127.0.0.1', onsend);
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
function onsendMulti() {
|
||||
if (sent++ % num === 0) {
|
||||
// The setImmediate() is necessary to have event loop progress on OSes
|
||||
// that only perform synchronous I/O on nonblocking UDP sockets.
|
||||
setImmediate(() => {
|
||||
for (let i = 0; i < num; i++) {
|
||||
socket.send(chunk, PORT, '127.0.0.1', onsend);
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
socket.on('listening', () => {
|
||||
bench.start();
|
||||
onsend();
|
||||
|
||||
setTimeout(() => {
|
||||
const bytes = sent * len;
|
||||
const gbits = (bytes * 8) / (1024 * 1024 * 1024);
|
||||
bench.end(gbits);
|
||||
process.exit(0);
|
||||
}, dur * 1000);
|
||||
});
|
||||
|
||||
socket.bind(PORT);
|
||||
}
|
|
@ -0,0 +1,46 @@
|
|||
'use strict';
|
||||
|
||||
const common = require('../common.js');
|
||||
const dgram = require('dgram');
|
||||
|
||||
const configs = {
|
||||
n: [1e4],
|
||||
port: ['true', 'false'],
|
||||
address: ['true', 'false'],
|
||||
};
|
||||
|
||||
const bench = common.createBenchmark(main, configs);
|
||||
const noop = () => {};
|
||||
|
||||
function main({ n, port, address }) {
|
||||
port = port === 'true' ? 0 : undefined;
|
||||
address = address === 'true' ? '0.0.0.0' : undefined;
|
||||
|
||||
if (port !== undefined && address !== undefined) {
|
||||
bench.start();
|
||||
for (let i = 0; i < n; i++) {
|
||||
dgram.createSocket('udp4').bind(port, address)
|
||||
.on('error', noop)
|
||||
.unref();
|
||||
}
|
||||
bench.end(n);
|
||||
} else if (port !== undefined) {
|
||||
bench.start();
|
||||
for (let i = 0; i < n; i++) {
|
||||
dgram.createSocket('udp4')
|
||||
.bind(port)
|
||||
.on('error', noop)
|
||||
.unref();
|
||||
}
|
||||
bench.end(n);
|
||||
} else if (port === undefined && address === undefined) {
|
||||
bench.start();
|
||||
for (let i = 0; i < n; i++) {
|
||||
dgram.createSocket('udp4')
|
||||
.bind()
|
||||
.on('error', noop)
|
||||
.unref();
|
||||
}
|
||||
bench.end(n);
|
||||
}
|
||||
}
|
|
@ -0,0 +1,57 @@
|
|||
// test UDP send/recv throughput with the multi buffer API
|
||||
'use strict';
|
||||
|
||||
const common = require('../common.js');
|
||||
const dgram = require('dgram');
|
||||
const PORT = common.PORT;
|
||||
|
||||
// `num` is the number of send requests to queue up each time.
|
||||
// Keep it reasonably high (>10) otherwise you're benchmarking the speed of
|
||||
// event loop cycles more than anything else.
|
||||
const bench = common.createBenchmark(main, {
|
||||
len: [64, 256, 1024],
|
||||
num: [100],
|
||||
chunks: [1, 2, 4, 8],
|
||||
type: ['send', 'recv'],
|
||||
dur: [5]
|
||||
});
|
||||
|
||||
function main({ dur, len, num, type, chunks }) {
|
||||
const chunk = [];
|
||||
for (let i = 0; i < chunks; i++) {
|
||||
chunk.push(Buffer.allocUnsafe(Math.round(len / chunks)));
|
||||
}
|
||||
let sent = 0;
|
||||
let received = 0;
|
||||
const socket = dgram.createSocket('udp4');
|
||||
|
||||
function onsend() {
|
||||
if (sent++ % num === 0) {
|
||||
// The setImmediate() is necessary to have event loop progress on OSes
|
||||
// that only perform synchronous I/O on nonblocking UDP sockets.
|
||||
setImmediate(() => {
|
||||
for (let i = 0; i < num; i++) {
|
||||
socket.send(chunk, PORT, '127.0.0.1', onsend);
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
socket.on('listening', () => {
|
||||
bench.start();
|
||||
onsend();
|
||||
|
||||
setTimeout(() => {
|
||||
const bytes = (type === 'send' ? sent : received) * len;
|
||||
const gbits = (bytes * 8) / (1024 * 1024 * 1024);
|
||||
bench.end(gbits);
|
||||
process.exit(0);
|
||||
}, dur * 1000);
|
||||
});
|
||||
|
||||
socket.on('message', () => {
|
||||
received++;
|
||||
});
|
||||
|
||||
socket.bind(PORT);
|
||||
}
|
|
@ -0,0 +1,53 @@
|
|||
// Test UDP send/recv throughput with the "old" offset/length API
|
||||
'use strict';
|
||||
|
||||
const common = require('../common.js');
|
||||
const dgram = require('dgram');
|
||||
const PORT = common.PORT;
|
||||
|
||||
// `num` is the number of send requests to queue up each time.
|
||||
// Keep it reasonably high (>10) otherwise you're benchmarking the speed of
|
||||
// event loop cycles more than anything else.
|
||||
const bench = common.createBenchmark(main, {
|
||||
len: [1, 64, 256, 1024],
|
||||
num: [100],
|
||||
type: ['send', 'recv'],
|
||||
dur: [5]
|
||||
});
|
||||
|
||||
function main({ dur, len, num, type }) {
|
||||
const chunk = Buffer.allocUnsafe(len);
|
||||
let sent = 0;
|
||||
let received = 0;
|
||||
const socket = dgram.createSocket('udp4');
|
||||
|
||||
function onsend() {
|
||||
if (sent++ % num === 0) {
|
||||
// The setImmediate() is necessary to have event loop progress on OSes
|
||||
// that only perform synchronous I/O on nonblocking UDP sockets.
|
||||
setImmediate(() => {
|
||||
for (let i = 0; i < num; i++) {
|
||||
socket.send(chunk, 0, chunk.length, PORT, '127.0.0.1', onsend);
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
socket.on('listening', () => {
|
||||
bench.start();
|
||||
onsend();
|
||||
|
||||
setTimeout(() => {
|
||||
const bytes = (type === 'send' ? sent : received) * chunk.length;
|
||||
const gbits = (bytes * 8) / (1024 * 1024 * 1024);
|
||||
bench.end(gbits);
|
||||
process.exit(0);
|
||||
}, dur * 1000);
|
||||
});
|
||||
|
||||
socket.on('message', () => {
|
||||
received++;
|
||||
});
|
||||
|
||||
socket.bind(PORT);
|
||||
}
|
|
@ -0,0 +1,53 @@
|
|||
// test UDP send/recv throughput with the new single Buffer API
|
||||
'use strict';
|
||||
|
||||
const common = require('../common.js');
|
||||
const dgram = require('dgram');
|
||||
const PORT = common.PORT;
|
||||
|
||||
// `num` is the number of send requests to queue up each time.
|
||||
// Keep it reasonably high (>10) otherwise you're benchmarking the speed of
|
||||
// event loop cycles more than anything else.
|
||||
const bench = common.createBenchmark(main, {
|
||||
len: [1, 64, 256, 1024],
|
||||
num: [100],
|
||||
type: ['send', 'recv'],
|
||||
dur: [5]
|
||||
});
|
||||
|
||||
function main({ dur, len, num, type }) {
|
||||
const chunk = Buffer.allocUnsafe(len);
|
||||
let sent = 0;
|
||||
let received = 0;
|
||||
const socket = dgram.createSocket('udp4');
|
||||
|
||||
function onsend() {
|
||||
if (sent++ % num === 0) {
|
||||
// The setImmediate() is necessary to have event loop progress on OSes
|
||||
// that only perform synchronous I/O on nonblocking UDP sockets.
|
||||
setImmediate(() => {
|
||||
for (let i = 0; i < num; i++) {
|
||||
socket.send(chunk, PORT, '127.0.0.1', onsend);
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
socket.on('listening', () => {
|
||||
bench.start();
|
||||
onsend();
|
||||
|
||||
setTimeout(() => {
|
||||
const bytes = (type === 'send' ? sent : received) * chunk.length;
|
||||
const gbits = (bytes * 8) / (1024 * 1024 * 1024);
|
||||
bench.end(gbits);
|
||||
process.exit(0);
|
||||
}, dur * 1000);
|
||||
});
|
||||
|
||||
socket.on('message', () => {
|
||||
received++;
|
||||
});
|
||||
|
||||
socket.bind(PORT);
|
||||
}
|
|
@ -0,0 +1,31 @@
'use strict';

const common = require('../common.js');
const { lookup } = require('dns').promises;

const bench = common.createBenchmark(main, {
  name: ['127.0.0.1', '::1'],
  all: ['true', 'false'],
  n: [5e6]
});

function main({ name, n, all }) {
  if (all === 'true') {
    const opts = { all: true };
    bench.start();
    (async function cb() {
      for (let i = 0; i < n; i++) {
        await lookup(name, opts);
      }
      // End the timer only after every lookup has resolved.
      bench.end(n);
    })();
  } else {
    bench.start();
    (async function cb() {
      for (let i = 0; i < n; i++) {
        await lookup(name);
      }
      bench.end(n);
    })();
  }
}

@ -0,0 +1,35 @@
|
|||
'use strict';
|
||||
|
||||
const common = require('../common.js');
|
||||
const lookup = require('dns').lookup;
|
||||
|
||||
const bench = common.createBenchmark(main, {
|
||||
name: ['127.0.0.1', '::1'],
|
||||
all: ['true', 'false'],
|
||||
n: [5e6]
|
||||
});
|
||||
|
||||
function main({ name, n, all }) {
|
||||
let i = 0;
|
||||
|
||||
if (all === 'true') {
|
||||
const opts = { all: true };
|
||||
bench.start();
|
||||
(function cb() {
|
||||
if (i++ === n) {
|
||||
bench.end(n);
|
||||
return;
|
||||
}
|
||||
lookup(name, opts, cb);
|
||||
})();
|
||||
} else {
|
||||
bench.start();
|
||||
(function cb() {
|
||||
if (i++ === n) {
|
||||
bench.end(n);
|
||||
return;
|
||||
}
|
||||
lookup(name, cb);
|
||||
})();
|
||||
}
|
||||
}
|
|
@ -0,0 +1,33 @@
'use strict';
const common = require('../common.js');
const domain = require('domain');

const bench = common.createBenchmark(main, {
  args: [0, 1, 2, 3],
  n: [10]
});

const bdomain = domain.create();
const gargs = [1, 2, 3];

function main({ n, args }) {
  const myArguments = gargs.slice(0, args);
  bench.start();

  bdomain.enter();
  for (let i = 0; i < n; i++) {
    if (myArguments.length >= 2) {
      const args = Array.prototype.slice.call(myArguments, 1);
      fn.apply(this, args);
    } else {
      fn.call(this);
    }
  }
  bdomain.exit();

  bench.end(n);
}

function fn(a = 1, b = 2, c = 3) {
  return a + b + c;
}

@ -0,0 +1,48 @@
|
|||
'use strict';
|
||||
|
||||
const common = require('../common.js');
|
||||
const assert = require('assert');
|
||||
|
||||
const bench = common.createBenchmark(main, {
|
||||
method: ['withoutdefaults', 'withdefaults'],
|
||||
n: [1e8]
|
||||
});
|
||||
|
||||
function oldStyleDefaults(x, y) {
|
||||
x = x || 1;
|
||||
y = y || 2;
|
||||
assert.strictEqual(x, 1);
|
||||
assert.strictEqual(y, 2);
|
||||
}
|
||||
|
||||
function defaultParams(x = 1, y = 2) {
|
||||
assert.strictEqual(x, 1);
|
||||
assert.strictEqual(y, 2);
|
||||
}
|
||||
|
||||
function runOldStyleDefaults(n) {
|
||||
bench.start();
|
||||
for (let i = 0; i < n; i++)
|
||||
oldStyleDefaults();
|
||||
bench.end(n);
|
||||
}
|
||||
|
||||
function runDefaultParams(n) {
|
||||
bench.start();
|
||||
for (let i = 0; i < n; i++)
|
||||
defaultParams();
|
||||
bench.end(n);
|
||||
}
|
||||
|
||||
function main({ n, method }) {
|
||||
switch (method) {
|
||||
case 'withoutdefaults':
|
||||
runOldStyleDefaults(n);
|
||||
break;
|
||||
case 'withdefaults':
|
||||
runDefaultParams(n);
|
||||
break;
|
||||
default:
|
||||
throw new Error(`Unexpected method "${method}"`);
|
||||
}
|
||||
}
|
|
@ -0,0 +1,48 @@
|
|||
'use strict';
|
||||
|
||||
const common = require('../common.js');
|
||||
const assert = require('assert');
|
||||
|
||||
const bench = common.createBenchmark(main, {
|
||||
method: ['swap', 'destructure'],
|
||||
n: [1e8]
|
||||
});
|
||||
|
||||
function runSwapManual(n) {
|
||||
let x, y, r;
|
||||
bench.start();
|
||||
for (let i = 0; i < n; i++) {
|
||||
x = 1, y = 2;
|
||||
r = x;
|
||||
x = y;
|
||||
y = r;
|
||||
assert.strictEqual(x, 2);
|
||||
assert.strictEqual(y, 1);
|
||||
}
|
||||
bench.end(n);
|
||||
}
|
||||
|
||||
function runSwapDestructured(n) {
|
||||
let x, y;
|
||||
bench.start();
|
||||
for (let i = 0; i < n; i++) {
|
||||
x = 1, y = 2;
|
||||
[x, y] = [y, x];
|
||||
assert.strictEqual(x, 2);
|
||||
assert.strictEqual(y, 1);
|
||||
}
|
||||
bench.end(n);
|
||||
}
|
||||
|
||||
function main({ n, method }) {
|
||||
switch (method) {
|
||||
case 'swap':
|
||||
runSwapManual(n);
|
||||
break;
|
||||
case 'destructure':
|
||||
runSwapDestructured(n);
|
||||
break;
|
||||
default:
|
||||
throw new Error(`Unexpected method "${method}"`);
|
||||
}
|
||||
}
|
|
@ -0,0 +1,45 @@
|
|||
'use strict';
|
||||
|
||||
const common = require('../common.js');
|
||||
|
||||
const bench = common.createBenchmark(main, {
|
||||
method: ['normal', 'destructureObject'],
|
||||
n: [1e8]
|
||||
});
|
||||
|
||||
function runNormal(n) {
|
||||
const o = { x: 0, y: 1 };
|
||||
bench.start();
|
||||
for (let i = 0; i < n; i++) {
|
||||
/* eslint-disable no-unused-vars */
|
||||
const x = o.x;
|
||||
const y = o.y;
|
||||
const r = o.r || 2;
|
||||
/* eslint-enable no-unused-vars */
|
||||
}
|
||||
bench.end(n);
|
||||
}
|
||||
|
||||
function runDestructured(n) {
|
||||
const o = { x: 0, y: 1 };
|
||||
bench.start();
|
||||
for (let i = 0; i < n; i++) {
|
||||
/* eslint-disable no-unused-vars */
|
||||
const { x, y, r = 2 } = o;
|
||||
/* eslint-enable no-unused-vars */
|
||||
}
|
||||
bench.end(n);
|
||||
}
|
||||
|
||||
function main({ n, method }) {
|
||||
switch (method) {
|
||||
case 'normal':
|
||||
runNormal(n);
|
||||
break;
|
||||
case 'destructureObject':
|
||||
runDestructured(n);
|
||||
break;
|
||||
default:
|
||||
throw new Error(`Unexpected method "${method}"`);
|
||||
}
|
||||
}
|
|
@ -0,0 +1,73 @@
|
|||
'use strict';
|
||||
|
||||
const common = require('../common.js');
|
||||
|
||||
const bench = common.createBenchmark(main, {
|
||||
method: ['for', 'for-of', 'for-in', 'forEach'],
|
||||
count: [5, 10, 20, 100],
|
||||
n: [5e6]
|
||||
});
|
||||
|
||||
function useFor(n, items, count) {
|
||||
bench.start();
|
||||
for (let i = 0; i < n; i++) {
|
||||
for (let j = 0; j < count; j++) {
|
||||
// eslint-disable-next-line no-unused-vars
|
||||
const item = items[j];
|
||||
}
|
||||
}
|
||||
bench.end(n);
|
||||
}
|
||||
|
||||
function useForOf(n, items) {
|
||||
bench.start();
|
||||
for (let i = 0; i < n; i++) {
|
||||
// eslint-disable-next-line no-unused-vars
|
||||
for (const item of items) {}
|
||||
}
|
||||
bench.end(n);
|
||||
}
|
||||
|
||||
function useForIn(n, items) {
|
||||
bench.start();
|
||||
for (let i = 0; i < n; i++) {
|
||||
for (const j in items) {
|
||||
// eslint-disable-next-line no-unused-vars
|
||||
const item = items[j];
|
||||
}
|
||||
}
|
||||
bench.end(n);
|
||||
}
|
||||
|
||||
function useForEach(n, items) {
|
||||
bench.start();
|
||||
for (let i = 0; i < n; i++) {
|
||||
items.forEach((item) => {});
|
||||
}
|
||||
bench.end(n);
|
||||
}
|
||||
|
||||
function main({ n, count, method }) {
|
||||
const items = new Array(count);
|
||||
let fn;
|
||||
for (let i = 0; i < count; i++)
|
||||
items[i] = i;
|
||||
|
||||
switch (method) {
|
||||
case 'for':
|
||||
fn = useFor;
|
||||
break;
|
||||
case 'for-of':
|
||||
fn = useForOf;
|
||||
break;
|
||||
case 'for-in':
|
||||
fn = useForIn;
|
||||
break;
|
||||
case 'forEach':
|
||||
fn = useForEach;
|
||||
break;
|
||||
default:
|
||||
throw new Error(`Unexpected method "${method}"`);
|
||||
}
|
||||
fn(n, items, count);
|
||||
}
|
|
@@ -0,0 +1,128 @@
'use strict';

const common = require('../common.js');
const assert = require('assert');

const bench = common.createBenchmark(main, {
  method: [
    'object', 'nullProtoObject', 'nullProtoLiteralObject', 'storageObject',
    'fakeMap', 'map',
  ],
  n: [1e6]
});

function runObject(n) {
  const m = {};
  bench.start();
  for (let i = 0; i < n; i++) {
    m[`i${i}`] = i;
    m[`s${i}`] = String(i);
    assert.strictEqual(String(m[`i${i}`]), m[`s${i}`]);
    m[`i${i}`] = undefined;
    m[`s${i}`] = undefined;
  }
  bench.end(n);
}

function runNullProtoObject(n) {
  const m = Object.create(null);
  bench.start();
  for (let i = 0; i < n; i++) {
    m[`i${i}`] = i;
    m[`s${i}`] = String(i);
    assert.strictEqual(String(m[`i${i}`]), m[`s${i}`]);
    m[`i${i}`] = undefined;
    m[`s${i}`] = undefined;
  }
  bench.end(n);
}

function runNullProtoLiteralObject(n) {
  const m = { __proto__: null };
  bench.start();
  for (let i = 0; i < n; i++) {
    m[`i${i}`] = i;
    m[`s${i}`] = String(i);
    assert.strictEqual(String(m[`i${i}`]), m[`s${i}`]);
    m[`i${i}`] = undefined;
    m[`s${i}`] = undefined;
  }
  bench.end(n);
}

function StorageObject() {}
StorageObject.prototype = Object.create(null);

function runStorageObject(n) {
  const m = new StorageObject();
  bench.start();
  for (let i = 0; i < n; i++) {
    m[`i${i}`] = i;
    m[`s${i}`] = String(i);
    assert.strictEqual(String(m[`i${i}`]), m[`s${i}`]);
    m[`i${i}`] = undefined;
    m[`s${i}`] = undefined;
  }
  bench.end(n);
}

function fakeMap() {
  const m = {};
  return {
    get(key) { return m[`$${key}`]; },
    set(key, val) { m[`$${key}`] = val; },
    get size() { return Object.keys(m).length; },
    has(key) { return Object.prototype.hasOwnProperty.call(m, `$${key}`); }
  };
}

function runFakeMap(n) {
  const m = fakeMap();
  bench.start();
  for (let i = 0; i < n; i++) {
    m.set(`i${i}`, i);
    m.set(`s${i}`, String(i));
    assert.strictEqual(String(m.get(`i${i}`)), m.get(`s${i}`));
    m.set(`i${i}`, undefined);
    m.set(`s${i}`, undefined);
  }
  bench.end(n);
}

function runMap(n) {
  const m = new Map();
  bench.start();
  for (let i = 0; i < n; i++) {
    m.set(`i${i}`, i);
    m.set(`s${i}`, String(i));
    assert.strictEqual(String(m.get(`i${i}`)), m.get(`s${i}`));
    m.set(`i${i}`, undefined);
    m.set(`s${i}`, undefined);
  }
  bench.end(n);
}

function main({ n, method }) {
  switch (method) {
    case 'object':
      runObject(n);
      break;
    case 'nullProtoObject':
      runNullProtoObject(n);
      break;
    case 'nullProtoLiteralObject':
      runNullProtoLiteralObject(n);
      break;
    case 'storageObject':
      runStorageObject(n);
      break;
    case 'fakeMap':
      runFakeMap(n);
      break;
    case 'map':
      runMap(n);
      break;
    default:
      throw new Error(`Unexpected method "${method}"`);
  }
}
@@ -0,0 +1,69 @@
'use strict';

const common = require('../common.js');
const assert = require('assert');

const bench = common.createBenchmark(main, {
  method: ['copy', 'rest', 'arguments'],
  n: [1e8]
});

function copyArguments() {
  const len = arguments.length;
  const args = new Array(len);
  for (let i = 0; i < len; i++)
    args[i] = arguments[i];
  assert.strictEqual(args[0], 1);
  assert.strictEqual(args[1], 2);
  assert.strictEqual(args[2], 'a');
  assert.strictEqual(args[3], 'b');
}

function restArguments(...args) {
  assert.strictEqual(args[0], 1);
  assert.strictEqual(args[1], 2);
  assert.strictEqual(args[2], 'a');
  assert.strictEqual(args[3], 'b');
}

function useArguments() {
  assert.strictEqual(arguments[0], 1);
  assert.strictEqual(arguments[1], 2);
  assert.strictEqual(arguments[2], 'a');
  assert.strictEqual(arguments[3], 'b');
}

function runCopyArguments(n) {
  for (let i = 0; i < n; i++)
    copyArguments(1, 2, 'a', 'b');
}

function runRestArguments(n) {
  for (let i = 0; i < n; i++)
    restArguments(1, 2, 'a', 'b');
}

function runUseArguments(n) {
  for (let i = 0; i < n; i++)
    useArguments(1, 2, 'a', 'b');
}

function main({ n, method }) {
  let fn;
  switch (method) {
    case 'copy':
      fn = runCopyArguments;
      break;
    case 'rest':
      fn = runRestArguments;
      break;
    case 'arguments':
      fn = runUseArguments;
      break;
    default:
      throw new Error(`Unexpected method "${method}"`);
  }
  bench.start();
  fn(n);
  bench.end(n);
}
@@ -0,0 +1,42 @@
'use strict';

const common = require('../common.js');
const util = require('util');

const bench = common.createBenchmark(main, {
  method: ['spread', 'assign', '_extend'],
  count: [5, 10, 20],
  n: [1e6]
});

function main({ n, count, method }) {
  const src = {};
  for (let i = 0; i < count; i++)
    src[`p${i}`] = i;

  let obj; // eslint-disable-line no-unused-vars

  switch (method) {
    case '_extend':
      bench.start();
      for (let i = 0; i < n; i++)
        obj = util._extend({}, src);
      bench.end(n);
      break;
    case 'assign':
      bench.start();
      for (let i = 0; i < n; i++)
        obj = Object.assign({}, src);
      bench.end(n);
      break;
    case 'spread':
      bench.start();
      for (let i = 0; i < n; i++)
        obj = { ...src };
      bench.end(n);
      break;
    default:
      throw new Error(`Unexpected method "${method}"`);
  }
}
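The three code paths above build equivalent shallow copies of `src`; `util._extend()` is a legacy API that the Node.js documentation deprecates in favour of `Object.assign()`, with object spread as the syntactic alternative. The snippet below is a small, standalone sanity check of that equivalence, not part of the benchmark; the variable names are illustrative.

'use strict';
const assert = require('assert');
const util = require('util');

const src = { a: 1, b: { nested: true } };

const viaExtend = util._extend({}, src);
const viaAssign = Object.assign({}, src);
const viaSpread = { ...src };

// All three carry the same top-level keys and values...
assert.deepStrictEqual(viaExtend, src);
assert.deepStrictEqual(viaAssign, src);
assert.deepStrictEqual(viaSpread, src);

// ...and all three are shallow: nested objects are shared, not cloned.
assert.strictEqual(viaSpread.b, src.b);
console.log('spread, Object.assign() and util._extend() agree on shallow copies');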
@@ -0,0 +1,58 @@
'use strict';

const common = require('../common.js');
const assert = require('assert');

const bench = common.createBenchmark(main, {
  method: ['apply', 'spread', 'call-spread'],
  count: [5, 10, 20],
  context: ['context', 'null'],
  rest: [0, 1],
  n: [5e6]
});

function makeTest(count, rest) {
  if (rest) {
    return function test(...args) {
      assert.strictEqual(count, args.length);
    };
  } else {
    return function test() {
      assert.strictEqual(count, arguments.length);
    };
  }
}

function main({ n, context, count, rest, method }) {
  const ctx = context === 'context' ? {} : null;
  let fn = makeTest(count, rest);
  const args = new Array(count);

  for (let i = 0; i < count; i++)
    args[i] = i;

  switch (method) {
    case 'apply':
      bench.start();
      for (let i = 0; i < n; i++)
        fn.apply(ctx, args);
      bench.end(n);
      break;
    case 'spread':
      if (ctx !== null)
        fn = fn.bind(ctx);
      bench.start();
      for (let i = 0; i < n; i++)
        fn(...args);
      bench.end(n);
      break;
    case 'call-spread':
      bench.start();
      for (let i = 0; i < n; i++)
        fn.call(ctx, ...args);
      bench.end(n);
      break;
    default:
      throw new Error(`Unexpected method "${method}"`);
  }
}
@@ -0,0 +1,67 @@
'use strict';

const common = require('../common.js');

const configs = {
  n: [1e3],
  mode: [
    'multi-concat',
    'multi-join',
    'multi-template',
    'to-string-string',
    'to-string-concat',
    'to-string-template',
  ],
};

const bench = common.createBenchmark(main, configs);

function main({ n, mode }) {
  const str = 'abc';
  const num = 123;

  let string;

  switch (mode) {
    case 'multi-concat':
      bench.start();
      for (let i = 0; i < n; i++)
        string = '...' + str + ', ' + num + ', ' + str + ', ' + num + '.';
      bench.end(n);
      break;
    case 'multi-join':
      bench.start();
      for (let i = 0; i < n; i++)
        string = ['...', str, ', ', num, ', ', str, ', ', num, '.'].join('');
      bench.end(n);
      break;
    case 'multi-template':
      bench.start();
      for (let i = 0; i < n; i++)
        string = `...${str}, ${num}, ${str}, ${num}.`;
      bench.end(n);
      break;
    case 'to-string-string':
      bench.start();
      for (let i = 0; i < n; i++)
        string = String(num);
      bench.end(n);
      break;
    case 'to-string-concat':
      bench.start();
      for (let i = 0; i < n; i++)
        string = '' + num;
      bench.end(n);
      break;
    case 'to-string-template':
      bench.start();
      for (let i = 0; i < n; i++)
        string = `${num}`;
      bench.end(n);
      break;
    default:
      throw new Error(`Unexpected mode "${mode}"`);
  }

  return string;
}
@@ -0,0 +1,38 @@
'use strict';

const assert = require('assert');
const common = require('../common.js');

const configs = {
  n: [1e3],
  mode: ['Array', 'repeat'],
  encoding: ['ascii', 'utf8'],
  size: [1e1, 1e3, 1e6],
};

const bench = common.createBenchmark(main, configs);

function main({ n, size, encoding, mode }) {
  const character = encoding === 'ascii' ? 'a' : '\ud83d\udc0e'; // '🐎'

  let str;

  switch (mode) {
    case 'Array':
      bench.start();
      for (let i = 0; i < n; i++)
        str = new Array(size + 1).join(character);
      bench.end(n);
      break;
    case 'repeat':
      bench.start();
      for (let i = 0; i < n; i++)
        str = character.repeat(size);
      bench.end(n);
      break;
    default:
      throw new Error(`Unexpected mode "${mode}"`);
  }

  assert.strictEqual([...str].length, size);
}
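A side note on the final assertion above: `[...str].length` counts code points rather than UTF-16 code units, which is why the check still holds when the repeated character is the surrogate-pair horse emoji. A tiny standalone illustration (not part of the benchmark):

'use strict';
const assert = require('assert');

const horse = '\ud83d\udc0e'; // one code point ('🐎'), two UTF-16 code units
const str = horse.repeat(3);

assert.strictEqual(str.length, 6);       // .length counts code units
assert.strictEqual([...str].length, 3);  // spreading iterates code points
console.log('code units:', str.length, 'code points:', [...str].length);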