
Merge branch 'main' into ext-node-errors-no-tostring

Commit 2131c6214a by Bartek Iwańczuk, 2024-10-26 23:44:45 +01:00 (committed by GitHub)
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
8,331 changed files with 638,722 additions and 328,509 deletions


@@ -11,8 +11,17 @@ rustflags = [
   "link-arg=/STACK:4194304",
 ]
 
+[target.x86_64-apple-darwin]
+rustflags = [
+  "-C",
+  "link-args=-weak_framework Metal -weak_framework MetalPerformanceShaders -weak_framework QuartzCore -weak_framework CoreGraphics",
+]
+
 [target.aarch64-apple-darwin]
-rustflags = ["-C", "link-arg=-fuse-ld=lld"]
+rustflags = [
+  "-C",
+  "link-args=-fuse-ld=lld -weak_framework Metal -weak_framework MetalPerformanceShaders -weak_framework QuartzCore -weak_framework CoreGraphics",
+]
 
 [target.'cfg(all())']
 rustflags = [


@@ -1,5 +1,10 @@
 FROM mcr.microsoft.com/vscode/devcontainers/rust:1-bullseye
 
+# Install cmake
+RUN apt-get update \
+  && apt-get install -y cmake \
+  && rm -rf /var/lib/apt/lists/*
+
 # Install Deno
 ENV DENO_INSTALL=/usr/local
 RUN curl -fsSL https://deno.land/x/install/install.sh | sh


@@ -4,23 +4,23 @@
     "dockerfile": "Dockerfile"
   },
   "runArgs": ["--cap-add=SYS_PTRACE", "--security-opt", "seccomp=unconfined"],
-  "settings": {
-    "lldb.executable": "/usr/bin/lldb",
-    // VS Code don't watch files under ./target
-    "files.watcherExclude": {
-      "**/target/**": true
+  "customizations": {
+    "vscode": {
+      "settings": {
+        "lldb.executable": "/usr/bin/lldb",
+        // VS Code don't watch files under ./target
+        "files.watcherExclude": {
+          "**/target/**": true
+        },
+        "extensions": [
+          "rust-lang.rust-analyzer",
+          "tamasfe.even-better-toml",
+          "vadimcn.vscode-lldb",
+          "mutantdino.resourcemonitor"
+        ]
+      }
     }
   },
-  "extensions": [
-    "rust-lang.rust-analyzer",
-    "bungcip.better-toml",
-    "vadimcn.vscode-lldb",
-    "mutantdino.resourcemonitor"
-  ],
   "postCreateCommand": "git submodule update --init",
   "remoteUser": "vscode"
 }


@@ -4,6 +4,7 @@
   "include": [
     "ban-untagged-todo",
     "camelcase",
+    "no-console",
    "guard-for-in"
   ],
   "exclude": [


@@ -8,6 +8,9 @@
   "json": {
     "deno": true
   },
+  "yaml": {
+    "quotes": "preferSingle"
+  },
   "exec": {
     "commands": [{
       "command": "rustfmt --config imports_granularity=item",
@@ -18,47 +21,58 @@
     ".cargo_home",
     ".git",
     "cli/bench/testdata/express-router.js",
+    "cli/bench/testdata/lsp_benchdata/",
     "cli/bench/testdata/npm/",
+    "cli/tsc/*typescript.js",
     "cli/tsc/dts/lib.d.ts",
-    "cli/tsc/dts/lib.scripthost.d.ts",
     "cli/tsc/dts/lib.decorators*.d.ts",
-    "cli/tsc/dts/lib.webworker*.d.ts",
     "cli/tsc/dts/lib.dom*.d.ts",
     "cli/tsc/dts/lib.es*.d.ts",
+    "cli/tsc/dts/lib.scripthost.d.ts",
+    "cli/tsc/dts/lib.webworker*.d.ts",
     "cli/tsc/dts/typescript.d.ts",
-    "cli/tests/node_compat/test",
-    "cli/tests/testdata/file_extensions/ts_with_js_extension.js",
-    "cli/tests/testdata/fmt/badly_formatted.json",
-    "cli/tests/testdata/fmt/badly_formatted.md",
-    "cli/tests/testdata/byte_order_mark.ts",
-    "cli/tests/testdata/encoding",
-    "cli/tests/testdata/fmt/",
-    "cli/tests/testdata/lint/glob/",
-    "cli/tests/testdata/test/glob/",
-    "cli/tests/testdata/import_assertions/json_with_shebang.json",
-    "cli/tests/testdata/run/error_syntax_empty_trailing_line.mjs",
-    "cli/tests/testdata/run/inline_js_source_map*",
-    "cli/tests/testdata/malformed_config/",
-    "cli/tests/testdata/npm/registry/",
-    "cli/tests/testdata/test/markdown_windows.md",
-    "cli/tsc/*typescript.js",
+    "ext/websocket/autobahn/reports",
     "gh-pages",
     "target",
-    "test_ffi/tests/test.js",
-    "test_util/std",
-    "test_util/wpt",
-    "third_party",
-    "tools/node_compat/TODO.md",
-    "tools/node_compat/node",
-    "tools/wpt/expectation.json",
-    "tools/wpt/manifest.json",
-    "ext/websocket/autobahn/reports"
+    "tests/ffi/tests/test.js",
+    "tests/node_compat/runner/suite",
+    "tests/node_compat/runner/TODO.md",
+    "tests/node_compat/test",
+    "tests/registry/",
+    "tests/specs/bench/default_ts",
+    "tests/specs/fmt",
+    "tests/specs/lint/bom",
+    "tests/specs/lint/default_ts",
+    "tests/specs/lint/syntax_error_reporting",
+    "tests/specs/publish/no_check_surfaces_syntax_error",
+    "tests/specs/run/default_ts",
+    "tests/specs/test/default_ts",
+    "tests/testdata/byte_order_mark.ts",
+    "tests/testdata/encoding",
+    "tests/testdata/file_extensions/ts_with_js_extension.js",
+    "tests/testdata/fmt/",
+    "tests/testdata/fmt/badly_formatted.ipynb",
+    "tests/testdata/fmt/badly_formatted.json",
+    "tests/testdata/fmt/badly_formatted.md",
+    "tests/testdata/import_attributes/json_with_shebang.json",
+    "tests/testdata/lint/glob/",
+    "tests/testdata/malformed_config/",
+    "tests/testdata/run/byte_order_mark.ts",
+    "tests/testdata/run/error_syntax_empty_trailing_line.mjs",
+    "tests/testdata/run/inline_js_source_map*",
+    "tests/testdata/test/markdown_windows.md",
+    "tests/util/std",
+    "tests/wpt/runner/expectation.json",
+    "tests/wpt/runner/manifest.json",
+    "tests/wpt/suite",
+    "third_party"
   ],
   "plugins": [
-    "https://plugins.dprint.dev/typescript-0.86.1.wasm",
-    "https://plugins.dprint.dev/json-0.17.4.wasm",
-    "https://plugins.dprint.dev/markdown-0.15.3.wasm",
-    "https://plugins.dprint.dev/toml-0.5.4.wasm",
-    "https://plugins.dprint.dev/exec-0.4.3.json@42343548b8022c99b1d750be6b894fe6b6c7ee25f72ae9f9082226dd2e515072"
+    "https://plugins.dprint.dev/typescript-0.93.0.wasm",
+    "https://plugins.dprint.dev/json-0.19.4.wasm",
+    "https://plugins.dprint.dev/markdown-0.17.8.wasm",
+    "https://plugins.dprint.dev/toml-0.6.3.wasm",
+    "https://plugins.dprint.dev/exec-0.5.0.json@8d9972eee71fa1590e04873540421f3eda7674d0f1aae3d7c788615e7b7413d0",
+    "https://plugins.dprint.dev/g-plane/pretty_yaml-v0.5.0.wasm"
   ]
 }


@@ -9,8 +9,9 @@ charset = utf-8
 trim_trailing_whitespace = true
 
 [*.out] # make editor neutral to .out files
-insert_final_newline = false
-trim_trailing_whitespace = false
+insert_final_newline = unset
+trim_trailing_whitespace = unset
 
-[*.py]
-indent_size = 4
+[tests/node_compat/test/**]
+insert_final_newline = unset
+trim_trailing_whitespace = unset

.gitattributes

@@ -2,11 +2,11 @@
 * text=auto eol=lf
 *.png -text
-/cli/tests/testdata/encoding/* -text
+/tests/testdata/encoding/* -text
 
 # Tell git which symlinks point to files, and which ones point to directories.
 # This is relevant for Windows only, and requires git >= 2.19.2 to work.
-/cli/tests/testdata/symlink_to_subdir symlink=dir
+/tests/testdata/symlink_to_subdir symlink=dir
 
 # Tell github these are vendored files.
 # Doesn't include them in the language statistics.
@@ -14,4 +14,4 @@
 /cli/dts/* linguist-vendored
 
 # Keep Windows line endings in cross-platform doc check test file
-/cli/tests/testdata/test/markdown_windows.md eol=crlf
+/tests/testdata/test/markdown_windows.md eol=crlf

.github/ISSUE_TEMPLATE/bug_report.md (new file)

@@ -0,0 +1,9 @@
+---
+name: 🐛 Bug Report
+about: Report an issue found in the Deno CLI.
+title: ''
+labels: ''
+assignees: ''
+---
+
+Version: Deno x.x.x

.github/ISSUE_TEMPLATE/config.yml (new file)

@ -0,0 +1,8 @@
blank_issues_enabled: true
contact_links:
- name: 🦕 Deploy Feedback
url: https://github.com/denoland/deploy_feedback/
about: Provide feature requests or bug reports for the Deno Deploy edge runtime.
- name: 💬 Discord
url: https://discord.gg/deno
about: Join our community on Discord.


@@ -0,0 +1,7 @@
+---
+name: 💡 Feature Request
+about: Suggest a feature for the Deno CLI.
+title: ''
+labels: ''
+assignees: ''
+---


@@ -1,5 +1,5 @@
 <!--
-Before submitting a PR, please read https://deno.com/manual/contributing
+Before submitting a PR, please read https://docs.deno.com/runtime/manual/references/contributing
 
 1. Give the PR a descriptive title.

.github/SECURITY.md

@@ -9,11 +9,13 @@ we use to work with the security research community to address runtime security.
 ## Reporting a vulnerability
 
-Please email findings to security@deno.com. We strive to resolve all problems as
-quickly as possible, and are more than happy to play an active role in
-publication of writeups after the problem is resolved.
+Please open a new
+[Security advisory](https://github.com/denoland/deno/security/advisories/new)
+with your findings. We strive to resolve all problems as quickly as possible,
+and are more than happy to play an active role in publication of writeups after
+the problem is resolved.
 
-Try to include as much information as possible in the initial email, so we can
+Try to include as much information as possible in the initial report, so we can
 quickly address the issue.
 
 **Please do not open security issues in the public issue tracker.**


@@ -1,9 +1,9 @@
-// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
+// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
 
 // This file contains the implementation of a Github Action. Github uses
-// Node.js v12.x to run actions, so this is Node code and not Deno code.
+// Node.js v20.x to run actions, so this is Node code and not Deno code.
 
 const { spawn } = require("child_process");
-const fs = require("fs");
+const { existsSync } = require("fs");
 const { utimes, mkdir, readFile, writeFile } = require("fs/promises");
 const { dirname, resolve } = require("path");
 const { StringDecoder } = require("string_decoder");
@@ -147,7 +147,7 @@ async function* ls(dir = "") {
       break;
     case "160000": // Git submodule.
       // sometimes we don't checkout all submodules
-      if (fs.existsSync(path)) {
+      if (existsSync(path)) {
        yield* ls(path);
      }
      break;
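For context, the `ls` function patched above is an async generator that walks a git-style listing and recurses into submodule paths only when they are actually checked out, which is what the `existsSync` guard handles ("160000" is git's mode for a submodule gitlink). A simplified TypeScript sketch of that shape (assumed structure; `Entry` and `listDir` are illustrative stand-ins, and only the fragment above comes from the real file):

```ts
import { existsSync } from "node:fs";

interface Entry {
  mode: string; // git file mode; "160000" marks a submodule (gitlink)
  path: string;
}

// Hypothetical stand-in: the real action shells out to git to enumerate
// entries; this stub keeps the sketch self-contained.
async function listDir(_dir: string): Promise<Entry[]> {
  return [];
}

export async function* ls(dir = ""): AsyncGenerator<string> {
  for (const { mode, path } of await listDir(dir)) {
    if (mode === "160000") {
      // sometimes we don't checkout all submodules
      if (existsSync(path)) {
        yield* ls(path);
      }
    } else {
      yield path;
    }
  }
}
```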


@@ -7,4 +7,4 @@ inputs:
     required: true
 runs:
   main: action.js
-  using: node16
+  using: node20


@@ -1,49 +0,0 @@
-name: bench
-
-on:
-  # Runs at minute 9 past hour 0, 6, 12, and 18.
-  schedule:
-    - cron: 9 0,6,12,18 * * *
-  workflow_dispatch:
-
-jobs:
-  bench:
-    name: bench / ${{ matrix.os }} / ${{ matrix.deno-version }}
-    if: github.repository == 'denoland/deno'
-    runs-on: ${{ matrix.os }}
-    strategy:
-      fail-fast: false
-      matrix:
-        os: [ubuntu-22.04-xl]
-
-    env:
-      CARGO_TERM_COLOR: always
-      RUST_BACKTRACE: full
-      CI: true
-      GOOGLE_SVC_KEY: ${{ secrets.GOOGLE_SVC_KEY }}
-
-    steps:
-      - name: Clone repository
-        uses: actions/checkout@v3
-        with:
-          submodules: true
-          persist-credentials: false
-
-      - uses: dsherret/rust-toolchain-file@v1
-
-      - name: Install protoc
-        uses: arduino/setup-protoc@v2
-        with:
-          version: "21.12"
-          repo-token: ${{ secrets.GITHUB_TOKEN }}
-
-      - name: Build release
-        run: cargo build --release --locked --all-targets
-
-      - name: Worker info
-        run: |
-          cat /proc/cpuinfo
-          cat /proc/meminfo
-
-      - name: Run and Post benchmarks
-        run: cargo bench --locked


@@ -2,6 +2,11 @@ name: cargo_publish
 
 on: workflow_dispatch
 
+# Ensures only one publish is running at a time
+concurrency:
+  group: ${{ github.workflow }}
+  cancel-in-progress: true
+
 jobs:
   build:
     name: cargo publish
@@ -20,7 +25,7 @@ jobs:
           git config --global fetch.parallel 32
       - name: Clone repository
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
         with:
           token: ${{ secrets.DENOBOT_PAT }}
           submodules: recursive
@@ -28,11 +33,9 @@ jobs:
       - uses: dsherret/rust-toolchain-file@v1
       - name: Install deno
-        uses: denoland/setup-deno@v1
+        uses: denoland/setup-deno@v2
         with:
-          # use a recent version instead of the latest version in case
-          # the latest version ever has issues that breaks publishing
-          deno-version: v1.31.3
+          deno-version: v1.x
       - name: Publish
         env:

(One file's diff was suppressed because it is too large.)


@@ -1,6 +1,8 @@
 # GENERATED BY ./ci.generate.ts -- DO NOT DIRECTLY EDIT
 name: ci
+permissions:
+  contents: write
 on:
   push:
     branches:
@@ -29,114 +31,141 @@ jobs:
           git config --global fetch.parallel 32
         if: github.event.pull_request.draft == true
       - name: Clone repository
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
         with:
           fetch-depth: 5
           submodules: false
         if: github.event.pull_request.draft == true
       - id: check
+        if: 'github.event.pull_request.draft == true && (!contains(github.event.pull_request.labels.*.name, ''ci-draft''))'
         run: |-
           GIT_MESSAGE=$(git log --format=%s -n 1 ${{github.event.after}})
           echo Commit message: $GIT_MESSAGE
-          echo $GIT_MESSAGE | grep '\[ci\]' || (echo 'Exiting due to draft PR. Commit with [ci] to bypass.' ; echo 'skip_build=true' >> $GITHUB_OUTPUT)
-        if: github.event.pull_request.draft == true
+          echo $GIT_MESSAGE | grep '\[ci\]' || (echo 'Exiting due to draft PR. Commit with [ci] to bypass or add the ci-draft label.' ; echo 'skip_build=true' >> $GITHUB_OUTPUT)
   build:
-    name: '${{ matrix.job }} ${{ matrix.profile }} ${{ matrix.os_display_name }}'
+    name: '${{ matrix.job }} ${{ matrix.profile }} ${{ matrix.os }}-${{ matrix.arch }}'
     needs:
       - pre_build
     if: '${{ needs.pre_build.outputs.skip_build != ''true'' }}'
-    runs-on: '${{ matrix.runner || matrix.os }}'
-    timeout-minutes: 120
+    runs-on: '${{ matrix.runner }}'
+    timeout-minutes: 180
     defaults:
       run:
         shell: bash
     strategy:
       matrix:
         include:
-          - os: macos-12
-            job: test
-            profile: debug
-            os_display_name: macos-x86_64
-          - os: macos-12
-            job: test
-            profile: release
-            skip_pr: true
-            runner: '${{ github.event_name == ''pull_request'' && ''ubuntu-22.04'' || ''macos-12'' }}'
-            os_display_name: macos-x86_64
-          - os: windows-2022
-            job: test
-            profile: debug
-            os_display_name: windows-x86_64
-          - os: '${{ github.repository == ''denoland/deno'' && ''windows-2022-xl'' || ''windows-2022'' }}'
-            job: test
-            profile: release
-            skip_pr: true
-            runner: '${{ github.event_name == ''pull_request'' && ''ubuntu-22.04'' || github.repository == ''denoland/deno'' && ''windows-2022-xl'' || ''windows-2022'' }}'
-            os_display_name: windows-x86_64
-          - os: '${{ github.repository == ''denoland/deno'' && ''ubuntu-22.04-xl'' || ''ubuntu-22.04'' }}'
-            job: test
-            profile: release
-            use_sysroot: true
-            wpt: '${{ !startsWith(github.ref, ''refs/tags/'') }}'
-            os_display_name: ubuntu-x86_64
-          - os: '${{ github.repository == ''denoland/deno'' && ''ubuntu-22.04-xl'' || ''ubuntu-22.04'' }}'
-            job: bench
-            profile: release
-            use_sysroot: true
-            skip_pr: '${{ !contains(github.event.pull_request.labels.*.name, ''ci-bench'') }}'
-            runner: '${{ github.event_name == ''pull_request'' && !contains(github.event.pull_request.labels.*.name, ''ci-bench'') && ''ubuntu-22.04'' || github.repository == ''denoland/deno'' && ''ubuntu-22.04-xl'' || ''ubuntu-22.04'' }}'
-            os_display_name: ubuntu-x86_64
-          - os: ubuntu-22.04
-            job: test
-            profile: debug
-            use_sysroot: true
-            os_display_name: ubuntu-x86_64
-          - os: ubuntu-22.04
-            job: lint
-            profile: debug
-            os_display_name: ubuntu-x86_64
-          - os: macos-12
-            job: lint
-            profile: debug
-            os_display_name: macos-x86_64
-          - os: windows-2022
-            job: lint
-            profile: debug
-            os_display_name: windows-x86_64
+          - os: macos
+            arch: x86_64
+            runner: macos-13
+            job: test
+            profile: debug
+          - os: macos
+            arch: x86_64
+            runner: '${{ (!contains(github.event.pull_request.labels.*.name, ''ci-full'') && (github.event_name == ''pull_request'')) && ''ubuntu-22.04'' || ''macos-13'' }}'
+            job: test
+            profile: release
+            skip: '${{ !contains(github.event.pull_request.labels.*.name, ''ci-full'') && (github.event_name == ''pull_request'') }}'
+          - os: macos
+            arch: aarch64
+            runner: macos-14
+            job: test
+            profile: debug
+          - os: macos
+            arch: aarch64
+            runner: '${{ (!contains(github.event.pull_request.labels.*.name, ''ci-full'') && (github.event_name == ''pull_request'')) && ''ubuntu-22.04'' || ''macos-14'' }}'
+            job: test
+            profile: release
+            skip: '${{ !contains(github.event.pull_request.labels.*.name, ''ci-full'') && (github.event_name == ''pull_request'') }}'
+          - os: windows
+            arch: x86_64
+            runner: windows-2022
+            job: test
+            profile: debug
+          - os: windows
+            arch: x86_64
+            runner: '${{ (!contains(github.event.pull_request.labels.*.name, ''ci-full'') && (github.event_name == ''pull_request'')) && ''ubuntu-22.04'' || github.repository == ''denoland/deno'' && ''windows-2022-xl'' || ''windows-2022'' }}'
+            job: test
+            profile: release
+            skip: '${{ !contains(github.event.pull_request.labels.*.name, ''ci-full'') && (github.event_name == ''pull_request'') }}'
+          - os: linux
+            arch: x86_64
+            runner: '${{ github.repository == ''denoland/deno'' && ''ubuntu-22.04-xl'' || ''ubuntu-22.04'' }}'
+            job: test
+            profile: release
+            use_sysroot: true
+            wpt: '${{ !startsWith(github.ref, ''refs/tags/'') }}'
+          - os: linux
+            arch: x86_64
+            runner: '${{ (!contains(github.event.pull_request.labels.*.name, ''ci-full'') && (github.event_name == ''pull_request'' && !contains(github.event.pull_request.labels.*.name, ''ci-bench''))) && ''ubuntu-22.04'' || github.repository == ''denoland/deno'' && ''ubuntu-22.04-xl'' || ''ubuntu-22.04'' }}'
+            job: bench
+            profile: release
+            use_sysroot: true
+            skip: '${{ !contains(github.event.pull_request.labels.*.name, ''ci-full'') && (github.event_name == ''pull_request'' && !contains(github.event.pull_request.labels.*.name, ''ci-bench'')) }}'
+          - os: linux
+            arch: x86_64
+            runner: ubuntu-22.04
+            job: test
+            profile: debug
+            use_sysroot: true
+          - os: linux
+            arch: x86_64
+            runner: ubuntu-22.04
+            job: lint
+            profile: debug
+          - os: linux
+            arch: aarch64
+            runner: ubicloud-standard-16-arm
+            job: test
+            profile: debug
+          - os: linux
+            arch: aarch64
+            runner: ubicloud-standard-16-arm
+            job: test
+            profile: release
+            use_sysroot: true
+          - os: macos
+            arch: x86_64
+            runner: macos-13
+            job: lint
+            profile: debug
+          - os: windows
+            arch: x86_64
+            runner: windows-2022
+            job: lint
+            profile: debug
       fail-fast: '${{ github.event_name == ''pull_request'' || (github.ref != ''refs/heads/main'' && !startsWith(github.ref, ''refs/tags/'')) }}'
     env:
       CARGO_TERM_COLOR: always
       RUST_BACKTRACE: full
+      RUST_LIB_BACKTRACE: 0
     steps:
-      - name: Reconfigure Windows Storage
-        if: '!(github.event_name == ''pull_request'' && matrix.skip_pr) && (startsWith(matrix.os, ''windows'') && !endsWith(matrix.os, ''-xl''))'
-        shell: pwsh
-        run: |-
-          New-Item -ItemType "directory" -Path "$env:TEMP/__target__"
-          New-Item -ItemType Junction -Target "$env:TEMP/__target__" -Path "D:/a/deno/deno"
       - name: Configure git
         run: |-
           git config --global core.symlinks true
           git config --global fetch.parallel 32
-        if: '!(github.event_name == ''pull_request'' && matrix.skip_pr)'
+        if: '!(matrix.skip)'
       - name: Clone repository
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
         with:
           fetch-depth: 5
           submodules: false
-        if: '!(github.event_name == ''pull_request'' && matrix.skip_pr)'
-      - name: Clone submodule ./test_util/std
-        run: git submodule update --init --recursive --depth=1 -- ./test_util/std
-        if: '!(github.event_name == ''pull_request'' && matrix.skip_pr)'
-      - name: Clone submodule ./test_util/wpt
-        run: git submodule update --init --recursive --depth=1 -- ./test_util/wpt
-        if: '!(github.event_name == ''pull_request'' && matrix.skip_pr) && (matrix.wpt)'
-      - name: Clone submodule ./tools/node_compat/node
-        run: git submodule update --init --recursive --depth=1 -- ./tools/node_compat/node
-        if: '!(github.event_name == ''pull_request'' && matrix.skip_pr) && (matrix.job == ''lint'' && startsWith(matrix.os, ''ubuntu''))'
+        if: '!(matrix.skip)'
+      - name: Clone submodule ./tests/util/std
+        run: git submodule update --init --recursive --depth=1 -- ./tests/util/std
+        if: '!(matrix.skip)'
+      - name: Clone submodule ./tests/wpt/suite
+        run: git submodule update --init --recursive --depth=1 -- ./tests/wpt/suite
+        if: '!(matrix.skip) && (matrix.wpt)'
+      - name: Clone submodule ./tests/node_compat/runner/suite
+        run: git submodule update --init --recursive --depth=1 -- ./tests/node_compat/runner/suite
+        if: '!(matrix.skip) && (matrix.job == ''lint'' && matrix.os == ''linux'')'
+      - name: Clone submodule ./cli/bench/testdata/lsp_benchdata
+        run: git submodule update --init --recursive --depth=1 -- ./cli/bench/testdata/lsp_benchdata
+        if: '!(matrix.skip) && (matrix.job == ''bench'')'
       - name: 'Create source tarballs (release, linux)'
         if: |-
-          !(github.event_name == 'pull_request' && matrix.skip_pr) && (startsWith(matrix.os, 'ubuntu') &&
+          !(matrix.skip) && (matrix.os == 'linux' &&
           matrix.profile == 'release' &&
           matrix.job == 'test' &&
           github.repository == 'denoland/deno' &&
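A note on the mechanical shape of this rewrite: the workflow is generated (see the `# GENERATED BY ./ci.generate.ts` header above), and the old per-step `!(github.event_name == 'pull_request' && matrix.skip_pr)` guard has become a `skip` expression computed once per matrix entry, with every step condition wrapped in `!(matrix.skip) && (...)`. A hedged sketch of how such a generator can apply that wrapping (hypothetical helper names; the real ci.generate.ts is not shown in this diff):

```ts
// Illustrative only: `Step` and `withSkipGuard` are assumptions, not the
// actual ci.generate.ts API.
interface Step {
  name?: string;
  if?: string;
  run?: string;
}

// Wrap a step's condition so skipped matrix entries short-circuit first.
function withSkipGuard(step: Step): Step {
  return {
    ...step,
    if: step.if ? `!(matrix.skip) && (${step.if})` : "!(matrix.skip)",
  };
}

const step = withSkipGuard({
  name: "test_format.js",
  if: "matrix.job == 'lint' && matrix.os == 'linux'",
});
console.log(step.if);
// => !(matrix.skip) && (matrix.job == 'lint' && matrix.os == 'linux')
```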
@@ -146,44 +175,38 @@ jobs:
           tar --exclude=".git*" --exclude=target --exclude=third_party/prebuilt \
             -czvf target/release/deno_src.tar.gz -C .. deno
       - uses: dsherret/rust-toolchain-file@v1
-        if: '!(github.event_name == ''pull_request'' && matrix.skip_pr)'
-      - if: '!(github.event_name == ''pull_request'' && matrix.skip_pr) && (matrix.job == ''lint'' || matrix.job == ''test'' || matrix.job == ''bench'')'
+        if: '!(matrix.skip)'
+      - if: '!(matrix.skip) && (matrix.job == ''lint'' || matrix.job == ''test'' || matrix.job == ''bench'')'
         name: Install Deno
-        uses: denoland/setup-deno@v1
+        uses: denoland/setup-deno@v2
         with:
           deno-version: v1.x
       - name: Install Python
-        uses: actions/setup-python@v4
+        uses: actions/setup-python@v5
         with:
           python-version: 3.11
-        if: '!(github.event_name == ''pull_request'' && matrix.skip_pr) && (matrix.job != ''lint'')'
+        if: '!(matrix.skip) && (matrix.job != ''lint'' && (matrix.os != ''linux'' || matrix.arch != ''aarch64''))'
       - name: Remove unused versions of Python
-        if: '!(github.event_name == ''pull_request'' && matrix.skip_pr) && (matrix.job != ''lint'' && (startsWith(matrix.os, ''windows'')))'
+        if: '!(matrix.skip) && (matrix.job != ''lint'' && (matrix.os != ''linux'' || matrix.arch != ''aarch64'') && (matrix.os == ''windows''))'
         shell: pwsh
         run: |-
           $env:PATH -split ";" |
             Where-Object { Test-Path "$_\python.exe" } |
             Select-Object -Skip 1 |
             ForEach-Object { Move-Item "$_" "$_.disabled" }
-      - if: '!(github.event_name == ''pull_request'' && matrix.skip_pr) && (matrix.job == ''bench'')'
+      - if: '!(matrix.skip) && (matrix.job == ''bench'' || matrix.job == ''test'')'
         name: Install Node
-        uses: actions/setup-node@v3
+        uses: actions/setup-node@v4
         with:
           node-version: 18
-      - name: Install protoc
-        uses: arduino/setup-protoc@v2
-        with:
-          version: '21.12'
-          repo-token: '${{ secrets.GITHUB_TOKEN }}'
-        if: '!(github.event_name == ''pull_request'' && matrix.skip_pr)'
       - if: |-
-          !(github.event_name == 'pull_request' && matrix.skip_pr) && (matrix.profile == 'release' &&
+          !(matrix.skip) && (matrix.profile == 'release' &&
           matrix.job == 'test' &&
           github.repository == 'denoland/deno' &&
           (github.ref == 'refs/heads/main' ||
           startsWith(github.ref, 'refs/tags/')))
         name: Authenticate with Google Cloud
-        uses: google-github-actions/auth@v1
+        uses: google-github-actions/auth@v2
         with:
           project_id: denoland
           credentials_json: '${{ secrets.GCP_SA_KEY }}'
@@ -191,165 +214,192 @@ jobs:
           create_credentials_file: true
       - name: Setup gcloud (unix)
         if: |-
-          !(github.event_name == 'pull_request' && matrix.skip_pr) && (runner.os != 'Windows' &&
+          !(matrix.skip) && (matrix.os != 'windows' &&
           matrix.profile == 'release' &&
           matrix.job == 'test' &&
           github.repository == 'denoland/deno' &&
           (github.ref == 'refs/heads/main' ||
           startsWith(github.ref, 'refs/tags/')))
-        uses: google-github-actions/setup-gcloud@v1
+        uses: google-github-actions/setup-gcloud@v2
         with:
           project_id: denoland
       - name: Setup gcloud (windows)
         if: |-
-          !(github.event_name == 'pull_request' && matrix.skip_pr) && (runner.os == 'Windows' &&
+          !(matrix.skip) && (matrix.os == 'windows' &&
           matrix.profile == 'release' &&
           matrix.job == 'test' &&
           github.repository == 'denoland/deno' &&
           (github.ref == 'refs/heads/main' ||
           startsWith(github.ref, 'refs/tags/')))
-        uses: google-github-actions/setup-gcloud@v1
+        uses: google-github-actions/setup-gcloud@v2
         env:
           CLOUDSDK_PYTHON: '${{env.pythonLocation}}\python.exe'
         with:
           project_id: denoland
       - name: Configure canary build
         if: |-
-          !(github.event_name == 'pull_request' && matrix.skip_pr) && (matrix.job == 'test' &&
+          !(matrix.skip) && (matrix.job == 'test' &&
           matrix.profile == 'release' &&
           github.repository == 'denoland/deno' &&
           github.ref == 'refs/heads/main')
         run: echo "DENO_CANARY=true" >> $GITHUB_ENV
-      - if: '!(github.event_name == ''pull_request'' && matrix.skip_pr) && (matrix.use_sysroot)'
+      - if: '!(matrix.skip) && (matrix.use_sysroot)'
         name: Set up incremental LTO and sysroot build
         run: |-
+          # Setting up sysroot
+          export DEBIAN_FRONTEND=noninteractive
           # Avoid running man-db triggers, which sometimes takes several minutes
           # to complete.
-          sudo apt-get remove --purge -y man-db
+          sudo apt-get -qq remove --purge -y man-db > /dev/null 2> /dev/null
           # Remove older clang before we install
-          sudo apt-get remove 'clang-12*' 'clang-13*' 'clang-14*' 'clang-15*' 'llvm-12*' 'llvm-13*' 'llvm-14*' 'llvm-15*' 'lld-12*' 'lld-13*' 'lld-14*' 'lld-15*'
+          sudo apt-get -qq remove 'clang-12*' 'clang-13*' 'clang-14*' 'clang-15*' 'clang-16*' 'llvm-12*' 'llvm-13*' 'llvm-14*' 'llvm-15*' 'llvm-16*' 'lld-12*' 'lld-13*' 'lld-14*' 'lld-15*' 'lld-16*' > /dev/null 2> /dev/null
           # Install clang-XXX, lld-XXX, and debootstrap.
-          echo "deb http://apt.llvm.org/jammy/ llvm-toolchain-jammy-16 main" |
-            sudo dd of=/etc/apt/sources.list.d/llvm-toolchain-jammy-16.list
+          echo "deb http://apt.llvm.org/jammy/ llvm-toolchain-jammy-18 main" |
+            sudo dd of=/etc/apt/sources.list.d/llvm-toolchain-jammy-18.list
           curl https://apt.llvm.org/llvm-snapshot.gpg.key |
             gpg --dearmor |
             sudo dd of=/etc/apt/trusted.gpg.d/llvm-snapshot.gpg
           sudo apt-get update
           # this was unreliable sometimes, so try again if it fails
-          sudo apt-get install --no-install-recommends debootstrap clang-16 lld-16 clang-tools-16 clang-format-16 clang-tidy-16 || echo 'Failed. Trying again.' && sudo apt-get clean && sudo apt-get update && sudo apt-get install --no-install-recommends debootstrap clang-16 lld-16 clang-tools-16 clang-format-16 clang-tidy-16
+          sudo apt-get install --no-install-recommends clang-18 lld-18 clang-tools-18 clang-format-18 clang-tidy-18 || echo 'Failed. Trying again.' && sudo apt-get clean && sudo apt-get update && sudo apt-get install --no-install-recommends clang-18 lld-18 clang-tools-18 clang-format-18 clang-tidy-18
           # Fix alternatives
-          (yes '' | sudo update-alternatives --force --all) || true
-          # Create ubuntu-16.04 sysroot environment, which is used to avoid
-          # depending on a very recent version of glibc.
-          # `libc6-dev` is required for building any C source files.
-          # `file` and `make` are needed to build libffi-sys.
-          # `curl` is needed to build rusty_v8.
-          sudo debootstrap \
-            --include=ca-certificates,curl,file,libc6-dev,make \
-            --no-merged-usr --variant=minbase xenial /sysroot \
-            http://azure.archive.ubuntu.com/ubuntu
+          (yes '' | sudo update-alternatives --force --all) > /dev/null 2> /dev/null || true
+          echo "Decompressing sysroot..."
+          wget -q https://github.com/denoland/deno_sysroot_build/releases/download/sysroot-20240528/sysroot-`uname -m`.tar.xz -O /tmp/sysroot.tar.xz
+          cd /
+          xzcat /tmp/sysroot.tar.xz | sudo tar -x
           sudo mount --rbind /dev /sysroot/dev
           sudo mount --rbind /sys /sysroot/sys
           sudo mount --rbind /home /sysroot/home
           sudo mount -t proc /proc /sysroot/proc
           cd
-          wget https://github.com/denoland/deno_third_party/raw/master/prebuilt/linux64/libdl/libdl.a
-          wget https://github.com/denoland/deno_third_party/raw/master/prebuilt/linux64/libdl/libdl.so.2
-          sudo ln -s libdl.so.2 /sysroot/lib/x86_64-linux-gnu/libdl.so
-          sudo ln -s libdl.a /sysroot/lib/x86_64-linux-gnu/libdl.a
+          echo "Done."
           # Configure the build environment. Both Rust and Clang will produce
           # llvm bitcode only, so we can use lld's incremental LTO support.
-          cat >> $GITHUB_ENV << __0
+          # Load the sysroot's env vars
+          echo "sysroot env:"
+          cat /sysroot/.env
+          . /sysroot/.env
+          # Important notes:
+          #   1. -ldl seems to be required to avoid a failure in FFI tests. This flag seems
+          #      to be in the Rust default flags in the smoketest, so uncertain why we need
+          #      to be explicit here.
+          #   2. RUSTFLAGS and RUSTDOCFLAGS must be specified, otherwise the doctests fail
+          #      to build because the object formats are not compatible.
+          echo "
           CARGO_PROFILE_BENCH_INCREMENTAL=false
           CARGO_PROFILE_BENCH_LTO=false
           CARGO_PROFILE_RELEASE_INCREMENTAL=false
           CARGO_PROFILE_RELEASE_LTO=false
           RUSTFLAGS<<__1
             -C linker-plugin-lto=true
-            -C linker=clang-16
-            -C link-arg=-fuse-ld=lld-16
-            -C link-arg=--sysroot=/sysroot
+            -C linker=clang-18
+            -C link-arg=-fuse-ld=lld-18
             -C link-arg=-ldl
             -C link-arg=-Wl,--allow-shlib-undefined
             -C link-arg=-Wl,--thinlto-cache-dir=$(pwd)/target/release/lto-cache
             -C link-arg=-Wl,--thinlto-cache-policy,cache_size_bytes=700m
             --cfg tokio_unstable
-            ${{ env.RUSTFLAGS }}
+            $RUSTFLAGS
           __1
           RUSTDOCFLAGS<<__1
             -C linker-plugin-lto=true
-            -C linker=clang-16
-            -C link-arg=-fuse-ld=lld-16
-            -C link-arg=--sysroot=/sysroot
+            -C linker=clang-18
+            -C link-arg=-fuse-ld=lld-18
             -C link-arg=-ldl
             -C link-arg=-Wl,--allow-shlib-undefined
             -C link-arg=-Wl,--thinlto-cache-dir=$(pwd)/target/release/lto-cache
             -C link-arg=-Wl,--thinlto-cache-policy,cache_size_bytes=700m
-            ${{ env.RUSTFLAGS }}
+            --cfg tokio_unstable
+            $RUSTFLAGS
           __1
-          CC=clang-16
-          CFLAGS=-flto=thin --sysroot=/sysroot
-          __0
+          CC=/usr/bin/clang-18
+          CFLAGS=-flto=thin $CFLAGS
+          " > $GITHUB_ENV
+      - name: Remove macOS cURL --ipv4 flag
+        run: |-
+          curl --version
+          which curl
+          cat /etc/hosts
+          rm ~/.curlrc || true
+        if: '!(matrix.skip) && (matrix.os == ''macos'')'
+      - name: Install macOS aarch64 lld
+        run: ./tools/install_prebuilt.js ld64.lld
+        if: '!(matrix.skip) && (matrix.os == ''macos'' && matrix.arch == ''aarch64'')'
+      - name: Install rust-codesign
+        run: |-
+          ./tools/install_prebuilt.js rcodesign
+          echo $GITHUB_WORKSPACE/third_party/prebuilt/mac >> $GITHUB_PATH
+        if: '!(matrix.skip) && (matrix.os == ''macos'')'
       - name: Log versions
         run: |-
-          python --version
-          rustc --version
-          cargo --version
-          which dpkg && dpkg -l
-          if [[ "${{ matrix.job }}" == "lint" ]] || [[ "${{ matrix.job }}" == "test" ]]; then
-            deno --version
-          fi
-          if [ "${{ matrix.job }}" == "bench" ]
-          then
-            node -v
-            ./tools/install_prebuilt.js wrk hyperfine
-          fi
-        if: '!(github.event_name == ''pull_request'' && matrix.skip_pr)'
+          echo '*** Python'
+          command -v python && python --version || echo 'No python found or bad executable'
+          echo '*** Rust'
+          command -v rustc && rustc --version || echo 'No rustc found or bad executable'
+          echo '*** Cargo'
+          command -v cargo && cargo --version || echo 'No cargo found or bad executable'
+          echo '*** Deno'
+          command -v deno && deno --version || echo 'No deno found or bad executable'
+          echo '*** Node'
+          command -v node && node --version || echo 'No node found or bad executable'
+          echo '*** Installed packages'
+          command -v dpkg && dpkg -l || echo 'No dpkg found or bad executable'
+        if: '!(matrix.skip)'
+      - name: Install benchmark tools
+        if: '!(matrix.skip) && (matrix.job == ''bench'')'
+        run: ./tools/install_prebuilt.js wrk hyperfine
       - name: Cache Cargo home
-        uses: actions/cache@v3
+        uses: actions/cache@v4
         with:
           path: |-
             ~/.cargo/registry/index
             ~/.cargo/registry/cache
-          key: '51-cargo-home-${{ matrix.os }}-${{ hashFiles(''Cargo.lock'') }}'
-          restore-keys: '51-cargo-home-${{ matrix.os }}'
-        if: '!(github.event_name == ''pull_request'' && matrix.skip_pr)'
+          key: '22-cargo-home-${{ matrix.os }}-${{ matrix.arch }}-${{ hashFiles(''Cargo.lock'') }}'
+          restore-keys: '22-cargo-home-${{ matrix.os }}-${{ matrix.arch }}'
+        if: '!(matrix.skip)'
       - name: Restore cache build output (PR)
-        uses: actions/cache/restore@v3
-        if: '!(github.event_name == ''pull_request'' && matrix.skip_pr) && (github.ref != ''refs/heads/main'' && !startsWith(github.ref, ''refs/tags/''))'
+        uses: actions/cache/restore@v4
+        if: '!(matrix.skip) && (github.ref != ''refs/heads/main'' && !startsWith(github.ref, ''refs/tags/''))'
         with:
           path: |-
             ./target
             !./target/*/gn_out
+            !./target/*/gn_root
             !./target/*/*.zip
             !./target/*/*.tar.gz
           key: never_saved
-          restore-keys: '51-cargo-target-${{ matrix.os }}-${{ matrix.profile }}-${{ matrix.job }}-'
+          restore-keys: '22-cargo-target-${{ matrix.os }}-${{ matrix.arch }}-${{ matrix.profile }}-${{ matrix.job }}-'
       - name: Apply and update mtime cache
-        if: '!(github.event_name == ''pull_request'' && matrix.skip_pr) && (!startsWith(github.ref, ''refs/tags/''))'
+        if: '!(matrix.skip) && (!startsWith(github.ref, ''refs/tags/''))'
         uses: ./.github/mtime_cache
         with:
           cache-path: ./target
       - name: test_format.js
-        if: '!(github.event_name == ''pull_request'' && matrix.skip_pr) && (matrix.job == ''lint'' && startsWith(matrix.os, ''ubuntu''))'
-        run: deno run --unstable --allow-write --allow-read --allow-run --allow-net ./tools/format.js --check
+        if: '!(matrix.skip) && (matrix.job == ''lint'' && matrix.os == ''linux'')'
+        run: deno run --allow-write --allow-read --allow-run --allow-net ./tools/format.js --check
       - name: Lint PR title
-        if: '!(github.event_name == ''pull_request'' && matrix.skip_pr) && (matrix.job == ''lint'' && github.event_name == ''pull_request'' && startsWith(matrix.os, ''ubuntu''))'
+        if: '!(matrix.skip) && (matrix.job == ''lint'' && github.event_name == ''pull_request'' && matrix.os == ''linux'')'
         env:
           PR_TITLE: '${{ github.event.pull_request.title }}'
         run: deno run ./tools/verify_pr_title.js "$PR_TITLE"
       - name: lint.js
-        if: '!(github.event_name == ''pull_request'' && matrix.skip_pr) && (matrix.job == ''lint'')'
-        run: deno run --unstable --allow-write --allow-read --allow-run --allow-net ./tools/lint.js
+        if: '!(matrix.skip) && (matrix.job == ''lint'')'
+        run: deno run --allow-write --allow-read --allow-run --allow-net ./tools/lint.js
+      - name: jsdoc_checker.js
+        if: '!(matrix.skip) && (matrix.job == ''lint'')'
+        run: deno run --allow-read --allow-env --allow-sys ./tools/jsdoc_checker.js
       - name: node_compat/setup.ts --check
-        if: '!(github.event_name == ''pull_request'' && matrix.skip_pr) && (matrix.job == ''lint'' && startsWith(matrix.os, ''ubuntu''))'
-        run: deno run --allow-write --allow-read --allow-run=git ./tools/node_compat/setup.ts --check
+        if: '!(matrix.skip) && (matrix.job == ''lint'' && matrix.os == ''linux'')'
+        run: deno run --allow-write --allow-read --allow-run=git ./tests/node_compat/runner/setup.ts --check
       - name: Build debug
-        if: '!(github.event_name == ''pull_request'' && matrix.skip_pr) && (matrix.job == ''test'' && matrix.profile == ''debug'')'
+        if: '!(matrix.skip) && (matrix.job == ''test'' && matrix.profile == ''debug'')'
         run: |-
           df -h
           cargo build --locked --all-targets
@@ -358,154 +408,151 @@ jobs:
           CARGO_PROFILE_DEV_DEBUG: 0
       - name: Build release
         if: |-
-          !(github.event_name == 'pull_request' && matrix.skip_pr) && ((matrix.job == 'test' || matrix.job == 'bench') &&
-          matrix.profile == 'release' && (matrix.use_sysroot ||
-          (github.repository == 'denoland/deno' &&
-          (github.ref == 'refs/heads/main' ||
-          startsWith(github.ref, 'refs/tags/')))))
+          !(matrix.skip) && ((matrix.job == 'test' || matrix.job == 'bench') &&
+          matrix.profile == 'release' && (matrix.use_sysroot ||
+          github.repository == 'denoland/deno'))
         run: |-
           df -h
           cargo build --release --locked --all-targets
           df -h
+      - name: Check deno binary
+        if: '!(matrix.skip) && (matrix.job == ''test'')'
+        run: 'target/${{ matrix.profile }}/deno eval "console.log(1+2)" | grep 3'
+        env:
+          NO_COLOR: 1
+      - name: Check deno binary (in sysroot)
+        if: '!(matrix.skip) && (matrix.job == ''test'' && matrix.use_sysroot)'
+        run: 'sudo chroot /sysroot "$(pwd)/target/${{ matrix.profile }}/deno" --version'
       - name: Upload PR artifact (linux)
         if: |-
-          !(github.event_name == 'pull_request' && matrix.skip_pr) && (matrix.job == 'test' &&
+          !(matrix.skip) && (matrix.job == 'test' &&
           matrix.profile == 'release' && (matrix.use_sysroot ||
           (github.repository == 'denoland/deno' &&
           (github.ref == 'refs/heads/main' ||
           startsWith(github.ref, 'refs/tags/')))))
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
-          name: 'deno-${{ github.event.number }}'
+          name: 'deno-${{ matrix.os }}-${{ matrix.arch }}-${{ github.event.number }}'
           path: target/release/deno
       - name: Pre-release (linux)
         if: |-
-          !(github.event_name == 'pull_request' && matrix.skip_pr) && (startsWith(matrix.os, 'ubuntu') &&
+          !(matrix.skip) && (matrix.os == 'linux' &&
           matrix.job == 'test' &&
           matrix.profile == 'release' &&
           github.repository == 'denoland/deno')
         run: |-
           cd target/release
-          zip -r deno-x86_64-unknown-linux-gnu.zip deno
+          zip -r deno-${{ matrix.arch }}-unknown-linux-gnu.zip deno
+          shasum -a 256 deno-${{ matrix.arch }}-unknown-linux-gnu.zip > deno-${{ matrix.arch }}-unknown-linux-gnu.zip.sha256sum
+          strip denort
+          zip -r denort-${{ matrix.arch }}-unknown-linux-gnu.zip denort
+          shasum -a 256 denort-${{ matrix.arch }}-unknown-linux-gnu.zip > denort-${{ matrix.arch }}-unknown-linux-gnu.zip.sha256sum
           ./deno types > lib.deno.d.ts
       - name: Pre-release (mac)
         if: |-
-          !(github.event_name == 'pull_request' && matrix.skip_pr) && (startsWith(matrix.os, 'macOS') &&
+          !(matrix.skip) && (matrix.os == 'macos' &&
           matrix.job == 'test' &&
           matrix.profile == 'release' &&
-          github.repository == 'denoland/deno' &&
-          (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/')))
+          github.repository == 'denoland/deno')
+        env:
+          APPLE_CODESIGN_KEY: '${{ secrets.APPLE_CODESIGN_KEY }}'
+          APPLE_CODESIGN_PASSWORD: '${{ secrets.APPLE_CODESIGN_PASSWORD }}'
         run: |-
+          echo "Key is $(echo $APPLE_CODESIGN_KEY | base64 -d | wc -c) bytes"
+          rcodesign sign target/release/deno --code-signature-flags=runtime --p12-password="$APPLE_CODESIGN_PASSWORD" --p12-file=<(echo $APPLE_CODESIGN_KEY | base64 -d) --entitlements-xml-file=cli/entitlements.plist
           cd target/release
-          zip -r deno-x86_64-apple-darwin.zip deno
+          zip -r deno-${{ matrix.arch }}-apple-darwin.zip deno
+          shasum -a 256 deno-${{ matrix.arch }}-apple-darwin.zip > deno-${{ matrix.arch }}-apple-darwin.zip.sha256sum
+          strip denort
+          zip -r denort-${{ matrix.arch }}-apple-darwin.zip denort
+          shasum -a 256 denort-${{ matrix.arch }}-apple-darwin.zip > denort-${{ matrix.arch }}-apple-darwin.zip.sha256sum
       - name: Pre-release (windows)
         if: |-
-          !(github.event_name == 'pull_request' && matrix.skip_pr) && (startsWith(matrix.os, 'windows') &&
+          !(matrix.skip) && (matrix.os == 'windows' &&
           matrix.job == 'test' &&
           matrix.profile == 'release' &&
-          github.repository == 'denoland/deno' &&
-          (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/')))
+          github.repository == 'denoland/deno')
         shell: pwsh
-        run: Compress-Archive -CompressionLevel Optimal -Force -Path target/release/deno.exe -DestinationPath target/release/deno-x86_64-pc-windows-msvc.zip
-      - name: Upload canary to dl.deno.land (unix)
+        run: |-
+          Compress-Archive -CompressionLevel Optimal -Force -Path target/release/deno.exe -DestinationPath target/release/deno-${{ matrix.arch }}-pc-windows-msvc.zip
+          Get-FileHash target/release/deno-${{ matrix.arch }}-pc-windows-msvc.zip -Algorithm SHA256 | Format-List > target/release/deno-${{ matrix.arch }}-pc-windows-msvc.zip.sha256sum
+          Compress-Archive -CompressionLevel Optimal -Force -Path target/release/denort.exe -DestinationPath target/release/denort-${{ matrix.arch }}-pc-windows-msvc.zip
+          Get-FileHash target/release/denort-${{ matrix.arch }}-pc-windows-msvc.zip -Algorithm SHA256 | Format-List > target/release/denort-${{ matrix.arch }}-pc-windows-msvc.zip.sha256sum
+      - name: Upload canary to dl.deno.land
         if: |-
-          !(github.event_name == 'pull_request' && matrix.skip_pr) && (runner.os != 'Windows' &&
-          matrix.job == 'test' &&
+          !(matrix.skip) && (matrix.job == 'test' &&
           matrix.profile == 'release' &&
           github.repository == 'denoland/deno' &&
           github.ref == 'refs/heads/main')
-        run: 'gsutil -h "Cache-Control: public, max-age=3600" cp ./target/release/*.zip gs://dl.deno.land/canary/$(git rev-parse HEAD)/'
-      - name: Upload canary to dl.deno.land (windows)
-        if: |-
-          !(github.event_name == 'pull_request' && matrix.skip_pr) && (runner.os == 'Windows' &&
-          matrix.job == 'test' &&
-          matrix.profile == 'release' &&
-          github.repository == 'denoland/deno' &&
-          github.ref == 'refs/heads/main')
-        env:
-          CLOUDSDK_PYTHON: '${{env.pythonLocation}}\python.exe'
-        run: 'gsutil -h "Cache-Control: public, max-age=3600" cp ./target/release/*.zip gs://dl.deno.land/canary/$(git rev-parse HEAD)/'
+        run: |-
+          gsutil -h "Cache-Control: public, max-age=3600" cp ./target/release/*.zip gs://dl.deno.land/canary/$(git rev-parse HEAD)/
+          gsutil -h "Cache-Control: public, max-age=3600" cp ./target/release/*.sha256sum gs://dl.deno.land/canary/$(git rev-parse HEAD)/
+          echo ${{ github.sha }} > canary-latest.txt
+          gsutil -h "Cache-Control: no-cache" cp canary-latest.txt gs://dl.deno.land/canary-$(rustc -vV | sed -n "s|host: ||p")-latest.txt
       - name: Autobahn testsuite
         if: |-
-          !(github.event_name == 'pull_request' && matrix.skip_pr) && (matrix.job == 'test' && matrix.profile == 'release' &&
-          !startsWith(github.ref, 'refs/tags/') && startsWith(matrix.os, 'ubuntu'))
-        run: target/release/deno run -A --unstable ext/websocket/autobahn/fuzzingclient.js
-      - name: Test debug
+          !(matrix.skip) && ((matrix.os == 'linux' && matrix.arch != 'aarch64') &&
+          matrix.job == 'test' &&
+          matrix.profile == 'release' &&
+          !startsWith(github.ref, 'refs/tags/'))
+        run: target/release/deno run -A --config tests/config/deno.json ext/websocket/autobahn/fuzzingclient.js
+      - name: 'Test (full, debug)'
         if: |-
-          !(github.event_name == 'pull_request' && matrix.skip_pr) && (matrix.job == 'test' && matrix.profile == 'debug' &&
-          !startsWith(github.ref, 'refs/tags/') && startsWith(matrix.os, 'ubuntu'))
+          !(matrix.skip) && (matrix.job == 'test' &&
+          matrix.profile == 'debug' &&
+          !startsWith(github.ref, 'refs/tags/') &&
+          matrix.os == 'linux')
         run: cargo test --locked
         env:
           CARGO_PROFILE_DEV_DEBUG: 0
-      - name: Test debug (fast)
+      - name: 'Test (fast, debug)'
         if: |-
-          !(github.event_name == 'pull_request' && matrix.skip_pr) && (matrix.job == 'test' && matrix.profile == 'debug' &&
-          !startsWith(matrix.os, 'ubuntu'))
+          !(matrix.skip) && (matrix.job == 'test' &&
+          matrix.profile == 'debug' &&
+          (startsWith(github.ref, 'refs/tags/') || matrix.os != 'linux'))
         run: |-
           cargo test --locked --lib
-          cargo test --locked --test '*'
+          cargo test --locked --tests
         env:
           CARGO_PROFILE_DEV_DEBUG: 0
-      - name: Test examples debug
-        if: '!(github.event_name == ''pull_request'' && matrix.skip_pr) && (matrix.job == ''test'' && matrix.profile == ''debug'')'
-        run: |-
-          cargo run -p deno_runtime --example extension_with_esm
-          cargo run -p deno_runtime --example extension_with_esm --features include_js_files_for_snapshotting
-        env:
-          CARGO_PROFILE_DEV_DEBUG: 0
-      - name: Test release
+      - name: Test (release)
         if: |-
-          !(github.event_name == 'pull_request' && matrix.skip_pr) && (matrix.job == 'test' && matrix.profile == 'release' &&
+          !(matrix.skip) && (matrix.job == 'test' &&
+          matrix.profile == 'release' &&
           (matrix.use_sysroot || (
           github.repository == 'denoland/deno' &&
-          github.ref == 'refs/heads/main' && !startsWith(github.ref, 'refs/tags/'))))
+          !startsWith(github.ref, 'refs/tags/'))))
         run: cargo test --release --locked
-      - name: Check deno binary
-        if: '!(github.event_name == ''pull_request'' && matrix.skip_pr) && (matrix.profile == ''release'' && startsWith(github.ref, ''refs/tags/''))'
-        run: target/release/deno eval "console.log(1+2)" | grep 3
-        env:
-          NO_COLOR: 1
-      - name: Check deno binary (in sysroot)
-        if: '!(github.event_name == ''pull_request'' && matrix.skip_pr) && (matrix.profile == ''release'' && matrix.use_sysroot)'
-        run: sudo chroot /sysroot "$(pwd)/target/release/deno" --version
       - name: Configure hosts file for WPT
-        if: '!(github.event_name == ''pull_request'' && matrix.skip_pr) && (matrix.wpt)'
+        if: '!(matrix.skip) && (matrix.wpt)'
         run: ./wpt make-hosts-file | sudo tee -a /etc/hosts
-        working-directory: test_util/wpt/
+        working-directory: tests/wpt/suite/
       - name: Run web platform tests (debug)
-        if: '!(github.event_name == ''pull_request'' && matrix.skip_pr) && (matrix.wpt && matrix.profile == ''debug'')'
+        if: '!(matrix.skip) && (matrix.wpt && matrix.profile == ''debug'')'
         env:
           DENO_BIN: ./target/debug/deno
         run: |-
-          deno run --allow-env --allow-net --allow-read --allow-run \
-            --allow-write --unstable \
-            --lock=tools/deno.lock.json \
-            ./tools/wpt.ts setup
-          deno run --allow-env --allow-net --allow-read --allow-run \
-            --allow-write --unstable \
-            --lock=tools/deno.lock.json \
-            ./tools/wpt.ts run --quiet --binary="$DENO_BIN"
+          deno run -A --lock=tools/deno.lock.json --config tests/config/deno.json\
+            ./tests/wpt/wpt.ts setup
+          deno run -A --lock=tools/deno.lock.json --config tests/config/deno.json\
+            ./tests/wpt/wpt.ts run --quiet --binary="$DENO_BIN"
       - name: Run web platform tests (release)
-        if: '!(github.event_name == ''pull_request'' && matrix.skip_pr) && (matrix.wpt && matrix.profile == ''release'')'
+        if: '!(matrix.skip) && (matrix.wpt && matrix.profile == ''release'')'
         env:
           DENO_BIN: ./target/release/deno
         run: |-
-          deno run --allow-env --allow-net --allow-read --allow-run \
-            --allow-write --unstable \
-            --lock=tools/deno.lock.json \
-            ./tools/wpt.ts setup
-          deno run --allow-env --allow-net --allow-read --allow-run \
-            --allow-write --unstable \
-            --lock=tools/deno.lock.json \
-            ./tools/wpt.ts run --quiet --release \
+          deno run -A --lock=tools/deno.lock.json --config tests/config/deno.json\
+            ./tests/wpt/wpt.ts setup
+          deno run -A --lock=tools/deno.lock.json --config tests/config/deno.json\
+            ./tests/wpt/wpt.ts run --quiet --release \
             --binary="$DENO_BIN" \
             --json=wpt.json \
             --wptreport=wptreport.json
       - name: Upload wpt results to dl.deno.land
         continue-on-error: true
         if: |-
-          !(github.event_name == 'pull_request' && matrix.skip_pr) && (matrix.wpt &&
-          runner.os == 'Linux' &&
+          !(matrix.skip) && (matrix.wpt &&
+          matrix.os == 'linux' &&
           matrix.profile == 'release' &&
           github.repository == 'denoland/deno' &&
           github.ref == 'refs/heads/main' && !startsWith(github.ref, 'refs/tags/'))
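The pre-release steps above now publish a `.sha256sum` file next to every zip (`shasum -a 256` on unix, `Get-FileHash` on Windows). A consumer-side sketch of verifying a download against the unix-style checksum file using Deno's Web Crypto (hypothetical local file names; `Get-FileHash | Format-List` emits a different layout, so this assumes the `shasum` format of `<hex digest>  <file name>`):

```ts
// Verify a downloaded archive against its published .sha256sum file.
const zipName = "deno-x86_64-unknown-linux-gnu.zip"; // hypothetical local file

const data = await Deno.readFile(zipName);
const digest = await crypto.subtle.digest("SHA-256", data);
const hex = [...new Uint8Array(digest)]
  .map((b) => b.toString(16).padStart(2, "0"))
  .join("");

// First whitespace-separated field of shasum output is the hex digest.
const expected = (await Deno.readTextFile(`${zipName}.sha256sum`))
  .trim()
  .split(/\s+/)[0];

if (hex !== expected) {
  throw new Error(`checksum mismatch for ${zipName}`);
}
console.log("checksum OK");
```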
@@ -518,8 +565,8 @@
       - name: Upload wpt results to wpt.fyi
         continue-on-error: true
         if: |-
-          !(github.event_name == 'pull_request' && matrix.skip_pr) && (matrix.wpt &&
-          runner.os == 'Linux' &&
+          !(matrix.skip) && (matrix.wpt &&
+          matrix.os == 'linux' &&
           matrix.profile == 'release' &&
           github.repository == 'denoland/deno' &&
           github.ref == 'refs/heads/main' && !startsWith(github.ref, 'refs/tags/'))
@@ -531,11 +578,11 @@
           ./target/release/deno run --allow-all --lock=tools/deno.lock.json \
             ./tools/upload_wptfyi.js $(git rev-parse HEAD) --ghstatus
       - name: Run benchmarks
-        if: '!(github.event_name == ''pull_request'' && matrix.skip_pr) && (matrix.job == ''bench'' && !startsWith(github.ref, ''refs/tags/''))'
+        if: '!(matrix.skip) && (matrix.job == ''bench'' && !startsWith(github.ref, ''refs/tags/''))'
         run: cargo bench --locked
       - name: Post Benchmarks
         if: |-
-          !(github.event_name == 'pull_request' && matrix.skip_pr) && (matrix.job == 'bench' &&
+          !(matrix.skip) && (matrix.job == 'bench' &&
           github.repository == 'denoland/deno' &&
           github.ref == 'refs/heads/main' && !startsWith(github.ref, 'refs/tags/'))
         env:
@@ -544,8 +591,7 @@
           git clone --depth 1 --branch gh-pages \
             https://${DENOBOT_PAT}@github.com/denoland/benchmark_data.git \
             gh-pages
-          ./target/release/deno run --allow-all --unstable \
-            ./tools/build_benchmark_jsons.js --release
+          ./target/release/deno run --allow-all ./tools/build_benchmark_jsons.js --release
           cd gh-pages
           git config user.email "propelml@gmail.com"
           git config user.name "denobot"
@@ -553,36 +599,41 @@
           git commit --message "Update benchmarks"
           git push origin gh-pages
       - name: Build product size info
-        if: '!(github.event_name == ''pull_request'' && matrix.skip_pr) && (matrix.job != ''lint'' && matrix.profile != ''debug'' && github.repository == ''denoland/deno'' && (github.ref == ''refs/heads/main'' || startsWith(github.ref, ''refs/tags/'')))'
+        if: '!(matrix.skip) && (matrix.job != ''lint'' && matrix.profile != ''debug'' && github.repository == ''denoland/deno'' && (github.ref == ''refs/heads/main'' || startsWith(github.ref, ''refs/tags/'')))'
         run: |-
           du -hd1 "./target/${{ matrix.profile }}"
           du -ha "./target/${{ matrix.profile }}/deno"
+          du -ha "./target/${{ matrix.profile }}/denort"
       - name: Worker info
-        if: '!(github.event_name == ''pull_request'' && matrix.skip_pr) && (matrix.job == ''bench'')'
+        if: '!(matrix.skip) && (matrix.job == ''bench'')'
         run: |-
           cat /proc/cpuinfo
           cat /proc/meminfo
       - name: Upload release to dl.deno.land (unix)
         if: |-
-          !(github.event_name == 'pull_request' && matrix.skip_pr) && (runner.os != 'Windows' &&
+          !(matrix.skip) && (matrix.os != 'windows' &&
           matrix.job == 'test' &&
           matrix.profile == 'release' &&
           github.repository == 'denoland/deno' &&
           startsWith(github.ref, 'refs/tags/'))
-        run: 'gsutil -h "Cache-Control: public, max-age=3600" cp ./target/release/*.zip gs://dl.deno.land/release/${GITHUB_REF#refs/*/}/'
+        run: |-
+          gsutil -h "Cache-Control: public, max-age=3600" cp ./target/release/*.zip gs://dl.deno.land/release/${GITHUB_REF#refs/*/}/
+          gsutil -h "Cache-Control: public, max-age=3600" cp ./target/release/*.sha256sum gs://dl.deno.land/release/${GITHUB_REF#refs/*/}/
       - name: Upload release to dl.deno.land (windows)
         if: |-
-          !(github.event_name == 'pull_request' && matrix.skip_pr) && (runner.os == 'Windows' &&
+          !(matrix.skip) && (matrix.os == 'windows' &&
           matrix.job == 'test' &&
           matrix.profile == 'release' &&
           github.repository == 'denoland/deno' &&
           startsWith(github.ref, 'refs/tags/'))
         env:
           CLOUDSDK_PYTHON: '${{env.pythonLocation}}\python.exe'
-        run: 'gsutil -h "Cache-Control: public, max-age=3600" cp ./target/release/*.zip gs://dl.deno.land/release/${GITHUB_REF#refs/*/}/'
+        run: |-
+          gsutil -h "Cache-Control: public, max-age=3600" cp ./target/release/*.zip gs://dl.deno.land/release/${GITHUB_REF#refs/*/}/
+          gsutil -h "Cache-Control: public, max-age=3600" cp ./target/release/*.sha256sum gs://dl.deno.land/release/${GITHUB_REF#refs/*/}/
       - name: Create release notes
         if: |-
-          !(github.event_name == 'pull_request' && matrix.skip_pr) && (matrix.job == 'test' &&
+          !(matrix.skip) && (matrix.job == 'test' &&
           matrix.profile == 'release' &&
           github.repository == 'denoland/deno' &&
           startsWith(github.ref, 'refs/tags/'))
@@ -592,7 +643,7 @@ jobs:
- name: Upload release to GitHub - name: Upload release to GitHub
uses: softprops/action-gh-release@v0.1.15 uses: softprops/action-gh-release@v0.1.15
if: |- if: |-
!(github.event_name == 'pull_request' && matrix.skip_pr) && (matrix.job == 'test' && !(matrix.skip) && (matrix.job == 'test' &&
matrix.profile == 'release' && matrix.profile == 'release' &&
github.repository == 'denoland/deno' && github.repository == 'denoland/deno' &&
startsWith(github.ref, 'refs/tags/')) startsWith(github.ref, 'refs/tags/'))
@@ -601,22 +652,40 @@ jobs:
with: with:
files: |- files: |-
target/release/deno-x86_64-pc-windows-msvc.zip target/release/deno-x86_64-pc-windows-msvc.zip
target/release/deno-x86_64-pc-windows-msvc.zip.sha256sum
target/release/denort-x86_64-pc-windows-msvc.zip
target/release/denort-x86_64-pc-windows-msvc.zip.sha256sum
target/release/deno-x86_64-unknown-linux-gnu.zip target/release/deno-x86_64-unknown-linux-gnu.zip
target/release/deno-x86_64-unknown-linux-gnu.zip.sha256sum
target/release/denort-x86_64-unknown-linux-gnu.zip
target/release/denort-x86_64-unknown-linux-gnu.zip.sha256sum
target/release/deno-x86_64-apple-darwin.zip target/release/deno-x86_64-apple-darwin.zip
target/release/deno-x86_64-apple-darwin.zip.sha256sum
target/release/denort-x86_64-apple-darwin.zip
target/release/denort-x86_64-apple-darwin.zip.sha256sum
target/release/deno-aarch64-unknown-linux-gnu.zip
target/release/deno-aarch64-unknown-linux-gnu.zip.sha256sum
target/release/denort-aarch64-unknown-linux-gnu.zip
target/release/denort-aarch64-unknown-linux-gnu.zip.sha256sum
target/release/deno-aarch64-apple-darwin.zip
target/release/deno-aarch64-apple-darwin.zip.sha256sum
target/release/denort-aarch64-apple-darwin.zip
target/release/denort-aarch64-apple-darwin.zip.sha256sum
target/release/deno_src.tar.gz target/release/deno_src.tar.gz
target/release/lib.deno.d.ts target/release/lib.deno.d.ts
body_path: target/release/release-notes.md body_path: target/release/release-notes.md
draft: true draft: true
- name: Save cache build output (main) - name: Save cache build output (main)
uses: actions/cache/save@v3 uses: actions/cache/save@v4
if: '!(github.event_name == ''pull_request'' && matrix.skip_pr) && ((matrix.job == ''test'' || matrix.job == ''lint'') && github.ref == ''refs/heads/main'')' if: '!(matrix.skip) && ((matrix.job == ''test'' || matrix.job == ''lint'') && github.ref == ''refs/heads/main'')'
with: with:
path: |- path: |-
./target ./target
!./target/*/gn_out !./target/*/gn_out
!./target/*/*.zip !./target/*/*.zip
!./target/*/*.sha256sum
!./target/*/*.tar.gz !./target/*/*.tar.gz
key: '51-cargo-target-${{ matrix.os }}-${{ matrix.profile }}-${{ matrix.job }}-${{ github.sha }}' key: '22-cargo-target-${{ matrix.os }}-${{ matrix.arch }}-${{ matrix.profile }}-${{ matrix.job }}-${{ github.sha }}'
publish-canary: publish-canary:
name: publish canary name: publish canary
runs-on: ubuntu-22.04 runs-on: ubuntu-22.04
@@ -625,14 +694,14 @@ jobs:
if: github.repository == 'denoland/deno' && github.ref == 'refs/heads/main' if: github.repository == 'denoland/deno' && github.ref == 'refs/heads/main'
steps: steps:
- name: Authenticate with Google Cloud - name: Authenticate with Google Cloud
uses: google-github-actions/auth@v1 uses: google-github-actions/auth@v2
with: with:
project_id: denoland project_id: denoland
credentials_json: '${{ secrets.GCP_SA_KEY }}' credentials_json: '${{ secrets.GCP_SA_KEY }}'
export_environment_variables: true export_environment_variables: true
create_credentials_file: true create_credentials_file: true
- name: Setup gcloud - name: Setup gcloud
uses: google-github-actions/setup-gcloud@v1 uses: google-github-actions/setup-gcloud@v2
with: with:
project_id: denoland project_id: denoland
- name: Upload canary version file to dl.deno.land - name: Upload canary version file to dl.deno.land

@@ -0,0 +1,62 @@
name: promote_to_release
on:
workflow_dispatch:
inputs:
releaseKind:
description: 'Kind of release'
type: choice
options:
- rc
- lts
required: true
commitHash:
description: Commit to promote to release
required: true
jobs:
promote-to-release:
name: Promote to Release
runs-on: macOS-latest
if: github.repository == 'denoland/deno'
steps:
- name: Clone repository
uses: actions/checkout@v4
with:
token: ${{ secrets.DENOBOT_PAT }}
submodules: recursive
- name: Authenticate with Google Cloud
uses: google-github-actions/auth@v1
with:
project_id: denoland
credentials_json: ${{ secrets.GCP_SA_KEY }}
export_environment_variables: true
create_credentials_file: true
- name: Setup gcloud
uses: google-github-actions/setup-gcloud@v1
with:
project_id: denoland
- name: Install deno
uses: denoland/setup-deno@v2
with:
deno-version: v1.x
- name: Install rust-codesign
run: |-
./tools/install_prebuilt.js rcodesign
echo $GITHUB_WORKSPACE/third_party/prebuilt/mac >> $GITHUB_PATH
- name: Promote to Release
env:
APPLE_CODESIGN_KEY: '${{ secrets.APPLE_CODESIGN_KEY }}'
APPLE_CODESIGN_PASSWORD: '${{ secrets.APPLE_CODESIGN_PASSWORD }}'
run: |
deno run -A ./tools/release/promote_to_release.ts ${{github.event.inputs.releaseKind}} ${{github.event.inputs.commitHash}}
- name: Upload archives to dl.deno.land
run: |
gsutil -h "Cache-Control: public, max-age=3600" cp ./*.zip gs://dl.deno.land/release/$(cat release-${{github.event.inputs.releaseKind}}-latest.txt)/
gsutil -h "Cache-Control: no-cache" cp release-${{github.event.inputs.releaseKind}}-latest.txt gs://dl.deno.land/release-${{github.event.inputs.releaseKind}}-latest.txt

@@ -8,9 +8,9 @@ on:
default: 'patch' default: 'patch'
type: choice type: choice
options: options:
- patch - patch
- minor - minor
- major - major
required: true required: true
jobs: jobs:
@@ -31,14 +31,12 @@ jobs:
git config --global fetch.parallel 32 git config --global fetch.parallel 32
- name: Clone repository - name: Clone repository
uses: actions/checkout@v3 uses: actions/checkout@v4
- name: Install deno - name: Install deno
uses: denoland/setup-deno@v1 uses: denoland/setup-deno@v2
with: with:
# use a recent version instead of the latest version in case deno-version: v1.x
# the latest version ever has issues that breaks publishing
deno-version: v1.31.3
- name: Create Gist URL - name: Create Gist URL
env: env:

@@ -8,9 +8,9 @@ on:
default: 'patch' default: 'patch'
type: choice type: choice
options: options:
- patch - patch
- minor - minor
- major - major
required: true required: true
jobs: jobs:
@@ -31,7 +31,7 @@ jobs:
git config --global fetch.parallel 32 git config --global fetch.parallel 32
- name: Clone repository - name: Clone repository
uses: actions/checkout@v3 uses: actions/checkout@v4
with: with:
token: ${{ secrets.DENOBOT_PAT }} token: ${{ secrets.DENOBOT_PAT }}
submodules: recursive submodules: recursive
@@ -39,11 +39,9 @@ jobs:
- uses: dsherret/rust-toolchain-file@v1 - uses: dsherret/rust-toolchain-file@v1
- name: Install deno - name: Install deno
uses: denoland/setup-deno@v1 uses: denoland/setup-deno@v2
with: with:
# use a recent version instead of the latest version in case deno-version: v1.x
# the latest version ever has issues that breaks publishing
deno-version: v1.31.3
- name: Run version bump - name: Run version bump
run: | run: |

@@ -24,20 +24,20 @@ jobs:
steps: steps:
- name: Clone repository - name: Clone repository
uses: actions/checkout@v3 uses: actions/checkout@v4
with: with:
submodules: true submodules: true
persist-credentials: false persist-credentials: false
- name: Setup Deno - name: Setup Deno
uses: denoland/setup-deno@v1 uses: denoland/setup-deno@v2
with: with:
deno-version: ${{ matrix.deno-version }} deno-version: ${{ matrix.deno-version }}
- name: Install Python - name: Install Python
uses: actions/setup-python@v4 uses: actions/setup-python@v5
with: with:
python-version: "3.11" python-version: '3.11'
- name: Log versions - name: Log versions
run: | run: |
@@ -45,7 +45,7 @@ jobs:
deno --version deno --version
- name: Switch WPT submodule to epochs/daily - name: Switch WPT submodule to epochs/daily
working-directory: test_util/wpt/ working-directory: tests/wpt/suite/
shell: bash shell: bash
run: | run: |
git remote set-branches origin '*' git remote set-branches origin '*'
@@ -55,24 +55,22 @@ jobs:
- name: Configure hosts file for WPT (unix) - name: Configure hosts file for WPT (unix)
if: runner.os != 'Windows' if: runner.os != 'Windows'
working-directory: test_util/wpt/ working-directory: tests/wpt/suite/
run: ./wpt make-hosts-file | sudo tee -a /etc/hosts run: ./wpt make-hosts-file | sudo tee -a /etc/hosts
- name: Configure hosts file for WPT (windows) - name: Configure hosts file for WPT (windows)
if: runner.os == 'Windows' if: runner.os == 'Windows'
working-directory: test_util/wpt/ working-directory: tests/wpt/suite/
run: python wpt make-hosts-file | Out-File $env:SystemRoot\System32\drivers\etc\hosts -Encoding ascii -Append run: python wpt make-hosts-file | Out-File $env:SystemRoot\System32\drivers\etc\hosts -Encoding ascii -Append
- name: Run web platform tests - name: Run web platform tests
shell: bash shell: bash
run: | run: |
deno run --unstable --allow-write --allow-read --allow-net \ deno run -A --lock=tools/deno.lock.json --config=tests/config/deno.json \
--allow-env --allow-run --lock=tools/deno.lock.json \ ./tests/wpt/wpt.ts setup
./tools/wpt.ts setup deno run -A --lock=tools/deno.lock.json --config=tests/config/deno.json \
deno run --unstable --allow-write --allow-read --allow-net \ ./tests/wpt/wpt.ts run \ \
--allow-env --allow-run --lock=tools/deno.lock.json \ --binary=$(which deno) --quiet --release --no-ignore --json=wpt.json --wptreport=wptreport.json --exit-zero
./tools/wpt.ts run \
--binary=$(which deno) --quiet --release --no-ignore --json=wpt.json --wptreport=wptreport.json || true
- name: Upload wpt results to wpt.fyi - name: Upload wpt results to wpt.fyi
env: env:

.gitignore

@@ -10,11 +10,11 @@
gclient_config.py_entries gclient_config.py_entries
/target/ /target/
/std/hash/_wasm/target /std/hash/_wasm/target
/tools/wpt/manifest.json /tests/wpt/runner/manifest.json
/third_party/ /third_party/
/test_napi/node_modules /tests/napi/node_modules
/test_napi/build /tests/napi/build
/test_napi/third_party_tests/node_modules /tests/napi/third_party_tests/node_modules
# MacOS generated files # MacOS generated files
.DS_Store .DS_Store
@@ -25,10 +25,14 @@ gclient_config.py_entries
/flamegraph*.svg /flamegraph*.svg
# WPT generated cert files # WPT generated cert files
/tools/wpt/certs/index.txt* /tests/wpt/runner/certs/index.txt*
/tools/wpt/certs/serial* /tests/wpt/runner/certs/serial*
/ext/websocket/autobahn/reports /ext/websocket/autobahn/reports
# JUnit files produced by deno test --junit # JUnit files produced by deno test --junit
junit.xml junit.xml
# Jupyter files
.ipynb_checkpoints/
Untitled*.ipynb

.gitmodules

@@ -1,11 +1,16 @@
[submodule "test_util/std"] [submodule "tests/util/std"]
path = test_util/std path = tests/util/std
url = https://github.com/denoland/deno_std url = https://github.com/denoland/deno_std
shallow = true shallow = true
[submodule "test_util/wpt"] [submodule "tests/wpt/suite"]
path = test_util/wpt path = tests/wpt/suite
url = https://github.com/web-platform-tests/wpt.git url = https://github.com/web-platform-tests/wpt.git
shallow = true
[submodule "tools/node_compat/node"] [submodule "tests/node_compat/runner/suite"]
path = tools/node_compat/node path = tests/node_compat/runner/suite
url = https://github.com/denoland/node_test.git url = https://github.com/denoland/node_test.git
shallow = true
[submodule "cli/bench/testdata/lsp_benchdata"]
path = cli/bench/testdata/lsp_benchdata
url = https://github.com/denoland/deno_lsp_benchdata.git
shallow = true

Cargo.lock (generated): diff suppressed because it is too large.

@@ -1,18 +1,15 @@
# Copyright 2018-2023 the Deno authors. All rights reserved. MIT license. # Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
[workspace] [workspace]
resolver = "2" resolver = "2"
members = [ members = [
"bench_util", "bench_util",
"cli", "cli",
"cli/napi/sym",
"runtime",
"test_ffi",
"test_napi",
"test_util",
"ext/broadcast_channel", "ext/broadcast_channel",
"ext/cache", "ext/cache",
"ext/canvas",
"ext/console", "ext/console",
"ext/cron",
"ext/crypto", "ext/crypto",
"ext/fetch", "ext/fetch",
"ext/ffi", "ext/ffi",
@@ -20,16 +17,26 @@ members = [
"ext/http", "ext/http",
"ext/io", "ext/io",
"ext/kv", "ext/kv",
"ext/napi",
"ext/napi/sym",
"ext/net", "ext/net",
"ext/node", "ext/node",
"ext/url", "ext/url",
"ext/web", "ext/web",
"ext/webgpu",
"ext/webidl", "ext/webidl",
"ext/websocket", "ext/websocket",
"ext/webstorage", "ext/webstorage",
"ext/napi", "resolvers/deno",
"resolvers/node",
"runtime",
"runtime/permissions",
"tests",
"tests/ffi",
"tests/napi",
"tests/util/server",
] ]
exclude = ["test_util/std/hash/_wasm"] exclude = ["tests/util/std/hash/_wasm"]
[workspace.package] [workspace.package]
authors = ["the Deno authors"] authors = ["the Deno authors"]
@ -38,138 +45,190 @@ license = "MIT"
repository = "https://github.com/denoland/deno" repository = "https://github.com/denoland/deno"
[workspace.dependencies] [workspace.dependencies]
deno_ast = { version = "0.28.0", features = ["transpiling"] } deno_ast = { version = "=0.42.2", features = ["transpiling"] }
deno_core = { version = "0.314.2" }
deno_core = "0.204.0" deno_bench_util = { version = "0.168.0", path = "./bench_util" }
deno_lockfile = "=0.23.1"
deno_media_type = { version = "0.1.4", features = ["module_specifier"] }
deno_npm = "=0.25.4"
deno_path_util = "=0.2.1"
deno_permissions = { version = "0.34.0", path = "./runtime/permissions" }
deno_runtime = { version = "0.183.0", path = "./runtime" }
deno_semver = "=0.5.16"
deno_terminal = "0.2.0"
napi_sym = { version = "0.104.0", path = "./ext/napi/sym" }
test_util = { package = "test_server", path = "./tests/util/server" }
deno_runtime = { version = "0.125.0", path = "./runtime" } denokv_proto = "0.8.1"
napi_sym = { version = "0.47.0", path = "./cli/napi/sym" } denokv_remote = "0.8.1"
deno_bench_util = { version = "0.111.0", path = "./bench_util" } # denokv_sqlite brings in bundled sqlite if we don't disable the default features
test_util = { path = "./test_util" } denokv_sqlite = { default-features = false, version = "0.8.2" }
deno_lockfile = "0.15.0"
deno_media_type = { version = "0.1.1", features = ["module_specifier"] }
deno_npm = "0.12.0"
deno_semver = "0.4.0"
# exts # exts
deno_broadcast_channel = { version = "0.111.0", path = "./ext/broadcast_channel" } deno_broadcast_channel = { version = "0.168.0", path = "./ext/broadcast_channel" }
deno_cache = { version = "0.49.0", path = "./ext/cache" } deno_cache = { version = "0.106.0", path = "./ext/cache" }
deno_console = { version = "0.117.0", path = "./ext/console" } deno_canvas = { version = "0.43.0", path = "./ext/canvas" }
deno_crypto = { version = "0.131.0", path = "./ext/crypto" } deno_console = { version = "0.174.0", path = "./ext/console" }
deno_fetch = { version = "0.141.0", path = "./ext/fetch" } deno_cron = { version = "0.54.0", path = "./ext/cron" }
deno_ffi = { version = "0.104.0", path = "./ext/ffi" } deno_crypto = { version = "0.188.0", path = "./ext/crypto" }
deno_fs = { version = "0.27.0", path = "./ext/fs" } deno_fetch = { version = "0.198.0", path = "./ext/fetch" }
deno_http = { version = "0.112.0", path = "./ext/http" } deno_ffi = { version = "0.161.0", path = "./ext/ffi" }
deno_io = { version = "0.27.0", path = "./ext/io" } deno_fs = { version = "0.84.0", path = "./ext/fs" }
deno_net = { version = "0.109.0", path = "./ext/net" } deno_http = { version = "0.172.0", path = "./ext/http" }
deno_node = { version = "0.54.0", path = "./ext/node" } deno_io = { version = "0.84.0", path = "./ext/io" }
deno_kv = { version = "0.25.0", path = "./ext/kv" } deno_kv = { version = "0.82.0", path = "./ext/kv" }
deno_tls = { version = "0.104.0", path = "./ext/tls" } deno_napi = { version = "0.105.0", path = "./ext/napi" }
deno_url = { version = "0.117.0", path = "./ext/url" } deno_net = { version = "0.166.0", path = "./ext/net" }
deno_web = { version = "0.148.0", path = "./ext/web" } deno_node = { version = "0.111.0", path = "./ext/node" }
deno_webidl = { version = "0.117.0", path = "./ext/webidl" } deno_tls = { version = "0.161.0", path = "./ext/tls" }
deno_websocket = { version = "0.122.0", path = "./ext/websocket" } deno_url = { version = "0.174.0", path = "./ext/url" }
deno_webstorage = { version = "0.112.0", path = "./ext/webstorage" } deno_web = { version = "0.205.0", path = "./ext/web" }
deno_napi = { version = "0.47.0", path = "./ext/napi" } deno_webgpu = { version = "0.141.0", path = "./ext/webgpu" }
deno_webidl = { version = "0.174.0", path = "./ext/webidl" }
deno_websocket = { version = "0.179.0", path = "./ext/websocket" }
deno_webstorage = { version = "0.169.0", path = "./ext/webstorage" }
# resolvers
deno_resolver = { version = "0.6.0", path = "./resolvers/deno" }
node_resolver = { version = "0.13.0", path = "./resolvers/node" }
aes = "=0.8.3" aes = "=0.8.3"
anyhow = "1.0.57" anyhow = "1.0.57"
async-trait = "0.1.73" async-trait = "0.1.73"
# TODO(mmastrac): Requires code changes to bump base32 = "=0.5.1"
base64 = "=0.13.1" base64 = "0.21.7"
bencher = "0.1" bencher = "0.1"
brotli = "3.3.4" brotli = "6.0.0"
bytes = "1.4.0" bytes = "1.4.0"
cache_control = "=0.2.0" cache_control = "=0.2.0"
cbc = { version = "=0.1.2", features = ["alloc"] } cbc = { version = "=0.1.2", features = ["alloc"] }
chrono = { version = "=0.4.26", default-features = false, features = ["std", "serde", "clock"] } # Note: Do not use the "clock" feature of chrono, as it links us to CoreFoundation on macOS.
# Instead use util::time::utc_now()
chrono = { version = "0.4", default-features = false, features = ["std", "serde"] }
color-print = "0.3.5"
console_static_text = "=0.8.1" console_static_text = "=0.8.1"
dashmap = "5.5.3"
data-encoding = "2.3.3"
data-url = "=0.3.0" data-url = "=0.3.0"
dlopen = "0.1.8" deno_cache_dir = "=0.13.0"
encoding_rs = "=0.8.33" deno_package_json = { version = "0.1.2", default-features = false }
dlopen2 = "0.6.1"
ecb = "=0.1.2" ecb = "=0.1.2"
fastwebsockets = "=0.4.4" elliptic-curve = { version = "0.13.4", features = ["alloc", "arithmetic", "ecdh", "std", "pem", "jwk"] }
encoding_rs = "=0.8.33"
fast-socks5 = "0.9.6"
faster-hex = "0.9"
fastwebsockets = { version = "0.8", features = ["upgrade", "unstable-split"] }
filetime = "0.2.16" filetime = "0.2.16"
flate2 = { version = "1.0.26", features = ["zlib-ng"], default-features = false } flate2 = { version = "1.0.30", default-features = false }
fs3 = "0.5.0" fs3 = "0.5.0"
futures = "0.3.21" futures = "0.3.21"
glob = "0.3.1" glob = "0.3.1"
hex = "0.4" h2 = "0.4.4"
http = "0.2.9" http = "1.0"
http-body = "1.0"
http-body-util = "0.1.2"
http_v02 = { package = "http", version = "0.2.9" }
httparse = "1.8.0" httparse = "1.8.0"
hyper = { version = "0.14.26", features = ["runtime", "http1"] } hyper = { version = "1.4.1", features = ["full"] }
# TODO(mmastrac): indexmap 2.0 will require multiple synchronized changes hyper-rustls = { version = "0.27.2", default-features = false, features = ["http1", "http2", "tls12", "ring"] }
indexmap1 = { package = "indexmap", version = "1", features = ["serde"] } hyper-util = { version = "=0.1.7", features = ["tokio", "client", "client-legacy", "server", "server-auto"] }
hyper_v014 = { package = "hyper", version = "0.14.26", features = ["runtime", "http1"] }
indexmap = { version = "2", features = ["serde"] } indexmap = { version = "2", features = ["serde"] }
ipnet = "2.3"
jsonc-parser = { version = "=0.26.2", features = ["serde"] }
lazy-regex = "3"
libc = "0.2.126" libc = "0.2.126"
log = "=0.4.20" libz-sys = { version = "1.1.20", default-features = false }
lsp-types = "=0.93.2" # used by tower-lsp and "proposed" feature is unstable in patch releases log = "0.4.20"
lsp-types = "=0.97.0" # used by tower-lsp and "proposed" feature is unstable in patch releases
memmem = "0.1.1" memmem = "0.1.1"
notify = "=5.0.0" monch = "=0.5.0"
notify = "=6.1.1"
num-bigint = { version = "0.4", features = ["rand"] } num-bigint = { version = "0.4", features = ["rand"] }
once_cell = "1.17.1" once_cell = "1.17.1"
os_pipe = "=1.1.4" os_pipe = { version = "=1.1.5", features = ["io_safety"] }
p224 = { version = "0.13.0", features = ["ecdh"] }
p256 = { version = "0.13.2", features = ["ecdh", "jwk"] }
p384 = { version = "0.13.0", features = ["ecdh", "jwk"] }
parking_lot = "0.12.0" parking_lot = "0.12.0"
percent-encoding = "=2.3.0" percent-encoding = "2.3.0"
phf = { version = "0.11", features = ["macros"] }
pin-project = "1.0.11" # don't pin because they yank crates from cargo pin-project = "1.0.11" # don't pin because they yank crates from cargo
pretty_assertions = "=1.4.0" pretty_assertions = "=1.4.0"
prost = "0.11" prost = "0.11"
prost-build = "0.11" prost-build = "0.11"
rand = "=0.8.5" rand = "=0.8.5"
regex = "^1.7.0" regex = "^1.7.0"
lazy-regex = "3" reqwest = { version = "=0.12.5", default-features = false, features = ["rustls-tls", "stream", "gzip", "brotli", "socks", "json", "http2"] } # pinned because of https://github.com/seanmonstar/reqwest/pull/1955
reqwest = { version = "0.11.20", default-features = false, features = ["rustls-tls", "stream", "gzip", "brotli", "socks", "json"] } ring = "^0.17.0"
ring = "=0.16.20" rusqlite = { version = "0.32.0", features = ["unlock_notify", "bundled"] }
rusqlite = { version = "=0.29.0", features = ["unlock_notify", "bundled"] } rustls = { version = "0.23.11", default-features = false, features = ["logging", "std", "tls12", "ring"] }
rustls = "0.21.0" rustls-pemfile = "2"
rustls-pemfile = "1.0.0" rustls-tokio-stream = "=0.3.0"
rustls-webpki = "0.101.4" rustls-webpki = "0.102"
rustls-native-certs = "0.6.2" rustyline = "=13.0.0"
webpki-roots = "0.25.2" saffron = "=0.1.0"
scopeguard = "1.2.0"
sec1 = "0.7"
serde = { version = "1.0.149", features = ["derive"] } serde = { version = "1.0.149", features = ["derive"] }
serde_bytes = "0.11" serde_bytes = "0.11"
serde_json = "1.0.85" serde_json = "1.0.85"
serde_repr = "=0.1.16" serde_repr = "=0.1.16"
sha2 = { version = "0.10.6", features = ["oid"] } sha1 = { version = "0.10.6", features = ["oid"] }
signature = "=1.6.4" sha2 = { version = "0.10.8", features = ["oid"] }
signature = "2.1"
slab = "0.4" slab = "0.4"
smallvec = "1.8" smallvec = "1.8"
socket2 = { version = "0.5.3", features = ["all"] } socket2 = { version = "0.5.3", features = ["all"] }
spki = "0.7.2"
tar = "=0.4.40" tar = "=0.4.40"
tempfile = "3.4.0" tempfile = "3.4.0"
thiserror = "1.0.40" termcolor = "1.1.3"
tokio = { version = "1.28.1", features = ["full"] } thiserror = "1.0.61"
tokio = { version = "1.36.0", features = ["full"] }
tokio-metrics = { version = "0.3.0", features = ["rt"] } tokio-metrics = { version = "0.3.0", features = ["rt"] }
tokio-rustls = "0.24.0" tokio-rustls = { version = "0.26.0", default-features = false, features = ["ring", "tls12"] }
tokio-socks = "0.5.1"
tokio-util = "0.7.4" tokio-util = "0.7.4"
tower-lsp = { version = "=0.17.0", features = ["proposed"] } tower = { version = "0.4.13", default-features = false, features = ["util"] }
url = { version = "2.3.1", features = ["serde", "expose_internals"] } tower-http = { version = "0.6.1", features = ["decompression-br", "decompression-gzip"] }
tower-lsp = { package = "deno_tower_lsp", version = "0.1.0", features = ["proposed"] }
tower-service = "0.3.2"
twox-hash = "=1.6.3"
# Upgrading past 2.4.1 may cause WPT failures
url = { version = "< 2.5.0", features = ["serde", "expose_internals"] }
uuid = { version = "1.3.0", features = ["v4"] } uuid = { version = "1.3.0", features = ["v4"] }
webpki-root-certs = "0.26.5"
webpki-roots = "0.26"
which = "4.2.5"
yoke = { version = "0.7.4", features = ["derive"] }
zeromq = { version = "=0.4.0", default-features = false, features = ["tcp-transport", "tokio-runtime"] }
zstd = "=0.12.4" zstd = "=0.12.4"
elliptic-curve = { version = "0.13.4", features = ["alloc", "arithmetic", "ecdh", "std", "pem"] }
p224 = { version = "0.13.0", features = ["ecdh"] }
p256 = { version = "0.13.2", features = ["ecdh"] }
p384 = { version = "0.13.0", features = ["ecdh"] }
# crypto # crypto
rsa = { version = "0.7.0", default-features = false, features = ["std", "pem", "hazmat"] } # hazmat needed for PrehashSigner in ext/node
hkdf = "0.12.3" hkdf = "0.12.3"
rsa = { version = "0.9.3", default-features = false, features = ["std", "pem", "hazmat"] } # hazmat needed for PrehashSigner in ext/node
# webgpu
raw-window-handle = "0.6.0"
wgpu-core = "0.21.1"
wgpu-types = "0.20"
# macros # macros
proc-macro2 = "1"
quote = "1" quote = "1"
syn = { version = "2", features = ["full", "extra-traits"] } syn = { version = "2", features = ["full", "extra-traits"] }
# unix # unix
nix = "=0.26.2" nix = "=0.27.1"
# windows deps # windows deps
fwdansi = "=1.1.0" junction = "=0.2.0"
winres = "=0.1.12"
winapi = "=0.3.9" winapi = "=0.3.9"
windows-sys = { version = "0.48.0", features = ["Win32_Media"] } windows-sys = { version = "0.52.0", features = ["Win32_Foundation", "Win32_Media", "Win32_Storage_FileSystem", "Win32_System_IO", "Win32_System_WindowsProgramming", "Wdk", "Wdk_System", "Wdk_System_SystemInformation", "Win32_Security", "Win32_System_Pipes", "Wdk_Storage_FileSystem", "Win32_System_Registry", "Win32_System_Kernel"] }
winres = "=0.1.12"
# NB: the `bench` and `release` profiles must remain EXACTLY the same.
[profile.release] [profile.release]
codegen-units = 1 codegen-units = 1
incremental = true incremental = true
@@ -181,150 +240,89 @@ opt-level = 'z' # Optimize for size
inherits = "release" inherits = "release"
debug = true debug = true
# NB: the `bench` and `release` profiles must remain EXACTLY the same. # Faster to compile than `release` but with similar performance.
[profile.bench] [profile.release-lite]
codegen-units = 1 inherits = "release"
incremental = true codegen-units = 128
lto = true lto = "thin"
opt-level = 'z' # Optimize for size
# Key generation is too slow on `debug` # Key generation is too slow on `debug`
[profile.dev.package.num-bigint-dig] [profile.dev.package.num-bigint-dig]
opt-level = 3 opt-level = 3
# Optimize these packages for performance. # rusty-v8 needs at least -O1 to not miscompile
# NB: the `bench` and `release` profiles must remain EXACTLY the same. [profile.dev.package.v8]
[profile.bench.package.rand] opt-level = 1
opt-level = 3
[profile.bench.package.flate2]
opt-level = 3
[profile.bench.package.brotli]
opt-level = 3
[profile.bench.package.miniz_oxide]
opt-level = 3
[profile.bench.package.async-compression]
opt-level = 3
[profile.bench.package.brotli-decompressor]
opt-level = 3
[profile.bench.package.deno_bench_util]
opt-level = 3
[profile.bench.package.deno_core]
opt-level = 3
[profile.bench.package.deno_runtime]
opt-level = 3
[profile.bench.package.deno_http]
opt-level = 3
[profile.bench.package.deno_web]
opt-level = 3
[profile.bench.package.deno_broadcast_channel]
opt-level = 3
[profile.bench.package.deno_fetch]
opt-level = 3
[profile.bench.package.deno_ffi]
opt-level = 3
[profile.bench.package.deno_tls]
opt-level = 3
[profile.bench.package.deno_websocket]
opt-level = 3
[profile.bench.package.deno_net]
opt-level = 3
[profile.bench.package.deno_crypto]
opt-level = 3
[profile.bench.package.deno_node]
opt-level = 3
[profile.bench.package.num-bigint-dig]
opt-level = 3
[profile.bench.package.v8]
opt-level = 3
[profile.bench.package.serde_v8]
opt-level = 3
[profile.bench.package.serde]
opt-level = 3
[profile.bench.package.deno_url]
opt-level = 3
[profile.bench.package.url]
opt-level = 3
[profile.bench.package.bytes]
opt-level = 3
[profile.bench.package.futures-util]
opt-level = 3
[profile.bench.package.hyper]
opt-level = 3
[profile.bench.package.tokio]
opt-level = 3
[profile.bench.package.zstd]
opt-level = 3
[profile.bench.package.zstd-sys]
opt-level = 3
[profile.bench.package.base64-simd]
opt-level = 3
# NB: the `bench` and `release` profiles must remain EXACTLY the same. [profile.release.package.async-compression]
[profile.release.package.rand]
opt-level = 3 opt-level = 3
[profile.release.package.flate2] [profile.release.package.base64-simd]
opt-level = 3 opt-level = 3
[profile.release.package.brotli] [profile.release.package.brotli]
opt-level = 3 opt-level = 3
[profile.release.package.miniz_oxide]
opt-level = 3
[profile.release.package.async-compression]
opt-level = 3
[profile.release.package.brotli-decompressor] [profile.release.package.brotli-decompressor]
opt-level = 3 opt-level = 3
[profile.release.package.bytes]
opt-level = 3
[profile.release.package.deno_bench_util] [profile.release.package.deno_bench_util]
opt-level = 3 opt-level = 3
[profile.release.package.deno_broadcast_channel]
opt-level = 3
[profile.release.package.deno_core] [profile.release.package.deno_core]
opt-level = 3 opt-level = 3
[profile.release.package.deno_runtime]
opt-level = 3
[profile.release.package.deno_http]
opt-level = 3
[profile.release.package.deno_net]
opt-level = 3
[profile.release.package.deno_web]
opt-level = 3
[profile.release.package.deno_crypto] [profile.release.package.deno_crypto]
opt-level = 3 opt-level = 3
[profile.release.package.deno_node]
opt-level = 3
[profile.release.package.deno_broadcast_channel]
opt-level = 3
[profile.release.package.deno_fetch] [profile.release.package.deno_fetch]
opt-level = 3 opt-level = 3
[profile.release.package.deno_ffi] [profile.release.package.deno_ffi]
opt-level = 3 opt-level = 3
[profile.release.package.deno_tls] [profile.release.package.deno_http]
opt-level = 3
[profile.release.package.deno_websocket]
opt-level = 3 opt-level = 3
[profile.release.package.deno_napi] [profile.release.package.deno_napi]
opt-level = 3 opt-level = 3
[profile.release.package.test_napi] [profile.release.package.deno_net]
opt-level = 3 opt-level = 3
[profile.release.package.num-bigint-dig] [profile.release.package.deno_node]
opt-level = 3 opt-level = 3
[profile.release.package.v8] [profile.release.package.deno_runtime]
opt-level = 3 opt-level = 3
[profile.release.package.serde_v8] [profile.release.package.deno_tls]
opt-level = 3
[profile.release.package.serde]
opt-level = 3 opt-level = 3
[profile.release.package.deno_url] [profile.release.package.deno_url]
opt-level = 3 opt-level = 3
[profile.release.package.url] [profile.release.package.deno_web]
opt-level = 3 opt-level = 3
[profile.release.package.bytes] [profile.release.package.deno_websocket]
opt-level = 3
[profile.release.package.fastwebsockets]
opt-level = 3
[profile.release.package.flate2]
opt-level = 3 opt-level = 3
[profile.release.package.futures-util] [profile.release.package.futures-util]
opt-level = 3 opt-level = 3
[profile.release.package.hyper] [profile.release.package.hyper]
opt-level = 3 opt-level = 3
[profile.release.package.miniz_oxide]
opt-level = 3
[profile.release.package.num-bigint-dig]
opt-level = 3
[profile.release.package.rand]
opt-level = 3
[profile.release.package.serde]
opt-level = 3
[profile.release.package.serde_v8]
opt-level = 3
[profile.release.package.libsui]
opt-level = 3
[profile.release.package.test_napi]
opt-level = 3
[profile.release.package.tokio] [profile.release.package.tokio]
opt-level = 3 opt-level = 3
[profile.release.package.url]
opt-level = 3
[profile.release.package.v8]
opt-level = 3
[profile.release.package.zstd] [profile.release.package.zstd]
opt-level = 3 opt-level = 3
[profile.release.package.zstd-sys] [profile.release.package.zstd-sys]
opt-level = 3 opt-level = 3
[profile.release.package.base64-simd]
opt-level = 3

@@ -1,6 +1,6 @@
MIT License MIT License
Copyright 2018-2023 the Deno authors Copyright 2018-2024 the Deno authors
Permission is hereby granted, free of charge, to any person obtaining a copy of Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in this software and associated documentation files (the "Software"), to deal in

README.md

@@ -6,26 +6,21 @@
<img align="right" src="https://deno.land/logo.svg" height="150px" alt="the deno mascot dinosaur standing in the rain"> <img align="right" src="https://deno.land/logo.svg" height="150px" alt="the deno mascot dinosaur standing in the rain">
[Deno](https://deno.com/runtime) is a _simple_, _modern_ and _secure_ runtime [Deno](https://www.deno.com)
for **JavaScript** and **TypeScript** that uses V8 and is built in Rust. ([/ˈdiːnoʊ/](http://ipa-reader.xyz/?text=%CB%88di%CB%90no%CA%8A), pronounced
`dee-no`) is a JavaScript, TypeScript, and WebAssembly runtime with secure
defaults and a great developer experience. It's built on [V8](https://v8.dev/),
[Rust](https://www.rust-lang.org/), and [Tokio](https://tokio.rs/).
### Features Learn more about the Deno runtime
[in the documentation](https://docs.deno.com/runtime/manual).
- [Secure by default.](https://deno.land/manual/basics/permissions) No file, ## Installation
network, or environment access, unless explicitly enabled.
- Provides
[web platform functionality and APIs](https://deno.land/manual/runtime/web_platform_apis),
e.g. using ES modules, web workers, and `fetch()`.
- Supports
[TypeScript out of the box](https://deno.land/manual/advanced/typescript).
- Ships only a single executable file.
- [Built-in tooling](https://deno.land/manual/tools#built-in-tooling) including
`deno test`, `deno fmt`, `deno bench`, and more.
- Includes [a set of reviewed standard modules](https://deno.land/std/)
guaranteed to work with Deno.
- [Supports npm.](https://deno.land/manual/node)
### Install Install the Deno runtime on your system using one of the commands below. Note
that there are a number of ways to install Deno - a comprehensive list of
installation options can be found
[here](https://docs.deno.com/runtime/manual/getting_started/installation).
Shell (Mac, Linux): Shell (Mac, Linux):
@@ -51,64 +46,49 @@ brew install deno
choco install deno choco install deno
``` ```
[Scoop](https://scoop.sh/) (Windows): ### Build and install from source
```powershell Complete instructions for building Deno from source can be found in the manual
scoop install deno [here](https://docs.deno.com/runtime/manual/references/contributing/building_from_source).
```
Build and install from source using [Cargo](https://crates.io/crates/deno): ## Your first Deno program
```sh Deno can be used for many different applications, but is most commonly used to
# Install the Protobuf compiler build web servers. Create a file called `server.ts` and include the following
apt install -y protobuf-compiler # Linux TypeScript code:
brew install protobuf # macOS
# Build and install Deno
cargo install deno --locked
```
See
[deno_install](https://github.com/denoland/deno_install/blob/master/README.md)
and [releases](https://github.com/denoland/deno/releases) for other options.
### Getting Started
Try [running a simple program](https://examples.deno.land/hello-world):
```sh
deno run https://deno.land/std/examples/welcome.ts
```
Or [setup a simple HTTP server](https://examples.deno.land/http-server):
```ts ```ts
Deno.serve((_req) => new Response("Hello, World!")); Deno.serve((_req: Request) => {
return new Response("Hello, world!");
});
``` ```
[More Examples](https://examples.deno.land) Run your server with the following command:
### Additional Resources ```sh
deno run --allow-net server.ts
```
- **[The Deno Manual](https://deno.land/manual)** is a great starting point for This should start a local web server on
[additional examples](https://deno.land/manual/examples), [http://localhost:8000](http://localhost:8000).
[setting up your environment](https://deno.land/manual/getting_started/setup_your_environment),
[using npm](https://deno.land/manual/node), and more.
- **[Runtime API reference](https://deno.land/api)** documents all APIs built
into Deno CLI.
- **[Deno Standard Modules](https://deno.land/std)** do not have external
dependencies and are reviewed by the Deno core team.
- **[deno.land/x](https://deno.land/x)** is the registry for third party
modules.
- **[Blog](https://deno.com/blog)** is where the Deno team shares important
product updates and “how to”s about solving technical problems.
### Contributing Learn more about writing and running Deno programs
[in the docs](https://docs.deno.com/runtime/manual).
We appreciate your help! ## Additional resources
To contribute, please read our - **[Deno Docs](https://docs.deno.com)**: official guides and reference docs for
[contributing instructions](https://deno.land/manual/contributing). the Deno runtime, [Deno Deploy](https://deno.com/deploy), and beyond.
- **[Deno Standard Library](https://jsr.io/@std)**: officially supported common
utilities for Deno programs.
- **[deno.land/x](https://deno.land/x)**: registry for third-party Deno modules.
- **[Developer Blog](https://deno.com/blog)**: Product updates, tutorials, and
more from the Deno team.
## Contributing
We appreciate your help! To contribute, please read our
[contributing instructions](https://docs.deno.com/runtime/manual/references/contributing/).
[Build status - Cirrus]: https://github.com/denoland/deno/workflows/ci/badge.svg?branch=main&event=push [Build status - Cirrus]: https://github.com/denoland/deno/workflows/ci/badge.svg?branch=main&event=push
[Build status]: https://github.com/denoland/deno/actions [Build status]: https://github.com/denoland/deno/actions

File diff suppressed because it is too large.

@@ -1,8 +1,8 @@
# Copyright 2018-2023 the Deno authors. All rights reserved. MIT license. # Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
[package] [package]
name = "deno_bench_util" name = "deno_bench_util"
version = "0.111.0" version = "0.168.0"
authors.workspace = true authors.workspace = true
edition.workspace = true edition.workspace = true
license.workspace = true license.workspace = true
@ -17,13 +17,8 @@ path = "lib.rs"
[dependencies] [dependencies]
bencher.workspace = true bencher.workspace = true
deno_core.workspace = true deno_core.workspace = true
once_cell.workspace = true
tokio.workspace = true tokio.workspace = true
[[bench]]
name = "op_baseline"
harness = false
[[bench]] [[bench]]
name = "utf8" name = "utf8"
harness = false harness = false

@@ -3,13 +3,15 @@
Example: Example:
```rust ```rust
use deno_bench_util::bench_js_sync;
use deno_bench_util::bench_or_profile; use deno_bench_util::bench_or_profile;
use deno_bench_util::bencher::{benchmark_group, Bencher}; use deno_bench_util::bencher::benchmark_group;
use deno_bench_util::bench_js_sync}; use deno_bench_util::bencher::Bencher;
use deno_core::Extension; use deno_core::Extension;
#[op] #[op2]
#[number]
fn op_nop() -> usize { fn op_nop() -> usize {
9 9
} }
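
The README hunk above cuts off mid-example. Assembled from the new-side imports here and the patterns in the deleted op_baseline.rs below, a complete benchmark would read roughly as follows; the macro spellings (`#[op2]`, `extension!`) are taken from this commit but should be treated as version-dependent:

```rust
use deno_bench_util::bench_js_sync;
use deno_bench_util::bench_or_profile;
use deno_bench_util::bencher::benchmark_group;
use deno_bench_util::bencher::Bencher;
use deno_core::op2;
use deno_core::Extension;

// Register the op in a small extension, as op_baseline.rs used to do.
deno_core::extension!(bench_setup, ops = [op_nop]);

#[op2]
#[number]
fn op_nop() -> usize {
  9
}

fn setup() -> Vec<Extension> {
  vec![bench_setup::init_ops()]
}

fn bench_op_nop(b: &mut Bencher) {
  bench_js_sync(b, r#"Deno.core.ops.op_nop();"#, setup);
}

benchmark_group!(benches, bench_op_nop);
bench_or_profile!(benches);
```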

@@ -1,60 +0,0 @@
// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
use deno_bench_util::bench_js_async;
use deno_bench_util::bench_js_sync;
use deno_bench_util::bench_or_profile;
use deno_bench_util::bencher::benchmark_group;
use deno_bench_util::bencher::Bencher;
use deno_core::op;
use deno_core::Extension;
deno_core::extension!(
bench_setup,
ops = [
// op_pi_json,
op_pi_async,
op_nop
]
);
fn setup() -> Vec<Extension> {
vec![bench_setup::init_ops()]
}
#[op]
fn op_nop() {}
// TODO(bartlomieju): reenable, currently this op generates a fast function,
// which is wrong, because i64 is not a compatible type for fast call.
// #[op]
// fn op_pi_json() -> i64 {
// 314159
// }
// this is a function since async closures aren't stable
#[op]
async fn op_pi_async() -> i64 {
314159
}
// fn bench_op_pi_json(b: &mut Bencher) {
// bench_js_sync(b, r#"Deno.core.ops.op_pi_json();"#, setup);
// }
fn bench_op_nop(b: &mut Bencher) {
bench_js_sync(b, r#"Deno.core.ops.op_nop();"#, setup);
}
fn bench_op_async(b: &mut Bencher) {
bench_js_async(b, r#"Deno.core.opAsync("op_pi_async");"#, setup);
}
benchmark_group!(
benches,
// bench_op_pi_json,
bench_op_nop,
bench_op_async,
);
bench_or_profile!(benches);

@@ -1,4 +1,4 @@
// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license. // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
use deno_bench_util::bench_js_sync_with; use deno_bench_util::bench_js_sync_with;
use deno_bench_util::bench_or_profile; use deno_bench_util::bench_or_profile;
@@ -6,27 +6,23 @@ use deno_bench_util::bencher::benchmark_group;
use deno_bench_util::bencher::Bencher; use deno_bench_util::bencher::Bencher;
use deno_bench_util::BenchOptions; use deno_bench_util::BenchOptions;
use deno_core::Extension; use deno_core::Extension;
use deno_core::ExtensionFileSource;
use deno_core::ExtensionFileSourceCode;
fn setup() -> Vec<Extension> { fn setup() -> Vec<Extension> {
vec![Extension { deno_core::extension!(
name: "bench_setup", bench_setup,
js_files: std::borrow::Cow::Borrowed(&[ExtensionFileSource { js = ["ext:bench_setup/setup.js" = {
specifier: "ext:bench_setup/setup.js", source = r#"
code: ExtensionFileSourceCode::IncludedInBinary(
r#"
const hello = "hello world\n"; const hello = "hello world\n";
const hello1k = hello.repeat(1e3); const hello1k = hello.repeat(1e3);
const hello1m = hello.repeat(1e6); const hello1m = hello.repeat(1e6);
const helloEncoded = Deno.core.encode(hello); const helloEncoded = Deno.core.encode(hello);
const hello1kEncoded = Deno.core.encode(hello1k); const hello1kEncoded = Deno.core.encode(hello1k);
const hello1mEncoded = Deno.core.encode(hello1m); const hello1mEncoded = Deno.core.encode(hello1m);
"#, "#
), }]
}]), );
..Default::default()
}] vec![bench_setup::init_ops_and_esm()]
} }
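
Untangled from the side-by-side rendering above, the new-side `setup()` reads as follows; the embedded-source `extension!` syntax is exactly what this hunk introduces:

```rust
fn setup() -> Vec<Extension> {
  deno_core::extension!(
    bench_setup,
    js = ["ext:bench_setup/setup.js" = {
      source = r#"
        const hello = "hello world\n";
        const hello1k = hello.repeat(1e3);
        const hello1m = hello.repeat(1e6);
        const helloEncoded = Deno.core.encode(hello);
        const hello1kEncoded = Deno.core.encode(hello1k);
        const hello1mEncoded = Deno.core.encode(hello1m);
      "#
    }]
  );

  vec![bench_setup::init_ops_and_esm()]
}
```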
fn bench_utf8_encode_12_b(b: &mut Bencher) { fn bench_utf8_encode_12_b(b: &mut Bencher) {

@@ -1,8 +1,9 @@
// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license. // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
use bencher::Bencher; use bencher::Bencher;
use deno_core::v8; use deno_core::v8;
use deno_core::Extension; use deno_core::Extension;
use deno_core::JsRuntime; use deno_core::JsRuntime;
use deno_core::PollEventLoopOptions;
use deno_core::RuntimeOptions; use deno_core::RuntimeOptions;
use crate::profiling::is_profiling; use crate::profiling::is_profiling;
@@ -115,6 +116,9 @@ pub fn bench_js_async_with(
} }
async fn inner_async(src: &'static str, runtime: &mut JsRuntime) { async fn inner_async(src: &'static str, runtime: &mut JsRuntime) {
runtime.execute_script_static("inner_loop", src).unwrap(); runtime.execute_script("inner_loop", src).unwrap();
runtime.run_event_loop(false).await.unwrap(); runtime
.run_event_loop(PollEventLoopOptions::default())
.await
.unwrap();
} }
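
For clarity, the new-side body of `inner_async` after this hunk: `execute_script_static` becomes `execute_script`, and `run_event_loop` now takes a `PollEventLoopOptions` argument instead of a bool:

```rust
async fn inner_async(src: &'static str, runtime: &mut JsRuntime) {
  runtime.execute_script("inner_loop", src).unwrap();
  runtime
    .run_event_loop(PollEventLoopOptions::default())
    .await
    .unwrap();
}
```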

@@ -1,4 +1,4 @@
// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license. // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
mod js_runtime; mod js_runtime;
mod profiling; mod profiling;

@@ -1,4 +1,4 @@
// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license. // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
use bencher::DynBenchFn; use bencher::DynBenchFn;
use bencher::StaticBenchFn; use bencher::StaticBenchFn;
use bencher::TestDescAndFn; use bencher::TestDescAndFn;
@@ -39,6 +39,7 @@ macro_rules! bench_or_profile {
}; };
} }
#[allow(clippy::print_stdout)]
pub fn run_profiles(opts: &TestOpts, tests: Vec<TestDescAndFn>) { pub fn run_profiles(opts: &TestOpts, tests: Vec<TestDescAndFn>) {
let tests = filter_tests(opts, tests); let tests = filter_tests(opts, tests);
// let decs = tests.iter().map(|t| t.desc.clone()).collect(); // let decs = tests.iter().map(|t| t.desc.clone()).collect();

@@ -1,12 +1,12 @@
# Copyright 2018-2023 the Deno authors. All rights reserved. MIT license. # Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
[package] [package]
name = "deno" name = "deno"
version = "1.36.3" version = "2.0.3"
authors.workspace = true authors.workspace = true
default-run = "deno" default-run = "deno"
edition.workspace = true edition.workspace = true
exclude = ["tests/testdata/npm/registry/*"] exclude = ["bench/testdata/lsp_benchdata/"]
license.workspace = true license.workspace = true
repository.workspace = true repository.workspace = true
description = "Provides the deno executable" description = "Provides the deno executable"
@ -16,6 +16,16 @@ name = "deno"
path = "main.rs" path = "main.rs"
doc = false doc = false
[[bin]]
name = "denort"
path = "mainrt.rs"
doc = false
[[test]]
name = "integration"
path = "integration_tests_runner.rs"
harness = false
[[bench]] [[bench]]
name = "deno_bench" name = "deno_bench"
harness = false harness = false
@@ -27,87 +37,121 @@ harness = false
path = "./bench/lsp_bench_standalone.rs" path = "./bench/lsp_bench_standalone.rs"
[features] [features]
default = ["upgrade", "__vendored_zlib_ng"]
# A feature that enables heap profiling with dhat on Linux.
# 1. Compile with `cargo build --profile=release-with-debug --features=dhat-heap`
# 2. Run the executable. It will output a dhat-heap.json file.
# 3. Open the json file in https://nnethercote.github.io/dh_view/dh_view.html
dhat-heap = ["dhat"]
# A feature that enables the upgrade subcommand and the background check for
# available updates (of deno binary). This is typically disabled for (Linux)
# distribution packages.
upgrade = []
# A dev feature to disable creations and loading of snapshots in favor of # A dev feature to disable creations and loading of snapshots in favor of
# loading JS sources at runtime. # loading JS sources at runtime.
__runtime_js_sources = ["deno_runtime/__runtime_js_sources"] hmr = ["deno_runtime/hmr"]
# Vendor zlib as zlib-ng
__vendored_zlib_ng = ["flate2/zlib-ng-compat", "libz-sys/zlib-ng"]
[build-dependencies] [build-dependencies]
deno_runtime = { workspace = true, features = ["snapshot_from_snapshot", "include_js_files_for_snapshotting"] } deno_runtime = { workspace = true, features = ["include_js_files_for_snapshotting", "only_snapshotted_js_sources"] }
deno_core = { workspace = true, features = ["include_js_files_for_snapshotting"] } deno_core = { workspace = true, features = ["include_js_files_for_snapshotting"] }
lazy-regex.workspace = true lazy-regex.workspace = true
serde.workspace = true serde.workspace = true
serde_json.workspace = true serde_json.workspace = true
zstd.workspace = true zstd.workspace = true
glibc_version = "0.1.2" glibc_version = "0.1.2"
flate2 = { workspace = true, features = ["default"] }
[target.'cfg(windows)'.build-dependencies] [target.'cfg(windows)'.build-dependencies]
winapi.workspace = true winapi.workspace = true
winres.workspace = true winres.workspace = true
[dependencies] [dependencies]
deno_ast = { workspace = true, features = ["bundler", "cjs", "codegen", "dep_graph", "module_specifier", "proposal", "react", "sourcemap", "transforms", "typescript", "view", "visit"] } deno_ast = { workspace = true, features = ["bundler", "cjs", "codegen", "proposal", "react", "sourcemap", "transforms", "typescript", "view", "visit"] }
deno_cache_dir = "=0.5.2" deno_cache_dir = { workspace = true }
deno_config = "=0.2.1" deno_config = { version = "=0.37.2", features = ["workspace", "sync"] }
deno_core = { workspace = true, features = ["include_js_files_for_snapshotting"] } deno_core = { workspace = true, features = ["include_js_files_for_snapshotting"] }
deno_doc = "=0.65.0" deno_doc = { version = "0.154.0", default-features = false, features = ["rust", "html", "syntect"] }
deno_emit = "=0.26.0" deno_graph = { version = "=0.83.4" }
deno_graph = "=0.52.0" deno_lint = { version = "=0.67.0", features = ["docs"] }
deno_lint = { version = "=0.50.2", features = ["docs"] }
deno_lockfile.workspace = true deno_lockfile.workspace = true
deno_npm.workspace = true deno_npm.workspace = true
deno_runtime = { workspace = true, features = ["dont_create_runtime_snapshot", "include_js_files_for_snapshotting"] } deno_package_json.workspace = true
deno_path_util.workspace = true
deno_resolver.workspace = true
deno_runtime = { workspace = true, features = ["include_js_files_for_snapshotting"] }
deno_semver.workspace = true deno_semver.workspace = true
deno_task_shell = "=0.13.2" deno_task_shell = "=0.18.1"
eszip = "=0.50.0" deno_terminal.workspace = true
napi_sym.workspace = true libsui = "0.4.0"
node_resolver.workspace = true
anstream = "0.6.14"
async-trait.workspace = true async-trait.workspace = true
base32 = "=0.4.0"
base64.workspace = true base64.workspace = true
bincode = "=1.3.3" bincode = "=1.3.3"
bytes.workspace = true
cache_control.workspace = true cache_control.workspace = true
chrono.workspace = true chrono = { workspace = true, features = ["now"] }
clap = { version = "=4.3.3", features = ["string"] } clap = { version = "=4.5.16", features = ["env", "string", "wrap_help", "error-context"] }
clap_complete = "=4.3.1" clap_complete = "=4.5.24"
clap_complete_fig = "=4.3.1" clap_complete_fig = "=4.5.2"
color-print.workspace = true
console_static_text.workspace = true console_static_text.workspace = true
data-url.workspace = true dashmap.workspace = true
data-encoding.workspace = true
dhat = { version = "0.3.3", optional = true }
dissimilar = "=1.0.4" dissimilar = "=1.0.4"
dprint-plugin-json = "=0.17.4" dotenvy = "0.15.7"
dprint-plugin-markdown = "=0.15.3" dprint-plugin-json = "=0.19.4"
dprint-plugin-typescript = "=0.86.2" dprint-plugin-jupyter = "=0.1.5"
encoding_rs.workspace = true dprint-plugin-markdown = "=0.17.8"
dprint-plugin-typescript = "=0.93.0"
env_logger = "=0.10.0" env_logger = "=0.10.0"
fancy-regex = "=0.10.0" fancy-regex = "=0.10.0"
fastwebsockets.workspace = true faster-hex.workspace = true
# If you disable the default __vendored_zlib_ng feature above, you _must_ be able to link against `-lz`.
flate2.workspace = true flate2.workspace = true
fs3.workspace = true fs3.workspace = true
glob = "0.3.1" glob = "0.3.1"
http.workspace = true http.workspace = true
hyper.workspace = true http-body.workspace = true
import_map = "=0.15.0" http-body-util.workspace = true
hyper-util.workspace = true
import_map = { version = "=0.20.1", features = ["ext"] }
indexmap.workspace = true indexmap.workspace = true
indexmap1.workspace = true jsonc-parser = { workspace = true, features = ["cst", "serde"] }
jsonc-parser = { version = "=0.21.1", features = ["serde"] } jupyter_runtime = { package = "runtimelib", version = "=0.14.0" }
lazy-regex.workspace = true lazy-regex.workspace = true
libc.workspace = true libc.workspace = true
libz-sys.workspace = true
log = { workspace = true, features = ["serde"] } log = { workspace = true, features = ["serde"] }
lsp-types.workspace = true lsp-types.workspace = true
monch = "=0.4.3" malva = "=0.11.0"
markup_fmt = "=0.14.0"
memmem.workspace = true
monch.workspace = true
notify.workspace = true notify.workspace = true
once_cell.workspace = true once_cell.workspace = true
os_pipe.workspace = true open = "5.0.1"
p256.workspace = true
pathdiff = "0.2.1"
percent-encoding.workspace = true percent-encoding.workspace = true
pin-project.workspace = true phf.workspace = true
quick-junit = "^0.3.3" pretty_yaml = "=0.5.0"
quick-junit = "^0.3.5"
rand = { workspace = true, features = ["small_rng"] } rand = { workspace = true, features = ["small_rng"] }
regex.workspace = true regex.workspace = true
ring.workspace = true ring.workspace = true
rustyline = { version = "=10.0.0", default-features = false, features = ["custom-bindings"] } rustyline.workspace = true
rustyline-derive = "=0.7.0" rustyline-derive = "=0.7.0"
serde.workspace = true serde.workspace = true
serde_repr.workspace = true serde_repr.workspace = true
sha2.workspace = true
shell-escape = "=0.1.5" shell-escape = "=0.1.5"
spki = { version = "0.7", features = ["pem"] }
strsim = "0.11.1"
tar.workspace = true tar.workspace = true
tempfile.workspace = true tempfile.workspace = true
text-size = "=1.1.0" text-size = "=1.1.0"
@@ -116,15 +160,18 @@ thiserror.workspace = true
tokio.workspace = true tokio.workspace = true
tokio-util.workspace = true tokio-util.workspace = true
tower-lsp.workspace = true tower-lsp.workspace = true
twox-hash = "=1.6.3" tracing = { version = "0.1", features = ["log", "default"] }
typed-arena = "=2.0.1" twox-hash.workspace = true
typed-arena = "=2.0.2"
uuid = { workspace = true, features = ["serde"] } uuid = { workspace = true, features = ["serde"] }
walkdir = "=2.3.2" walkdir = "=2.3.2"
which.workspace = true
zeromq.workspace = true
zip = { version = "2.1.6", default-features = false, features = ["deflate-flate2"] }
zstd.workspace = true zstd.workspace = true
[target.'cfg(windows)'.dependencies] [target.'cfg(windows)'.dependencies]
fwdansi.workspace = true junction.workspace = true
junction = "=0.2.0"
winapi = { workspace = true, features = ["knownfolders", "mswsock", "objbase", "shlobj", "tlhelp32", "winbase", "winerror", "winsock2"] } winapi = { workspace = true, features = ["knownfolders", "mswsock", "objbase", "shlobj", "tlhelp32", "winbase", "winerror", "winsock2"] }
[target.'cfg(unix)'.dependencies] [target.'cfg(unix)'.dependencies]
@@ -132,13 +179,8 @@ nix.workspace = true
[dev-dependencies] [dev-dependencies]
deno_bench_util.workspace = true deno_bench_util.workspace = true
flaky_test = "=0.1.0"
once_cell.workspace = true
os_pipe.workspace = true
pretty_assertions.workspace = true pretty_assertions.workspace = true
test_util.workspace = true test_util.workspace = true
trust-dns-client = "=0.22.0"
trust-dns-server = "=0.22.1"
[package.metadata.winres] [package.metadata.winres]
# This section defines the metadata that appears in the deno.exe PE header. # This section defines the metadata that appears in the deno.exe PE header.

cli/args/deno_json.rs (new file)

@@ -0,0 +1,123 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
use std::collections::HashSet;
use deno_config::deno_json::TsConfigForEmit;
use deno_core::serde_json;
use deno_semver::jsr::JsrDepPackageReq;
use deno_semver::jsr::JsrPackageReqReference;
use deno_semver::npm::NpmPackageReqReference;
#[cfg(test)] // happens to only be used by the tests at the moment
pub struct DenoConfigFsAdapter<'a>(
pub &'a dyn deno_runtime::deno_fs::FileSystem,
);
#[cfg(test)]
impl<'a> deno_config::fs::DenoConfigFs for DenoConfigFsAdapter<'a> {
fn read_to_string_lossy(
&self,
path: &std::path::Path,
) -> Result<String, std::io::Error> {
self
.0
.read_text_file_lossy_sync(path, None)
.map_err(|err| err.into_io_error())
}
fn stat_sync(
&self,
path: &std::path::Path,
) -> Result<deno_config::fs::FsMetadata, std::io::Error> {
self
.0
.stat_sync(path)
.map(|stat| deno_config::fs::FsMetadata {
is_file: stat.is_file,
is_directory: stat.is_directory,
is_symlink: stat.is_symlink,
})
.map_err(|err| err.into_io_error())
}
fn read_dir(
&self,
path: &std::path::Path,
) -> Result<Vec<deno_config::fs::FsDirEntry>, std::io::Error> {
self
.0
.read_dir_sync(path)
.map_err(|err| err.into_io_error())
.map(|entries| {
entries
.into_iter()
.map(|e| deno_config::fs::FsDirEntry {
path: path.join(e.name),
metadata: deno_config::fs::FsMetadata {
is_file: e.is_file,
is_directory: e.is_directory,
is_symlink: e.is_symlink,
},
})
.collect()
})
}
}
pub fn deno_json_deps(
config: &deno_config::deno_json::ConfigFile,
) -> HashSet<JsrDepPackageReq> {
let values = imports_values(config.json.imports.as_ref())
.into_iter()
.chain(scope_values(config.json.scopes.as_ref()));
values_to_set(values)
}
fn imports_values(value: Option<&serde_json::Value>) -> Vec<&String> {
let Some(obj) = value.and_then(|v| v.as_object()) else {
return Vec::new();
};
let mut items = Vec::with_capacity(obj.len());
for value in obj.values() {
if let serde_json::Value::String(value) = value {
items.push(value);
}
}
items
}
fn scope_values(value: Option<&serde_json::Value>) -> Vec<&String> {
let Some(obj) = value.and_then(|v| v.as_object()) else {
return Vec::new();
};
obj.values().flat_map(|v| imports_values(Some(v))).collect()
}
fn values_to_set<'a>(
values: impl Iterator<Item = &'a String>,
) -> HashSet<JsrDepPackageReq> {
let mut entries = HashSet::new();
for value in values {
if let Ok(req_ref) = JsrPackageReqReference::from_str(value) {
entries.insert(JsrDepPackageReq::jsr(req_ref.into_inner().req));
} else if let Ok(req_ref) = NpmPackageReqReference::from_str(value) {
entries.insert(JsrDepPackageReq::npm(req_ref.into_inner().req));
}
}
entries
}
pub fn check_warn_tsconfig(ts_config: &TsConfigForEmit) {
if let Some(ignored_options) = &ts_config.maybe_ignored_options {
log::warn!("{}", ignored_options);
}
let serde_json::Value::Object(obj) = &ts_config.ts_config.0 else {
return;
};
if obj.get("experimentalDecorators") == Some(&serde_json::Value::Bool(true)) {
log::warn!(
"{} experimentalDecorators compiler option is deprecated and may be removed at any time",
deno_runtime::colors::yellow("Warning"),
);
}
}
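To make the mapping concrete, here is a hypothetical snippet (not part of the patch; it assumes it runs as a test inside this module so the private values_to_set helper is in scope). Only values that parse as jsr: or npm: specifiers survive into the returned set; everything else is silently skipped:

  // Illustrative only: "./local.ts" is neither a jsr: nor an npm:
  // specifier, so it is dropped rather than reported as an error.
  let values = vec![
    "jsr:@std/path@^1.0.0".to_string(),
    "npm:chalk@^5.3.0".to_string(),
    "./local.ts".to_string(),
  ];
  let set = values_to_set(values.iter());
  assert_eq!(set.len(), 2);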

(File diff suppressed because it is too large.)

@@ -1,6 +1,7 @@
-// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
+// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
 use deno_core::url::Url;
+use deno_runtime::deno_permissions::NetDescriptor;
 use std::net::IpAddr;
 use std::str::FromStr;
@@ -42,21 +43,17 @@ pub fn validator(host_and_port: &str) -> Result<String, String> {
 /// `127.0.0.1:port` and `localhost:port`.
 pub fn parse(paths: Vec<String>) -> clap::error::Result<Vec<String>> {
   let mut out: Vec<String> = vec![];
-  for host_and_port in paths.iter() {
-    if Url::parse(&format!("internal://{host_and_port}")).is_ok()
-      || host_and_port.parse::<IpAddr>().is_ok()
-    {
-      out.push(host_and_port.to_owned())
-    } else if let Ok(port) = host_and_port.parse::<BarePort>() {
+  for host_and_port in paths.into_iter() {
+    if let Ok(port) = host_and_port.parse::<BarePort>() {
       // we got bare port, let's add default hosts
       for host in ["0.0.0.0", "127.0.0.1", "localhost"].iter() {
         out.push(format!("{}:{}", host, port.0));
       }
     } else {
-      return Err(clap::Error::raw(
-        clap::error::ErrorKind::InvalidValue,
-        format!("Bad host:port pair: {host_and_port}"),
-      ));
+      NetDescriptor::parse(&host_and_port).map_err(|e| {
+        clap::Error::raw(clap::error::ErrorKind::InvalidValue, format!("{e:?}"))
+      })?;
+      out.push(host_and_port)
     }
   }
   Ok(out)
@@ -121,8 +118,8 @@ mod tests {
     let entries = svec![
       "deno.land",
       "deno.land:80",
-      "::",
-      "::1",
+      "[::]",
+      "[::1]",
       "127.0.0.1",
       "[::1]",
       "1.2.3.4:5678",
@@ -142,8 +139,8 @@ mod tests {
     let expected = svec![
       "deno.land",
       "deno.land:80",
-      "::",
-      "::1",
+      "[::]",
+      "[::1]",
       "127.0.0.1",
       "[::1]",
       "1.2.3.4:5678",
@@ -174,10 +171,8 @@ mod tests {
   #[test]
   fn parse_net_args_ipv6() {
-    let entries =
-      svec!["::", "::1", "[::1]", "[::]:5678", "[::1]:5678", "::cafe"];
-    let expected =
-      svec!["::", "::1", "[::1]", "[::]:5678", "[::1]:5678", "::cafe"];
+    let entries = svec!["[::1]", "[::]:5678", "[::1]:5678"];
+    let expected = svec!["[::1]", "[::]:5678", "[::1]:5678"];
     let actual = parse(entries).unwrap();
     assert_eq!(actual, expected);
   }
@@ -190,12 +185,36 @@ mod tests {
   #[test]
   fn parse_net_args_ipv6_error2() {
-    let entries = svec!["0123:4567:890a:bcde:fg::"];
+    let entries = svec!["::1"];
     assert!(parse(entries).is_err());
   }

   #[test]
   fn parse_net_args_ipv6_error3() {
+    let entries = svec!["::"];
+    assert!(parse(entries).is_err());
+  }
+
+  #[test]
+  fn parse_net_args_ipv6_error4() {
+    let entries = svec!["::cafe"];
+    assert!(parse(entries).is_err());
+  }
+
+  #[test]
+  fn parse_net_args_ipv6_error5() {
+    let entries = svec!["1::1"];
+    assert!(parse(entries).is_err());
+  }
+
+  #[test]
+  fn parse_net_args_ipv6_error6() {
+    let entries = svec!["0123:4567:890a:bcde:fg::"];
+    assert!(parse(entries).is_err());
+  }
+
+  #[test]
+  fn parse_net_args_ipv6_error7() {
     let entries = svec!["[::q]:8080"];
     assert!(parse(entries).is_err());
   }


@@ -1,64 +1,24 @@
-// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
+// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
 use deno_core::error::AnyError;
 use deno_core::serde_json;
 use deno_core::url::Url;
-use deno_runtime::permissions::PermissionsContainer;
-use import_map::ImportMap;
-use import_map::ImportMapDiagnostic;
-use log::warn;
-
-use super::ConfigFile;
-use crate::file_fetcher::get_source_from_data_url;
 use crate::file_fetcher::FileFetcher;

-pub async fn resolve_import_map_from_specifier(
+pub async fn resolve_import_map_value_from_specifier(
   specifier: &Url,
-  maybe_config_file: Option<&ConfigFile>,
   file_fetcher: &FileFetcher,
-) -> Result<ImportMap, AnyError> {
-  let value: serde_json::Value = if specifier.scheme() == "data" {
-    serde_json::from_str(&get_source_from_data_url(specifier)?.0)?
+) -> Result<serde_json::Value, AnyError> {
+  if specifier.scheme() == "data" {
+    let data_url_text =
+      deno_graph::source::RawDataUrl::parse(specifier)?.decode()?;
+    Ok(serde_json::from_str(&data_url_text)?)
   } else {
-    let import_map_config = maybe_config_file
-      .as_ref()
-      .filter(|c| c.specifier == *specifier);
-    match import_map_config {
-      Some(config) => config.to_import_map_value(),
-      None => {
-        let file = file_fetcher
-          .fetch(specifier, PermissionsContainer::allow_all())
-          .await?;
-        serde_json::from_str(&file.source)?
-      }
-    }
-  };
-  import_map_from_value(specifier, value)
-}
-
-fn import_map_from_value(
-  specifier: &Url,
-  json_value: serde_json::Value,
-) -> Result<ImportMap, AnyError> {
-  debug_assert!(
-    !specifier.as_str().contains("../"),
-    "Import map specifier incorrectly contained ../: {}",
-    specifier.as_str()
-  );
-  let result = import_map::parse_from_value(specifier, json_value)?;
-  print_import_map_diagnostics(&result.diagnostics);
-  Ok(result.import_map)
-}
-
-fn print_import_map_diagnostics(diagnostics: &[ImportMapDiagnostic]) {
-  if !diagnostics.is_empty() {
-    warn!(
-      "Import map diagnostics:\n{}",
-      diagnostics
-        .iter()
-        .map(|d| format!(" - {d}"))
-        .collect::<Vec<_>>()
-        .join("\n")
-    );
-  }
+    let file = file_fetcher
+      .fetch_bypass_permissions(specifier)
+      .await?
+      .into_text_decoded()?;
+    Ok(serde_json::from_str(&file.source)?)
   }
 }
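As a usage note (hypothetical, not from the patch): for a data: URL the function decodes the payload with deno_graph's RawDataUrl and parses it as plain JSON, so the flow reduces to roughly the following.

  // Sketch of the data: URL branch, using the same calls as above.
  let specifier = Url::parse(
    r#"data:application/json,{"imports":{"a":"jsr:@scope/a@1"}}"#,
  )?;
  let text = deno_graph::source::RawDataUrl::parse(&specifier)?.decode()?;
  let value: serde_json::Value = serde_json::from_str(&text)?;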


@@ -1,65 +1,267 @@
-// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
+// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
+
+use std::collections::HashSet;
 use std::path::PathBuf;
-use std::sync::Arc;

+use deno_config::deno_json::ConfigFile;
+use deno_config::workspace::Workspace;
+use deno_core::anyhow::Context;
 use deno_core::error::AnyError;
 use deno_core::parking_lot::Mutex;
-use deno_npm::registry::NpmRegistryApi;
-use deno_npm::resolution::ValidSerializedNpmResolutionSnapshot;
+use deno_core::parking_lot::MutexGuard;
+use deno_lockfile::Lockfile;
+use deno_lockfile::WorkspaceMemberConfig;
+use deno_package_json::PackageJsonDepValue;
+use deno_runtime::deno_node::PackageJson;
+use deno_semver::jsr::JsrDepPackageReq;

-use crate::args::ConfigFile;
+use crate::args::DenoSubcommand;
+use crate::args::InstallFlags;
+use crate::args::InstallKind;
+use crate::cache;
+use crate::util::fs::atomic_write_file_with_retries;
 use crate::Flags;

-use super::DenoSubcommand;
-
-pub use deno_lockfile::Lockfile;
-pub use deno_lockfile::LockfileError;
-
-pub fn discover(
-  flags: &Flags,
-  maybe_config_file: Option<&ConfigFile>,
-) -> Result<Option<Lockfile>, AnyError> {
-  if flags.no_lock
-    || matches!(
-      flags.subcommand,
-      DenoSubcommand::Install(_) | DenoSubcommand::Uninstall(_)
-    )
-  {
-    return Ok(None);
+#[derive(Debug)]
+pub struct CliLockfileReadFromPathOptions {
+  pub file_path: PathBuf,
+  pub frozen: bool,
+  /// Causes the lockfile to only be read from, but not written to.
+  pub skip_write: bool,
+}
+
+#[derive(Debug)]
+pub struct CliLockfile {
+  lockfile: Mutex<Lockfile>,
+  pub filename: PathBuf,
+  frozen: bool,
+  skip_write: bool,
+}
pub struct Guard<'a, T> {
guard: MutexGuard<'a, T>,
}
impl<'a, T> std::ops::Deref for Guard<'a, T> {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.guard
}
}
impl<'a, T> std::ops::DerefMut for Guard<'a, T> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.guard
}
}
impl CliLockfile {
/// Get the inner deno_lockfile::Lockfile.
pub fn lock(&self) -> Guard<Lockfile> {
Guard {
guard: self.lockfile.lock(),
}
-  }
-  let filename = match flags.lock {
-    Some(ref lock) => PathBuf::from(lock),
-    None => match maybe_config_file {
-      Some(config_file) => {
-        if config_file.specifier.scheme() == "file" {
-          match config_file.resolve_lockfile_path()? {
-            Some(path) => path,
-            None => return Ok(None),
+  }
+
+  pub fn set_workspace_config(
+    &self,
+    options: deno_lockfile::SetWorkspaceConfigOptions,
+  ) {
+    self.lockfile.lock().set_workspace_config(options);
+  }
+
+  pub fn overwrite(&self) -> bool {
}
pub fn write_if_changed(&self) -> Result<(), AnyError> {
if self.skip_write {
return Ok(());
}
self.error_if_changed()?;
let mut lockfile = self.lockfile.lock();
let Some(bytes) = lockfile.resolve_write_bytes() else {
return Ok(()); // nothing to do
};
// do an atomic write to reduce the chance of multiple deno
// processes corrupting the file
atomic_write_file_with_retries(
&lockfile.filename,
bytes,
cache::CACHE_PERM,
)
.context("Failed writing lockfile.")?;
lockfile.has_content_changed = false;
Ok(())
}
pub fn discover(
flags: &Flags,
workspace: &Workspace,
) -> Result<Option<CliLockfile>, AnyError> {
fn pkg_json_deps(
maybe_pkg_json: Option<&PackageJson>,
) -> HashSet<JsrDepPackageReq> {
let Some(pkg_json) = maybe_pkg_json else {
return Default::default();
};
pkg_json
.resolve_local_package_json_deps()
.values()
.filter_map(|dep| dep.as_ref().ok())
.filter_map(|dep| match dep {
PackageJsonDepValue::Req(req) => {
Some(JsrDepPackageReq::npm(req.clone()))
-          }
-        } else {
-          return Ok(None);
-        }
+          }
+          PackageJsonDepValue::Workspace(_) => None,
+        })
+        .collect()
+    }
fn deno_json_deps(
maybe_deno_json: Option<&ConfigFile>,
) -> HashSet<JsrDepPackageReq> {
maybe_deno_json
.map(|c| {
crate::args::deno_json::deno_json_deps(c)
.into_iter()
.collect()
})
.unwrap_or_default()
}
if flags.no_lock
|| matches!(
flags.subcommand,
DenoSubcommand::Install(InstallFlags {
kind: InstallKind::Global(..),
..
}) | DenoSubcommand::Uninstall(_)
)
{
return Ok(None);
}
let file_path = match flags.lock {
Some(ref lock) => PathBuf::from(lock),
None => match workspace.resolve_lockfile_path()? {
Some(path) => path,
None => return Ok(None),
},
};
let root_folder = workspace.root_folder_configs();
// CLI flag takes precedence over the config
let frozen = flags.frozen_lockfile.unwrap_or_else(|| {
root_folder
.deno_json
.as_ref()
.and_then(|c| c.to_lock_config().ok().flatten().map(|c| c.frozen()))
.unwrap_or(false)
});
let lockfile = Self::read_from_path(CliLockfileReadFromPathOptions {
file_path,
frozen,
skip_write: flags.internal.lockfile_skip_write,
})?;
// initialize the lockfile with the workspace's configuration
let root_url = workspace.root_dir();
let config = deno_lockfile::WorkspaceConfig {
root: WorkspaceMemberConfig {
package_json_deps: pkg_json_deps(root_folder.pkg_json.as_deref()),
dependencies: deno_json_deps(root_folder.deno_json.as_deref()),
},
members: workspace
.config_folders()
.iter()
.filter(|(folder_url, _)| *folder_url != root_url)
.filter_map(|(folder_url, folder)| {
Some((
{
// should never be None here, but just ignore members that
// do fail for this
let mut relative_path = root_url.make_relative(folder_url)?;
if relative_path.ends_with('/') {
// make it slightly cleaner by removing the trailing slash
relative_path.pop();
}
relative_path
},
{
let config = WorkspaceMemberConfig {
package_json_deps: pkg_json_deps(folder.pkg_json.as_deref()),
dependencies: deno_json_deps(folder.deno_json.as_deref()),
};
if config.package_json_deps.is_empty()
&& config.dependencies.is_empty()
{
// exclude empty workspace members
return None;
}
config
},
))
})
.collect(),
};
lockfile.set_workspace_config(deno_lockfile::SetWorkspaceConfigOptions {
no_npm: flags.no_npm,
no_config: flags.config_flag == super::ConfigFlag::Disabled,
config,
});
Ok(Some(lockfile))
}
pub fn read_from_path(
opts: CliLockfileReadFromPathOptions,
) -> Result<CliLockfile, AnyError> {
let lockfile = match std::fs::read_to_string(&opts.file_path) {
Ok(text) => Lockfile::new(deno_lockfile::NewLockfileOptions {
file_path: opts.file_path,
content: &text,
overwrite: false,
})?,
Err(err) if err.kind() == std::io::ErrorKind::NotFound => {
Lockfile::new_empty(opts.file_path, false)
-      }
-      None => return Ok(None),
-    },
-  };
-  let lockfile = Lockfile::new(filename, flags.lock_write)?;
-  Ok(Some(lockfile))
-}
-
-pub async fn snapshot_from_lockfile(
-  lockfile: Arc<Mutex<Lockfile>>,
-  api: &dyn NpmRegistryApi,
-) -> Result<ValidSerializedNpmResolutionSnapshot, AnyError> {
-  let incomplete_snapshot = {
-    let lock = lockfile.lock();
-    deno_npm::resolution::incomplete_snapshot_from_lockfile(&lock)?
-  };
-  let snapshot =
-    deno_npm::resolution::snapshot_from_lockfile(incomplete_snapshot, api)
-      .await?;
-  Ok(snapshot)
-}
+      }
+      Err(err) => {
+        return Err(err).with_context(|| {
+          format!("Failed reading lockfile '{}'", opts.file_path.display())
+        });
+      }
+    };
+    Ok(CliLockfile {
+      filename: lockfile.filename.clone(),
+      lockfile: Mutex::new(lockfile),
+      frozen: opts.frozen,
+      skip_write: opts.skip_write,
+    })
+  }
+
+  pub fn error_if_changed(&self) -> Result<(), AnyError> {
+    if !self.frozen {
+      return Ok(());
+    }
+    let lockfile = self.lockfile.lock();
+    if lockfile.has_content_changed {
+      let contents =
+        std::fs::read_to_string(&lockfile.filename).unwrap_or_default();
+      let new_contents = lockfile.as_json_string();
+      let diff = crate::util::diff::diff(&contents, &new_contents);
+      // has an extra newline at the end
+      let diff = diff.trim_end();
+      Err(deno_core::anyhow::anyhow!(
+        "The lockfile is out of date. Run `deno install --frozen=false`, or rerun with `--frozen=false` to update it.\nchanges:\n{diff}"
+      ))
+    } else {
+      Ok(())
+    }
+  }
+}
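Taken together, the read/write flow looks roughly like this for a hypothetical caller (not code from the patch; assumes `flags` and `workspace` were already resolved):

  // Discover the lockfile, let dependency resolution mutate it, then
  // persist. With a frozen lockfile, write_if_changed() surfaces a diff
  // error via error_if_changed() instead of writing.
  if let Some(lockfile) = CliLockfile::discover(&flags, &workspace)? {
    // ... resolution updates the inner deno_lockfile::Lockfile here ...
    lockfile.write_if_changed()?; // atomic write; no-op when skip_write is set
  }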

(File diff suppressed because it is too large.)

@@ -1,298 +1,156 @@
-// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
+// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.

-use std::collections::BTreeMap;
-use std::collections::HashMap;
-use std::path::Path;
 use std::path::PathBuf;
+use std::sync::Arc;

-use deno_core::anyhow::bail;
-use deno_core::error::AnyError;
-use deno_npm::registry::parse_dep_entry_name_and_raw_version;
-use deno_npm::registry::PackageDepNpmSchemeValueParseError;
-use deno_runtime::deno_node::PackageJson;
+use deno_config::workspace::Workspace;
+use deno_core::serde_json;
+use deno_package_json::PackageJsonDepValue;
+use deno_package_json::PackageJsonDepValueParseError;
+use deno_semver::npm::NpmPackageReqReference;
 use deno_semver::package::PackageReq;
-use deno_semver::VersionReq;
-use deno_semver::VersionReqSpecifierParseError;
-use thiserror::Error;

-#[derive(Debug, Error, Clone)]
-pub enum PackageJsonDepValueParseError {
-  #[error(transparent)]
-  SchemeValue(#[from] PackageDepNpmSchemeValueParseError),
-  #[error(transparent)]
-  Specifier(#[from] VersionReqSpecifierParseError),
-  #[error("Not implemented scheme '{scheme}'")]
-  Unsupported { scheme: String },
-}
-
-pub type PackageJsonDeps =
-  BTreeMap<String, Result<PackageReq, PackageJsonDepValueParseError>>;
-
-#[derive(Debug, Default)]
-pub struct PackageJsonDepsProvider(Option<PackageJsonDeps>);
-
-impl PackageJsonDepsProvider {
-  pub fn new(deps: Option<PackageJsonDeps>) -> Self {
-    Self(deps)
-  }
-
-  pub fn deps(&self) -> Option<&PackageJsonDeps> {
-    self.0.as_ref()
-  }
-
-  pub fn reqs(&self) -> Vec<&PackageReq> {
-    match &self.0 {
-      Some(deps) => {
-        let mut package_reqs = deps
-          .values()
-          .filter_map(|r| r.as_ref().ok())
-          .collect::<Vec<_>>();
-        package_reqs.sort(); // deterministic resolution
-        package_reqs
-      }
-      None => Vec::new(),
-    }
-  }
-}
-
-/// Gets an application level package.json's npm package requirements.
-///
-/// Note that this function is not general purpose. It is specifically for
-/// parsing the application level package.json that the user has control
-/// over. This is a design limitation to allow mapping these dependency
-/// entries to npm specifiers which can then be used in the resolver.
-pub fn get_local_package_json_version_reqs(
-  package_json: &PackageJson,
-) -> PackageJsonDeps {
+#[derive(Debug)]
+pub struct InstallNpmRemotePkg {
+  pub alias: Option<String>,
+  pub base_dir: PathBuf,
+  pub req: PackageReq,
+}
+
+#[derive(Debug)]
+pub struct InstallNpmWorkspacePkg {
+  pub alias: Option<String>,
+  pub target_dir: PathBuf,
+}
+
+#[derive(Debug, Default)]
+pub struct NpmInstallDepsProvider {
+  remote_pkgs: Vec<InstallNpmRemotePkg>,
+  workspace_pkgs: Vec<InstallNpmWorkspacePkg>,
+  pkg_json_dep_errors: Vec<PackageJsonDepValueParseError>,
+}
+
+impl NpmInstallDepsProvider {
+  pub fn empty() -> Self {
+    Self::default()
fn parse_entry(
key: &str,
value: &str,
) -> Result<PackageReq, PackageJsonDepValueParseError> {
if value.starts_with("workspace:")
|| value.starts_with("file:")
|| value.starts_with("git:")
|| value.starts_with("http:")
|| value.starts_with("https:")
{
return Err(PackageJsonDepValueParseError::Unsupported {
scheme: value.split(':').next().unwrap().to_string(),
});
}
let (name, version_req) = parse_dep_entry_name_and_raw_version(key, value)
.map_err(PackageJsonDepValueParseError::SchemeValue)?;
let result = VersionReq::parse_from_specifier(version_req);
match result {
Ok(version_req) => Ok(PackageReq {
name: name.to_string(),
version_req,
}),
Err(err) => Err(PackageJsonDepValueParseError::Specifier(err)),
}
-    }
-  }
-
-  fn insert_deps(
-    deps: Option<&HashMap<String, String>>,
-    result: &mut PackageJsonDeps,
-  ) {
-    if let Some(deps) = deps {
-      for (key, value) in deps {
-        result.insert(key.to_string(), parse_entry(key, value));
-      }
-    }
-  }
-
-  let deps = package_json.dependencies.as_ref();
-  let dev_deps = package_json.dev_dependencies.as_ref();
-  let mut result = BTreeMap::new();
-
-  // insert the dev dependencies first so the dependencies will
-  // take priority and overwrite any collisions
-  insert_deps(dev_deps, &mut result);
-  insert_deps(deps, &mut result);
-
-  result
-}
-
-/// Attempts to discover the package.json file, maybe stopping when it
-/// reaches the specified `maybe_stop_at` directory.
-pub fn discover_from(
-  start: &Path,
-  maybe_stop_at: Option<PathBuf>,
-) -> Result<Option<PackageJson>, AnyError> {
-  const PACKAGE_JSON_NAME: &str = "package.json";
-
-  // note: ancestors() includes the `start` path
-  for ancestor in start.ancestors() {
-    let path = ancestor.join(PACKAGE_JSON_NAME);
-
-    let source = match std::fs::read_to_string(&path) {
-      Ok(source) => source,
-      Err(err) if err.kind() == std::io::ErrorKind::NotFound => {
-        if let Some(stop_at) = maybe_stop_at.as_ref() {
-          if ancestor == stop_at {
-            break;
-          }
-        }
-        continue;
-      }
-      Err(err) => bail!(
-        "Error loading package.json at {}. {:#}",
-        path.display(),
-        err
-      ),
-    };
-
-    let package_json = PackageJson::load_from_string(path.clone(), source)?;
-    log::debug!("package.json file found at '{}'", path.display());
-    return Ok(Some(package_json));
-  }
-
-  log::debug!("No package.json file found");
-  Ok(None)
-}
-
-#[cfg(test)]
-mod test {
-  use pretty_assertions::assert_eq;
-  use std::path::PathBuf;
-
-  use super::*;
-
-  #[test]
-  fn test_parse_dep_entry_name_and_raw_version() {
-    let cases = [
-      ("test", "^1.2", Ok(("test", "^1.2"))),
-      ("test", "1.x - 2.6", Ok(("test", "1.x - 2.6"))),
-      ("test", "npm:package@^1.2", Ok(("package", "^1.2"))),
-      (
-        "test",
-        "npm:package",
-        Err("Could not find @ symbol in npm url 'npm:package'"),
-      ),
-    ];
-    for (key, value, expected_result) in cases {
-      let result = parse_dep_entry_name_and_raw_version(key, value);
-      match result {
-        Ok(result) => assert_eq!(result, expected_result.unwrap()),
-        Err(err) => assert_eq!(err.to_string(), expected_result.err().unwrap()),
-      }
-    }
-  }
-
-  fn get_local_package_json_version_reqs_for_tests(
-    package_json: &PackageJson,
-  ) -> BTreeMap<String, Result<PackageReq, String>> {
-    get_local_package_json_version_reqs(package_json)
-      .into_iter()
-      .map(|(k, v)| {
-        (
-          k,
-          match v {
-            Ok(v) => Ok(v),
-            Err(err) => Err(err.to_string()),
-          },
-        )
-      })
-      .collect::<BTreeMap<_, _>>()
-  }
-
-  #[test]
-  fn test_get_local_package_json_version_reqs() {
-    let mut package_json = PackageJson::empty(PathBuf::from("/package.json"));
-    package_json.dependencies = Some(HashMap::from([
-      ("test".to_string(), "^1.2".to_string()),
-      ("other".to_string(), "npm:package@~1.3".to_string()),
-    ]));
-    package_json.dev_dependencies = Some(HashMap::from([
-      ("package_b".to_string(), "~2.2".to_string()),
-      // should be ignored
-      ("other".to_string(), "^3.2".to_string()),
-    ]));
-    let deps = get_local_package_json_version_reqs_for_tests(&package_json);
-    assert_eq!(
-      deps,
-      BTreeMap::from([
-        (
-          "test".to_string(),
-          Ok(PackageReq::from_str("test@^1.2").unwrap())
-        ),
-        (
-          "other".to_string(),
-          Ok(PackageReq::from_str("package@~1.3").unwrap())
-        ),
-        (
-          "package_b".to_string(),
-          Ok(PackageReq::from_str("package_b@~2.2").unwrap())
-        )
-      ])
-    );
-  }
-
-  #[test]
-  fn test_get_local_package_json_version_reqs_errors_non_npm_specifier() {
-    let mut package_json = PackageJson::empty(PathBuf::from("/package.json"));
-    package_json.dependencies = Some(HashMap::from([(
-      "test".to_string(),
-      "1.x - 1.3".to_string(),
-    )]));
-    let map = get_local_package_json_version_reqs_for_tests(&package_json);
-    assert_eq!(
-      map,
-      BTreeMap::from([(
-        "test".to_string(),
-        Err(
-          concat!(
-            "Invalid specifier version requirement. Unexpected character.\n",
-            "  - 1.3\n",
-            "  ~"
-          )
-          .to_string()
-        )
-      )])
-    );
-  }
-
-  #[test]
-  fn test_get_local_package_json_version_reqs_skips_certain_specifiers() {
-    let mut package_json = PackageJson::empty(PathBuf::from("/package.json"));
-    package_json.dependencies = Some(HashMap::from([
-      ("test".to_string(), "1".to_string()),
-      ("work-test".to_string(), "workspace:1.1.1".to_string()),
-      ("file-test".to_string(), "file:something".to_string()),
-      ("git-test".to_string(), "git:something".to_string()),
-      ("http-test".to_string(), "http://something".to_string()),
-      ("https-test".to_string(), "https://something".to_string()),
-    ]));
-    let result = get_local_package_json_version_reqs_for_tests(&package_json);
-    assert_eq!(
-      result,
-      BTreeMap::from([
-        (
-          "file-test".to_string(),
-          Err("Not implemented scheme 'file'".to_string()),
-        ),
-        (
-          "git-test".to_string(),
-          Err("Not implemented scheme 'git'".to_string()),
-        ),
-        (
-          "http-test".to_string(),
-          Err("Not implemented scheme 'http'".to_string()),
-        ),
-        (
-          "https-test".to_string(),
-          Err("Not implemented scheme 'https'".to_string()),
-        ),
-        (
-          "test".to_string(),
-          Ok(PackageReq::from_str("test@1").unwrap())
-        ),
-        (
-          "work-test".to_string(),
-          Err("Not implemented scheme 'workspace'".to_string())
-        )
-      ])
-    );
-  }
-}
+  }
+
+  pub fn from_workspace(workspace: &Arc<Workspace>) -> Self {
+    // todo(dsherret): estimate capacity?
+    let mut workspace_pkgs = Vec::new();
+    let mut remote_pkgs = Vec::new();
+    let mut pkg_json_dep_errors = Vec::new();
+    let workspace_npm_pkgs = workspace.npm_packages();
+
+    for (_, folder) in workspace.config_folders() {
+      // deal with the deno.json first because it takes precedence during resolution
+      if let Some(deno_json) = &folder.deno_json {
+        // don't bother with externally referenced import maps as users
+        // should inline their import map to get this behaviour
+        if let Some(serde_json::Value::Object(obj)) = &deno_json.json.imports {
+          let mut pkg_pkgs = Vec::with_capacity(obj.len());
+          for (_alias, value) in obj {
+            let serde_json::Value::String(specifier) = value else {
+              continue;
+            };
+            let Ok(npm_req_ref) = NpmPackageReqReference::from_str(specifier)
+            else {
+              continue;
+            };
+            let pkg_req = npm_req_ref.into_inner().req;
+            let workspace_pkg = workspace_npm_pkgs
+              .iter()
+              .find(|pkg| pkg.matches_req(&pkg_req));
+
+            if let Some(pkg) = workspace_pkg {
+              workspace_pkgs.push(InstallNpmWorkspacePkg {
+                alias: None,
+                target_dir: pkg.pkg_json.dir_path().to_path_buf(),
+              });
+            } else {
+              pkg_pkgs.push(InstallNpmRemotePkg {
+                alias: None,
+                base_dir: deno_json.dir_path(),
+                req: pkg_req,
+              });
+            }
+          }
+          // sort within each package (more like npm resolution)
+          pkg_pkgs.sort_by(|a, b| a.req.cmp(&b.req));
+          remote_pkgs.extend(pkg_pkgs);
+        }
+      }
+
+      if let Some(pkg_json) = &folder.pkg_json {
+        let deps = pkg_json.resolve_local_package_json_deps();
+        let mut pkg_pkgs = Vec::with_capacity(deps.len());
+        for (alias, dep) in deps {
+          let dep = match dep {
+            Ok(dep) => dep,
+            Err(err) => {
+              pkg_json_dep_errors.push(err);
+              continue;
+            }
+          };
+          match dep {
+            PackageJsonDepValue::Req(pkg_req) => {
+              let workspace_pkg = workspace_npm_pkgs.iter().find(|pkg| {
+                pkg.matches_req(&pkg_req)
+                // do not resolve to the current package
+                && pkg.pkg_json.path != pkg_json.path
+              });
+
+              if let Some(pkg) = workspace_pkg {
+                workspace_pkgs.push(InstallNpmWorkspacePkg {
+                  alias: Some(alias),
+                  target_dir: pkg.pkg_json.dir_path().to_path_buf(),
+                });
+              } else {
+                pkg_pkgs.push(InstallNpmRemotePkg {
+                  alias: Some(alias),
+                  base_dir: pkg_json.dir_path().to_path_buf(),
+                  req: pkg_req,
+                });
+              }
+            }
+            PackageJsonDepValue::Workspace(version_req) => {
+              if let Some(pkg) = workspace_npm_pkgs.iter().find(|pkg| {
+                pkg.matches_name_and_version_req(&alias, &version_req)
+              }) {
+                workspace_pkgs.push(InstallNpmWorkspacePkg {
+                  alias: Some(alias),
+                  target_dir: pkg.pkg_json.dir_path().to_path_buf(),
+                });
+              }
+            }
+          }
+        }
+
+        // sort within each package as npm does
+        pkg_pkgs.sort_by(|a, b| a.alias.cmp(&b.alias));
+        remote_pkgs.extend(pkg_pkgs);
+      }
+    }
+
+    remote_pkgs.shrink_to_fit();
+    workspace_pkgs.shrink_to_fit();
+    Self {
+      remote_pkgs,
+      workspace_pkgs,
+      pkg_json_dep_errors,
+    }
+  }
+
+  pub fn remote_pkgs(&self) -> &[InstallNpmRemotePkg] {
+    &self.remote_pkgs
+  }
+
+  pub fn workspace_pkgs(&self) -> &[InstallNpmWorkspacePkg] {
+    &self.workspace_pkgs
+  }
+
+  pub fn pkg_json_dep_errors(&self) -> &[PackageJsonDepValueParseError] {
+    &self.pkg_json_dep_errors
+  }
+}
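A rough sketch of how a caller might consume the provider (hypothetical code, not from the patch; assumes an already-resolved Arc<Workspace>):

  // Workspace-local packages get linked; remote packages get fetched.
  let provider = NpmInstallDepsProvider::from_workspace(&workspace);
  for pkg in provider.remote_pkgs() {
    // fetch pkg.req from the npm registry, relative to pkg.base_dir
  }
  for pkg in provider.workspace_pkgs() {
    // link pkg.target_dir into node_modules instead of downloading
  }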

@@ -1,9 +1,17 @@
-// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
+// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
+
+use base64::prelude::BASE64_STANDARD;
+use base64::Engine;
 use deno_core::ModuleSpecifier;
 use log::debug;
 use log::error;
+use std::borrow::Cow;
 use std::fmt;
+use std::net::IpAddr;
+use std::net::Ipv4Addr;
+use std::net::Ipv6Addr;
+use std::net::SocketAddr;
+use std::str::FromStr;

 #[derive(Debug, Clone, PartialEq, Eq)]
 pub enum AuthTokenData {
@@ -13,7 +21,7 @@ pub enum AuthTokenData {
 #[derive(Debug, Clone, PartialEq, Eq)]
 pub struct AuthToken {
-  host: String,
+  host: AuthDomain,
   token: AuthTokenData,
 }
@@ -23,7 +31,7 @@ impl fmt::Display for AuthToken {
       AuthTokenData::Bearer(token) => write!(f, "Bearer {token}"),
       AuthTokenData::Basic { username, password } => {
         let credentials = format!("{username}:{password}");
-        write!(f, "Basic {}", base64::encode(credentials))
+        write!(f, "Basic {}", BASE64_STANDARD.encode(credentials))
       }
     }
   }
@@ -35,6 +43,78 @@ impl fmt::Display for AuthToken {
 #[derive(Debug, Clone)]
 pub struct AuthTokens(Vec<AuthToken>);
/// An authorization domain, either an exact or suffix match.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum AuthDomain {
Ip(IpAddr),
IpPort(SocketAddr),
/// Suffix match, no dot. May include a port.
Suffix(Cow<'static, str>),
}
impl<T: ToString> From<T> for AuthDomain {
fn from(value: T) -> Self {
let s = value.to_string().to_lowercase();
if let Ok(ip) = SocketAddr::from_str(&s) {
return AuthDomain::IpPort(ip);
};
if s.starts_with('[') && s.ends_with(']') {
if let Ok(ip) = Ipv6Addr::from_str(&s[1..s.len() - 1]) {
return AuthDomain::Ip(ip.into());
}
} else if let Ok(ip) = Ipv4Addr::from_str(&s) {
return AuthDomain::Ip(ip.into());
}
if let Some(s) = s.strip_prefix('.') {
AuthDomain::Suffix(Cow::Owned(s.to_owned()))
} else {
AuthDomain::Suffix(Cow::Owned(s))
}
}
}
impl AuthDomain {
pub fn matches(&self, specifier: &ModuleSpecifier) -> bool {
let Some(host) = specifier.host_str() else {
return false;
};
match *self {
Self::Ip(ip) => {
let AuthDomain::Ip(parsed) = AuthDomain::from(host) else {
return false;
};
ip == parsed && specifier.port().is_none()
}
Self::IpPort(ip) => {
let AuthDomain::Ip(parsed) = AuthDomain::from(host) else {
return false;
};
ip.ip() == parsed && specifier.port() == Some(ip.port())
}
Self::Suffix(ref suffix) => {
let hostname = if let Some(port) = specifier.port() {
Cow::Owned(format!("{}:{}", host, port))
} else {
Cow::Borrowed(host)
};
if suffix.len() == hostname.len() {
return suffix == &hostname;
}
// If it's a suffix match, ensure a dot
if hostname.ends_with(suffix.as_ref())
&& hostname.ends_with(&format!(".{suffix}"))
{
return true;
}
false
}
}
}
}
 impl AuthTokens {
   /// Create a new set of tokens based on the provided string. It is intended
   /// that the string be the value of an environment variable and the string is

@@ -43,19 +123,19 @@ impl AuthTokens {
   pub fn new(maybe_tokens_str: Option<String>) -> Self {
     let mut tokens = Vec::new();
     if let Some(tokens_str) = maybe_tokens_str {
-      for token_str in tokens_str.split(';') {
+      for token_str in tokens_str.trim().split(';') {
         if token_str.contains('@') {
-          let pair: Vec<&str> = token_str.rsplitn(2, '@').collect();
-          let token = pair[1];
-          let host = pair[0].to_lowercase();
+          let mut iter = token_str.rsplitn(2, '@');
+          let host = AuthDomain::from(iter.next().unwrap());
+          let token = iter.next().unwrap();
           if token.contains(':') {
-            let pair: Vec<&str> = token.rsplitn(2, ':').collect();
-            let username = pair[1].to_string();
-            let password = pair[0].to_string();
+            let mut iter = token.rsplitn(2, ':');
+            let password = iter.next().unwrap().to_owned();
+            let username = iter.next().unwrap().to_owned();
             tokens.push(AuthToken {
               host,
               token: AuthTokenData::Basic { username, password },
-            })
+            });
           } else {
             tokens.push(AuthToken {
               host,

@@ -79,12 +159,7 @@ impl AuthTokens {
   /// matching is case insensitive.
   pub fn get(&self, specifier: &ModuleSpecifier) -> Option<AuthToken> {
     self.0.iter().find_map(|t| {
-      let hostname = if let Some(port) = specifier.port() {
-        format!("{}:{}", specifier.host_str()?, port)
-      } else {
-        specifier.host_str()?.to_string()
-      };
-      if hostname.to_lowercase().ends_with(&t.host) {
+      if t.host.matches(specifier) {
         Some(t.clone())
       } else {
         None
@@ -136,6 +211,40 @@ mod tests {
     );
   }
#[test]
fn test_auth_tokens_space() {
let auth_tokens = AuthTokens::new(Some(
" abc123@deno.land;def456@example.com\t".to_string(),
));
let fixture = resolve_url("https://deno.land/x/mod.ts").unwrap();
assert_eq!(
auth_tokens.get(&fixture).unwrap().to_string(),
"Bearer abc123".to_string()
);
let fixture = resolve_url("http://example.com/a/file.ts").unwrap();
assert_eq!(
auth_tokens.get(&fixture).unwrap().to_string(),
"Bearer def456".to_string()
);
}
#[test]
fn test_auth_tokens_newline() {
let auth_tokens = AuthTokens::new(Some(
"\nabc123@deno.land;def456@example.com\n".to_string(),
));
let fixture = resolve_url("https://deno.land/x/mod.ts").unwrap();
assert_eq!(
auth_tokens.get(&fixture).unwrap().to_string(),
"Bearer abc123".to_string()
);
let fixture = resolve_url("http://example.com/a/file.ts").unwrap();
assert_eq!(
auth_tokens.get(&fixture).unwrap().to_string(),
"Bearer def456".to_string()
);
}
   #[test]
   fn test_auth_tokens_port() {
     let auth_tokens =

@@ -180,4 +289,81 @@ mod tests {
     let fixture = resolve_url("https://deno.land:8080/x/mod.ts").unwrap();
     assert_eq!(auth_tokens.get(&fixture), None);
   }
#[test]
fn test_parse_ip() {
let ip = AuthDomain::from("[2001:db8:a::123]");
assert_eq!("Ip(2001:db8:a::123)", format!("{ip:?}"));
let ip = AuthDomain::from("[2001:db8:a::123]:8080");
assert_eq!("IpPort([2001:db8:a::123]:8080)", format!("{ip:?}"));
let ip = AuthDomain::from("1.1.1.1");
assert_eq!("Ip(1.1.1.1)", format!("{ip:?}"));
}
#[test]
fn test_case_insensitive() {
let domain = AuthDomain::from("EXAMPLE.com");
assert!(
domain.matches(&ModuleSpecifier::parse("http://example.com").unwrap())
);
assert!(
domain.matches(&ModuleSpecifier::parse("http://example.COM").unwrap())
);
}
#[test]
fn test_matches() {
let candidates = [
"example.com",
"www.example.com",
"1.1.1.1",
"[2001:db8:a::123]",
// These will never match
"example.com.evil.com",
"1.1.1.1.evil.com",
"notexample.com",
"www.notexample.com",
];
let domains = [
("example.com", vec!["example.com", "www.example.com"]),
(".example.com", vec!["example.com", "www.example.com"]),
("www.example.com", vec!["www.example.com"]),
("1.1.1.1", vec!["1.1.1.1"]),
("[2001:db8:a::123]", vec!["[2001:db8:a::123]"]),
];
let url = |c: &str| ModuleSpecifier::parse(&format!("http://{c}")).unwrap();
let url_port =
|c: &str| ModuleSpecifier::parse(&format!("http://{c}:8080")).unwrap();
// Generate each candidate with and without a port
let candidates = candidates
.into_iter()
.flat_map(|c| [url(c), url_port(c)])
.collect::<Vec<_>>();
for (domain, expected_domain) in domains {
// Test without a port -- all candidates return without a port
let auth_domain = AuthDomain::from(domain);
let actual = candidates
.iter()
.filter(|c| auth_domain.matches(c))
.cloned()
.collect::<Vec<_>>();
let expected = expected_domain.iter().map(|u| url(u)).collect::<Vec<_>>();
assert_eq!(actual, expected);
// Test with a port, all candidates return with a port
let auth_domain = AuthDomain::from(&format!("{domain}:8080"));
let actual = candidates
.iter()
.filter(|c| auth_domain.matches(c))
.cloned()
.collect::<Vec<_>>();
let expected = expected_domain
.iter()
.map(|u| url_port(u))
.collect::<Vec<_>>();
assert_eq!(actual, expected);
}
}
}
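For reference, the string these tests exercise is the DENO_AUTH_TOKENS format. A small illustrative sketch (hypothetical values, not from the patch):

  // Entries are ';'-separated; the part after the last '@' names the
  // host (exact IP, IP:port, or domain suffix). "user:pass" entries
  // become Basic auth, plain entries become Bearer tokens.
  let tokens = AuthTokens::new(Some(
    "a1b2c3d4@deno.land;user:pass@example.com:8080".to_string(),
  ));
  let url = ModuleSpecifier::parse("https://deno.land/x/mod.ts").unwrap();
  assert_eq!(tokens.get(&url).unwrap().to_string(), "Bearer a1b2c3d4");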

@@ -1,22 +0,0 @@
// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
const queueMicrotask = globalThis.queueMicrotask || process.nextTick;
let [total, count] = typeof Deno !== "undefined"
? Deno.args
: [process.argv[2], process.argv[3]];
total = total ? parseInt(total, 0) : 50;
count = count ? parseInt(count, 10) : 1000000;
async function bench(fun) {
const start = Date.now();
for (let i = 0; i < count; i++) await fun();
const elapsed = Date.now() - start;
const rate = Math.floor(count / (elapsed / 1000));
console.log(`time ${elapsed} ms rate ${rate}`);
if (--total) queueMicrotask(() => bench(fun));
}
const core = Deno[Deno.internal].core;
const ops = core.ops;
const opVoidAsync = ops.op_void_async;
bench(() => opVoidAsync());

@@ -1,22 +0,0 @@
// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
const queueMicrotask = globalThis.queueMicrotask || process.nextTick;
let [total, count] = typeof Deno !== "undefined"
? Deno.args
: [process.argv[2], process.argv[3]];
total = total ? parseInt(total, 0) : 50;
count = count ? parseInt(count, 10) : 1000000;
async function bench(fun) {
const start = Date.now();
for (let i = 0; i < count; i++) await fun();
const elapsed = Date.now() - start;
const rate = Math.floor(count / (elapsed / 1000));
console.log(`time ${elapsed} ms rate ${rate}`);
if (--total) queueMicrotask(() => bench(fun));
}
const core = Deno[Deno.internal].core;
const ops = core.ops;
const opVoidAsyncDeferred = ops.op_void_async_deferred;
bench(() => opVoidAsyncDeferred());

@@ -1,4 +1,4 @@
-// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
+// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
 const cacheName = "cache-v1";
 const cache = await caches.open(cacheName);

@@ -1,4 +1,4 @@
-// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
+// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
 Deno.bench("echo deno", async () => {
   await new Deno.Command("echo", { args: ["deno"] }).output();

@@ -1,3 +1,6 @@
-// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
+// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
+// deno-lint-ignore-file no-console
 const count = 100000;
 for (let i = 0; i < count; i++) console.log("Hello World");

@@ -1,41 +1,14 @@
-// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
+// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.

 // v8 builtin that's close to the upper bound non-NOPs
 Deno.bench("date_now", { n: 5e5 }, () => {
   Date.now();
 });

-// Fast API calls
-{
-  // deno-lint-ignore camelcase
-  const { op_add } = Deno[Deno.internal].core.ops;
-  // deno-lint-ignore no-inner-declarations
-  function add(a, b) {
-    return op_add(a, b);
-  }
-  // deno-lint-ignore no-inner-declarations
-  function addJS(a, b) {
-    return a + b;
-  }
-  Deno.bench("op_add", () => add(1, 2));
-  Deno.bench("add_js", () => addJS(1, 2));
-}
+function addJS(a, b) {
+  return a + b;
+}
+Deno.bench("add_js", () => addJS(1, 2));

-// deno-lint-ignore camelcase
-const { op_void_sync } = Deno[Deno.internal].core.ops;
-function sync() {
-  return op_void_sync();
-}
-sync(); // Warmup
-// Void ops measure op-overhead
-Deno.bench("op_void_sync", () => sync());
-Deno.bench(
-  "op_void_async",
-  { n: 1e6 },
-  () => Deno[Deno.internal].core.opAsync("op_void_async"),
-);
 // A very lightweight op, that should be highly optimizable
 Deno.bench("perf_now", { n: 5e5 }, () => {

@@ -43,8 +16,7 @@ Deno.bench("perf_now", { n: 5e5 }, () => {
 });

 Deno.bench("open_file_sync", () => {
-  const file = Deno.openSync("./cli/bench/testdata/128k.bin");
-  file.close();
+  using _file = Deno.openSync("./cli/bench/testdata/128k.bin");
 });
// A common "language feature", that should be fast // A common "language feature", that should be fast
@@ -74,7 +46,7 @@ Deno.bench("b64_rt_short", { n: 1e6 }, () => {
   const buf = new Uint8Array(100);
   const file = Deno.openSync("/dev/zero");
   Deno.bench("read_zero", { n: 5e5 }, () => {
-    Deno.readSync(file.rid, buf);
+    file.readSync(buf);
   });
 }

@@ -83,7 +55,7 @@ Deno.bench("b64_rt_short", { n: 1e6 }, () => {
   const dataChunk = new Uint8Array(100);
   const file = Deno.openSync("/dev/null", { write: true });
   Deno.bench("write_null", { n: 5e5 }, () => {
-    Deno.writeSync(file.rid, dataChunk);
+    file.writeSync(dataChunk);
   });
 }

@@ -1,4 +1,7 @@
-// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
+// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
+// deno-lint-ignore-file no-console
 let [total, count] = typeof Deno !== "undefined"
   ? Deno.args
   : [process.argv[2], process.argv[3]];

@@ -1,4 +1,4 @@
-// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
+// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
 let total = 5;
 let current = "";

@@ -1,4 +1,4 @@
-// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
+// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
 /** @jsx h */
 import results from "./deno.json" assert { type: "json" };

@@ -1,4 +1,7 @@
-// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
+// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
+// deno-lint-ignore-file no-console
 let [total, count] = typeof Deno !== "undefined"
   ? Deno.args
   : [process.argv[2], process.argv[3]];

@@ -1,155 +0,0 @@
// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
use std::collections::HashMap;
use std::path::Path;
use std::process::Command;
use std::sync::atomic::AtomicU16;
use std::sync::atomic::Ordering;
use std::time::Duration;
use super::Result;
pub use test_util::parse_wrk_output;
pub use test_util::WrkOutput as HttpBenchmarkResult;
// Some of the benchmarks in this file have been renamed. In case the history
// somehow gets messed up:
// "node_http" was once called "node"
// "deno_tcp" was once called "deno"
// "deno_http" was once called "deno_net_http"
const DURATION: &str = "10s";
pub fn benchmark(
target_path: &Path,
) -> Result<HashMap<String, HttpBenchmarkResult>> {
let deno_exe = test_util::deno_exe_path();
let deno_exe = deno_exe.to_string();
let hyper_hello_exe = target_path.join("test_server");
let hyper_hello_exe = hyper_hello_exe.to_str().unwrap();
let mut res = HashMap::new();
let manifest_dir = Path::new(env!("CARGO_MANIFEST_DIR"));
let http_dir = manifest_dir.join("bench").join("http");
for entry in std::fs::read_dir(&http_dir)? {
let entry = entry?;
let pathbuf = entry.path();
let path = pathbuf.to_str().unwrap();
if path.ends_with(".lua") {
continue;
}
let file_stem = pathbuf.file_stem().unwrap().to_str().unwrap();
let lua_script = http_dir.join(format!("{file_stem}.lua"));
let mut maybe_lua = None;
if lua_script.exists() {
maybe_lua = Some(lua_script.to_str().unwrap());
}
let port = get_port();
// deno run -A --unstable <path> <addr>
res.insert(
file_stem.to_string(),
run(
&[
deno_exe.as_str(),
"run",
"--allow-all",
"--unstable",
"--enable-testing-features-do-not-use",
path,
&server_addr(port),
],
port,
None,
None,
maybe_lua,
)?,
);
}
res.insert("hyper".to_string(), hyper_http(hyper_hello_exe)?);
Ok(res)
}
fn run(
server_cmd: &[&str],
port: u16,
env: Option<Vec<(String, String)>>,
origin_cmd: Option<&[&str]>,
lua_script: Option<&str>,
) -> Result<HttpBenchmarkResult> {
// Wait for port 4544 to become available.
// TODO Need to use SO_REUSEPORT with tokio::net::TcpListener.
std::thread::sleep(Duration::from_secs(5));
let mut origin = None;
if let Some(cmd) = origin_cmd {
let mut com = Command::new(cmd[0]);
com.args(&cmd[1..]);
if let Some(env) = env.clone() {
com.envs(env);
}
origin = Some(com.spawn()?);
};
println!("{}", server_cmd.join(" "));
let mut server = {
let mut com = Command::new(server_cmd[0]);
com.args(&server_cmd[1..]);
if let Some(env) = env {
com.envs(env);
}
com.spawn()?
};
std::thread::sleep(Duration::from_secs(5)); // wait for server to wake up. TODO racy.
let wrk = test_util::prebuilt_tool_path("wrk");
assert!(wrk.is_file());
let addr = format!("http://127.0.0.1:{port}/");
let wrk = wrk.to_string();
let mut wrk_cmd = vec![wrk.as_str(), "-d", DURATION, "--latency", &addr];
if let Some(lua_script) = lua_script {
wrk_cmd.push("-s");
wrk_cmd.push(lua_script);
}
println!("{}", wrk_cmd.join(" "));
let output = test_util::run_collect(&wrk_cmd, None, None, None, true).0;
std::thread::sleep(Duration::from_secs(1)); // wait to capture failure. TODO racy.
println!("{output}");
assert!(
server.try_wait()?.map(|s| s.success()).unwrap_or(true),
"server ended with error"
);
server.kill()?;
if let Some(mut origin) = origin {
origin.kill()?;
}
Ok(parse_wrk_output(&output))
}
static NEXT_PORT: AtomicU16 = AtomicU16::new(4544);
pub(crate) fn get_port() -> u16 {
let p = NEXT_PORT.load(Ordering::SeqCst);
NEXT_PORT.store(p.wrapping_add(1), Ordering::SeqCst);
p
}
fn server_addr(port: u16) -> String {
format!("0.0.0.0:{port}")
}
fn hyper_http(exe: &str) -> Result<HttpBenchmarkResult> {
let port = get_port();
println!("http_benchmark testing RUST hyper");
run(&[exe, &port.to_string()], port, None, None, None)
}

@@ -1,10 +0,0 @@
// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
import { Hono } from "https://deno.land/x/hono@v2.0.9/mod.ts";
const addr = Deno.args[0] || "127.0.0.1:4500";
const [hostname, port] = addr.split(":");
const app = new Hono();
app.get("/", (c) => c.text("Hello, World!"));
Deno.serve({ port: Number(port), hostname }, app.fetch);

@@ -1,14 +0,0 @@
// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
const addr = Deno.args[0] || "127.0.0.1:4500";
const [hostname, port] = addr.split(":");
const { serve } = Deno;
const path = new URL("../testdata/128k.bin", import.meta.url).pathname;
function handler() {
const file = Deno.openSync(path);
return new Response(file.readable);
}
serve({ hostname, port: Number(port) }, handler);

@@ -1,19 +0,0 @@
// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
const addr = Deno.args[0] || "127.0.0.1:4500";
const [hostname, port] = addr.split(":");
const listener = Deno.listen({ hostname, port: Number(port) });
console.log("Server listening on", addr);
const encoder = new TextEncoder();
const body = encoder.encode("Hello World");
for await (const conn of listener) {
(async () => {
const requests = Deno.serveHttp(conn);
for await (const event of requests) {
event.respondWith(new Response(body))
.catch((e) => console.log(e));
}
})();
}

@@ -1,24 +0,0 @@
// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
const addr = Deno.args[0] || "127.0.0.1:4500";
const [hostname, port] = addr.split(":");
const listener = Deno.listen({ hostname, port: Number(port) });
console.log("Server listening on", addr);
for await (const conn of listener) {
(async () => {
const requests = Deno.serveHttp(conn);
for await (const { respondWith } of requests) {
respondWith(
new Response("Hello World", {
status: 200,
headers: {
server: "deno",
"content-type": "text/plain",
},
}),
)
.catch((e) => console.log(e));
}
})();
}

@@ -1,43 +0,0 @@
// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
const addr = Deno.args[0] || "127.0.0.1:4500";
const [hostname, port] = addr.split(":");
const tcp = Deno.listen({ hostname, port: Number(port) });
console.log("Server listening on", addr);
class Http {
id;
constructor(id) {
this.id = id;
}
[Symbol.asyncIterator]() {
return {
next: async () => {
const reqEvt = await Deno[Deno.internal].core.opAsync(
"op_http_accept",
this.id,
);
return { value: reqEvt ?? undefined, done: reqEvt === null };
},
};
}
}
for await (const conn of tcp) {
const id = Deno[Deno.internal].core.ops.op_http_start(conn.rid);
const http = new Http(id);
(async () => {
for await (const req of http) {
if (req == null) continue;
const { 0: stream } = req;
await Deno[Deno.internal].core.opAsync(
"op_http_write_headers",
stream,
200,
[],
"Hello World",
);
Deno[Deno.internal].core.close(stream);
}
})();
}

@@ -1,17 +0,0 @@
// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
const addr = Deno.args[0] || "127.0.0.1:4500";
const [hostname, port] = addr.split(":");
const listener = Deno.listen({ hostname, port: Number(port) });
console.log("Server listening on", addr);
for await (const conn of listener) {
(async () => {
const requests = Deno.serveHttp(conn);
for await (const { respondWith, request } of requests) {
const bar = request.headers.get("foo");
respondWith(new Response(bar))
.catch((e) => console.log(e));
}
})();
}

@@ -1,5 +0,0 @@
wrk.headers["foo"] = "bar"
wrk.headers["User-Agent"] = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.0.0 Safari/537.36"
wrk.headers["Viewport-Width"] = "1920"
wrk.headers["Accept"] = "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9"
wrk.headers["Accept-Language"] = "en,la;q=0.9"

@@ -1,11 +0,0 @@
// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
const addr = Deno.args[0] ?? "127.0.0.1:4500";
const [hostname, port] = addr.split(":");
const { serve } = Deno;
function handler() {
return new Response("Hello World");
}
serve({ hostname, port: Number(port), reusePort: true }, handler);

@@ -1,19 +0,0 @@
// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
const addr = Deno.args[0] || "127.0.0.1:4500";
const [hostname, port] = addr.split(":");
const listener = Deno.listen({ hostname, port: Number(port) });
console.log("Server listening on", addr);
for await (const conn of listener) {
(async () => {
const requests = Deno.serveHttp(conn);
for await (const { respondWith, request } of requests) {
if (request.method == "POST") {
const buffer = await request.arrayBuffer();
respondWith(new Response(buffer.byteLength))
.catch((e) => console.log(e));
}
}
})();
}

@@ -1,5 +0,0 @@
wrk.method = "POST"
wrk.headers["Content-Type"] = "application/octet-stream"
file = io.open("./cli/bench/testdata/128k.bin", "rb")
wrk.body = file:read("*a")

@@ -1,19 +0,0 @@
// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
const addr = Deno.args[0] || "127.0.0.1:4500";
const [hostname, port] = addr.split(":");
const listener = Deno.listen({ hostname, port: Number(port) });
console.log("Server listening on", addr);
for await (const conn of listener) {
(async () => {
const requests = Deno.serveHttp(conn);
for await (const { respondWith, request } of requests) {
if (request.method == "POST") {
const json = await request.json();
respondWith(new Response(json.hello))
.catch((e) => console.log(e));
}
}
})();
}

@@ -1,3 +0,0 @@
wrk.method = "POST"
wrk.headers["Content-Type"] = "application/json"
wrk.body = '{"hello":"deno"}'

@@ -1,23 +0,0 @@
import { renderToReadableStream } from "https://esm.run/react-dom/server";
import * as React from "https://esm.run/react";
const { serve } = Deno;
const addr = Deno.args[0] || "127.0.0.1:4500";
const [hostname, port] = addr.split(":");
const App = () => (
<html>
<body>
<h1>Hello World</h1>
</body>
</html>
);
const headers = {
headers: {
"Content-Type": "text/html",
},
};
serve({ hostname, port: Number(port) }, async () => {
return new Response(await renderToReadableStream(<App />), headers);
});

@@ -1,33 +0,0 @@
// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
// Used for benchmarking Deno's networking.
// TODO(bartlomieju): Replace this with a real HTTP server once
// https://github.com/denoland/deno/issues/726 is completed.
// Note: this is a keep-alive server.
const addr = Deno.args[0] || "127.0.0.1:4500";
const [hostname, port] = addr.split(":");
const listener = Deno.listen({ hostname, port: Number(port) });
const response = new TextEncoder().encode(
"HTTP/1.1 200 OK\r\nContent-Length: 12\r\n\r\nHello World\n",
);
async function handle(conn: Deno.Conn): Promise<void> {
const buffer = new Uint8Array(1024);
try {
while (true) {
await conn.read(buffer);
await conn.write(response);
}
} catch (e) {
if (
!(e instanceof Deno.errors.BrokenPipe) &&
!(e instanceof Deno.errors.ConnectionReset)
) {
throw e;
}
}
conn.close();
}
console.log("Listening on", addr);
for await (const conn of listener) {
handle(conn);
}

@@ -1,19 +1,23 @@
-// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
+// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
 use deno_core::serde::Deserialize;
 use deno_core::serde_json;
 use deno_core::serde_json::json;
 use deno_core::serde_json::Value;
-use deno_core::url::Url;
+use lsp_types::Uri;
 use std::collections::HashMap;
 use std::path::Path;
+use std::str::FromStr;
 use std::time::Duration;
 use test_util::lsp::LspClientBuilder;
+use test_util::PathRef;
 use tower_lsp::lsp_types as lsp;

 static FIXTURE_CODE_LENS_TS: &str = include_str!("testdata/code_lens.ts");
 static FIXTURE_DB_TS: &str = include_str!("testdata/db.ts");
 static FIXTURE_DB_MESSAGES: &[u8] = include_bytes!("testdata/db_messages.json");
+static FIXTURE_DECO_APPS: &[u8] =
+  include_bytes!("testdata/deco_apps_requests.json");

@@ -36,6 +40,107 @@ struct FixtureMessage {
   params: Value,
 }
/// replaces the root directory in the URIs of the requests
/// with the given root path
fn patch_uris<'a>(
reqs: impl IntoIterator<Item = &'a mut tower_lsp::jsonrpc::Request>,
root: &PathRef,
) {
for req in reqs {
let mut params = req.params().unwrap().clone();
let new_req = if let Some(doc) = params.get_mut("textDocument") {
if let Some(uri_val) = doc.get_mut("uri") {
let uri = uri_val.as_str().unwrap();
*uri_val =
Value::from(uri.replace(
"file:///",
&format!("file://{}/", root.to_string_lossy()),
));
}
let builder = tower_lsp::jsonrpc::Request::build(req.method().to_owned());
let builder = if let Some(id) = req.id() {
builder.id(id.clone())
} else {
builder
};
Some(builder.params(params).finish())
} else {
None
};
if let Some(new_req) = new_req {
*req = new_req;
}
}
}
fn bench_deco_apps_edits(deno_exe: &Path) -> Duration {
let mut requests: Vec<tower_lsp::jsonrpc::Request> =
serde_json::from_slice(FIXTURE_DECO_APPS).unwrap();
let apps =
test_util::root_path().join("cli/bench/testdata/lsp_benchdata/apps");
// it's a bit wasteful to do this for every run, but it's the easiest with the way things
// are currently structured
patch_uris(&mut requests, &apps);
let mut client = LspClientBuilder::new()
.use_diagnostic_sync(false)
.set_root_dir(apps.clone())
.deno_exe(deno_exe)
.build();
client.initialize(|c| {
c.set_workspace_folders(vec![lsp_types::WorkspaceFolder {
uri: apps.uri_dir(),
name: "apps".to_string(),
}]);
c.set_deno_enable(true);
c.set_unstable(true);
c.set_preload_limit(1000);
c.set_config(apps.join("deno.json").as_path().to_string_lossy());
});
let start = std::time::Instant::now();
let mut reqs = 0;
for req in requests {
if req.id().is_none() {
client.write_notification(req.method(), req.params());
} else {
reqs += 1;
client.write_jsonrpc(req.method(), req.params());
}
}
for _ in 0..reqs {
let _ = client.read_latest_response();
}
let end = start.elapsed();
// part of the motivation of including this benchmark is to see how we perform
// with a fairly large number of documents in memory.
// make sure that's the case
let res = client.write_request(
"deno/virtualTextDocument",
json!({
"textDocument": {
"uri": "deno:/status.md"
}
}),
);
let re = lazy_regex::regex!(r"Documents in memory: (\d+)");
let res = res.as_str().unwrap().to_string();
assert!(res.starts_with("# Deno Language Server Status"));
let captures = re.captures(&res).unwrap();
let count = captures.get(1).unwrap().as_str().parse::<usize>().unwrap();
assert!(count > 1000, "count: {}", count);
client.shutdown();
end
}
 /// A benchmark that opens a 8000+ line TypeScript document, adds a function to
 /// the end of the document and does a level of hovering and gets quick fix
 /// code actions.

@@ -45,6 +150,11 @@ fn bench_big_file_edits(deno_exe: &Path) -> Duration {
     .deno_exe(deno_exe)
     .build();
   client.initialize_default();
let (method, _): (String, Option<Value>) = client.read_notification();
assert_eq!(method, "deno/didRefreshDenoConfigurationTree");
client.change_configuration(json!({ "deno": { "enable": true } }));
let (method, _): (String, Option<Value>) = client.read_notification();
assert_eq!(method, "deno/didRefreshDenoConfigurationTree");
  client.write_notification(
    "textDocument/didOpen",

@@ -58,16 +168,6 @@ fn bench_big_file_edits(deno_exe: &Path) -> Duration {
    }),
  );
let (id, method, _): (u64, String, Option<Value>) = client.read_request();
assert_eq!(method, "workspace/configuration");
client.write_response(
id,
json!({
"enable": true
}),
);
  let (method, _): (String, Option<Value>) = client.read_notification();
  assert_eq!(method, "textDocument/publishDiagnostics");
  let (method, _): (String, Option<Value>) = client.read_notification();

@@ -110,6 +210,18 @@ fn bench_code_lens(deno_exe: &Path) -> Duration {
    .deno_exe(deno_exe)
    .build();
  client.initialize_default();
let (method, _): (String, Option<Value>) = client.read_notification();
assert_eq!(method, "deno/didRefreshDenoConfigurationTree");
client.change_configuration(json!({ "deno": {
"enable": true,
"codeLens": {
"implementations": true,
"references": true,
"test": true,
},
} }));
let (method, _): (String, Option<Value>) = client.read_notification();
assert_eq!(method, "deno/didRefreshDenoConfigurationTree");
  client.write_notification(
    "textDocument/didOpen",

@@ -123,16 +235,6 @@ fn bench_code_lens(deno_exe: &Path) -> Duration {
    }),
  );
let (id, method, _): (u64, String, Option<Value>) = client.read_request();
assert_eq!(method, "workspace/configuration");
client.write_response(
id,
json!({
"enable": true
}),
);
  let (method, _): (String, Option<Value>) = client.read_notification();
  assert_eq!(method, "textDocument/publishDiagnostics");
  let (method, _): (String, Option<Value>) = client.read_notification();

@@ -163,6 +265,11 @@ fn bench_find_replace(deno_exe: &Path) -> Duration {
    .deno_exe(deno_exe)
    .build();
  client.initialize_default();
let (method, _): (String, Option<Value>) = client.read_notification();
assert_eq!(method, "deno/didRefreshDenoConfigurationTree");
client.change_configuration(json!({ "deno": { "enable": true } }));
let (method, _): (String, Option<Value>) = client.read_notification();
assert_eq!(method, "deno/didRefreshDenoConfigurationTree");
  for i in 0..10 {
    client.write_notification(

@@ -178,12 +285,6 @@ fn bench_find_replace(deno_exe: &Path) -> Duration {
    );
  }
for _ in 0..10 {
let (id, method, _) = client.read_request::<Value>();
assert_eq!(method, "workspace/configuration");
client.write_response(id, json!({ "enable": true }));
}
  for _ in 0..3 {
    let (method, _): (String, Option<Value>) = client.read_notification();
    assert_eq!(method, "textDocument/publishDiagnostics");

@@ -195,7 +296,7 @@ fn bench_find_replace(deno_exe: &Path) -> Duration {
      "textDocument/didChange",
      lsp::DidChangeTextDocumentParams {
        text_document: lsp::VersionedTextDocumentIdentifier {
-          uri: Url::parse(&file_name).unwrap(),
+          uri: Uri::from_str(&file_name).unwrap(),
          version: 2,
        },
        content_changes: vec![lsp::TextDocumentContentChangeEvent {

@@ -222,7 +323,7 @@ fn bench_find_replace(deno_exe: &Path) -> Duration {
      "textDocument/formatting",
      lsp::DocumentFormattingParams {
        text_document: lsp::TextDocumentIdentifier {
-          uri: Url::parse(&file_name).unwrap(),
+          uri: Uri::from_str(&file_name).unwrap(),
        },
        options: lsp::FormattingOptions {
          tab_size: 2,

@@ -252,6 +353,11 @@ fn bench_startup_shutdown(deno_exe: &Path) -> Duration {
    .deno_exe(deno_exe)
    .build();
  client.initialize_default();
let (method, _): (String, Option<Value>) = client.read_notification();
assert_eq!(method, "deno/didRefreshDenoConfigurationTree");
client.change_configuration(json!({ "deno": { "enable": true } }));
let (method, _): (String, Option<Value>) = client.read_notification();
assert_eq!(method, "deno/didRefreshDenoConfigurationTree");
  client.write_notification(
    "textDocument/didOpen",

@@ -265,16 +371,6 @@ fn bench_startup_shutdown(deno_exe: &Path) -> Duration {
    }),
  );
let (id, method, _) = client.read_request::<Value>();
assert_eq!(method, "workspace/configuration");
client.write_response(
id,
json!({
"enable": true
}),
);
  let (method, _): (String, Option<Value>) = client.read_notification();
  assert_eq!(method, "textDocument/publishDiagnostics");
  let (method, _): (String, Option<Value>) = client.read_notification();

@@ -334,6 +430,16 @@ pub fn benchmarks(deno_exe: &Path) -> HashMap<String, i64> {
  println!("  ({} runs, mean: {}ms)", times.len(), mean);
  exec_times.insert("code_lens".to_string(), mean);
println!(" - deco-cx/apps Multiple Edits + Navigation");
let mut times = Vec::new();
for _ in 0..5 {
times.push(bench_deco_apps_edits(deno_exe));
}
let mean =
(times.iter().sum::<Duration>() / times.len() as u32).as_millis() as i64;
println!(" ({} runs, mean: {}ms)", times.len(), mean);
exec_times.insert("deco_apps_edits_nav".to_string(), mean);
println!("<- End benchmarking lsp"); println!("<- End benchmarking lsp");
exec_times exec_times
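An aside on the mean computation used throughout these benchmarks: std::time::Duration implements Sum and Div&lt;u32&gt;, so the integer mean can be taken directly on the summed durations before converting to milliseconds. A self-contained sketch:

    use std::time::Duration;

    fn main() {
      let times = vec![Duration::from_millis(90), Duration::from_millis(110)];
      // Sum the runs, divide by the run count, then convert to whole milliseconds.
      let mean =
        (times.iter().sum::<Duration>() / times.len() as u32).as_millis() as i64;
      assert_eq!(mean, 100);
    }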
@@ -1,4 +1,4 @@
-// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
+// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.

use deno_bench_util::bencher::benchmark_group;
use deno_bench_util::bencher::benchmark_main;

@@ -13,6 +13,11 @@ use test_util::lsp::LspClientBuilder;
fn incremental_change_wait(bench: &mut Bencher) {
  let mut client = LspClientBuilder::new().use_diagnostic_sync(false).build();
  client.initialize_default();
let (method, _): (String, Option<Value>) = client.read_notification();
assert_eq!(method, "deno/didRefreshDenoConfigurationTree");
client.change_configuration(json!({ "deno": { "enable": true } }));
let (method, _): (String, Option<Value>) = client.read_notification();
assert_eq!(method, "deno/didRefreshDenoConfigurationTree");
  client.write_notification(
    "textDocument/didOpen",

@@ -26,15 +31,6 @@ fn incremental_change_wait(bench: &mut Bencher) {
    }),
  );
let (id, method, _): (u64, String, Option<Value>) = client.read_request();
assert_eq!(method, "workspace/configuration");
client.write_response(
id,
json!({
"enable": true
}),
);
  let (method, _maybe_diag): (String, Option<Value>) =
    client.read_notification();
  assert_eq!(method, "textDocument/publishDiagnostics");
@@ -1,4 +1,7 @@
-// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
+// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
+#![allow(clippy::print_stdout)]
+#![allow(clippy::print_stderr)]

use deno_core::error::AnyError;
use deno_core::serde_json;

@@ -14,9 +17,6 @@ use std::process::Stdio;
use std::time::SystemTime;
use test_util::PathRef;
include!("../util/time.rs");
mod http;
mod lsp;

fn read_json(filename: &Path) -> Result<Value> {
@@ -37,7 +37,7 @@ const EXEC_TIME_BENCHMARKS: &[(&str, &[&str], Option<i32>)] = &[
  // invalidating that cache.
  (
    "cold_hello",
-    &["run", "--reload", "cli/tests/testdata/run/002_hello.ts"],
+    &["run", "--reload", "tests/testdata/run/002_hello.ts"],
    None,
  ),
  (

@@ -45,23 +45,19 @@ const EXEC_TIME_BENCHMARKS: &[(&str, &[&str], Option<i32>)] = &[
    &[
      "run",
      "--reload",
-      "cli/tests/testdata/run/003_relative_import.ts",
+      "tests/testdata/run/003_relative_import.ts",
    ],
    None,
  ),
-  (
-    "hello",
-    &["run", "cli/tests/testdata/run/002_hello.ts"],
-    None,
-  ),
+  ("hello", &["run", "tests/testdata/run/002_hello.ts"], None),
  (
    "relative_import",
-    &["run", "cli/tests/testdata/run/003_relative_import.ts"],
+    &["run", "tests/testdata/run/003_relative_import.ts"],
    None,
  ),
  (
    "error_001",
-    &["run", "cli/tests/testdata/run/error_001.ts"],
+    &["run", "tests/testdata/run/error_001.ts"],
    Some(1),
  ),
  (

@@ -70,7 +66,7 @@ const EXEC_TIME_BENCHMARKS: &[(&str, &[&str], Option<i32>)] = &[
      "run",
      "--reload",
      "--no-check",
-      "cli/tests/testdata/run/002_hello.ts",
+      "tests/testdata/run/002_hello.ts",
    ],
    None,
  ),

@@ -79,7 +75,7 @@ const EXEC_TIME_BENCHMARKS: &[(&str, &[&str], Option<i32>)] = &[
    &[
      "run",
      "--allow-read",
-      "cli/tests/testdata/workers/bench_startup.ts",
+      "tests/testdata/workers/bench_startup.ts",
    ],
    None,
  ),

@@ -88,7 +84,7 @@ const EXEC_TIME_BENCHMARKS: &[(&str, &[&str], Option<i32>)] = &[
    &[
      "run",
      "--allow-read",
-      "cli/tests/testdata/workers/bench_round_robin.ts",
+      "tests/testdata/workers/bench_round_robin.ts",
    ],
    None,
  ),

@@ -97,31 +93,28 @@ const EXEC_TIME_BENCHMARKS: &[(&str, &[&str], Option<i32>)] = &[
    &[
      "run",
      "--allow-read",
-      "cli/tests/testdata/workers/bench_large_message.ts",
+      "tests/testdata/workers/bench_large_message.ts",
    ],
    None,
  ),
  (
    "text_decoder",
-    &["run", "cli/tests/testdata/benches/text_decoder_perf.js"],
+    &["run", "tests/testdata/benches/text_decoder_perf.js"],
    None,
  ),
  (
    "text_encoder",
-    &["run", "cli/tests/testdata/benches/text_encoder_perf.js"],
+    &["run", "tests/testdata/benches/text_encoder_perf.js"],
    None,
  ),
  (
    "text_encoder_into",
-    &[
-      "run",
-      "cli/tests/testdata/benches/text_encoder_into_perf.js",
-    ],
+    &["run", "tests/testdata/benches/text_encoder_into_perf.js"],
    None,
  ),
  (
    "response_string",
-    &["run", "cli/tests/testdata/benches/response_string_perf.js"],
+    &["run", "tests/testdata/benches/response_string_perf.js"],
    None,
  ),
  (

@@ -130,7 +123,9 @@ const EXEC_TIME_BENCHMARKS: &[(&str, &[&str], Option<i32>)] = &[
      "check",
      "--reload",
      "--unstable",
-      "test_util/std/examples/chat/server_test.ts",
+      "--config",
+      "tests/config/deno.json",
+      "tests/util/std/http/file_server_test.ts",
    ],
    None,
  ),

@@ -141,26 +136,9 @@ const EXEC_TIME_BENCHMARKS: &[(&str, &[&str], Option<i32>)] = &[
      "--reload",
      "--no-check",
      "--unstable",
-      "test_util/std/examples/chat/server_test.ts",
+      "--config",
+      "tests/config/deno.json",
+      "tests/util/std/http/file_server_test.ts",
    ],
    None,
  ),
-  (
-    "bundle",
-    &[
-      "bundle",
-      "--unstable",
-      "test_util/std/examples/chat/server_test.ts",
-    ],
-    None,
-  ),
-  (
-    "bundle_no_check",
-    &[
-      "bundle",
-      "--no-check",
-      "--unstable",
-      "test_util/std/examples/chat/server_test.ts",
-    ],
-    None,
-  ),
@@ -312,38 +290,6 @@ fn get_binary_sizes(target_dir: &Path) -> Result<HashMap<String, i64>> {
  Ok(sizes)
}
const BUNDLES: &[(&str, &str)] = &[
("file_server", "./test_util/std/http/file_server.ts"),
("gist", "./test_util/std/examples/gist.ts"),
];
fn bundle_benchmark(deno_exe: &Path) -> Result<HashMap<String, i64>> {
let mut sizes = HashMap::<String, i64>::new();
for (name, url) in BUNDLES {
let path = format!("{name}.bundle.js");
test_util::run(
&[
deno_exe.to_str().unwrap(),
"bundle",
"--unstable",
url,
&path,
],
None,
None,
None,
true,
);
let file = PathBuf::from(path);
assert!(file.is_file());
sizes.insert(name.to_string(), file.metadata()?.len() as i64);
let _ = fs::remove_file(file);
}
Ok(sizes)
}
fn run_max_mem_benchmark(deno_exe: &Path) -> Result<HashMap<String, i64>> {
  let mut results = HashMap::<String, i64>::new();

@@ -398,9 +344,11 @@ struct BenchResult {
  binary_size: HashMap<String, i64>,
  bundle_size: HashMap<String, i64>,
  cargo_deps: usize,
+  // TODO(bartlomieju): remove
  max_latency: HashMap<String, f64>,
  max_memory: HashMap<String, i64>,
  lsp_exec_time: HashMap<String, i64>,
+  // TODO(bartlomieju): remove
  req_per_sec: HashMap<String, i64>,
  syscall_count: HashMap<String, i64>,
  thread_count: HashMap<String, i64>,

@@ -411,12 +359,10 @@ async fn main() -> Result<()> {
  let mut args = env::args();

  let mut benchmarks = vec![
-    "bundle",
    "exec_time",
    "binary_size",
    "cargo_deps",
    "lsp",
-    "http",
    "strace",
    "mem_usage",
  ];

@@ -438,11 +384,16 @@ async fn main() -> Result<()> {
  println!("Starting Deno benchmark");

  let target_dir = test_util::target_dir();
-  let deno_exe = test_util::deno_exe_path().to_path_buf();
+  let deno_exe = if let Ok(p) = std::env::var("DENO_BENCH_EXE") {
+    PathBuf::from(p)
+  } else {
+    test_util::deno_exe_path().to_path_buf()
+  };
  env::set_current_dir(test_util::root_path())?;

  let mut new_data = BenchResult {
-    created_at: utc_now().to_rfc3339_opts(chrono::SecondsFormat::Secs, true),
+    created_at: chrono::Utc::now()
+      .to_rfc3339_opts(chrono::SecondsFormat::Secs, true),
    sha1: test_util::run_collect(
      &["git", "rev-parse", "HEAD"],
      None,

@@ -456,11 +407,6 @@ async fn main() -> Result<()> {
    ..Default::default()
  };
if benchmarks.contains(&"bundle") {
let bundle_size = bundle_benchmark(&deno_exe)?;
new_data.bundle_size = bundle_size;
}
if benchmarks.contains(&"exec_time") { if benchmarks.contains(&"exec_time") {
let exec_times = run_exec_time(&deno_exe, &target_dir)?; let exec_times = run_exec_time(&deno_exe, &target_dir)?;
new_data.benchmark = exec_times; new_data.benchmark = exec_times;
@ -481,21 +427,6 @@ async fn main() -> Result<()> {
new_data.lsp_exec_time = lsp_exec_times; new_data.lsp_exec_time = lsp_exec_times;
} }
if benchmarks.contains(&"http") && cfg!(not(target_os = "windows")) {
let stats = http::benchmark(target_dir.as_path())?;
let req_per_sec = stats
.iter()
.map(|(name, result)| (name.clone(), result.requests as i64))
.collect();
new_data.req_per_sec = req_per_sec;
let max_latency = stats
.iter()
.map(|(name, result)| (name.clone(), result.latency))
.collect();
new_data.max_latency = max_latency;
}
  if cfg!(target_os = "linux") && benchmarks.contains(&"strace") {
    use std::io::Read;
@@ -1,6 +1,6 @@
-// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
+// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
-import { loadTestLibrary } from "../../../test_napi/common.js";
+import { loadTestLibrary } from "../../../tests/napi/common.js";

const lib = loadTestLibrary();
@@ -1,8 +1,10 @@
+// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
import { bench, run } from "mitata";
import { createRequire } from "module";

const require = createRequire(import.meta.url);
-const lib = require("../../../test_napi.node");
+const lib = require("../../../tests/napi.node");

bench("warmup", () => {});
bench("napi_get_undefined", () => lib.test_get_undefined(0));
@@ -1,4 +1,7 @@
-// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
+// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
+// deno-lint-ignore-file no-console

const queueMicrotask = globalThis.queueMicrotask || process.nextTick;
let [total, count] = typeof Deno !== "undefined"
  ? Deno.args
@@ -1,4 +1,7 @@
-// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
+// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
+// deno-lint-ignore-file no-console

let [total, count] = typeof Deno !== "undefined"
  ? Deno.args
  : [process.argv[2], process.argv[3]];
@@ -1,5 +1,5 @@
+// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
// From https://github.com/just-js/benchmarks/tree/main/01-stdio
-// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.

#include <stdlib.h>
#include <stdio.h>
@@ -26,4 +26,4 @@ int main(int argc, char *argv[]) {
    exit(1);
  }

  fprintf(stdout, "size %lu reads %u blocksize %u\n", size, reads, blocksize);
}
@@ -1,7 +1,9 @@
-// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
+// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
//
// From https://github.com/just-js/benchmarks/tree/main/01-stdio
+// deno-lint-ignore-file no-console

const blocksize = parseInt(Deno.args[0] || 65536);
const buf = new Uint8Array(blocksize);
let size = 0;
@@ -1,4 +1,4 @@
-// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
+// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.

const listener = Deno.listen({ port: 4500 });
const response = new TextEncoder().encode(
cli/bench/testdata/deco_apps_requests.json (new file; diff suppressed because it is too large)

cli/bench/testdata/lsp_benchdata (new submodule)
@@ -0,0 +1 @@
+Subproject commit af4c6a1eee825f19d3b3cce74cfdd03ebe1a3b92
@@ -1,4 +1,7 @@
-// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
+// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
+// deno-lint-ignore-file no-console

const queueMicrotask = globalThis.queueMicrotask || process.nextTick;
let [total, count] = typeof Deno !== "undefined"
  ? Deno.args
@@ -1,4 +1,7 @@
-// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
+// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
+// deno-lint-ignore-file no-console

const queueMicrotask = globalThis.queueMicrotask || process.nextTick;
let [total, count] = typeof Deno !== "undefined"
  ? Deno.args
@@ -1,4 +1,6 @@
-// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
+// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
+// deno-lint-ignore-file no-console

// Note: when benchmarking across different Deno versions, make sure to clear
// the DENO_DIR cache.
@@ -1,4 +1,7 @@
-// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
+// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
+// deno-lint-ignore-file no-console

const queueMicrotask = globalThis.queueMicrotask || process.nextTick;
let [total, count] = typeof Deno !== "undefined"
  ? Deno.args
@@ -1,79 +1,92 @@
-// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
+// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.

use std::env;
use std::path::PathBuf;

-use deno_core::snapshot_util::*;
-use deno_core::ExtensionFileSource;
-use deno_core::ExtensionFileSourceCode;
+use deno_core::snapshot::*;
use deno_runtime::*;

+mod shared;

mod ts {
  use super::*;
  use deno_core::error::custom_error;
  use deno_core::error::AnyError;
-  use deno_core::op;
+  use deno_core::op2;
  use deno_core::OpState;
-  use deno_runtime::deno_node::SUPPORTED_BUILTIN_NODE_MODULES;
-  use serde::Deserialize;
-  use serde_json::json;
-  use serde_json::Value;
+  use serde::Serialize;
  use std::collections::HashMap;
+  use std::io::Write;
  use std::path::Path;
  use std::path::PathBuf;

-  #[derive(Debug, Deserialize)]
-  struct LoadArgs {
-    /// The fully qualified specifier that should be loaded.
-    specifier: String,
-  }
+  #[derive(Debug, Serialize)]
+  #[serde(rename_all = "camelCase")]
+  struct BuildInfoResponse {
+    build_specifier: String,
+    libs: Vec<String>,
+  }

-  #[op]
-  fn op_build_info(state: &mut OpState) -> Value {
-    let build_specifier = "asset:///bootstrap.ts";
-    let node_built_in_module_names = SUPPORTED_BUILTIN_NODE_MODULES.to_vec();
-    let build_libs = state.borrow::<Vec<&str>>();
-    json!({
-      "buildSpecifier": build_specifier,
-      "libs": build_libs,
-      "nodeBuiltInModuleNames": node_built_in_module_names,
-    })
-  }
+  #[op2]
+  #[serde]
+  fn op_build_info(state: &mut OpState) -> BuildInfoResponse {
+    let build_specifier = "asset:///bootstrap.ts".to_string();
+    let build_libs = state
+      .borrow::<Vec<&str>>()
+      .iter()
+      .map(|s| s.to_string())
+      .collect();
+    BuildInfoResponse {
+      build_specifier,
+      libs: build_libs,
+    }
+  }

-  #[op]
+  #[op2(fast)]
  fn op_is_node_file() -> bool {
    false
  }

-  #[op]
+  #[op2]
+  #[string]
  fn op_script_version(
    _state: &mut OpState,
-    _args: Value,
+    #[string] _arg: &str,
  ) -> Result<Option<String>, AnyError> {
    Ok(Some("1".to_string()))
  }

+  #[derive(Debug, Serialize)]
+  #[serde(rename_all = "camelCase")]
+  struct LoadResponse {
+    data: String,
+    version: String,
+    script_kind: i32,
+  }

-  #[op]
+  #[op2]
+  #[serde]
  // using the same op that is used in `tsc.rs` for loading modules and reading
  // files, but a slightly different implementation at build time.
-  fn op_load(state: &mut OpState, args: LoadArgs) -> Result<Value, AnyError> {
+  fn op_load(
+    state: &mut OpState,
+    #[string] load_specifier: &str,
+  ) -> Result<LoadResponse, AnyError> {
    let op_crate_libs = state.borrow::<HashMap<&str, PathBuf>>();
    let path_dts = state.borrow::<PathBuf>();
    let re_asset = lazy_regex::regex!(r"asset:/{3}lib\.(\S+)\.d\.ts");
    let build_specifier = "asset:///bootstrap.ts";

    // we need a basic file to send to tsc to warm it up.
-    if args.specifier == build_specifier {
-      Ok(json!({
-        "data": r#"Deno.writeTextFile("hello.txt", "hello deno!");"#,
-        "version": "1",
-        // this corresponds to `ts.ScriptKind.TypeScript`
-        "scriptKind": 3
-      }))
+    if load_specifier == build_specifier {
+      Ok(LoadResponse {
+        data: r#"Deno.writeTextFile("hello.txt", "hello deno!");"#.to_string(),
+        version: "1".to_string(),
+        // this corresponds to `ts.ScriptKind.TypeScript`
+        script_kind: 3,
+      })
    // specifiers come across as `asset:///lib.{lib_name}.d.ts` and we need to
    // parse out just the name so we can lookup the asset.
-    } else if let Some(caps) = re_asset.captures(&args.specifier) {
+    } else if let Some(caps) = re_asset.captures(load_specifier) {
      if let Some(lib) = caps.get(1).map(|m| m.as_str()) {
        // if it comes from an op crate, we were supplied with the path to the
        // file.

@@ -84,22 +97,22 @@ mod ts {
          path_dts.join(format!("lib.{lib}.d.ts"))
        };
        let data = std::fs::read_to_string(path)?;
-        Ok(json!({
-          "data": data,
-          "version": "1",
-          // this corresponds to `ts.ScriptKind.TypeScript`
-          "scriptKind": 3
-        }))
+        Ok(LoadResponse {
+          data,
+          version: "1".to_string(),
+          // this corresponds to `ts.ScriptKind.TypeScript`
+          script_kind: 3,
+        })
      } else {
        Err(custom_error(
          "InvalidSpecifier",
-          format!("An invalid specifier was requested: {}", args.specifier),
+          format!("An invalid specifier was requested: {}", load_specifier),
        ))
      }
    } else {
      Err(custom_error(
        "InvalidSpecifier",
-        format!("An invalid specifier was requested: {}", args.specifier),
+        format!("An invalid specifier was requested: {}", load_specifier),
      ))
    }
  }
@@ -131,8 +144,10 @@ mod ts {
    op_crate_libs.insert("deno.url", deno_url::get_declaration());
    op_crate_libs.insert("deno.web", deno_web::get_declaration());
    op_crate_libs.insert("deno.fetch", deno_fetch::get_declaration());
+    op_crate_libs.insert("deno.webgpu", deno_webgpu_get_declaration());
    op_crate_libs.insert("deno.websocket", deno_websocket::get_declaration());
    op_crate_libs.insert("deno.webstorage", deno_webstorage::get_declaration());
+    op_crate_libs.insert("deno.canvas", deno_canvas::get_declaration());
    op_crate_libs.insert("deno.crypto", deno_crypto::get_declaration());
    op_crate_libs.insert(
      "deno.broadcast_channel",

@@ -168,8 +183,10 @@ mod ts {
      "es2015.symbol",
      "es2015.symbol.wellknown",
      "es2016.array.include",
+      "es2016.intl",
      "es2016",
      "es2017",
+      "es2017.date",
      "es2017.intl",
      "es2017.object",
      "es2017.sharedmemory",

@@ -211,9 +228,19 @@ mod ts {
      "es2022.string",
      "es2023",
      "es2023.array",
+      "es2023.collection",
+      "es2023.intl",
      "esnext",
      "esnext.array",
+      "esnext.collection",
+      "esnext.decorators",
+      "esnext.disposable",
      "esnext.intl",
+      "esnext.iterator",
+      "esnext.object",
+      "esnext.promise",
+      "esnext.regexp",
+      "esnext.string",
    ];

    let path_dts = cwd.join("tsc/dts");

@@ -241,33 +268,40 @@ mod ts {
    )
    .unwrap();
-    let output = create_snapshot(CreateSnapshotOptions {
-      cargo_manifest_dir: env!("CARGO_MANIFEST_DIR"),
-      snapshot_path,
-      startup_snapshot: None,
-      extensions: vec![deno_tsc::init_ops_and_esm(
-        op_crate_libs,
-        build_libs,
-        path_dts,
-      )],
+    let output = create_snapshot(
+      CreateSnapshotOptions {
+        cargo_manifest_dir: env!("CARGO_MANIFEST_DIR"),
+        startup_snapshot: None,
+        extensions: vec![deno_tsc::init_ops_and_esm(
+          op_crate_libs,
+          build_libs,
+          path_dts,
+        )],
+        extension_transpiler: None,
+        with_runtime_cb: None,
+        skip_op_registration: false,
+      },
+      None,
+    )
+    .unwrap();

    // NOTE(bartlomieju): Compressing the TSC snapshot in debug build took
    // ~45s on M1 MacBook Pro; without compression it took ~1s.
-    // Thus we're not not using compressed snapshot, trading off
+    // Thus we're not using compressed snapshot, trading off
    // a lot of build time for some startup time in debug build.
-      #[cfg(debug_assertions)]
-      compression_cb: None,
-      #[cfg(not(debug_assertions))]
-      compression_cb: Some(Box::new(|vec, snapshot_slice| {
-        eprintln!("Compressing TSC snapshot...");
-        vec.extend_from_slice(
-          &zstd::bulk::compress(snapshot_slice, 22)
-            .expect("snapshot compression failed"),
-        );
-      })),
-      with_runtime_cb: None,
-    });
+    let mut file = std::fs::File::create(snapshot_path).unwrap();
+    if cfg!(debug_assertions) {
+      file.write_all(&output.output).unwrap();
+    } else {
+      let mut vec = Vec::with_capacity(output.output.len());
+      vec.extend((output.output.len() as u32).to_le_bytes());
+      vec.extend_from_slice(
+        &zstd::bulk::compress(&output.output, 22)
+          .expect("snapshot compression failed"),
+      );
+      file.write_all(&vec).unwrap();
+    }

    for path in output.files_loaded_during_snapshot {
      println!("cargo:rerun-if-changed={}", path.display());
    }
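For orientation, the length-prefixed format written above (a 4-byte little-endian uncompressed size followed by the zstd stream) would be read back roughly like this; the function name is illustrative, not the actual loader code:

    fn decompress_snapshot(data: &[u8]) -> Vec<u8> {
      // First 4 bytes: uncompressed length, little endian, as written above.
      let size = u32::from_le_bytes(data[0..4].try_into().unwrap()) as usize;
      // Remainder: the zstd-compressed snapshot bytes.
      zstd::bulk::decompress(&data[4..], size)
        .expect("snapshot decompression failed")
    }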
@@ -275,7 +309,7 @@ mod ts {
  pub(crate) fn version() -> String {
    let file_text = std::fs::read_to_string("tsc/00_typescript.js").unwrap();
    let version_text = " version = \"";
    for line in file_text.lines() {
      if let Some(index) = line.find(version_text) {
        let remaining_line = &line[index + version_text.len()..];
@@ -286,87 +320,21 @@ mod ts {
    }
  }

-// Duplicated in `ops/mod.rs`. Keep in sync!
-deno_core::extension!(
-  cli,
-  deps = [runtime],
-  esm_entry_point = "ext:cli/99_main.js",
-  esm = [
-    dir "js",
-    "40_testing.js",
-    "99_main.js"
-  ],
-  customizer = |ext: &mut deno_core::Extension| {
-    ext.esm_files.to_mut().push(ExtensionFileSource {
-      specifier: "ext:cli/runtime/js/99_main.js",
-      code: ExtensionFileSourceCode::LoadedFromFsDuringSnapshot(
-        deno_runtime::js::PATH_FOR_99_MAIN_JS,
-      ),
-    });
-  }
-);
-
-#[cfg(not(feature = "__runtime_js_sources"))]
-#[must_use = "The files listed by create_cli_snapshot should be printed as 'cargo:rerun-if-changed' lines"]
-fn create_cli_snapshot(snapshot_path: PathBuf) -> CreateSnapshotOutput {
-  use deno_core::Extension;
-  use deno_runtime::deno_cache::SqliteBackedCache;
-  use deno_runtime::deno_http::DefaultHttpPropertyExtractor;
-  use deno_runtime::deno_kv::sqlite::SqliteDbHandler;
-  use deno_runtime::permissions::PermissionsContainer;
-  use std::sync::Arc;
-
-  // NOTE(bartlomieju): ordering is important here, keep it in sync with
-  // `runtime/worker.rs`, `runtime/web_worker.rs` and `runtime/build.rs`!
-  let fs = Arc::new(deno_fs::RealFs);
-  let extensions: Vec<Extension> = vec![
-    deno_webidl::deno_webidl::init_ops(),
-    deno_console::deno_console::init_ops(),
-    deno_url::deno_url::init_ops(),
-    deno_web::deno_web::init_ops::<PermissionsContainer>(
-      Default::default(),
-      Default::default(),
-    ),
-    deno_fetch::deno_fetch::init_ops::<PermissionsContainer>(Default::default()),
-    deno_cache::deno_cache::init_ops::<SqliteBackedCache>(None),
-    deno_websocket::deno_websocket::init_ops::<PermissionsContainer>(
-      "".to_owned(),
-      None,
-      None,
-    ),
-    deno_webstorage::deno_webstorage::init_ops(None),
-    deno_crypto::deno_crypto::init_ops(None),
-    deno_broadcast_channel::deno_broadcast_channel::init_ops(
-      deno_broadcast_channel::InMemoryBroadcastChannel::default(),
-      false, // No --unstable.
-    ),
-    deno_ffi::deno_ffi::init_ops::<PermissionsContainer>(false),
-    deno_net::deno_net::init_ops::<PermissionsContainer>(
-      None, false, // No --unstable.
-      None,
-    ),
-    deno_tls::deno_tls::init_ops(),
-    deno_kv::deno_kv::init_ops(
-      SqliteDbHandler::<PermissionsContainer>::new(None),
-      false, // No --unstable.
-    ),
-    deno_napi::deno_napi::init_ops::<PermissionsContainer>(),
-    deno_http::deno_http::init_ops::<DefaultHttpPropertyExtractor>(),
-    deno_io::deno_io::init_ops(Default::default()),
-    deno_fs::deno_fs::init_ops::<PermissionsContainer>(false, fs.clone()),
-    deno_node::deno_node::init_ops::<PermissionsContainer>(None, fs),
-    deno_runtime::runtime::init_ops(),
-    cli::init_ops_and_esm(), // NOTE: This needs to be init_ops_and_esm!
-  ];
-
-  create_snapshot(CreateSnapshotOptions {
-    cargo_manifest_dir: env!("CARGO_MANIFEST_DIR"),
-    snapshot_path,
-    startup_snapshot: deno_runtime::js::deno_isolate_init(),
-    extensions,
-    compression_cb: None,
-    with_runtime_cb: None,
-  })
-}
+#[cfg(not(feature = "hmr"))]
+fn create_cli_snapshot(snapshot_path: PathBuf) {
+  use deno_runtime::ops::bootstrap::SnapshotOptions;
+
+  let snapshot_options = SnapshotOptions {
+    ts_version: ts::version(),
+    v8_version: deno_core::v8::VERSION_STRING,
+    target: std::env::var("TARGET").unwrap(),
+  };
+
+  deno_runtime::snapshot::create_runtime_snapshot(
+    snapshot_path,
+    snapshot_options,
+    vec![],
+  );
+}
fn git_commit_hash() -> String {

@@ -397,52 +365,18 @@ fn main() {
    return;
  }
deno_napi::print_linker_flags("deno");
deno_napi::print_linker_flags("denort");
  // Host snapshots won't work when cross compiling.
  let target = env::var("TARGET").unwrap();
  let host = env::var("HOST").unwrap();
-  if target != host {
+  let skip_cross_check =
+    env::var("DENO_SKIP_CROSS_BUILD_CHECK").map_or(false, |v| v == "1");
+  if !skip_cross_check && target != host {
    panic!("Cross compiling with snapshot is not supported.");
  }
let symbols_path = std::path::Path::new("napi").join(
format!("generated_symbol_exports_list_{}.def", env::consts::OS).as_str(),
)
.canonicalize()
.expect(
"Missing symbols list! Generate using tools/napi/generate_symbols_lists.js",
);
#[cfg(target_os = "windows")]
println!(
"cargo:rustc-link-arg-bin=deno=/DEF:{}",
symbols_path.display()
);
#[cfg(target_os = "macos")]
println!(
"cargo:rustc-link-arg-bin=deno=-Wl,-exported_symbols_list,{}",
symbols_path.display()
);
#[cfg(target_os = "linux")]
{
// If a custom compiler is set, the glibc version is not reliable.
// Here, we assume that if a custom compiler is used, that it will be modern enough to support a dynamic symbol list.
if env::var("CC").is_err()
&& glibc_version::get_version()
.map(|ver| ver.major <= 2 && ver.minor < 35)
.unwrap_or(false)
{
println!("cargo:warning=Compiling with all symbols exported, this will result in a larger binary. Please use glibc 2.35 or later for an optimised build.");
println!("cargo:rustc-link-arg-bin=deno=-rdynamic");
} else {
println!(
"cargo:rustc-link-arg-bin=deno=-Wl,--export-dynamic-symbol-list={}",
symbols_path.display()
);
}
}
  // To debug snapshot issues uncomment:
  // op_fetch_asset::trace_serializer();
@@ -459,7 +393,7 @@ fn main() {
  );

  let ts_version = ts::version();
-  debug_assert_eq!(ts_version, "5.1.6"); // bump this assertion when it changes
+  debug_assert_eq!(ts_version, "5.6.2"); // bump this assertion when it changes
  println!("cargo:rustc-env=TS_VERSION={}", ts_version);
  println!("cargo:rerun-if-env-changed=TS_VERSION");
@@ -472,13 +406,10 @@ fn main() {
  let compiler_snapshot_path = o.join("COMPILER_SNAPSHOT.bin");
  ts::create_compiler_snapshot(compiler_snapshot_path, &c);

-  #[cfg(not(feature = "__runtime_js_sources"))]
+  #[cfg(not(feature = "hmr"))]
  {
    let cli_snapshot_path = o.join("CLI_SNAPSHOT.bin");
-    let output = create_cli_snapshot(cli_snapshot_path);
-    for path in output.files_loaded_during_snapshot {
-      println!("cargo:rerun-if-changed={}", path.display())
-    }
+    create_cli_snapshot(cli_snapshot_path);
  }

  #[cfg(target_os = "windows")]
@@ -492,3 +423,11 @@ fn main() {
    res.compile().unwrap();
  }
}
fn deno_webgpu_get_declaration() -> PathBuf {
let manifest_dir = std::path::Path::new(env!("CARGO_MANIFEST_DIR"));
manifest_dir
.join("tsc")
.join("dts")
.join("lib.deno_webgpu.d.ts")
}
cli/cache/cache_db.rs
@@ -1,4 +1,4 @@
-// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
+// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.

use deno_core::error::AnyError;
use deno_core::parking_lot::Mutex;
@@ -10,9 +10,52 @@ use deno_runtime::deno_webstorage::rusqlite::OptionalExtension;
use deno_runtime::deno_webstorage::rusqlite::Params;
use once_cell::sync::OnceCell;
use std::io::IsTerminal;
+use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
use super::FastInsecureHasher;
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct CacheDBHash(u64);
impl CacheDBHash {
pub fn new(hash: u64) -> Self {
Self(hash)
}
pub fn from_source(source: impl std::hash::Hash) -> Self {
Self::new(
// always write in the deno version just in case
// the clearing on deno version change doesn't work
FastInsecureHasher::new_deno_versioned()
.write_hashable(source)
.finish(),
)
}
}
impl rusqlite::types::ToSql for CacheDBHash {
fn to_sql(&self) -> rusqlite::Result<rusqlite::types::ToSqlOutput<'_>> {
Ok(rusqlite::types::ToSqlOutput::Owned(
// sqlite doesn't support u64, but it does support i64 so store
// this value "incorrectly" as i64 then convert back to u64 on read
rusqlite::types::Value::Integer(self.0 as i64),
))
}
}
impl rusqlite::types::FromSql for CacheDBHash {
fn column_result(
value: rusqlite::types::ValueRef,
) -> rusqlite::types::FromSqlResult<Self> {
match value {
rusqlite::types::ValueRef::Integer(i) => Ok(Self::new(i as u64)),
_ => Err(rusqlite::types::FromSqlError::InvalidType),
}
}
}
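The `as i64` / `as u64` casts above are lossless bit-for-bit round trips, which is what makes storing the hash "incorrectly" safe; values above i64::MAX merely show up as negative integers inside sqlite. A standalone sketch of the invariant (illustrative only):

    fn main() {
      for hash in [0u64, 1, u64::MAX / 2, u64::MAX - 1, u64::MAX] {
        let stored: i64 = hash as i64; // what sqlite stores
        let restored: u64 = stored as u64; // what we read back
        assert_eq!(restored, hash);
      }
    }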
/// What should the cache do on failure?
#[derive(Default)]
pub enum CacheFailure {
@@ -40,21 +83,16 @@ pub struct CacheDBConfiguration {
impl CacheDBConfiguration {
  fn create_combined_sql(&self) -> String {
    format!(
-      "
-      PRAGMA journal_mode=TRUNCATE;
-      PRAGMA synchronous=NORMAL;
-      PRAGMA temp_store=memory;
-      PRAGMA page_size=4096;
-      PRAGMA mmap_size=6000000;
-      PRAGMA optimize;
-
-      CREATE TABLE IF NOT EXISTS info (
-        key TEXT PRIMARY KEY,
-        value TEXT NOT NULL
-      );
-      {}
-      ",
+      concat!(
+        "PRAGMA journal_mode=WAL;",
+        "PRAGMA synchronous=NORMAL;",
+        "PRAGMA temp_store=memory;",
+        "PRAGMA page_size=4096;",
+        "PRAGMA mmap_size=6000000;",
+        "PRAGMA optimize;",
+        "CREATE TABLE IF NOT EXISTS info (key TEXT PRIMARY KEY, value TEXT NOT NULL);",
+        "{}",
+      ),
      self.table_initializer
    )
  }
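The combined string is ordinary semicolon-separated SQL, so it can be applied in a single call. A minimal rusqlite sketch of running such a setup script (illustrative, not the CacheDB initialization code itself):

    use rusqlite::Connection;

    fn main() -> rusqlite::Result<()> {
      let conn = Connection::open_in_memory()?;
      // execute_batch runs several semicolon-separated statements at once.
      conn.execute_batch(
        "PRAGMA synchronous=NORMAL;
         CREATE TABLE IF NOT EXISTS info (key TEXT PRIMARY KEY, value TEXT NOT NULL);",
      )?;
      Ok(())
    }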
@@ -178,7 +216,7 @@ impl CacheDB {
  /// Open the connection in memory or on disk.
  fn actually_open_connection(
    &self,
-    path: &Option<PathBuf>,
+    path: Option<&Path>,
  ) -> Result<Connection, rusqlite::Error> {
    match path {
      // This should never fail unless something is very wrong

@@ -224,7 +262,7 @@ impl CacheDB {
  /// Open and initialize a connection.
  fn open_connection_and_init(
    &self,
-    path: &Option<PathBuf>,
+    path: Option<&Path>,
  ) -> Result<Connection, AnyError> {
    let conn = self.actually_open_connection(path)?;
    Self::initialize_connection(self.config, &conn, self.version)?;
@@ -234,83 +272,9 @@ impl CacheDB {
  /// This function represents the policy for dealing with corrupted cache files. We try fairly aggressively
  /// to repair the situation, and if we can't, we prefer to log noisily and continue with in-memory caches.
  fn open_connection(&self) -> Result<ConnectionState, AnyError> {
+    open_connection(self.config, self.path.as_deref(), |maybe_path| {
+      self.open_connection_and_init(maybe_path)
+    })
-    // Success on first try? We hope that this is the case.
-    let err = match self.open_connection_and_init(&self.path) {
-      Ok(conn) => return Ok(ConnectionState::Connected(conn)),
Err(err) => err,
};
if self.path.is_none() {
// If an in-memory DB fails, that's game over
log::error!("Failed to initialize in-memory cache database.");
return Err(err);
}
let path = self.path.as_ref().unwrap();
// There are rare times in the tests when we can't initialize a cache DB the first time, but it succeeds the second time, so
// we don't log these at a debug level.
log::trace!(
"Could not initialize cache database '{}', retrying... ({err:?})",
path.to_string_lossy(),
);
// Try a second time
let err = match self.open_connection_and_init(&self.path) {
Ok(conn) => return Ok(ConnectionState::Connected(conn)),
Err(err) => err,
};
// Failed, try deleting it
let is_tty = std::io::stderr().is_terminal();
log::log!(
if is_tty { log::Level::Warn } else { log::Level::Trace },
"Could not initialize cache database '{}', deleting and retrying... ({err:?})",
path.to_string_lossy()
);
if std::fs::remove_file(path).is_ok() {
// Try a third time if we successfully deleted it
let res = self.open_connection_and_init(&self.path);
if let Ok(conn) = res {
return Ok(ConnectionState::Connected(conn));
};
}
match self.config.on_failure {
CacheFailure::InMemory => {
log::log!(
if is_tty {
log::Level::Error
} else {
log::Level::Trace
},
"Failed to open cache file '{}', opening in-memory cache.",
path.to_string_lossy()
);
Ok(ConnectionState::Connected(
self.open_connection_and_init(&None)?,
))
}
CacheFailure::Blackhole => {
log::log!(
if is_tty {
log::Level::Error
} else {
log::Level::Trace
},
"Failed to open cache file '{}', performance may be degraded.",
path.to_string_lossy()
);
Ok(ConnectionState::Blackhole)
}
CacheFailure::Error => {
log::error!(
"Failed to open cache file '{}', expect further errors.",
path.to_string_lossy()
);
Err(err)
}
}
  }

  fn initialize<'a>(

@@ -397,8 +361,105 @@ impl CacheDB {
  }
}
/// This function represents the policy for dealing with corrupted cache files. We try fairly aggressively
/// to repair the situation, and if we can't, we prefer to log noisily and continue with in-memory caches.
fn open_connection(
config: &CacheDBConfiguration,
path: Option<&Path>,
open_connection_and_init: impl Fn(Option<&Path>) -> Result<Connection, AnyError>,
) -> Result<ConnectionState, AnyError> {
// Success on first try? We hope that this is the case.
let err = match open_connection_and_init(path) {
Ok(conn) => return Ok(ConnectionState::Connected(conn)),
Err(err) => err,
};
let Some(path) = path.as_ref() else {
// If an in-memory DB fails, that's game over
log::error!("Failed to initialize in-memory cache database.");
return Err(err);
};
// ensure the parent directory exists
if let Some(parent) = path.parent() {
match std::fs::create_dir_all(parent) {
Ok(_) => {
log::debug!("Created parent directory for cache db.");
}
Err(err) => {
log::debug!("Failed creating the cache db parent dir: {:#}", err);
}
}
}
// There are rare times in the tests when we can't initialize a cache DB the first time, but it succeeds the second time, so
// we don't log these at a debug level.
log::trace!(
"Could not initialize cache database '{}', retrying... ({err:?})",
path.to_string_lossy(),
);
// Try a second time
let err = match open_connection_and_init(Some(path)) {
Ok(conn) => return Ok(ConnectionState::Connected(conn)),
Err(err) => err,
};
// Failed, try deleting it
let is_tty = std::io::stderr().is_terminal();
log::log!(
if is_tty { log::Level::Warn } else { log::Level::Trace },
"Could not initialize cache database '{}', deleting and retrying... ({err:?})",
path.to_string_lossy()
);
if std::fs::remove_file(path).is_ok() {
// Try a third time if we successfully deleted it
let res = open_connection_and_init(Some(path));
if let Ok(conn) = res {
return Ok(ConnectionState::Connected(conn));
};
}
match config.on_failure {
CacheFailure::InMemory => {
log::log!(
if is_tty {
log::Level::Error
} else {
log::Level::Trace
},
"Failed to open cache file '{}', opening in-memory cache.",
path.to_string_lossy()
);
Ok(ConnectionState::Connected(open_connection_and_init(None)?))
}
CacheFailure::Blackhole => {
log::log!(
if is_tty {
log::Level::Error
} else {
log::Level::Trace
},
"Failed to open cache file '{}', performance may be degraded.",
path.to_string_lossy()
);
Ok(ConnectionState::Blackhole)
}
CacheFailure::Error => {
log::error!(
"Failed to open cache file '{}', expect further errors.",
path.to_string_lossy()
);
Err(err)
}
}
}
#[cfg(test)]
mod tests {
use deno_core::anyhow::anyhow;
use test_util::TempDir;
  use super::*;

  static TEST_DB: CacheDBConfiguration = CacheDBConfiguration {
@@ -409,15 +470,15 @@ mod tests {
  };

  static TEST_DB_BLACKHOLE: CacheDBConfiguration = CacheDBConfiguration {
-    table_initializer: "create table if not exists test(value TEXT);",
-    on_version_change: "delete from test;",
+    table_initializer: "syntax error", // intentionally cause an error
+    on_version_change: "",
    preheat_queries: &[],
    on_failure: CacheFailure::Blackhole,
  };

  static TEST_DB_ERROR: CacheDBConfiguration = CacheDBConfiguration {
-    table_initializer: "create table if not exists test(value TEXT);",
-    on_version_change: "delete from test;",
+    table_initializer: "syntax error", // intentionally cause an error
+    on_version_change: "",
    preheat_queries: &[],
    on_failure: CacheFailure::Error,
  };

@@ -429,8 +490,6 @@ mod tests {
    on_failure: CacheFailure::InMemory,
  };
static FAILURE_PATH: &str = "/tmp/this/doesnt/exist/so/will/always/fail";
  #[tokio::test]
  async fn simple_database() {
    let db = CacheDB::in_memory(&TEST_DB, "1.0");

@@ -443,7 +502,7 @@ mod tests {
        Ok(row.get::<_, String>(0).unwrap())
      })
      .unwrap();
-    assert_eq!(Some("1".into()), res);
+    assert_eq!(res, Some("1".into()));
  }

  #[tokio::test]

@@ -455,22 +514,23 @@ mod tests {
  #[tokio::test]
-  async fn failure_mode_in_memory() {
-    let db = CacheDB::from_path(&TEST_DB, FAILURE_PATH.into(), "1.0");
-    db.ensure_connected()
-      .expect("Should have created a database");
-
-    db.execute("insert into test values (?1)", [1]).unwrap();
-    let res = db
-      .query_row("select * from test", [], |row| {
-        Ok(row.get::<_, String>(0).unwrap())
-      })
-      .unwrap();
-    assert_eq!(Some("1".into()), res);
+  async fn failure_mode_in_memory() {
+    let temp_dir = TempDir::new();
+    let path = temp_dir.path().join("data").to_path_buf();
+    let state = open_connection(&TEST_DB, Some(path.as_path()), |maybe_path| {
+      match maybe_path {
+        Some(_) => Err(anyhow!("fail")),
+        None => Ok(Connection::open_in_memory().unwrap()),
+      }
+    })
+    .unwrap();
+    assert!(matches!(state, ConnectionState::Connected(_)));
  }

  #[tokio::test]
  async fn failure_mode_blackhole() {
-    let db = CacheDB::from_path(&TEST_DB_BLACKHOLE, FAILURE_PATH.into(), "1.0");
+    let temp_dir = TempDir::new();
+    let path = temp_dir.path().join("data");
+    let db = CacheDB::from_path(&TEST_DB_BLACKHOLE, path.to_path_buf(), "1.0");
    db.ensure_connected()
      .expect("Should have created a database");
@@ -480,12 +540,14 @@ mod tests {
        Ok(row.get::<_, String>(0).unwrap())
      })
      .unwrap();
-    assert_eq!(None, res);
+    assert_eq!(res, None);
  }

  #[tokio::test]
  async fn failure_mode_error() {
-    let db = CacheDB::from_path(&TEST_DB_ERROR, FAILURE_PATH.into(), "1.0");
+    let temp_dir = TempDir::new();
+    let path = temp_dir.path().join("data");
+    let db = CacheDB::from_path(&TEST_DB_ERROR, path.to_path_buf(), "1.0");
    db.ensure_connected().expect_err("Should have failed");

    db.execute("insert into test values (?1)", [1])
@@ -495,4 +557,32 @@ mod tests {
      })
      .expect_err("Should have failed");
  }
#[test]
fn cache_db_hash_max_u64_value() {
assert_same_serialize_deserialize(CacheDBHash::new(u64::MAX));
assert_same_serialize_deserialize(CacheDBHash::new(u64::MAX - 1));
assert_same_serialize_deserialize(CacheDBHash::new(u64::MIN));
assert_same_serialize_deserialize(CacheDBHash::new(u64::MIN + 1));
}
fn assert_same_serialize_deserialize(original_hash: CacheDBHash) {
use rusqlite::types::FromSql;
use rusqlite::types::ValueRef;
use rusqlite::ToSql;
let value = original_hash.to_sql().unwrap();
match value {
rusqlite::types::ToSqlOutput::Owned(rusqlite::types::Value::Integer(
value,
)) => {
let value_ref = ValueRef::Integer(value);
assert_eq!(
original_hash,
CacheDBHash::column_result(value_ref).unwrap()
);
}
_ => unreachable!(),
}
}
}
cli/cache/caches.rs
@@ -1,4 +1,4 @@
-// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
+// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.

use std::path::PathBuf;
use std::sync::Arc;
@@ -8,18 +8,22 @@ use once_cell::sync::OnceCell;
use super::cache_db::CacheDB;
use super::cache_db::CacheDBConfiguration;
use super::check::TYPE_CHECK_CACHE_DB;
+use super::code_cache::CODE_CACHE_DB;
use super::deno_dir::DenoDirProvider;
+use super::fast_check::FAST_CHECK_CACHE_DB;
use super::incremental::INCREMENTAL_CACHE_DB;
+use super::module_info::MODULE_INFO_CACHE_DB;
use super::node::NODE_ANALYSIS_CACHE_DB;
-use super::parsed_source::PARSED_SOURCE_CACHE_DB;

pub struct Caches {
  dir_provider: Arc<DenoDirProvider>,
  fmt_incremental_cache_db: OnceCell<CacheDB>,
  lint_incremental_cache_db: OnceCell<CacheDB>,
  dep_analysis_db: OnceCell<CacheDB>,
+  fast_check_db: OnceCell<CacheDB>,
  node_analysis_db: OnceCell<CacheDB>,
  type_checking_cache_db: OnceCell<CacheDB>,
+  code_cache_db: OnceCell<CacheDB>,
}
impl Caches {

@@ -29,8 +33,10 @@ impl Caches {
      fmt_incremental_cache_db: Default::default(),
      lint_incremental_cache_db: Default::default(),
      dep_analysis_db: Default::default(),
+      fast_check_db: Default::default(),
      node_analysis_db: Default::default(),
      type_checking_cache_db: Default::default(),
+      code_cache_db: Default::default(),
    }
  }
@@ -42,9 +48,13 @@ impl Caches {
    cell
      .get_or_init(|| {
        if let Some(path) = path {
-          CacheDB::from_path(config, path, crate::version::deno())
+          CacheDB::from_path(
+            config,
+            path,
+            crate::version::DENO_VERSION_INFO.deno,
+          )
        } else {
-          CacheDB::in_memory(config, crate::version::deno())
+          CacheDB::in_memory(config, crate::version::DENO_VERSION_INFO.deno)
        }
      })
      .clone()
@@ -77,7 +87,7 @@ impl Caches {
  pub fn dep_analysis_db(&self) -> CacheDB {
    Self::make_db(
      &self.dep_analysis_db,
-      &PARSED_SOURCE_CACHE_DB,
+      &MODULE_INFO_CACHE_DB,
      self
        .dir_provider
        .get_or_create()

@@ -86,6 +96,18 @@ impl Caches {
    )
  }
pub fn fast_check_db(&self) -> CacheDB {
Self::make_db(
&self.fast_check_db,
&FAST_CHECK_CACHE_DB,
self
.dir_provider
.get_or_create()
.ok()
.map(|dir| dir.fast_check_cache_db_file_path()),
)
}
  pub fn node_analysis_db(&self) -> CacheDB {
    Self::make_db(
      &self.node_analysis_db,

@@ -109,4 +131,16 @@ impl Caches {
        .map(|dir| dir.type_checking_cache_db_file_path()),
    )
  }
pub fn code_cache_db(&self) -> CacheDB {
Self::make_db(
&self.code_cache_db,
&CODE_CACHE_DB,
self
.dir_provider
.get_or_create()
.ok()
.map(|dir| dir.code_cache_db_file_path()),
)
}
}
cli/cache/check.rs
@@ -1,7 +1,8 @@
-// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
+// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.

use super::cache_db::CacheDB;
use super::cache_db::CacheDBConfiguration;
+use super::cache_db::CacheDBHash;
use super::cache_db::CacheFailure;
use deno_ast::ModuleSpecifier;
use deno_core::error::AnyError;

@@ -9,13 +10,13 @@ use deno_runtime::deno_webstorage::rusqlite::params;
pub static TYPE_CHECK_CACHE_DB: CacheDBConfiguration = CacheDBConfiguration { pub static TYPE_CHECK_CACHE_DB: CacheDBConfiguration = CacheDBConfiguration {
table_initializer: concat!( table_initializer: concat!(
"CREATE TABLE IF NOT EXISTS checkcache ( "CREATE TABLE IF NOT EXISTS checkcache (",
check_hash TEXT PRIMARY KEY "check_hash INT PRIMARY KEY",
);", ");",
"CREATE TABLE IF NOT EXISTS tsbuildinfo ( "CREATE TABLE IF NOT EXISTS tsbuildinfo (",
specifier TEXT PRIMARY KEY, "specifier TEXT PRIMARY KEY,",
text TEXT NOT NULL "text TEXT NOT NULL",
);", ");",
), ),
on_version_change: concat!( on_version_change: concat!(
"DELETE FROM checkcache;", "DELETE FROM checkcache;",
@ -37,7 +38,7 @@ impl TypeCheckCache {
Self(db) Self(db)
} }
pub fn has_check_hash(&self, hash: u64) -> bool { pub fn has_check_hash(&self, hash: CacheDBHash) -> bool {
match self.hash_check_hash_result(hash) { match self.hash_check_hash_result(hash) {
Ok(val) => val, Ok(val) => val,
Err(err) => { Err(err) => {
@ -52,14 +53,17 @@ impl TypeCheckCache {
} }
} }
fn hash_check_hash_result(&self, hash: u64) -> Result<bool, AnyError> { fn hash_check_hash_result(
&self,
hash: CacheDBHash,
) -> Result<bool, AnyError> {
self.0.exists( self.0.exists(
"SELECT * FROM checkcache WHERE check_hash=?1 LIMIT 1", "SELECT * FROM checkcache WHERE check_hash=?1 LIMIT 1",
params![hash.to_string()], params![hash],
) )
} }
pub fn add_check_hash(&self, check_hash: u64) { pub fn add_check_hash(&self, check_hash: CacheDBHash) {
if let Err(err) = self.add_check_hash_result(check_hash) { if let Err(err) = self.add_check_hash_result(check_hash) {
if cfg!(debug_assertions) { if cfg!(debug_assertions) {
panic!("Error saving check hash: {err}"); panic!("Error saving check hash: {err}");
@ -69,13 +73,16 @@ impl TypeCheckCache {
} }
} }
fn add_check_hash_result(&self, check_hash: u64) -> Result<(), AnyError> { fn add_check_hash_result(
&self,
check_hash: CacheDBHash,
) -> Result<(), AnyError> {
let sql = " let sql = "
INSERT OR REPLACE INTO INSERT OR REPLACE INTO
checkcache (check_hash) checkcache (check_hash)
VALUES VALUES
(?1)"; (?1)";
self.0.execute(sql, params![&check_hash.to_string(),])?; self.0.execute(sql, params![check_hash])?;
Ok(()) Ok(())
} }
@ -123,10 +130,10 @@ mod test {
let conn = CacheDB::in_memory(&TYPE_CHECK_CACHE_DB, "1.0.0"); let conn = CacheDB::in_memory(&TYPE_CHECK_CACHE_DB, "1.0.0");
let cache = TypeCheckCache::new(conn); let cache = TypeCheckCache::new(conn);
assert!(!cache.has_check_hash(1)); assert!(!cache.has_check_hash(CacheDBHash::new(1)));
cache.add_check_hash(1); cache.add_check_hash(CacheDBHash::new(1));
assert!(cache.has_check_hash(1)); assert!(cache.has_check_hash(CacheDBHash::new(1)));
assert!(!cache.has_check_hash(2)); assert!(!cache.has_check_hash(CacheDBHash::new(2)));
let specifier1 = ModuleSpecifier::parse("file:///test.json").unwrap(); let specifier1 = ModuleSpecifier::parse("file:///test.json").unwrap();
assert_eq!(cache.get_tsbuildinfo(&specifier1), None); assert_eq!(cache.get_tsbuildinfo(&specifier1), None);
@ -137,9 +144,9 @@ mod test {
let conn = cache.0.recreate_with_version("2.0.0"); let conn = cache.0.recreate_with_version("2.0.0");
let cache = TypeCheckCache::new(conn); let cache = TypeCheckCache::new(conn);
assert!(!cache.has_check_hash(1)); assert!(!cache.has_check_hash(CacheDBHash::new(1)));
cache.add_check_hash(1); cache.add_check_hash(CacheDBHash::new(1));
assert!(cache.has_check_hash(1)); assert!(cache.has_check_hash(CacheDBHash::new(1)));
assert_eq!(cache.get_tsbuildinfo(&specifier1), None); assert_eq!(cache.get_tsbuildinfo(&specifier1), None);
cache.set_tsbuildinfo(&specifier1, "test"); cache.set_tsbuildinfo(&specifier1, "test");
assert_eq!(cache.get_tsbuildinfo(&specifier1), Some("test".to_string())); assert_eq!(cache.get_tsbuildinfo(&specifier1), Some("test".to_string()));
@ -148,13 +155,13 @@ mod test {
let conn = cache.0.recreate_with_version("2.0.0"); let conn = cache.0.recreate_with_version("2.0.0");
let cache = TypeCheckCache::new(conn); let cache = TypeCheckCache::new(conn);
assert!(cache.has_check_hash(1)); assert!(cache.has_check_hash(CacheDBHash::new(1)));
assert!(!cache.has_check_hash(2)); assert!(!cache.has_check_hash(CacheDBHash::new(2)));
assert_eq!(cache.get_tsbuildinfo(&specifier1), Some("test".to_string())); assert_eq!(cache.get_tsbuildinfo(&specifier1), Some("test".to_string()));
// adding when already exists should not cause issue // adding when already exists should not cause issue
cache.add_check_hash(1); cache.add_check_hash(CacheDBHash::new(1));
assert!(cache.has_check_hash(1)); assert!(cache.has_check_hash(CacheDBHash::new(1)));
cache.set_tsbuildinfo(&specifier1, "other"); cache.set_tsbuildinfo(&specifier1, "other");
assert_eq!( assert_eq!(
cache.get_tsbuildinfo(&specifier1), cache.get_tsbuildinfo(&specifier1),
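
The schema change above (check_hash moves from TEXT PRIMARY KEY to INT) pairs with the new `CacheDBHash` newtype, which is bound directly via `params![hash]` rather than `hash.to_string()`. `CacheDBHash` itself is not part of this diff; presumably it reinterprets the `u64` hash as an `i64`, since SQLite INTEGER columns are signed 64-bit. A hypothetical sketch of such a conversion:

// Hypothetical CacheDBHash-style wrapper: SQLite INTEGERs are i64, so a u64
// hash can be stored by reinterpreting its bits, which round-trips losslessly.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
struct DbHash(u64);

impl DbHash {
    fn to_sql(self) -> i64 {
        self.0 as i64 // bit-preserving cast
    }
    fn from_sql(v: i64) -> Self {
        DbHash(v as u64) // inverse cast
    }
}

fn main() {
    let h = DbHash(u64::MAX);
    assert_eq!(DbHash::from_sql(h.to_sql()), h);
    println!("{h:?} survives the i64 round-trip");
}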

250 cli/cache/code_cache.rs vendored Normal file

@ -0,0 +1,250 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
use deno_ast::ModuleSpecifier;
use deno_core::error::AnyError;
use deno_runtime::code_cache;
use deno_runtime::deno_webstorage::rusqlite::params;
use super::cache_db::CacheDB;
use super::cache_db::CacheDBConfiguration;
use super::cache_db::CacheDBHash;
use super::cache_db::CacheFailure;
pub static CODE_CACHE_DB: CacheDBConfiguration = CacheDBConfiguration {
table_initializer: concat!(
"CREATE TABLE IF NOT EXISTS codecache (",
"specifier TEXT NOT NULL,",
"type INTEGER NOT NULL,",
"source_hash INTEGER NOT NULL,",
"data BLOB NOT NULL,",
"PRIMARY KEY (specifier, type)",
");"
),
on_version_change: "DELETE FROM codecache;",
preheat_queries: &[],
on_failure: CacheFailure::Blackhole,
};
pub struct CodeCache {
inner: CodeCacheInner,
}
impl CodeCache {
pub fn new(db: CacheDB) -> Self {
Self {
inner: CodeCacheInner::new(db),
}
}
fn ensure_ok<T: Default>(res: Result<T, AnyError>) -> T {
match res {
Ok(x) => x,
Err(err) => {
// TODO(mmastrac): This behavior was inherited from before the refactoring but it probably makes sense to move it into the cache
// at some point.
// should never error here, but if it ever does don't fail
if cfg!(debug_assertions) {
panic!("Error using code cache: {err:#}");
} else {
log::debug!("Error using code cache: {:#}", err);
}
T::default()
}
}
}
pub fn get_sync(
&self,
specifier: &ModuleSpecifier,
code_cache_type: code_cache::CodeCacheType,
source_hash: u64,
) -> Option<Vec<u8>> {
Self::ensure_ok(self.inner.get_sync(
specifier.as_str(),
code_cache_type,
CacheDBHash::new(source_hash),
))
}
pub fn set_sync(
&self,
specifier: &ModuleSpecifier,
code_cache_type: code_cache::CodeCacheType,
source_hash: u64,
data: &[u8],
) {
Self::ensure_ok(self.inner.set_sync(
specifier.as_str(),
code_cache_type,
CacheDBHash::new(source_hash),
data,
));
}
}
impl code_cache::CodeCache for CodeCache {
fn get_sync(
&self,
specifier: &ModuleSpecifier,
code_cache_type: code_cache::CodeCacheType,
source_hash: u64,
) -> Option<Vec<u8>> {
self.get_sync(specifier, code_cache_type, source_hash)
}
fn set_sync(
&self,
specifier: ModuleSpecifier,
code_cache_type: code_cache::CodeCacheType,
source_hash: u64,
data: &[u8],
) {
self.set_sync(&specifier, code_cache_type, source_hash, data);
}
}
struct CodeCacheInner {
conn: CacheDB,
}
impl CodeCacheInner {
pub fn new(conn: CacheDB) -> Self {
Self { conn }
}
pub fn get_sync(
&self,
specifier: &str,
code_cache_type: code_cache::CodeCacheType,
source_hash: CacheDBHash,
) -> Result<Option<Vec<u8>>, AnyError> {
let query = "
SELECT
data
FROM
codecache
WHERE
specifier=?1 AND type=?2 AND source_hash=?3
LIMIT 1";
let params = params![
specifier,
serialize_code_cache_type(code_cache_type),
source_hash,
];
self.conn.query_row(query, params, |row| {
let value: Vec<u8> = row.get(0)?;
Ok(value)
})
}
pub fn set_sync(
&self,
specifier: &str,
code_cache_type: code_cache::CodeCacheType,
source_hash: CacheDBHash,
data: &[u8],
) -> Result<(), AnyError> {
let sql = "
INSERT OR REPLACE INTO
codecache (specifier, type, source_hash, data)
VALUES
(?1, ?2, ?3, ?4)";
let params = params![
specifier,
serialize_code_cache_type(code_cache_type),
source_hash,
data
];
self.conn.execute(sql, params)?;
Ok(())
}
}
fn serialize_code_cache_type(
code_cache_type: code_cache::CodeCacheType,
) -> i64 {
match code_cache_type {
code_cache::CodeCacheType::Script => 0,
code_cache::CodeCacheType::EsModule => 1,
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
pub fn end_to_end() {
let conn = CacheDB::in_memory(&CODE_CACHE_DB, "1.0.0");
let cache = CodeCacheInner::new(conn);
assert!(cache
.get_sync(
"file:///foo/bar.js",
code_cache::CodeCacheType::EsModule,
CacheDBHash::new(1),
)
.unwrap()
.is_none());
let data_esm = vec![1, 2, 3];
cache
.set_sync(
"file:///foo/bar.js",
code_cache::CodeCacheType::EsModule,
CacheDBHash::new(1),
&data_esm,
)
.unwrap();
assert_eq!(
cache
.get_sync(
"file:///foo/bar.js",
code_cache::CodeCacheType::EsModule,
CacheDBHash::new(1),
)
.unwrap()
.unwrap(),
data_esm
);
assert!(cache
.get_sync(
"file:///foo/bar.js",
code_cache::CodeCacheType::Script,
CacheDBHash::new(1),
)
.unwrap()
.is_none());
let data_script = vec![4, 5, 6];
cache
.set_sync(
"file:///foo/bar.js",
code_cache::CodeCacheType::Script,
CacheDBHash::new(1),
&data_script,
)
.unwrap();
assert_eq!(
cache
.get_sync(
"file:///foo/bar.js",
code_cache::CodeCacheType::Script,
CacheDBHash::new(1),
)
.unwrap()
.unwrap(),
data_script
);
assert_eq!(
cache
.get_sync(
"file:///foo/bar.js",
code_cache::CodeCacheType::EsModule,
CacheDBHash::new(1),
)
.unwrap()
.unwrap(),
data_esm
);
}
}
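
Given that schema, a caller checks the cache before compiling and writes back afterwards; a stale `source_hash` simply reads as a miss. A short usage sketch against the `CodeCache` API added in this file (assumes its types are in scope; the tests above show `CacheDB::in_memory(&CODE_CACHE_DB, "1.0.0")` as one way to get a handle):

// Sketch: look up V8 code-cache bytes for a module, persisting them on a miss.
fn warm(cache: &CodeCache, specifier: &deno_ast::ModuleSpecifier, source_hash: u64) {
    let cached = cache.get_sync(
        specifier,
        deno_runtime::code_cache::CodeCacheType::EsModule,
        source_hash,
    );
    if cached.is_none() {
        // ... compile the module, then store the produced cache data ...
        let data = vec![1, 2, 3];
        cache.set_sync(
            specifier,
            deno_runtime::code_cache::CodeCacheType::EsModule,
            source_hash,
            &data,
        );
    }
}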

13 cli/cache/common.rs vendored

@ -1,18 +1,19 @@
// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license. // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
use std::hash::Hasher; use std::hash::Hasher;
/// A very fast insecure hasher that uses the xxHash algorithm. /// A very fast insecure hasher that uses the xxHash algorithm.
#[derive(Default)]
pub struct FastInsecureHasher(twox_hash::XxHash64); pub struct FastInsecureHasher(twox_hash::XxHash64);
impl FastInsecureHasher { impl FastInsecureHasher {
pub fn new() -> Self { pub fn new_without_deno_version() -> Self {
Self::default() Self(Default::default())
} }
pub fn hash(hashable: impl std::hash::Hash) -> u64 { pub fn new_deno_versioned() -> Self {
Self::new().write_hashable(hashable).finish() let mut hasher = Self::new_without_deno_version();
hasher.write_str(crate::version::DENO_VERSION_INFO.deno);
hasher
} }
pub fn write_str(&mut self, text: &str) -> &mut Self { pub fn write_str(&mut self, text: &str) -> &mut Self {
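
The split into `new_without_deno_version` and `new_deno_versioned` makes version salting explicit: seeding the hasher with the CLI version guarantees that every versioned cache key changes across releases, invalidating stale entries with no cleanup pass. A minimal sketch of the idea using std's hasher (the real type wraps `twox_hash::XxHash64`):

use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Salt the hash with the CLI version so keys from an old release never match.
fn versioned_hash(cli_version: &str, value: impl Hash) -> u64 {
    let mut hasher = DefaultHasher::new();
    cli_version.hash(&mut hasher);
    value.hash(&mut hasher);
    hasher.finish()
}

fn main() {
    let old = versioned_hash("1.43.0", "lint config + file path");
    let new = versioned_hash("1.44.0", "lint config + file path");
    assert_ne!(old, new); // same input, new release: cache miss
}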

45 cli/cache/deno_dir.rs vendored

@ -1,4 +1,4 @@
// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license. // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
use once_cell::sync::OnceCell; use once_cell::sync::OnceCell;
@ -33,11 +33,10 @@ impl DenoDirProvider {
/// `DenoDir` serves as coordinator for multiple `DiskCache`s containing them /// `DenoDir` serves as coordinator for multiple `DiskCache`s containing them
/// in single directory that can be controlled with `$DENO_DIR` env variable. /// in single directory that can be controlled with `$DENO_DIR` env variable.
#[derive(Clone)] #[derive(Debug, Clone)]
pub struct DenoDir { pub struct DenoDir {
/// Example: /Users/rld/.deno/ /// Example: /Users/rld/.deno/
/// Note: This is not exposed in order to encourage using re-usable methods. pub root: PathBuf,
root: PathBuf,
/// Used by TsCompiler to cache compiler output. /// Used by TsCompiler to cache compiler output.
pub gen_cache: DiskCache, pub gen_cache: DiskCache,
} }
@ -80,34 +79,46 @@ impl DenoDir {
self.root.display() self.root.display()
} }
/// Path for the V8 code cache.
pub fn code_cache_db_file_path(&self) -> PathBuf {
// bump this version name to invalidate the entire cache
self.root.join("v8_code_cache_v2")
}
/// Path for the incremental cache used for formatting. /// Path for the incremental cache used for formatting.
pub fn fmt_incremental_cache_db_file_path(&self) -> PathBuf { pub fn fmt_incremental_cache_db_file_path(&self) -> PathBuf {
// bump this version name to invalidate the entire cache // bump this version name to invalidate the entire cache
self.root.join("fmt_incremental_cache_v1") self.root.join("fmt_incremental_cache_v2")
} }
/// Path for the incremental cache used for linting. /// Path for the incremental cache used for linting.
pub fn lint_incremental_cache_db_file_path(&self) -> PathBuf { pub fn lint_incremental_cache_db_file_path(&self) -> PathBuf {
// bump this version name to invalidate the entire cache // bump this version name to invalidate the entire cache
self.root.join("lint_incremental_cache_v1") self.root.join("lint_incremental_cache_v2")
} }
/// Path for caching swc dependency analysis. /// Path for caching swc dependency analysis.
pub fn dep_analysis_db_file_path(&self) -> PathBuf { pub fn dep_analysis_db_file_path(&self) -> PathBuf {
// bump this version name to invalidate the entire cache // bump this version name to invalidate the entire cache
self.root.join("dep_analysis_cache_v1") self.root.join("dep_analysis_cache_v2")
}
/// Path for the cache used for fast check.
pub fn fast_check_cache_db_file_path(&self) -> PathBuf {
// bump this version name to invalidate the entire cache
self.root.join("fast_check_cache_v2")
} }
/// Path for caching node analysis. /// Path for caching node analysis.
pub fn node_analysis_db_file_path(&self) -> PathBuf { pub fn node_analysis_db_file_path(&self) -> PathBuf {
// bump this version name to invalidate the entire cache // bump this version name to invalidate the entire cache
self.root.join("node_analysis_cache_v1") self.root.join("node_analysis_cache_v2")
} }
/// Path for the cache used for type checking. /// Path for the cache used for type checking.
pub fn type_checking_cache_db_file_path(&self) -> PathBuf { pub fn type_checking_cache_db_file_path(&self) -> PathBuf {
// bump this version name to invalidate the entire cache // bump this version name to invalidate the entire cache
self.root.join("check_cache_v1") self.root.join("check_cache_v2")
} }
/// Path to the registries cache, used for the lsp. /// Path to the registries cache, used for the lsp.
@ -115,9 +126,9 @@ impl DenoDir {
self.root.join("registries") self.root.join("registries")
} }
/// Path to the dependencies cache folder. /// Path to the remote cache folder.
pub fn deps_folder_path(&self) -> PathBuf { pub fn remote_folder_path(&self) -> PathBuf {
self.root.join("deps") self.root.join("remote")
} }
/// Path to the origin data cache folder. /// Path to the origin data cache folder.
@ -158,7 +169,7 @@ impl DenoDir {
/// To avoid the poorly managed dirs crate /// To avoid the poorly managed dirs crate
#[cfg(not(windows))] #[cfg(not(windows))]
mod dirs { pub mod dirs {
use std::path::PathBuf; use std::path::PathBuf;
pub fn cache_dir() -> Option<PathBuf> { pub fn cache_dir() -> Option<PathBuf> {
@ -216,7 +227,7 @@ mod dirs {
// https://github.com/dirs-dev/dirs-sys-rs/blob/ec7cee0b3e8685573d847f0a0f60aae3d9e07fa2/src/lib.rs#L140-L164 // https://github.com/dirs-dev/dirs-sys-rs/blob/ec7cee0b3e8685573d847f0a0f60aae3d9e07fa2/src/lib.rs#L140-L164
// MIT license. Copyright (c) 2018-2019 dirs-rs contributors // MIT license. Copyright (c) 2018-2019 dirs-rs contributors
#[cfg(windows)] #[cfg(windows)]
mod dirs { pub mod dirs {
use std::ffi::OsString; use std::ffi::OsString;
use std::os::windows::ffi::OsStringExt; use std::os::windows::ffi::OsStringExt;
use std::path::PathBuf; use std::path::PathBuf;
@ -255,6 +266,12 @@ mod dirs {
} }
pub fn home_dir() -> Option<PathBuf> { pub fn home_dir() -> Option<PathBuf> {
if let Some(userprofile) = std::env::var_os("USERPROFILE") {
if !userprofile.is_empty() {
return Some(PathBuf::from(userprofile));
}
}
known_folder(&knownfolders::FOLDERID_Profile) known_folder(&knownfolders::FOLDERID_Profile)
} }
} }
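
All of the `_v1` to `_v2` renames above use the invalidation trick named in the comments: bumping the file name points Deno at a brand-new SQLite file, so old and new binaries never contend over one database and no migration is needed. A sketch of the naming scheme:

use std::path::{Path, PathBuf};

// One file per cache schema revision; bumping the version abandons the old file.
fn cache_db_path(deno_dir_root: &Path, name: &str, schema_version: u32) -> PathBuf {
    deno_dir_root.join(format!("{name}_v{schema_version}"))
}

fn main() {
    let root = Path::new("/home/user/.cache/deno");
    assert_eq!(
        cache_db_path(root, "dep_analysis_cache", 2),
        root.join("dep_analysis_cache_v2")
    );
}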

cli/cache/disk_cache.rs vendored

@ -1,7 +1,7 @@
// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license. // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
use super::CACHE_PERM; use super::CACHE_PERM;
use crate::util::fs::atomic_write_file; use crate::util::fs::atomic_write_file_with_retries;
use deno_cache_dir::url_to_filename; use deno_cache_dir::url_to_filename;
use deno_core::url::Host; use deno_core::url::Host;
@ -14,7 +14,7 @@ use std::path::PathBuf;
use std::path::Prefix; use std::path::Prefix;
use std::str; use std::str;
#[derive(Clone)] #[derive(Debug, Clone)]
pub struct DiskCache { pub struct DiskCache {
pub location: PathBuf, pub location: PathBuf,
} }
@ -120,7 +120,7 @@ impl DiskCache {
pub fn set(&self, filename: &Path, data: &[u8]) -> std::io::Result<()> { pub fn set(&self, filename: &Path, data: &[u8]) -> std::io::Result<()> {
let path = self.location.join(filename); let path = self.location.join(filename);
atomic_write_file(&path, data, CACHE_PERM) atomic_write_file_with_retries(&path, data, CACHE_PERM)
} }
} }
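
`atomic_write_file_with_retries` lives in `crate::util::fs` and is not part of this diff; the usual shape of such a helper is write-to-temp-then-rename, retrying the rename for transient failures (for example a concurrent reader briefly holding the destination open on Windows). A hedged sketch of that shape, not the actual implementation:

use std::io;
use std::path::Path;
use std::thread::sleep;
use std::time::Duration;

// Write to a sibling temp file, then rename over the target; retry the rename
// a few times before giving up. (Simplified: real code would pick a unique
// temp name and set CACHE_PERM on the file.)
fn atomic_write_with_retries(path: &Path, data: &[u8]) -> io::Result<()> {
    let tmp = path.with_extension("tmp");
    std::fs::write(&tmp, data)?;
    let mut last_err = None;
    for _ in 0..3 {
        match std::fs::rename(&tmp, path) {
            Ok(()) => return Ok(()),
            Err(err) => {
                last_err = Some(err);
                sleep(Duration::from_millis(20));
            }
        }
    }
    Err(last_err.unwrap())
}

fn main() -> io::Result<()> {
    atomic_write_with_retries(Path::new("/tmp/example.bin"), b"data")
}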

194 cli/cache/emit.rs vendored

@ -1,35 +1,29 @@
// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license. // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
use std::path::PathBuf; use std::path::PathBuf;
use deno_ast::ModuleSpecifier; use deno_ast::ModuleSpecifier;
use deno_core::anyhow::anyhow; use deno_core::anyhow::anyhow;
use deno_core::error::AnyError; use deno_core::error::AnyError;
use deno_core::serde_json; use deno_core::unsync::sync::AtomicFlag;
use serde::Deserialize;
use serde::Serialize;
use super::DiskCache; use super::DiskCache;
use super::FastInsecureHasher;
#[derive(Debug, Deserialize, Serialize)]
struct EmitMetadata {
pub source_hash: String,
pub emit_hash: String,
}
/// The cache that stores previously emitted files. /// The cache that stores previously emitted files.
#[derive(Clone)]
pub struct EmitCache { pub struct EmitCache {
disk_cache: DiskCache, disk_cache: DiskCache,
cli_version: &'static str, emit_failed_flag: AtomicFlag,
file_serializer: EmitFileSerializer,
} }
impl EmitCache { impl EmitCache {
pub fn new(disk_cache: DiskCache) -> Self { pub fn new(disk_cache: DiskCache) -> Self {
Self { Self {
disk_cache, disk_cache,
cli_version: crate::version::deno(), emit_failed_flag: Default::default(),
file_serializer: EmitFileSerializer {
cli_version: crate::version::DENO_VERSION_INFO.deno,
},
} }
} }
@ -46,38 +40,11 @@ impl EmitCache {
specifier: &ModuleSpecifier, specifier: &ModuleSpecifier,
expected_source_hash: u64, expected_source_hash: u64,
) -> Option<String> { ) -> Option<String> {
let meta_filename = self.get_meta_filename(specifier)?;
let emit_filename = self.get_emit_filename(specifier)?; let emit_filename = self.get_emit_filename(specifier)?;
let bytes = self.disk_cache.get(&emit_filename).ok()?;
// load and verify the meta data file is for this source and CLI version self
let bytes = self.disk_cache.get(&meta_filename).ok()?; .file_serializer
let meta: EmitMetadata = serde_json::from_slice(&bytes).ok()?; .deserialize(bytes, expected_source_hash)
if meta.source_hash != expected_source_hash.to_string() {
return None;
}
// load and verify the emit is for the meta data
let emit_bytes = self.disk_cache.get(&emit_filename).ok()?;
if meta.emit_hash != compute_emit_hash(&emit_bytes, self.cli_version) {
return None;
}
// everything looks good, return it
let emit_text = String::from_utf8(emit_bytes).ok()?;
Some(emit_text)
}
/// Gets the filepath which stores the emit.
pub fn get_emit_filepath(
&self,
specifier: &ModuleSpecifier,
) -> Option<PathBuf> {
Some(
self
.disk_cache
.location
.join(self.get_emit_filename(specifier)?),
)
} }
/// Sets the emit code in the cache. /// Sets the emit code in the cache.
@ -85,15 +52,13 @@ impl EmitCache {
&self, &self,
specifier: &ModuleSpecifier, specifier: &ModuleSpecifier,
source_hash: u64, source_hash: u64,
code: &str, code: &[u8],
) { ) {
if let Err(err) = self.set_emit_code_result(specifier, source_hash, code) { if let Err(err) = self.set_emit_code_result(specifier, source_hash, code) {
// should never error here, but if it ever does don't fail // might error in cases such as a readonly file system
if cfg!(debug_assertions) { log::debug!("Error saving emit data ({}): {}", specifier, err);
panic!("Error saving emit data ({specifier}): {err}"); // assume the cache can't be written to and disable caching to it
} else { self.emit_failed_flag.raise();
log::debug!("Error saving emit data({}): {}", specifier, err);
}
} }
} }
@ -101,36 +66,22 @@ impl EmitCache {
&self, &self,
specifier: &ModuleSpecifier, specifier: &ModuleSpecifier,
source_hash: u64, source_hash: u64,
code: &str, code: &[u8],
) -> Result<(), AnyError> { ) -> Result<(), AnyError> {
let meta_filename = self if self.emit_failed_flag.is_raised() {
.get_meta_filename(specifier) log::debug!("Skipped emit cache save of {}", specifier);
.ok_or_else(|| anyhow!("Could not get meta filename."))?; return Ok(());
}
let emit_filename = self let emit_filename = self
.get_emit_filename(specifier) .get_emit_filename(specifier)
.ok_or_else(|| anyhow!("Could not get emit filename."))?; .ok_or_else(|| anyhow!("Could not get emit filename."))?;
let cache_data = self.file_serializer.serialize(code, source_hash);
// save the metadata self.disk_cache.set(&emit_filename, &cache_data)?;
let metadata = EmitMetadata {
source_hash: source_hash.to_string(),
emit_hash: compute_emit_hash(code.as_bytes(), self.cli_version),
};
self
.disk_cache
.set(&meta_filename, &serde_json::to_vec(&metadata)?)?;
// save the emit source
self.disk_cache.set(&emit_filename, code.as_bytes())?;
Ok(()) Ok(())
} }
fn get_meta_filename(&self, specifier: &ModuleSpecifier) -> Option<PathBuf> {
self
.disk_cache
.get_cache_filename_with_extension(specifier, "meta")
}
fn get_emit_filename(&self, specifier: &ModuleSpecifier) -> Option<PathBuf> { fn get_emit_filename(&self, specifier: &ModuleSpecifier) -> Option<PathBuf> {
self self
.disk_cache .disk_cache
@ -138,16 +89,68 @@ impl EmitCache {
} }
} }
fn compute_emit_hash(bytes: &[u8], cli_version: &str) -> String { const LAST_LINE_PREFIX: &str = "\n// denoCacheMetadata=";
// it's ok to use an insecure hash here because
// if someone can change the emit source then they struct EmitFileSerializer {
// can also change the version hash cli_version: &'static str,
FastInsecureHasher::new() }
.write(bytes)
// emit should not be re-used between cli versions impl EmitFileSerializer {
.write(cli_version.as_bytes()) pub fn deserialize(
.finish() &self,
.to_string() mut bytes: Vec<u8>,
expected_source_hash: u64,
) -> Option<String> {
let last_newline_index = bytes.iter().rposition(|&b| b == b'\n')?;
let (content, last_line) = bytes.split_at(last_newline_index);
let hashes = last_line.strip_prefix(LAST_LINE_PREFIX.as_bytes())?;
let hashes = String::from_utf8_lossy(hashes);
let (source_hash, emit_hash) = hashes.split_once(',')?;
// verify the meta data file is for this source and CLI version
let source_hash = source_hash.parse::<u64>().ok()?;
if source_hash != expected_source_hash {
return None;
}
let emit_hash = emit_hash.parse::<u64>().ok()?;
// prevent using an emit from a different cli version or emits that were tampered with
if emit_hash != self.compute_emit_hash(content) {
return None;
}
// everything looks good, truncate and return it
bytes.truncate(content.len());
String::from_utf8(bytes).ok()
}
pub fn serialize(&self, code: &[u8], source_hash: u64) -> Vec<u8> {
let source_hash = source_hash.to_string();
let emit_hash = self.compute_emit_hash(code).to_string();
let capacity = code.len()
+ LAST_LINE_PREFIX.len()
+ source_hash.len()
+ 1
+ emit_hash.len();
let mut cache_data = Vec::with_capacity(capacity);
cache_data.extend(code);
cache_data.extend(LAST_LINE_PREFIX.as_bytes());
cache_data.extend(source_hash.as_bytes());
cache_data.push(b',');
cache_data.extend(emit_hash.as_bytes());
debug_assert_eq!(cache_data.len(), capacity);
cache_data
}
fn compute_emit_hash(&self, bytes: &[u8]) -> u64 {
// it's ok to use an insecure hash here because
// if someone can change the emit source then they
// can also change the version hash
crate::cache::FastInsecureHasher::new_without_deno_version() // use cli_version property instead
.write(bytes)
// emit should not be re-used between cli versions
.write_str(self.cli_version)
.finish()
}
} }
#[cfg(test)] #[cfg(test)]
@ -162,7 +165,10 @@ mod test {
let disk_cache = DiskCache::new(temp_dir.path().as_path()); let disk_cache = DiskCache::new(temp_dir.path().as_path());
let cache = EmitCache { let cache = EmitCache {
disk_cache: disk_cache.clone(), disk_cache: disk_cache.clone(),
cli_version: "1.0.0", file_serializer: EmitFileSerializer {
cli_version: "1.0.0",
},
emit_failed_flag: Default::default(),
}; };
let specifier1 = let specifier1 =
@ -174,8 +180,8 @@ mod test {
assert_eq!(cache.get_emit_code(&specifier1, 1), None); assert_eq!(cache.get_emit_code(&specifier1, 1), None);
let emit_code1 = "text1".to_string(); let emit_code1 = "text1".to_string();
let emit_code2 = "text2".to_string(); let emit_code2 = "text2".to_string();
cache.set_emit_code(&specifier1, 10, &emit_code1); cache.set_emit_code(&specifier1, 10, emit_code1.as_bytes());
cache.set_emit_code(&specifier2, 2, &emit_code2); cache.set_emit_code(&specifier2, 2, emit_code2.as_bytes());
// providing the incorrect source hash // providing the incorrect source hash
assert_eq!(cache.get_emit_code(&specifier1, 5), None); assert_eq!(cache.get_emit_code(&specifier1, 5), None);
// providing the correct source hash // providing the correct source hash
@ -188,21 +194,27 @@ mod test {
// try changing the cli version (should not load previous ones) // try changing the cli version (should not load previous ones)
let cache = EmitCache { let cache = EmitCache {
disk_cache: disk_cache.clone(), disk_cache: disk_cache.clone(),
cli_version: "2.0.0", file_serializer: EmitFileSerializer {
cli_version: "2.0.0",
},
emit_failed_flag: Default::default(),
}; };
assert_eq!(cache.get_emit_code(&specifier1, 10), None); assert_eq!(cache.get_emit_code(&specifier1, 10), None);
cache.set_emit_code(&specifier1, 5, &emit_code1); cache.set_emit_code(&specifier1, 5, emit_code1.as_bytes());
// recreating the cache should still load the data because the CLI version is the same // recreating the cache should still load the data because the CLI version is the same
let cache = EmitCache { let cache = EmitCache {
disk_cache, disk_cache,
cli_version: "2.0.0", file_serializer: EmitFileSerializer {
cli_version: "2.0.0",
},
emit_failed_flag: Default::default(),
}; };
assert_eq!(cache.get_emit_code(&specifier1, 5), Some(emit_code1)); assert_eq!(cache.get_emit_code(&specifier1, 5), Some(emit_code1));
// adding when already exists should not cause issue // adding when already exists should not cause issue
let emit_code3 = "asdf".to_string(); let emit_code3 = "asdf".to_string();
cache.set_emit_code(&specifier1, 20, &emit_code3); cache.set_emit_code(&specifier1, 20, emit_code3.as_bytes());
assert_eq!(cache.get_emit_code(&specifier1, 5), None); assert_eq!(cache.get_emit_code(&specifier1, 5), None);
assert_eq!(cache.get_emit_code(&specifier1, 20), Some(emit_code3)); assert_eq!(cache.get_emit_code(&specifier1, 20), Some(emit_code3));
} }
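
The rewrite above drops the separate `.meta` JSON file: the emit is now a single file whose final line embeds both hashes as `<code>\n// denoCacheMetadata=<source_hash>,<emit_hash>`, so a stale source or tampered emit just fails to deserialize. A stripped-down re-implementation of the trailer format for illustration (hash verification reduced to plain equality):

const PREFIX: &str = "\n// denoCacheMetadata=";

// Append the metadata trailer to the emitted code.
fn serialize(code: &str, source_hash: u64, emit_hash: u64) -> String {
    format!("{code}{PREFIX}{source_hash},{emit_hash}")
}

// Strip and verify the trailer; None means "treat as a cache miss".
fn deserialize(data: &str, expected_source: u64, expected_emit: u64) -> Option<&str> {
    let idx = data.rfind('\n')?;
    let (content, last_line) = data.split_at(idx);
    let hashes = last_line.strip_prefix(PREFIX)?;
    let (source, emit) = hashes.split_once(',')?;
    if source.parse::<u64>().ok()? != expected_source
        || emit.parse::<u64>().ok()? != expected_emit
    {
        return None;
    }
    Some(content)
}

fn main() {
    let file = serialize("console.log(1);", 10, 99);
    assert_eq!(deserialize(&file, 10, 99), Some("console.log(1);"));
    assert_eq!(deserialize(&file, 11, 99), None); // changed source: miss
}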

169 cli/cache/fast_check.rs vendored Normal file

@ -0,0 +1,169 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
use deno_core::error::AnyError;
use deno_graph::FastCheckCacheItem;
use deno_graph::FastCheckCacheKey;
use deno_runtime::deno_webstorage::rusqlite::params;
use super::cache_db::CacheDB;
use super::cache_db::CacheDBConfiguration;
use super::cache_db::CacheDBHash;
use super::cache_db::CacheFailure;
pub static FAST_CHECK_CACHE_DB: CacheDBConfiguration = CacheDBConfiguration {
table_initializer: concat!(
"CREATE TABLE IF NOT EXISTS fastcheckcache (",
"hash INTEGER PRIMARY KEY,",
"data TEXT NOT NULL",
");"
),
on_version_change: "DELETE FROM fastcheckcache;",
preheat_queries: &[],
on_failure: CacheFailure::Blackhole,
};
#[derive(Clone)]
pub struct FastCheckCache {
inner: FastCheckCacheInner,
}
impl FastCheckCache {
pub fn new(db: CacheDB) -> Self {
Self {
inner: FastCheckCacheInner::new(db),
}
}
fn ensure_ok<T: Default>(res: Result<T, AnyError>) -> T {
match res {
Ok(x) => x,
Err(err) => {
// TODO(mmastrac): This behavior was inherited from before the refactoring but it probably makes sense to move it into the cache
// at some point.
// should never error here, but if it ever does don't fail
if cfg!(debug_assertions) {
panic!("Error using fast check cache: {err:#}");
} else {
log::debug!("Error using fast check cache: {:#}", err);
}
T::default()
}
}
}
}
impl deno_graph::FastCheckCache for FastCheckCache {
fn get(&self, key: FastCheckCacheKey) -> Option<FastCheckCacheItem> {
Self::ensure_ok(self.inner.get(key))
}
fn set(&self, key: FastCheckCacheKey, value: FastCheckCacheItem) {
Self::ensure_ok(self.inner.set(key, &value));
}
}
#[derive(Clone)]
struct FastCheckCacheInner {
conn: CacheDB,
}
impl FastCheckCacheInner {
pub fn new(conn: CacheDB) -> Self {
Self { conn }
}
pub fn get(
&self,
key: FastCheckCacheKey,
) -> Result<Option<FastCheckCacheItem>, AnyError> {
let query = "
SELECT
data
FROM
fastcheckcache
WHERE
hash=?1
LIMIT 1";
let res = self.conn.query_row(
query,
params![CacheDBHash::new(key.as_u64())],
|row| {
let value: Vec<u8> = row.get(0)?;
Ok(bincode::deserialize::<FastCheckCacheItem>(&value)?)
},
)?;
Ok(res)
}
pub fn set(
&self,
key: FastCheckCacheKey,
data: &FastCheckCacheItem,
) -> Result<(), AnyError> {
let sql = "
INSERT OR REPLACE INTO
fastcheckcache (hash, data)
VALUES
(?1, ?2)";
self.conn.execute(
sql,
params![CacheDBHash::new(key.as_u64()), &bincode::serialize(data)?],
)?;
Ok(())
}
}
#[cfg(test)]
mod test {
use std::collections::BTreeSet;
use deno_ast::ModuleSpecifier;
use deno_graph::FastCheckCache as _;
use deno_graph::FastCheckCacheModuleItem;
use deno_graph::FastCheckCacheModuleItemDiagnostic;
use deno_semver::package::PackageNv;
use super::*;
#[test]
pub fn cache_general_use() {
let conn = CacheDB::in_memory(&FAST_CHECK_CACHE_DB, "1.0.0");
let cache = FastCheckCache::new(conn);
let key = FastCheckCacheKey::build(
cache.hash_seed(),
&PackageNv::from_str("@scope/a@1.0.0").unwrap(),
&Default::default(),
);
let cache = cache.inner;
assert!(cache.get(key).unwrap().is_none());
let value = FastCheckCacheItem {
dependencies: BTreeSet::from([
PackageNv::from_str("@scope/b@1.0.0").unwrap()
]),
modules: vec![(
ModuleSpecifier::parse("https://jsr.io/test.ts").unwrap(),
FastCheckCacheModuleItem::Diagnostic(
FastCheckCacheModuleItemDiagnostic { source_hash: 123 },
),
)],
};
cache.set(key, &value).unwrap();
let stored_value = cache.get(key).unwrap().unwrap();
assert_eq!(stored_value, value);
// adding when already exists should not cause issue
cache.set(key, &value).unwrap();
// recreating with same cli version should still have it
let conn = cache.conn.recreate_with_version("1.0.0");
let cache = FastCheckCacheInner::new(conn);
let stored_value = cache.get(key).unwrap().unwrap();
assert_eq!(stored_value, value);
// now changing the cli version should clear it
let conn = cache.conn.recreate_with_version("2.0.0");
let cache = FastCheckCacheInner::new(conn);
assert!(cache.get(key).unwrap().is_none());
}
}
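
The fast-check cache stores a whole `FastCheckCacheItem` as one bincode blob keyed by the `u64` inside `FastCheckCacheKey`. A minimal sketch of that round trip with a stand-in struct (assumes the `serde` derive feature and `bincode` 1.x, which this diff also uses):

use serde::{Deserialize, Serialize};

// Stand-in for FastCheckCacheItem: any serde type packs into the BLOB column.
#[derive(Serialize, Deserialize, PartialEq, Debug)]
struct Item {
    source_hash: u64,
    modules: Vec<String>,
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let item = Item {
        source_hash: 123,
        modules: vec!["https://jsr.io/test.ts".to_string()],
    };
    let bytes = bincode::serialize(&item)?; // value for the `data` column
    let back: Item = bincode::deserialize(&bytes)?;
    assert_eq!(back, item);
    Ok(())
}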

cli/cache/incremental.rs vendored

@ -1,4 +1,4 @@
// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license. // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
use std::collections::HashMap; use std::collections::HashMap;
use std::path::Path; use std::path::Path;
@ -6,23 +6,23 @@ use std::path::PathBuf;
use deno_core::error::AnyError; use deno_core::error::AnyError;
use deno_core::parking_lot::Mutex; use deno_core::parking_lot::Mutex;
use deno_core::serde_json;
use deno_core::unsync::spawn; use deno_core::unsync::spawn;
use deno_core::unsync::JoinHandle; use deno_core::unsync::JoinHandle;
use deno_runtime::deno_webstorage::rusqlite::params; use deno_runtime::deno_webstorage::rusqlite::params;
use serde::Serialize;
use super::cache_db::CacheDB; use super::cache_db::CacheDB;
use super::cache_db::CacheDBConfiguration; use super::cache_db::CacheDBConfiguration;
use super::cache_db::CacheDBHash;
use super::cache_db::CacheFailure; use super::cache_db::CacheFailure;
use super::common::FastInsecureHasher;
pub static INCREMENTAL_CACHE_DB: CacheDBConfiguration = CacheDBConfiguration { pub static INCREMENTAL_CACHE_DB: CacheDBConfiguration = CacheDBConfiguration {
table_initializer: "CREATE TABLE IF NOT EXISTS incrementalcache ( table_initializer: concat!(
file_path TEXT PRIMARY KEY, "CREATE TABLE IF NOT EXISTS incrementalcache (",
state_hash TEXT NOT NULL, "file_path TEXT PRIMARY KEY,",
source_hash TEXT NOT NULL "state_hash INTEGER NOT NULL,",
);", "source_hash INTEGER NOT NULL",
");"
),
on_version_change: "DELETE FROM incrementalcache;", on_version_change: "DELETE FROM incrementalcache;",
preheat_queries: &[], preheat_queries: &[],
// If the cache fails, just ignore all caching attempts // If the cache fails, just ignore all caching attempts
@ -34,7 +34,7 @@ pub static INCREMENTAL_CACHE_DB: CacheDBConfiguration = CacheDBConfiguration {
pub struct IncrementalCache(IncrementalCacheInner); pub struct IncrementalCache(IncrementalCacheInner);
impl IncrementalCache { impl IncrementalCache {
pub fn new<TState: Serialize>( pub fn new<TState: std::hash::Hash>(
db: CacheDB, db: CacheDB,
state: &TState, state: &TState,
initial_file_paths: &[PathBuf], initial_file_paths: &[PathBuf],
@ -56,24 +56,23 @@ impl IncrementalCache {
} }
enum ReceiverMessage { enum ReceiverMessage {
Update(PathBuf, u64), Update(PathBuf, CacheDBHash),
Exit, Exit,
} }
struct IncrementalCacheInner { struct IncrementalCacheInner {
previous_hashes: HashMap<PathBuf, u64>, previous_hashes: HashMap<PathBuf, CacheDBHash>,
sender: tokio::sync::mpsc::UnboundedSender<ReceiverMessage>, sender: tokio::sync::mpsc::UnboundedSender<ReceiverMessage>,
handle: Mutex<Option<JoinHandle<()>>>, handle: Mutex<Option<JoinHandle<()>>>,
} }
impl IncrementalCacheInner { impl IncrementalCacheInner {
pub fn new<TState: Serialize>( pub fn new<TState: std::hash::Hash>(
db: CacheDB, db: CacheDB,
state: &TState, state: &TState,
initial_file_paths: &[PathBuf], initial_file_paths: &[PathBuf],
) -> Self { ) -> Self {
let state_hash = let state_hash = CacheDBHash::from_source(state);
FastInsecureHasher::hash(serde_json::to_string(state).unwrap());
let sql_cache = SqlIncrementalCache::new(db, state_hash); let sql_cache = SqlIncrementalCache::new(db, state_hash);
Self::from_sql_incremental_cache(sql_cache, initial_file_paths) Self::from_sql_incremental_cache(sql_cache, initial_file_paths)
} }
@ -113,13 +112,13 @@ impl IncrementalCacheInner {
pub fn is_file_same(&self, file_path: &Path, file_text: &str) -> bool { pub fn is_file_same(&self, file_path: &Path, file_text: &str) -> bool {
match self.previous_hashes.get(file_path) { match self.previous_hashes.get(file_path) {
Some(hash) => *hash == FastInsecureHasher::hash(file_text), Some(hash) => *hash == CacheDBHash::from_source(file_text),
None => false, None => false,
} }
} }
pub fn update_file(&self, file_path: &Path, file_text: &str) { pub fn update_file(&self, file_path: &Path, file_text: &str) {
let hash = FastInsecureHasher::hash(file_text); let hash = CacheDBHash::from_source(file_text);
if let Some(previous_hash) = self.previous_hashes.get(file_path) { if let Some(previous_hash) = self.previous_hashes.get(file_path) {
if *previous_hash == hash { if *previous_hash == hash {
return; // do not bother updating the db file because nothing has changed return; // do not bother updating the db file because nothing has changed
@ -146,15 +145,15 @@ struct SqlIncrementalCache {
/// A hash of the state used to produce the formatting/linting other than /// A hash of the state used to produce the formatting/linting other than
/// the CLI version. This state is a hash of the configuration and ensures /// the CLI version. This state is a hash of the configuration and ensures
/// we format/lint a file when the configuration changes. /// we format/lint a file when the configuration changes.
state_hash: u64, state_hash: CacheDBHash,
} }
impl SqlIncrementalCache { impl SqlIncrementalCache {
pub fn new(conn: CacheDB, state_hash: u64) -> Self { pub fn new(conn: CacheDB, state_hash: CacheDBHash) -> Self {
Self { conn, state_hash } Self { conn, state_hash }
} }
pub fn get_source_hash(&self, path: &Path) -> Option<u64> { pub fn get_source_hash(&self, path: &Path) -> Option<CacheDBHash> {
match self.get_source_hash_result(path) { match self.get_source_hash_result(path) {
Ok(option) => option, Ok(option) => option,
Err(err) => { Err(err) => {
@ -171,7 +170,7 @@ impl SqlIncrementalCache {
fn get_source_hash_result( fn get_source_hash_result(
&self, &self,
path: &Path, path: &Path,
) -> Result<Option<u64>, AnyError> { ) -> Result<Option<CacheDBHash>, AnyError> {
let query = " let query = "
SELECT SELECT
source_hash source_hash
@ -183,10 +182,10 @@ impl SqlIncrementalCache {
LIMIT 1"; LIMIT 1";
let res = self.conn.query_row( let res = self.conn.query_row(
query, query,
params![path.to_string_lossy(), self.state_hash.to_string()], params![path.to_string_lossy(), self.state_hash],
|row| { |row| {
let hash: String = row.get(0)?; let hash: CacheDBHash = row.get(0)?;
Ok(hash.parse::<u64>()?) Ok(hash)
}, },
)?; )?;
Ok(res) Ok(res)
@ -195,7 +194,7 @@ impl SqlIncrementalCache {
pub fn set_source_hash( pub fn set_source_hash(
&self, &self,
path: &Path, path: &Path,
source_hash: u64, source_hash: CacheDBHash,
) -> Result<(), AnyError> { ) -> Result<(), AnyError> {
let sql = " let sql = "
INSERT OR REPLACE INTO INSERT OR REPLACE INTO
@ -204,11 +203,7 @@ impl SqlIncrementalCache {
(?1, ?2, ?3)"; (?1, ?2, ?3)";
self.conn.execute( self.conn.execute(
sql, sql,
params![ params![path.to_string_lossy(), self.state_hash, source_hash],
path.to_string_lossy(),
&self.state_hash.to_string(),
&source_hash,
],
)?; )?;
Ok(()) Ok(())
} }
@ -223,51 +218,51 @@ mod test {
#[test] #[test]
pub fn sql_cache_general_use() { pub fn sql_cache_general_use() {
let conn = CacheDB::in_memory(&INCREMENTAL_CACHE_DB, "1.0.0"); let conn = CacheDB::in_memory(&INCREMENTAL_CACHE_DB, "1.0.0");
let cache = SqlIncrementalCache::new(conn, 1); let cache = SqlIncrementalCache::new(conn, CacheDBHash::new(1));
let path = PathBuf::from("/mod.ts"); let path = PathBuf::from("/mod.ts");
assert_eq!(cache.get_source_hash(&path), None); assert_eq!(cache.get_source_hash(&path), None);
cache.set_source_hash(&path, 2).unwrap(); cache.set_source_hash(&path, CacheDBHash::new(2)).unwrap();
assert_eq!(cache.get_source_hash(&path), Some(2)); assert_eq!(cache.get_source_hash(&path), Some(CacheDBHash::new(2)));
// try changing the cli version (should clear) // try changing the cli version (should clear)
let conn = cache.conn.recreate_with_version("2.0.0"); let conn = cache.conn.recreate_with_version("2.0.0");
let mut cache = SqlIncrementalCache::new(conn, 1); let mut cache = SqlIncrementalCache::new(conn, CacheDBHash::new(1));
assert_eq!(cache.get_source_hash(&path), None); assert_eq!(cache.get_source_hash(&path), None);
// add back the file to the cache // add back the file to the cache
cache.set_source_hash(&path, 2).unwrap(); cache.set_source_hash(&path, CacheDBHash::new(2)).unwrap();
assert_eq!(cache.get_source_hash(&path), Some(2)); assert_eq!(cache.get_source_hash(&path), Some(CacheDBHash::new(2)));
// try changing the state hash // try changing the state hash
cache.state_hash = 2; cache.state_hash = CacheDBHash::new(2);
assert_eq!(cache.get_source_hash(&path), None); assert_eq!(cache.get_source_hash(&path), None);
cache.state_hash = 1; cache.state_hash = CacheDBHash::new(1);
// should return now that everything is back // should return now that everything is back
assert_eq!(cache.get_source_hash(&path), Some(2)); assert_eq!(cache.get_source_hash(&path), Some(CacheDBHash::new(2)));
// recreating the cache should not remove the data because the CLI version and state hash is the same // recreating the cache should not remove the data because the CLI version and state hash is the same
let conn = cache.conn.recreate_with_version("2.0.0"); let conn = cache.conn.recreate_with_version("2.0.0");
let cache = SqlIncrementalCache::new(conn, 1); let cache = SqlIncrementalCache::new(conn, CacheDBHash::new(1));
assert_eq!(cache.get_source_hash(&path), Some(2)); assert_eq!(cache.get_source_hash(&path), Some(CacheDBHash::new(2)));
// now try replacing and using another path // now try replacing and using another path
cache.set_source_hash(&path, 3).unwrap(); cache.set_source_hash(&path, CacheDBHash::new(3)).unwrap();
cache.set_source_hash(&path, 4).unwrap(); cache.set_source_hash(&path, CacheDBHash::new(4)).unwrap();
let path2 = PathBuf::from("/mod2.ts"); let path2 = PathBuf::from("/mod2.ts");
cache.set_source_hash(&path2, 5).unwrap(); cache.set_source_hash(&path2, CacheDBHash::new(5)).unwrap();
assert_eq!(cache.get_source_hash(&path), Some(4)); assert_eq!(cache.get_source_hash(&path), Some(CacheDBHash::new(4)));
assert_eq!(cache.get_source_hash(&path2), Some(5)); assert_eq!(cache.get_source_hash(&path2), Some(CacheDBHash::new(5)));
} }
#[tokio::test] #[tokio::test]
pub async fn incremental_cache_general_use() { pub async fn incremental_cache_general_use() {
let conn = CacheDB::in_memory(&INCREMENTAL_CACHE_DB, "1.0.0"); let conn = CacheDB::in_memory(&INCREMENTAL_CACHE_DB, "1.0.0");
let sql_cache = SqlIncrementalCache::new(conn, 1); let sql_cache = SqlIncrementalCache::new(conn, CacheDBHash::new(1));
let file_path = PathBuf::from("/mod.ts"); let file_path = PathBuf::from("/mod.ts");
let file_text = "test"; let file_text = "test";
let file_hash = FastInsecureHasher::hash(file_text); let file_hash = CacheDBHash::from_source(file_text);
sql_cache.set_source_hash(&file_path, file_hash).unwrap(); sql_cache.set_source_hash(&file_path, file_hash).unwrap();
let cache = IncrementalCacheInner::from_sql_incremental_cache( let cache = IncrementalCacheInner::from_sql_incremental_cache(
sql_cache, sql_cache,
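
Conceptually the incremental cache is a map from file path to source hash, scoped by a state hash of the format/lint configuration: a file is skipped only when both match. A condensed sketch of the `is_file_same` check (simplified, std hasher in place of xxHash):

use std::collections::hash_map::DefaultHasher;
use std::collections::HashMap;
use std::hash::{Hash, Hasher};

fn hash_of(value: impl Hash) -> u64 {
    let mut hasher = DefaultHasher::new();
    value.hash(&mut hasher);
    hasher.finish()
}

struct Incremental {
    previous_hashes: HashMap<String, u64>, // path -> last seen source hash
}

impl Incremental {
    fn is_file_same(&self, path: &str, text: &str) -> bool {
        self.previous_hashes.get(path) == Some(&hash_of(text))
    }
}

fn main() {
    let cache = Incremental {
        previous_hashes: HashMap::from([("/mod.ts".to_string(), hash_of("test"))]),
    };
    assert!(cache.is_file_same("/mod.ts", "test")); // unchanged: skip re-linting
    assert!(!cache.is_file_same("/mod.ts", "changed")); // re-process
}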

360 cli/cache/mod.rs vendored

@ -1,9 +1,23 @@
// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license. // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
use crate::args::jsr_url;
use crate::args::CacheSetting;
use crate::errors::get_error_class_name; use crate::errors::get_error_class_name;
use crate::file_fetcher::FetchNoFollowOptions;
use crate::file_fetcher::FetchOptions;
use crate::file_fetcher::FetchPermissionsOptionRef;
use crate::file_fetcher::FileFetcher; use crate::file_fetcher::FileFetcher;
use crate::util::fs::atomic_write_file; use crate::file_fetcher::FileOrRedirect;
use crate::npm::CliNpmResolver;
use crate::resolver::CliNodeResolver;
use crate::util::fs::atomic_write_file_with_retries;
use crate::util::fs::atomic_write_file_with_retries_and_fs;
use crate::util::fs::AtomicWriteFileFsAdapter;
use crate::util::path::specifier_has_extension;
use crate::util::text_encoding::arc_str_to_bytes;
use crate::util::text_encoding::from_utf8_lossy_owned;
use deno_ast::MediaType;
use deno_core::futures; use deno_core::futures;
use deno_core::futures::FutureExt; use deno_core::futures::FutureExt;
use deno_core::ModuleSpecifier; use deno_core::ModuleSpecifier;
@ -11,7 +25,7 @@ use deno_graph::source::CacheInfo;
use deno_graph::source::LoadFuture; use deno_graph::source::LoadFuture;
use deno_graph::source::LoadResponse; use deno_graph::source::LoadResponse;
use deno_graph::source::Loader; use deno_graph::source::Loader;
use deno_runtime::permissions::PermissionsContainer; use deno_runtime::deno_permissions::PermissionsContainer;
use std::collections::HashMap; use std::collections::HashMap;
use std::path::Path; use std::path::Path;
use std::path::PathBuf; use std::path::PathBuf;
@ -21,23 +35,33 @@ use std::time::SystemTime;
mod cache_db; mod cache_db;
mod caches; mod caches;
mod check; mod check;
mod code_cache;
mod common; mod common;
mod deno_dir; mod deno_dir;
mod disk_cache; mod disk_cache;
mod emit; mod emit;
mod fast_check;
mod incremental; mod incremental;
mod module_info;
mod node; mod node;
mod parsed_source; mod parsed_source;
pub use cache_db::CacheDBHash;
pub use caches::Caches; pub use caches::Caches;
pub use check::TypeCheckCache; pub use check::TypeCheckCache;
pub use code_cache::CodeCache;
pub use common::FastInsecureHasher; pub use common::FastInsecureHasher;
pub use deno_dir::dirs::home_dir;
pub use deno_dir::DenoDir; pub use deno_dir::DenoDir;
pub use deno_dir::DenoDirProvider; pub use deno_dir::DenoDirProvider;
pub use disk_cache::DiskCache; pub use disk_cache::DiskCache;
pub use emit::EmitCache; pub use emit::EmitCache;
pub use fast_check::FastCheckCache;
pub use incremental::IncrementalCache; pub use incremental::IncrementalCache;
pub use module_info::ModuleInfoCache;
pub use node::NodeAnalysisCache; pub use node::NodeAnalysisCache;
pub use parsed_source::EsmOrCjsChecker;
pub use parsed_source::LazyGraphSourceParser;
pub use parsed_source::ParsedSourceCache; pub use parsed_source::ParsedSourceCache;
/// Permissions used to save a file in the disk caches. /// Permissions used to save a file in the disk caches.
@ -47,12 +71,8 @@ pub const CACHE_PERM: u32 = 0o644;
pub struct RealDenoCacheEnv; pub struct RealDenoCacheEnv;
impl deno_cache_dir::DenoCacheEnv for RealDenoCacheEnv { impl deno_cache_dir::DenoCacheEnv for RealDenoCacheEnv {
fn read_file_bytes(&self, path: &Path) -> std::io::Result<Option<Vec<u8>>> { fn read_file_bytes(&self, path: &Path) -> std::io::Result<Vec<u8>> {
match std::fs::read(path) { std::fs::read(path)
Ok(s) => Ok(Some(s)),
Err(err) if err.kind() == std::io::ErrorKind::NotFound => Ok(None),
Err(err) => Err(err),
}
} }
fn atomic_write_file( fn atomic_write_file(
@ -60,7 +80,15 @@ impl deno_cache_dir::DenoCacheEnv for RealDenoCacheEnv {
path: &Path, path: &Path,
bytes: &[u8], bytes: &[u8],
) -> std::io::Result<()> { ) -> std::io::Result<()> {
atomic_write_file(path, bytes, CACHE_PERM) atomic_write_file_with_retries(path, bytes, CACHE_PERM)
}
fn canonicalize_path(&self, path: &Path) -> std::io::Result<PathBuf> {
crate::util::fs::canonicalize_path(path)
}
fn create_dir_all(&self, path: &Path) -> std::io::Result<()> {
std::fs::create_dir_all(path)
} }
fn modified(&self, path: &Path) -> std::io::Result<Option<SystemTime>> { fn modified(&self, path: &Path) -> std::io::Result<Option<SystemTime>> {
@ -82,42 +110,118 @@ impl deno_cache_dir::DenoCacheEnv for RealDenoCacheEnv {
} }
} }
#[derive(Debug, Clone)]
pub struct DenoCacheEnvFsAdapter<'a>(
pub &'a dyn deno_runtime::deno_fs::FileSystem,
);
impl<'a> deno_cache_dir::DenoCacheEnv for DenoCacheEnvFsAdapter<'a> {
fn read_file_bytes(&self, path: &Path) -> std::io::Result<Vec<u8>> {
self
.0
.read_file_sync(path, None)
.map_err(|err| err.into_io_error())
}
fn atomic_write_file(
&self,
path: &Path,
bytes: &[u8],
) -> std::io::Result<()> {
atomic_write_file_with_retries_and_fs(
&AtomicWriteFileFsAdapter {
fs: self.0,
write_mode: CACHE_PERM,
},
path,
bytes,
)
}
fn canonicalize_path(&self, path: &Path) -> std::io::Result<PathBuf> {
self.0.realpath_sync(path).map_err(|e| e.into_io_error())
}
fn create_dir_all(&self, path: &Path) -> std::io::Result<()> {
self
.0
.mkdir_sync(path, true, None)
.map_err(|e| e.into_io_error())
}
fn modified(&self, path: &Path) -> std::io::Result<Option<SystemTime>> {
self
.0
.stat_sync(path)
.map(|stat| {
stat
.mtime
.map(|ts| SystemTime::UNIX_EPOCH + std::time::Duration::from_secs(ts))
})
.map_err(|e| e.into_io_error())
}
fn is_file(&self, path: &Path) -> bool {
self.0.is_file_sync(path)
}
fn time_now(&self) -> SystemTime {
SystemTime::now()
}
}
pub type GlobalHttpCache = deno_cache_dir::GlobalHttpCache<RealDenoCacheEnv>; pub type GlobalHttpCache = deno_cache_dir::GlobalHttpCache<RealDenoCacheEnv>;
pub type LocalHttpCache = deno_cache_dir::LocalHttpCache<RealDenoCacheEnv>; pub type LocalHttpCache = deno_cache_dir::LocalHttpCache<RealDenoCacheEnv>;
pub type LocalLspHttpCache = pub type LocalLspHttpCache =
deno_cache_dir::LocalLspHttpCache<RealDenoCacheEnv>; deno_cache_dir::LocalLspHttpCache<RealDenoCacheEnv>;
pub use deno_cache_dir::CachedUrlMetadata;
pub use deno_cache_dir::HttpCache; pub use deno_cache_dir::HttpCache;
pub struct FetchCacherOptions {
pub file_header_overrides: HashMap<ModuleSpecifier, HashMap<String, String>>,
pub permissions: PermissionsContainer,
/// If we're publishing for `deno publish`.
pub is_deno_publish: bool,
pub unstable_detect_cjs: bool,
}
/// A "wrapper" for the FileFetcher and DiskCache for the Deno CLI that provides /// A "wrapper" for the FileFetcher and DiskCache for the Deno CLI that provides
/// a concise interface to the DENO_DIR when building module graphs. /// a concise interface to the DENO_DIR when building module graphs.
pub struct FetchCacher { pub struct FetchCacher {
emit_cache: EmitCache, pub file_header_overrides: HashMap<ModuleSpecifier, HashMap<String, String>>,
esm_or_cjs_checker: Arc<EsmOrCjsChecker>,
file_fetcher: Arc<FileFetcher>, file_fetcher: Arc<FileFetcher>,
file_header_overrides: HashMap<ModuleSpecifier, HashMap<String, String>>,
global_http_cache: Arc<GlobalHttpCache>, global_http_cache: Arc<GlobalHttpCache>,
node_resolver: Arc<CliNodeResolver>,
npm_resolver: Arc<dyn CliNpmResolver>,
module_info_cache: Arc<ModuleInfoCache>,
permissions: PermissionsContainer, permissions: PermissionsContainer,
is_deno_publish: bool,
unstable_detect_cjs: bool,
cache_info_enabled: bool, cache_info_enabled: bool,
maybe_local_node_modules_url: Option<ModuleSpecifier>,
} }
impl FetchCacher { impl FetchCacher {
pub fn new( pub fn new(
emit_cache: EmitCache, esm_or_cjs_checker: Arc<EsmOrCjsChecker>,
file_fetcher: Arc<FileFetcher>, file_fetcher: Arc<FileFetcher>,
file_header_overrides: HashMap<ModuleSpecifier, HashMap<String, String>>,
global_http_cache: Arc<GlobalHttpCache>, global_http_cache: Arc<GlobalHttpCache>,
permissions: PermissionsContainer, node_resolver: Arc<CliNodeResolver>,
maybe_local_node_modules_url: Option<ModuleSpecifier>, npm_resolver: Arc<dyn CliNpmResolver>,
module_info_cache: Arc<ModuleInfoCache>,
options: FetchCacherOptions,
) -> Self { ) -> Self {
Self { Self {
emit_cache,
file_fetcher, file_fetcher,
file_header_overrides, esm_or_cjs_checker,
global_http_cache, global_http_cache,
permissions, node_resolver,
npm_resolver,
module_info_cache,
file_header_overrides: options.file_header_overrides,
permissions: options.permissions,
is_deno_publish: options.is_deno_publish,
unstable_detect_cjs: options.unstable_detect_cjs,
cache_info_enabled: false, cache_info_enabled: false,
maybe_local_node_modules_url,
} }
} }
@ -127,15 +231,7 @@ impl FetchCacher {
self.cache_info_enabled = true; self.cache_info_enabled = true;
} }
// DEPRECATED: Where the file is stored and how it's stored should be an implementation /// Only use this for `deno info`.
// detail of the cache.
//
// todo(dsheret): remove once implementing
// * https://github.com/denoland/deno/issues/17707
// * https://github.com/denoland/deno/issues/17703
#[deprecated(
note = "There should not be a way to do this because the file may not be cached at a local path in the future."
)]
fn get_local_path(&self, specifier: &ModuleSpecifier) -> Option<PathBuf> { fn get_local_path(&self, specifier: &ModuleSpecifier) -> Option<PathBuf> {
// TODO(@kitsonk) fix when deno_graph does not query cache for synthetic // TODO(@kitsonk) fix when deno_graph does not query cache for synthetic
// modules // modules
@ -162,78 +258,198 @@ impl Loader for FetchCacher {
#[allow(deprecated)] #[allow(deprecated)]
let local = self.get_local_path(specifier)?; let local = self.get_local_path(specifier)?;
if local.is_file() { if local.is_file() {
let emit = self Some(CacheInfo { local: Some(local) })
.emit_cache
.get_emit_filepath(specifier)
.filter(|p| p.is_file());
Some(CacheInfo {
local: Some(local),
emit,
map: None,
})
} else { } else {
None None
} }
} }
fn load( fn load(
&mut self, &self,
specifier: &ModuleSpecifier, specifier: &ModuleSpecifier,
_is_dynamic: bool, options: deno_graph::source::LoadOptions,
) -> LoadFuture { ) -> LoadFuture {
if let Some(node_modules_url) = self.maybe_local_node_modules_url.as_ref() { use deno_graph::source::CacheSetting as LoaderCacheSetting;
// The specifier might be in a completely different symlinked tree than
// what the resolved node_modules_url is in (ex. `/my-project-1/node_modules` if specifier.scheme() == "file" {
// symlinked to `/my-project-2/node_modules`), so first check if the path
// is in a node_modules dir to avoid needlessly canonicalizing, then compare
// against the canonicalized specifier.
if specifier.path().contains("/node_modules/") { if specifier.path().contains("/node_modules/") {
// The specifier might be in a completely different symlinked tree than
// what the node_modules url is in (ex. `/my-project-1/node_modules`
// symlinked to `/my-project-2/node_modules`), so first we checked if the path
// is in a node_modules dir to avoid needlessly canonicalizing, then now compare
// against the canonicalized specifier.
let specifier = let specifier =
crate::node::resolve_specifier_into_node_modules(specifier); crate::node::resolve_specifier_into_node_modules(specifier);
if specifier.as_str().starts_with(node_modules_url.as_str()) { if self.npm_resolver.in_npm_package(&specifier) {
return Box::pin(futures::future::ready(Ok(Some( return Box::pin(futures::future::ready(Ok(Some(
          LoadResponse::External { specifier },
        ))));
      }
    }
+    // make local CJS modules external to the graph
+    if specifier_has_extension(specifier, "cjs") {
+      return Box::pin(futures::future::ready(Ok(Some(
+        LoadResponse::External {
+          specifier: specifier.clone(),
+        },
+      ))));
+    }
+    if self.unstable_detect_cjs && specifier_has_extension(specifier, "js") {
+      if let Ok(Some(pkg_json)) =
+        self.node_resolver.get_closest_package_json(specifier)
+      {
+        if pkg_json.typ == "commonjs" {
+          if let Ok(path) = specifier.to_file_path() {
+            if let Ok(bytes) = std::fs::read(&path) {
+              let text: Arc<str> = from_utf8_lossy_owned(bytes).into();
+              let is_es_module = match self.esm_or_cjs_checker.is_esm(
+                specifier,
+                text.clone(),
+                MediaType::JavaScript,
+              ) {
+                Ok(value) => value,
+                Err(err) => {
+                  return Box::pin(futures::future::ready(Err(err.into())));
+                }
+              };
+              if !is_es_module {
+                self.node_resolver.mark_cjs_resolution(specifier.clone());
+                return Box::pin(futures::future::ready(Ok(Some(
+                  LoadResponse::External {
+                    specifier: specifier.clone(),
+                  },
+                ))));
+              } else {
+                return Box::pin(futures::future::ready(Ok(Some(
+                  LoadResponse::Module {
+                    specifier: specifier.clone(),
+                    content: arc_str_to_bytes(text),
+                    maybe_headers: None,
+                  },
+                ))));
+              }
+            }
+          }
+        }
+      }
+    }
+    if self.is_deno_publish
+      && matches!(specifier.scheme(), "http" | "https")
+      && !specifier.as_str().starts_with(jsr_url().as_str())
+    {
+      // mark non-JSR remote modules as external so we don't need --allow-import
+      // permissions as these will error out later when publishing
+      return Box::pin(futures::future::ready(Ok(Some(
+        LoadResponse::External {
+          specifier: specifier.clone(),
+        },
+      ))));
+    }
+    let permissions = self.permissions.clone();
     let file_fetcher = self.file_fetcher.clone();
     let file_header_overrides = self.file_header_overrides.clone();
-    let permissions = self.permissions.clone();
     let specifier = specifier.clone();
+    let is_statically_analyzable = !options.was_dynamic_root;
     async move {
+      let maybe_cache_setting = match options.cache_setting {
+        LoaderCacheSetting::Use => None,
+        LoaderCacheSetting::Reload => {
+          if matches!(file_fetcher.cache_setting(), CacheSetting::Only) {
+            return Err(deno_core::anyhow::anyhow!(
+              "Could not resolve version constraint using only cached data. Try running again without --cached-only"
+            ));
+          }
+          Some(CacheSetting::ReloadAll)
+        }
+        LoaderCacheSetting::Only => Some(CacheSetting::Only),
+      };
       file_fetcher
-        .fetch(&specifier, permissions)
+        .fetch_no_follow_with_options(FetchNoFollowOptions {
+          fetch_options: FetchOptions {
+            specifier: &specifier,
+            permissions: if is_statically_analyzable {
+              FetchPermissionsOptionRef::StaticContainer(&permissions)
+            } else {
+              FetchPermissionsOptionRef::DynamicContainer(&permissions)
+            },
+            maybe_auth: None,
+            maybe_accept: None,
+            maybe_cache_setting: maybe_cache_setting.as_ref(),
+          },
+          maybe_checksum: options.maybe_checksum.as_ref(),
+        })
         .await
-        .map(|file| {
-          let maybe_headers =
-            match (file.maybe_headers, file_header_overrides.get(&specifier)) {
-              (Some(headers), Some(overrides)) => {
-                Some(headers.into_iter().chain(overrides.clone()).collect())
-              }
-              (Some(headers), None) => Some(headers),
-              (None, Some(overrides)) => Some(overrides.clone()),
-              (None, None) => None,
-            };
-          Ok(Some(LoadResponse::Module {
-            specifier: file.specifier,
-            maybe_headers,
-            content: file.source,
-          }))
+        .map(|file_or_redirect| {
+          match file_or_redirect {
+            FileOrRedirect::File(file) => {
+              let maybe_headers =
+                match (file.maybe_headers, file_header_overrides.get(&specifier)) {
+                  (Some(headers), Some(overrides)) => {
+                    Some(headers.into_iter().chain(overrides.clone()).collect())
+                  }
+                  (Some(headers), None) => Some(headers),
+                  (None, Some(overrides)) => Some(overrides.clone()),
+                  (None, None) => None,
+                };
+              Ok(Some(LoadResponse::Module {
+                specifier: file.specifier,
+                maybe_headers,
+                content: file.source,
+              }))
+            },
+            FileOrRedirect::Redirect(redirect_specifier) => {
+              Ok(Some(LoadResponse::Redirect {
+                specifier: redirect_specifier,
+              }))
+            },
+          }
         })
         .unwrap_or_else(|err| {
-          if let Some(err) = err.downcast_ref::<std::io::Error>() {
-            if err.kind() == std::io::ErrorKind::NotFound {
+          if let Some(io_err) = err.downcast_ref::<std::io::Error>() {
+            if io_err.kind() == std::io::ErrorKind::NotFound {
               return Ok(None);
-            } else {
-              return Err(err);
             }
-          } else if get_error_class_name(&err) == "NotFound" {
-            return Ok(None);
           }
-          Err(err)
+          let error_class_name = get_error_class_name(&err);
+          match error_class_name {
+            "NotFound" => Ok(None),
+            "NotCached" if options.cache_setting == LoaderCacheSetting::Only => Ok(None),
+            _ => Err(err),
+          }
         })
     }
-    .boxed()
+    .boxed_local()
   }
+
+  fn cache_module_info(
+    &self,
+    specifier: &ModuleSpecifier,
+    media_type: MediaType,
+    source: &Arc<[u8]>,
+    module_info: &deno_graph::ModuleInfo,
+  ) {
+    log::debug!("Caching module info for {}", specifier);
+    let source_hash = CacheDBHash::from_source(source);
+    let result = self.module_info_cache.set_module_info(
+      specifier,
+      media_type,
+      source_hash,
+      module_info,
+    );
+    if let Err(err) = result {
+      log::debug!(
+        "Error saving module cache info for {}. {:#}",
+        specifier,
+        err
+      );
+    }
+  }
 }
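One behavioral detail in the loader diff above is easy to miss: a fetch failure is downgraded to Ok(None) ("module not found") rather than failing the whole graph walk, both for plain not-found errors and for not-cached errors under --cached-only. Below is a minimal, self-contained Rust sketch of that fallback logic; FetchError, map_fetch_error, and the String payload are stand-ins invented for illustration, while the LoaderCacheSetting variants and the error-class names come from the diff.

#[derive(Debug, PartialEq)]
enum LoaderCacheSetting {
  Use,
  Reload,
  Only,
}

#[derive(Debug)]
enum FetchError {
  NotFound,
  NotCached,
  Other(String),
}

// Mirrors the diff's `unwrap_or_else` arm: not-found (and not-cached under
// cached-only mode) become `Ok(None)`, everything else propagates as an error.
fn map_fetch_error(
  err: FetchError,
  cache_setting: LoaderCacheSetting,
) -> Result<Option<String>, FetchError> {
  match err {
    FetchError::NotFound => Ok(None),
    FetchError::NotCached if cache_setting == LoaderCacheSetting::Only => Ok(None),
    other => Err(other),
  }
}

fn main() {
  // Missing module: the graph sees "no module here", not a hard failure.
  assert!(matches!(
    map_fetch_error(FetchError::NotFound, LoaderCacheSetting::Use),
    Ok(None)
  ));
  // Not in cache while running cached-only: also treated as absent.
  assert!(matches!(
    map_fetch_error(FetchError::NotCached, LoaderCacheSetting::Only),
    Ok(None)
  ));
  // Any other error still propagates.
  assert!(map_fetch_error(
    FetchError::Other("network failure".to_string()),
    LoaderCacheSetting::Reload
  )
  .is_err());
}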

cli/cache/module_info.rs (new file)
@@ -0,0 +1,331 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
use std::sync::Arc;
use deno_ast::MediaType;
use deno_ast::ModuleSpecifier;
use deno_core::error::AnyError;
use deno_core::serde_json;
use deno_graph::ModuleInfo;
use deno_graph::ParserModuleAnalyzer;
use deno_runtime::deno_webstorage::rusqlite::params;
use super::cache_db::CacheDB;
use super::cache_db::CacheDBConfiguration;
use super::cache_db::CacheDBHash;
use super::cache_db::CacheFailure;
use super::ParsedSourceCache;
const SELECT_MODULE_INFO: &str = "
SELECT
module_info
FROM
moduleinfocache
WHERE
specifier=?1
AND media_type=?2
AND source_hash=?3
LIMIT 1";
pub static MODULE_INFO_CACHE_DB: CacheDBConfiguration = CacheDBConfiguration {
table_initializer: concat!(
"CREATE TABLE IF NOT EXISTS moduleinfocache (",
"specifier TEXT PRIMARY KEY,",
"media_type INTEGER NOT NULL,",
"source_hash INTEGER NOT NULL,",
"module_info TEXT NOT NULL",
");"
),
on_version_change: "DELETE FROM moduleinfocache;",
preheat_queries: &[SELECT_MODULE_INFO],
on_failure: CacheFailure::InMemory,
};
/// A cache of `deno_graph::ModuleInfo` objects. Using this leads to a considerable
/// performance improvement because when it exists we can skip parsing a module for
/// deno_graph.
pub struct ModuleInfoCache {
conn: CacheDB,
}
impl ModuleInfoCache {
#[cfg(test)]
pub fn new_in_memory(version: &'static str) -> Self {
Self::new(CacheDB::in_memory(&MODULE_INFO_CACHE_DB, version))
}
pub fn new(conn: CacheDB) -> Self {
Self { conn }
}
/// Useful for testing: re-create this cache DB with a different current version.
#[cfg(test)]
pub(crate) fn recreate_with_version(self, version: &'static str) -> Self {
Self {
conn: self.conn.recreate_with_version(version),
}
}
pub fn get_module_info(
&self,
specifier: &ModuleSpecifier,
media_type: MediaType,
expected_source_hash: CacheDBHash,
) -> Result<Option<ModuleInfo>, AnyError> {
let query = SELECT_MODULE_INFO;
let res = self.conn.query_row(
query,
params![
&specifier.as_str(),
serialize_media_type(media_type),
expected_source_hash,
],
|row| {
let module_info: String = row.get(0)?;
let module_info = serde_json::from_str(&module_info)?;
Ok(module_info)
},
)?;
Ok(res)
}
pub fn set_module_info(
&self,
specifier: &ModuleSpecifier,
media_type: MediaType,
source_hash: CacheDBHash,
module_info: &ModuleInfo,
) -> Result<(), AnyError> {
let sql = "
INSERT OR REPLACE INTO
moduleinfocache (specifier, media_type, source_hash, module_info)
VALUES
(?1, ?2, ?3, ?4)";
self.conn.execute(
sql,
params![
specifier.as_str(),
serialize_media_type(media_type),
source_hash,
&serde_json::to_string(&module_info)?,
],
)?;
Ok(())
}
pub fn as_module_analyzer<'a>(
&'a self,
parsed_source_cache: &'a Arc<ParsedSourceCache>,
) -> ModuleInfoCacheModuleAnalyzer<'a> {
ModuleInfoCacheModuleAnalyzer {
module_info_cache: self,
parsed_source_cache,
}
}
}
pub struct ModuleInfoCacheModuleAnalyzer<'a> {
module_info_cache: &'a ModuleInfoCache,
parsed_source_cache: &'a Arc<ParsedSourceCache>,
}
#[async_trait::async_trait(?Send)]
impl<'a> deno_graph::ModuleAnalyzer for ModuleInfoCacheModuleAnalyzer<'a> {
async fn analyze(
&self,
specifier: &ModuleSpecifier,
source: Arc<str>,
media_type: MediaType,
) -> Result<ModuleInfo, deno_ast::ParseDiagnostic> {
// attempt to load from the cache
let source_hash = CacheDBHash::from_source(&source);
match self.module_info_cache.get_module_info(
specifier,
media_type,
source_hash,
) {
Ok(Some(info)) => return Ok(info),
Ok(None) => {}
Err(err) => {
log::debug!(
"Error loading module cache info for {}. {:#}",
specifier,
err
);
}
}
// otherwise, get the module info from the parsed source cache
let module_info = deno_core::unsync::spawn_blocking({
let cache = self.parsed_source_cache.clone();
let specifier = specifier.clone();
move || {
let parser = cache.as_capturing_parser();
let analyzer = ParserModuleAnalyzer::new(&parser);
analyzer.analyze_sync(&specifier, source, media_type)
}
})
.await
.unwrap()?;
// then attempt to cache it
if let Err(err) = self.module_info_cache.set_module_info(
specifier,
media_type,
source_hash,
&module_info,
) {
log::debug!(
"Error saving module cache info for {}. {:#}",
specifier,
err
);
}
Ok(module_info)
}
}
fn serialize_media_type(media_type: MediaType) -> i64 {
use MediaType::*;
match media_type {
JavaScript => 1,
Jsx => 2,
Mjs => 3,
Cjs => 4,
TypeScript => 5,
Mts => 6,
Cts => 7,
Dts => 8,
Dmts => 9,
Dcts => 10,
Tsx => 11,
Json => 12,
Wasm => 13,
TsBuildInfo => 14,
SourceMap => 15,
Unknown => 16,
}
}
#[cfg(test)]
mod test {
use deno_graph::PositionRange;
use deno_graph::SpecifierWithRange;
use super::*;
#[test]
pub fn module_info_cache_general_use() {
let cache = ModuleInfoCache::new_in_memory("1.0.0");
let specifier1 =
ModuleSpecifier::parse("https://localhost/mod.ts").unwrap();
let specifier2 =
ModuleSpecifier::parse("https://localhost/mod2.ts").unwrap();
assert_eq!(
cache
.get_module_info(
&specifier1,
MediaType::JavaScript,
CacheDBHash::new(1)
)
.unwrap(),
None
);
let mut module_info = ModuleInfo::default();
module_info.jsdoc_imports.push(SpecifierWithRange {
range: PositionRange {
start: deno_graph::Position {
line: 0,
character: 3,
},
end: deno_graph::Position {
line: 1,
character: 2,
},
},
text: "test".to_string(),
});
cache
.set_module_info(
&specifier1,
MediaType::JavaScript,
CacheDBHash::new(1),
&module_info,
)
.unwrap();
assert_eq!(
cache
.get_module_info(
&specifier1,
MediaType::JavaScript,
CacheDBHash::new(1)
)
.unwrap(),
Some(module_info.clone())
);
assert_eq!(
cache
.get_module_info(
&specifier2,
MediaType::JavaScript,
CacheDBHash::new(1)
)
.unwrap(),
None,
);
// different media type
assert_eq!(
cache
.get_module_info(
&specifier1,
MediaType::TypeScript,
CacheDBHash::new(1)
)
.unwrap(),
None,
);
// different source hash
assert_eq!(
cache
.get_module_info(
&specifier1,
MediaType::JavaScript,
CacheDBHash::new(2)
)
.unwrap(),
None,
);
// try recreating with the same version
let cache = cache.recreate_with_version("1.0.0");
// should get it
assert_eq!(
cache
.get_module_info(
&specifier1,
MediaType::JavaScript,
CacheDBHash::new(1)
)
.unwrap(),
Some(module_info)
);
// try recreating with a different version
let cache = cache.recreate_with_version("1.0.1");
// should no longer exist
assert_eq!(
cache
.get_module_info(
&specifier1,
MediaType::JavaScript,
CacheDBHash::new(1)
)
.unwrap(),
None,
);
}
}
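The doc comment on ModuleInfoCache claims the win is skipping deno_graph's parse when a cached entry exists; the key that makes this safe is the (specifier, media type, source hash) triple. Here is a hedged usage sketch modeled on the unit test above; new_in_memory is #[cfg(test)]-only (production code goes through ModuleInfoCache::new), and the roundtrip wrapper plus its error plumbing are assumptions for illustration.

fn roundtrip() -> Result<(), AnyError> {
  let cache = ModuleInfoCache::new_in_memory("1.0.0");
  let specifier = ModuleSpecifier::parse("https://example.com/mod.ts")?;

  // Cache miss: this (specifier, media type, source hash) key is unknown.
  assert!(cache
    .get_module_info(&specifier, MediaType::TypeScript, CacheDBHash::new(1))?
    .is_none());

  // Store an analysis result once...
  cache.set_module_info(
    &specifier,
    MediaType::TypeScript,
    CacheDBHash::new(1),
    &ModuleInfo::default(),
  )?;

  // ...and later lookups with the same key return it without re-parsing.
  assert!(cache
    .get_module_info(&specifier, MediaType::TypeScript, CacheDBHash::new(1))?
    .is_some());
  Ok(())
}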

cli/cache/node.rs
@@ -1,22 +1,25 @@
-// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
+// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
 
-use deno_ast::CjsAnalysis;
 use deno_core::error::AnyError;
 use deno_core::serde_json;
 use deno_runtime::deno_webstorage::rusqlite::params;
 
+use crate::node::CliCjsAnalysis;
+
 use super::cache_db::CacheDB;
 use super::cache_db::CacheDBConfiguration;
 use super::cache_db::CacheFailure;
-use super::FastInsecureHasher;
+use super::CacheDBHash;
 
 pub static NODE_ANALYSIS_CACHE_DB: CacheDBConfiguration =
   CacheDBConfiguration {
-    table_initializer: "CREATE TABLE IF NOT EXISTS cjsanalysiscache (
-      specifier TEXT PRIMARY KEY,
-      source_hash TEXT NOT NULL,
-      data TEXT NOT NULL
-    );",
+    table_initializer: concat!(
+      "CREATE TABLE IF NOT EXISTS cjsanalysiscache (",
+      "specifier TEXT PRIMARY KEY,",
+      "source_hash INTEGER NOT NULL,",
+      "data TEXT NOT NULL",
+      ");"
+    ),
     on_version_change: "DELETE FROM cjsanalysiscache;",
     preheat_queries: &[],
     on_failure: CacheFailure::InMemory,
@@ -34,10 +37,6 @@ impl NodeAnalysisCache {
     }
   }
 
-  pub fn compute_source_hash(text: &str) -> String {
-    FastInsecureHasher::hash(text).to_string()
-  }
-
   fn ensure_ok<T: Default>(res: Result<T, AnyError>) -> T {
     match res {
       Ok(x) => x,
@@ -58,8 +57,8 @@ impl NodeAnalysisCache {
   pub fn get_cjs_analysis(
     &self,
     specifier: &str,
-    expected_source_hash: &str,
-  ) -> Option<CjsAnalysis> {
+    expected_source_hash: CacheDBHash,
+  ) -> Option<CliCjsAnalysis> {
     Self::ensure_ok(
       self.inner.get_cjs_analysis(specifier, expected_source_hash),
     )
@@ -68,8 +67,8 @@ impl NodeAnalysisCache {
   pub fn set_cjs_analysis(
     &self,
     specifier: &str,
-    source_hash: &str,
-    cjs_analysis: &CjsAnalysis,
+    source_hash: CacheDBHash,
+    cjs_analysis: &CliCjsAnalysis,
   ) {
     Self::ensure_ok(self.inner.set_cjs_analysis(
       specifier,
@@ -92,8 +91,8 @@ impl NodeAnalysisCacheInner {
   pub fn get_cjs_analysis(
     &self,
     specifier: &str,
-    expected_source_hash: &str,
-  ) -> Result<Option<CjsAnalysis>, AnyError> {
+    expected_source_hash: CacheDBHash,
+  ) -> Result<Option<CliCjsAnalysis>, AnyError> {
     let query = "
       SELECT
         data
@@ -105,7 +104,7 @@ impl NodeAnalysisCacheInner {
       LIMIT 1";
     let res = self.conn.query_row(
       query,
-      params![specifier, &expected_source_hash],
+      params![specifier, expected_source_hash],
       |row| {
         let analysis_info: String = row.get(0)?;
         Ok(serde_json::from_str(&analysis_info)?)
@@ -117,8 +116,8 @@ impl NodeAnalysisCacheInner {
   pub fn set_cjs_analysis(
     &self,
     specifier: &str,
-    source_hash: &str,
-    cjs_analysis: &CjsAnalysis,
+    source_hash: CacheDBHash,
+    cjs_analysis: &CliCjsAnalysis,
   ) -> Result<(), AnyError> {
     let sql = "
       INSERT OR REPLACE INTO
@@ -129,7 +128,7 @@ impl NodeAnalysisCacheInner {
       sql,
       params![
         specifier,
-        &source_hash.to_string(),
+        source_hash,
         &serde_json::to_string(&cjs_analysis)?,
       ],
     )?;
@@ -146,36 +145,47 @@ mod test {
     let conn = CacheDB::in_memory(&NODE_ANALYSIS_CACHE_DB, "1.0.0");
     let cache = NodeAnalysisCacheInner::new(conn);
 
-    assert!(cache.get_cjs_analysis("file.js", "2").unwrap().is_none());
+    assert!(cache
+      .get_cjs_analysis("file.js", CacheDBHash::new(2))
+      .unwrap()
+      .is_none());
 
-    let cjs_analysis = CjsAnalysis {
+    let cjs_analysis = CliCjsAnalysis::Cjs {
       exports: vec!["export1".to_string()],
       reexports: vec!["re-export1".to_string()],
     };
     cache
-      .set_cjs_analysis("file.js", "2", &cjs_analysis)
+      .set_cjs_analysis("file.js", CacheDBHash::new(2), &cjs_analysis)
       .unwrap();
-    assert!(cache.get_cjs_analysis("file.js", "3").unwrap().is_none()); // different hash
-    let actual_cjs_analysis =
-      cache.get_cjs_analysis("file.js", "2").unwrap().unwrap();
-    assert_eq!(actual_cjs_analysis.exports, cjs_analysis.exports);
-    assert_eq!(actual_cjs_analysis.reexports, cjs_analysis.reexports);
+    assert!(cache
+      .get_cjs_analysis("file.js", CacheDBHash::new(3))
+      .unwrap()
+      .is_none()); // different hash
+    let actual_cjs_analysis = cache
+      .get_cjs_analysis("file.js", CacheDBHash::new(2))
+      .unwrap()
+      .unwrap();
+    assert_eq!(actual_cjs_analysis, cjs_analysis);
 
     // adding when already exists should not cause issue
     cache
-      .set_cjs_analysis("file.js", "2", &cjs_analysis)
+      .set_cjs_analysis("file.js", CacheDBHash::new(2), &cjs_analysis)
      .unwrap();
 
     // recreating with same cli version should still have it
     let conn = cache.conn.recreate_with_version("1.0.0");
     let cache = NodeAnalysisCacheInner::new(conn);
-    let actual_analysis =
-      cache.get_cjs_analysis("file.js", "2").unwrap().unwrap();
-    assert_eq!(actual_analysis.exports, cjs_analysis.exports);
-    assert_eq!(actual_analysis.reexports, cjs_analysis.reexports);
+    let actual_analysis = cache
+      .get_cjs_analysis("file.js", CacheDBHash::new(2))
+      .unwrap()
+      .unwrap();
+    assert_eq!(actual_analysis, cjs_analysis);
 
     // now changing the cli version should clear it
     let conn = cache.conn.recreate_with_version("2.0.0");
     let cache = NodeAnalysisCacheInner::new(conn);
-    assert!(cache.get_cjs_analysis("file.js", "2").unwrap().is_none());
+    assert!(cache
+      .get_cjs_analysis("file.js", CacheDBHash::new(2))
+      .unwrap()
+      .is_none());
   }
 }
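The node.rs changes swap the stringly-typed source hash for the integer CacheDBHash (matching the new INTEGER column) and the shared CjsAnalysis for the CLI-local CliCjsAnalysis. A hypothetical get-or-compute wrapper, not part of the diff, shows why the key includes a source hash: editing the file changes the hash, so stale analyses miss instead of being served. analyze_cjs is a stand-in for the real analyzer, and CacheDBHash::from_source is assumed to accept source text as it does in module_info.rs above.

fn cjs_analysis_for(
  cache: &NodeAnalysisCache,
  specifier: &str,
  source: &str,
) -> CliCjsAnalysis {
  let source_hash = CacheDBHash::from_source(source);
  if let Some(analysis) = cache.get_cjs_analysis(specifier, source_hash) {
    return analysis; // unchanged source: reuse the cached exports/reexports
  }
  let analysis = analyze_cjs(source); // hypothetical analyzer call
  cache.set_cjs_analysis(specifier, source_hash, &analysis);
  analysis
}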

Some files were not shown because too many files have changed in this diff.