Mirror of https://github.com/denoland/deno.git, synced 2025-02-01 20:25:12 -05:00

Merge remote-tracking branch 'upstream/main' into show-remote-modules-size

This commit is contained in commit e98e110b0a.

144 changed files with 10972 additions and 803 deletions
.github/workflows/ci.generate.ts (vendored, 2 changes)

@@ -5,7 +5,7 @@ import { stringify } from "jsr:@std/yaml@^0.221/stringify";
 // Bump this number when you want to purge the cache.
 // Note: the tools/release/01_bump_crate_versions.ts script will update this version
 // automatically via regex, so ensure that this line maintains this format.
-const cacheVersion = 31;
+const cacheVersion = 32;
 
 const ubuntuX86Runner = "ubuntu-24.04";
 const ubuntuX86XlRunner = "ubuntu-24.04-xl";
.github/workflows/ci.yml (vendored, 8 changes)

@@ -184,8 +184,8 @@ jobs:
             ~/.cargo/registry/index
             ~/.cargo/registry/cache
             ~/.cargo/git/db
-          key: '31-cargo-home-${{ matrix.os }}-${{ matrix.arch }}-${{ hashFiles(''Cargo.lock'') }}'
-          restore-keys: '31-cargo-home-${{ matrix.os }}-${{ matrix.arch }}-'
+          key: '32-cargo-home-${{ matrix.os }}-${{ matrix.arch }}-${{ hashFiles(''Cargo.lock'') }}'
+          restore-keys: '32-cargo-home-${{ matrix.os }}-${{ matrix.arch }}-'
         if: '!(matrix.skip)'
       - uses: dsherret/rust-toolchain-file@v1
         if: '!(matrix.skip)'
@@ -379,7 +379,7 @@ jobs:
             !./target/*/*.zip
             !./target/*/*.tar.gz
           key: never_saved
-          restore-keys: '31-cargo-target-${{ matrix.os }}-${{ matrix.arch }}-${{ matrix.profile }}-${{ matrix.job }}-'
+          restore-keys: '32-cargo-target-${{ matrix.os }}-${{ matrix.arch }}-${{ matrix.profile }}-${{ matrix.job }}-'
       - name: Apply and update mtime cache
         if: '!(matrix.skip) && (!startsWith(github.ref, ''refs/tags/''))'
         uses: ./.github/mtime_cache
@@ -689,7 +689,7 @@ jobs:
             !./target/*/gn_root
             !./target/*/*.zip
             !./target/*/*.tar.gz
-          key: '31-cargo-target-${{ matrix.os }}-${{ matrix.arch }}-${{ matrix.profile }}-${{ matrix.job }}-${{ github.sha }}'
+          key: '32-cargo-target-${{ matrix.os }}-${{ matrix.arch }}-${{ matrix.profile }}-${{ matrix.job }}-${{ github.sha }}'
   publish-canary:
     name: publish canary
     runs-on: ubuntu-24.04
Cargo.lock (generated, 234 changes)

@@ -380,7 +380,7 @@ dependencies = [
  "rustversion",
  "serde",
  "sync_wrapper",
- "tower",
+ "tower 0.4.13",
  "tower-layer",
  "tower-service",
 ]
@@ -677,6 +677,28 @@ dependencies = [
  "itoa",
 ]
 
+[[package]]
+name = "capacity_builder"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8f2d24a6dcf0cd402a21b65d35340f3a49ff3475dc5fdac91d22d2733e6641c6"
+dependencies = [
+ "capacity_builder_macros",
+ "ecow",
+ "hipstr",
+ "itoa",
+]
+
+[[package]]
+name = "capacity_builder_macros"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3b4a6cae9efc04cc6cbb8faf338d2c497c165c83e74509cf4dbedea948bbf6e5"
+dependencies = [
+ "quote",
+ "syn 2.0.87",
+]
+
 [[package]]
 name = "caseless"
 version = "0.2.1"
@@ -728,6 +750,12 @@ version = "0.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e"
 
+[[package]]
+name = "cfg_aliases"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724"
+
 [[package]]
 name = "chrono"
 version = "0.4.37"
@@ -1224,7 +1252,7 @@ dependencies = [
  "boxed_error",
  "bytes",
  "cache_control",
- "capacity_builder",
+ "capacity_builder 0.5.0",
  "chrono",
  "clap",
  "clap_complete",
@@ -1391,7 +1419,7 @@ dependencies = [
 
 [[package]]
 name = "deno_bench_util"
-version = "0.177.0"
+version = "0.178.0"
 dependencies = [
  "bencher",
  "deno_core",
@@ -1400,7 +1428,7 @@ dependencies = [
 
 [[package]]
 name = "deno_broadcast_channel"
-version = "0.177.0"
+version = "0.178.0"
 dependencies = [
  "async-trait",
  "deno_core",
@@ -1411,7 +1439,7 @@ dependencies = [
 
 [[package]]
 name = "deno_cache"
-version = "0.115.0"
+version = "0.116.0"
 dependencies = [
  "async-trait",
  "deno_core",
@@ -1452,7 +1480,7 @@ dependencies = [
 
 [[package]]
 name = "deno_canvas"
-version = "0.52.0"
+version = "0.53.0"
 dependencies = [
  "deno_core",
  "deno_webgpu",
@@ -1463,9 +1491,9 @@ dependencies = [
 
 [[package]]
 name = "deno_config"
-version = "0.39.3"
+version = "0.41.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ce717af3fe6788dae63965d58d5637fd62be8fe4f345f189137ffc06c51837d2"
+checksum = "8afa3beb6b9e0604cfe0380d30f88c5b758d44e228d5a5fc42ae637ccfb7d089"
 dependencies = [
  "anyhow",
  "deno_package_json",
@@ -1487,16 +1515,16 @@ dependencies = [
 
 [[package]]
 name = "deno_console"
-version = "0.183.0"
+version = "0.184.0"
 dependencies = [
  "deno_core",
 ]
 
 [[package]]
 name = "deno_core"
-version = "0.326.0"
+version = "0.327.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ed157162dc5320a2b46ffeeaec24788339df0f2437cfaea78a8d82696715ad7f"
+checksum = "eaf8dff204b9c2415deb47b9f30d4d38b0925d0d88f1f9074e8e76f59e6d7ded"
 dependencies = [
  "anyhow",
  "az",
@@ -1504,7 +1532,7 @@ dependencies = [
  "bit-set",
  "bit-vec",
  "bytes",
- "capacity_builder",
+ "capacity_builder 0.1.3",
  "cooked-waker",
  "deno_core_icudata",
  "deno_ops",
@@ -1536,7 +1564,7 @@ checksum = "fe4dccb6147bb3f3ba0c7a48e993bfeb999d2c2e47a81badee80e2b370c8d695"
 
 [[package]]
 name = "deno_cron"
-version = "0.63.0"
+version = "0.64.0"
 dependencies = [
  "anyhow",
  "async-trait",
@@ -1549,7 +1577,7 @@ dependencies = [
 
 [[package]]
 name = "deno_crypto"
-version = "0.197.0"
+version = "0.198.0"
 dependencies = [
  "aes",
  "aes-gcm",
@@ -1639,7 +1667,7 @@ dependencies = [
 
 [[package]]
 name = "deno_fetch"
-version = "0.207.0"
+version = "0.208.0"
 dependencies = [
  "base64 0.21.7",
  "bytes",
@@ -1651,6 +1679,7 @@ dependencies = [
  "dyn-clone",
  "error_reporter",
+ "fast-socks5",
  "h2 0.4.4",
  "hickory-resolver",
  "http 1.1.0",
  "http-body-util",
@@ -1667,14 +1696,14 @@ dependencies = [
  "tokio-rustls",
  "tokio-socks",
  "tokio-util",
- "tower",
+ "tower 0.5.2",
  "tower-http",
  "tower-service",
 ]
 
 [[package]]
 name = "deno_ffi"
-version = "0.170.0"
+version = "0.171.0"
 dependencies = [
  "deno_core",
  "deno_permissions",
@@ -1694,7 +1723,7 @@ dependencies = [
 
 [[package]]
 name = "deno_fs"
-version = "0.93.0"
+version = "0.94.0"
 dependencies = [
  "async-trait",
  "base32",
@@ -1717,12 +1746,13 @@ dependencies = [
 
 [[package]]
 name = "deno_graph"
-version = "0.86.3"
+version = "0.86.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fc78ed0b4bbcb4197300f0d6e7d1edc2d2c5019cdb9dedba7ff229158441885b"
+checksum = "f669d96d63841d9ba10f86b161d898678ce05bc1e3c9ee1c1f7449a68eed2b64"
 dependencies = [
  "anyhow",
  "async-trait",
+ "capacity_builder 0.5.0",
  "data-url",
  "deno_ast",
  "deno_semver",
@@ -1747,7 +1777,7 @@ dependencies = [
 
 [[package]]
 name = "deno_http"
-version = "0.181.0"
+version = "0.182.0"
 dependencies = [
  "async-compression",
  "async-trait",
@@ -1786,7 +1816,7 @@ dependencies = [
 
 [[package]]
 name = "deno_io"
-version = "0.93.0"
+version = "0.94.0"
 dependencies = [
  "async-trait",
  "deno_core",
@@ -1807,7 +1837,7 @@ dependencies = [
 
 [[package]]
 name = "deno_kv"
-version = "0.91.0"
+version = "0.92.0"
 dependencies = [
  "anyhow",
  "async-trait",
@@ -1857,9 +1887,9 @@ dependencies = [
 
 [[package]]
 name = "deno_lockfile"
-version = "0.23.2"
+version = "0.24.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "559c19feb00af0c34f0bd4a20e56e12463fafd5c5069d6005f3ce33008027eea"
+checksum = "632e835a53ed667d62fdd766c5780fe8361c831d3e3fbf1a760a0b7896657587"
 dependencies = [
  "deno_semver",
  "serde",
@@ -1880,7 +1910,7 @@ dependencies = [
 
 [[package]]
 name = "deno_napi"
-version = "0.114.0"
+version = "0.115.0"
 dependencies = [
  "deno_core",
  "deno_permissions",
@@ -1908,7 +1938,7 @@ dependencies = [
 
 [[package]]
 name = "deno_net"
-version = "0.175.0"
+version = "0.176.0"
 dependencies = [
  "deno_core",
  "deno_permissions",
@@ -1916,6 +1946,7 @@ dependencies = [
  "hickory-proto",
  "hickory-resolver",
  "pin-project",
+ "quinn",
  "rustls-tokio-stream",
  "serde",
  "socket2",
@@ -1925,7 +1956,7 @@ dependencies = [
 
 [[package]]
 name = "deno_node"
-version = "0.120.0"
+version = "0.122.0"
 dependencies = [
  "aead-gcm-stream",
  "aes",
@@ -2017,12 +2048,13 @@ dependencies = [
 
 [[package]]
 name = "deno_npm"
-version = "0.26.0"
+version = "0.27.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f2f125a5dba7839c46394a0a9c835da9fe60f5f412587ab4956a76492a1cc6a8"
+checksum = "5f818ad5dc4c206b50b5cfa6f10b4b94b127e15c8342c152768eba40c225ca23"
 dependencies = [
  "anyhow",
  "async-trait",
+ "capacity_builder 0.5.0",
  "deno_error",
  "deno_lockfile",
  "deno_semver",
  "futures",
@@ -2044,6 +2076,7 @@ dependencies = [
  "boxed_error",
  "deno_cache_dir",
  "deno_core",
+ "deno_error",
  "deno_npm",
  "deno_semver",
  "deno_unsync",
@@ -2065,9 +2098,9 @@ dependencies = [
 
 [[package]]
 name = "deno_ops"
-version = "0.202.0"
+version = "0.203.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4dd8ac1af251e292388e516dd339b9a3b982a6d1e7f8644c08e34671ca39003c"
+checksum = "b146ca74cac431843486ade58e2accc16c11315fb2c6934590a52a73c56b7ec3"
 dependencies = [
  "proc-macro-rules",
  "proc-macro2",
@@ -2081,10 +2114,11 @@ dependencies = [
 
 [[package]]
 name = "deno_package_json"
-version = "0.2.1"
+version = "0.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "80b0a3d81c592624a1ae15332a04b4dc2b7c163ef1dfc7c60171f736d1babdf5"
+checksum = "81d72db99fdebfc371d7be16972c18a47daa7a29cb5fbb3900ab2114b1f42d96"
 dependencies = [
  "boxed_error",
+ "deno_error",
  "deno_path_util",
  "deno_semver",
@@ -2111,7 +2145,7 @@ dependencies = [
 name = "deno_permissions"
 version = "0.43.0"
 dependencies = [
- "capacity_builder",
+ "capacity_builder 0.5.0",
  "deno_core",
  "deno_path_util",
  "deno_terminal 0.2.0",
@@ -2216,11 +2250,14 @@ dependencies = [
 
 [[package]]
 name = "deno_semver"
-version = "0.6.1"
+version = "0.7.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7d1259270d66a5e6d29bb75c9289656541874f79ae9ff6c9f1c790846d5c07ba"
+checksum = "4775271f9b5602482698f76d24ea9ed8ba27af7f587a7e9a876916300c542435"
 dependencies = [
+ "capacity_builder 0.5.0",
  "deno_error",
+ "ecow",
+ "hipstr",
  "monch",
  "once_cell",
  "serde",
@@ -2248,7 +2285,7 @@ dependencies = [
 
 [[package]]
 name = "deno_telemetry"
-version = "0.5.0"
+version = "0.6.0"
 dependencies = [
  "async-trait",
  "deno_core",
@@ -2289,7 +2326,7 @@ dependencies = [
 
 [[package]]
 name = "deno_tls"
-version = "0.170.0"
+version = "0.171.0"
 dependencies = [
  "deno_core",
  "deno_native_certs",
@@ -2322,7 +2359,7 @@ dependencies = [
  "serde_json",
  "tokio",
  "tokio-util",
- "tower",
+ "tower 0.4.13",
  "tracing",
 ]
 
@@ -2339,7 +2376,7 @@ dependencies = [
 
 [[package]]
 name = "deno_url"
-version = "0.183.0"
+version = "0.184.0"
 dependencies = [
  "deno_bench_util",
  "deno_console",
@@ -2351,7 +2388,7 @@ dependencies = [
 
 [[package]]
 name = "deno_web"
-version = "0.214.0"
+version = "0.215.0"
 dependencies = [
  "async-trait",
  "base64-simd 0.8.0",
@@ -2373,7 +2410,7 @@ dependencies = [
 
 [[package]]
 name = "deno_webgpu"
-version = "0.150.0"
+version = "0.151.0"
 dependencies = [
  "deno_core",
  "raw-window-handle",
@@ -2386,7 +2423,7 @@ dependencies = [
 
 [[package]]
 name = "deno_webidl"
-version = "0.183.0"
+version = "0.184.0"
 dependencies = [
  "deno_bench_util",
  "deno_core",
@@ -2394,7 +2431,7 @@ dependencies = [
 
 [[package]]
 name = "deno_websocket"
-version = "0.188.0"
+version = "0.189.0"
 dependencies = [
  "bytes",
  "deno_core",
@@ -2416,7 +2453,7 @@ dependencies = [
 
 [[package]]
 name = "deno_webstorage"
-version = "0.178.0"
+version = "0.179.0"
 dependencies = [
  "deno_core",
  "deno_web",
@@ -2887,6 +2924,15 @@ dependencies = [
  "spki",
 ]
 
+[[package]]
+name = "ecow"
+version = "0.2.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e42fc0a93992b20c58b99e59d61eaf1635a25bfbe49e4275c34ba0aee98119ba"
+dependencies = [
+ "serde",
+]
+
 [[package]]
 name = "ed25519"
 version = "2.2.3"
@@ -3823,6 +3869,17 @@ dependencies = [
  "tracing",
 ]
 
+[[package]]
+name = "hipstr"
+version = "0.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "97971ffc85d4c98de12e2608e992a43f5294ebb625fdb045b27c731b64c4c6d6"
+dependencies = [
+ "serde",
+ "serde_bytes",
+ "sptr",
+]
+
 [[package]]
 name = "hkdf"
 version = "0.12.4"
@@ -4023,9 +4080,9 @@ dependencies = [
 
 [[package]]
 name = "hyper-timeout"
-version = "0.5.1"
+version = "0.5.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3203a961e5c83b6f5498933e78b6b263e208c197b63e9c6c53cc82ffd3f63793"
+checksum = "2b90d566bffbce6a75bd8b09a05aa8c2cb1fabb6cb348f8840c9e4c90a0d83b0"
 dependencies = [
  "hyper 1.4.1",
  "hyper-util",
@@ -4552,9 +4609,9 @@ dependencies = [
 
 [[package]]
 name = "libc"
-version = "0.2.153"
+version = "0.2.168"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd"
+checksum = "5aaeb2981e0606ca11d79718f8bb01164f1d6ed75080182d3abf017e6d244b6d"
 
 [[package]]
 name = "libffi"
@@ -4930,7 +4987,7 @@ dependencies = [
 
 [[package]]
 name = "napi_sym"
-version = "0.113.0"
+version = "0.114.0"
 dependencies = [
  "quote",
  "serde",
@@ -5906,49 +5963,54 @@ dependencies = [
 
 [[package]]
 name = "quinn"
-version = "0.11.2"
+version = "0.11.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e4ceeeeabace7857413798eb1ffa1e9c905a9946a57d81fb69b4b71c4d8eb3ad"
+checksum = "62e96808277ec6f97351a2380e6c25114bc9e67037775464979f3037c92d05ef"
 dependencies = [
  "bytes",
  "pin-project-lite",
  "quinn-proto",
  "quinn-udp",
- "rustc-hash 1.1.0",
+ "rustc-hash 2.0.0",
  "rustls",
- "thiserror 1.0.64",
+ "socket2",
+ "thiserror 2.0.3",
  "tokio",
  "tracing",
 ]
 
 [[package]]
 name = "quinn-proto"
-version = "0.11.8"
+version = "0.11.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fadfaed2cd7f389d0161bb73eeb07b7b78f8691047a6f3e73caaeae55310a4a6"
+checksum = "a2fe5ef3495d7d2e377ff17b1a8ce2ee2ec2a18cde8b6ad6619d65d0701c135d"
 dependencies = [
  "bytes",
+ "getrandom",
  "rand",
  "ring",
  "rustc-hash 2.0.0",
  "rustls",
+ "rustls-pki-types",
  "slab",
- "thiserror 1.0.64",
+ "thiserror 2.0.3",
  "tinyvec",
  "tracing",
+ "web-time",
 ]
 
 [[package]]
 name = "quinn-udp"
-version = "0.5.2"
+version = "0.5.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9096629c45860fc7fb143e125eb826b5e721e10be3263160c7d60ca832cf8c46"
+checksum = "52cd4b1eff68bf27940dd39811292c49e007f4d0b4c357358dc9b0197be6b527"
 dependencies = [
+ "cfg_aliases 0.2.1",
  "libc",
  "once_cell",
  "socket2",
  "tracing",
- "windows-sys 0.52.0",
+ "windows-sys 0.59.0",
 ]
 
 [[package]]
@@ -6425,6 +6487,9 @@ name = "rustls-pki-types"
 version = "1.8.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "fc0a2ce646f8655401bb81e7927b812614bd5d91dbc968696be50603510fcaf0"
+dependencies = [
+ "web-time",
+]
 
 [[package]]
 name = "rustls-tokio-stream"
@@ -6708,9 +6773,9 @@ dependencies = [
 
 [[package]]
 name = "serde_v8"
-version = "0.235.0"
+version = "0.236.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d07afd8b67b4a442ecc2823038473ac0e9e5682de93c213323b60661afdd7eb4"
+checksum = "e23b3abce64010612f88f4ff689a959736f99eb3dc0dbf1c7903434b8bd8cda5"
 dependencies = [
  "num-bigint",
  "serde",
@@ -6986,6 +7051,12 @@ dependencies = [
  "der",
 ]
 
+[[package]]
+name = "sptr"
+version = "0.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3b9b39299b249ad65f3b7e96443bad61c02ca5cd3589f46cb6d610a0fd6c0d6a"
+
 [[package]]
 name = "sqlformat"
 version = "0.3.2"
@@ -7976,7 +8047,7 @@ dependencies = [
  "socket2",
  "tokio",
  "tokio-stream",
- "tower",
+ "tower 0.4.13",
  "tower-layer",
  "tower-service",
  "tracing",
@@ -8002,6 +8073,21 @@ dependencies = [
  "tracing",
 ]
 
+[[package]]
+name = "tower"
+version = "0.5.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9"
+dependencies = [
+ "futures-core",
+ "futures-util",
+ "pin-project-lite",
+ "sync_wrapper",
+ "tokio",
+ "tower-layer",
+ "tower-service",
+]
+
 [[package]]
 name = "tower-http"
 version = "0.6.1"
@@ -8030,9 +8116,9 @@ checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e"
 
 [[package]]
 name = "tower-service"
-version = "0.3.2"
+version = "0.3.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52"
+checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3"
 
 [[package]]
 name = "tracing"
@@ -8506,6 +8592,16 @@ dependencies = [
  "wasm-bindgen",
 ]
 
+[[package]]
+name = "web-time"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb"
+dependencies = [
+ "js-sys",
+ "wasm-bindgen",
+]
+
 [[package]]
 name = "webpki-root-certs"
 version = "0.26.6"
@@ -8533,7 +8629,7 @@ dependencies = [
  "arrayvec",
  "bit-vec",
  "bitflags 2.6.0",
- "cfg_aliases",
+ "cfg_aliases 0.1.1",
  "codespan-reporting",
  "document-features",
  "indexmap 2.3.0",
@@ -8565,7 +8661,7 @@ dependencies = [
  "bit-set",
  "bitflags 2.6.0",
  "block",
- "cfg_aliases",
+ "cfg_aliases 0.1.1",
  "core-graphics-types",
  "d3d12",
  "glow",
Cargo.toml (68 changes)

@@ -48,19 +48,19 @@ repository = "https://github.com/denoland/deno"
 
 [workspace.dependencies]
 deno_ast = { version = "=0.44.0", features = ["transpiling"] }
-deno_core = { version = "0.326.0" }
+deno_core = { version = "0.327.0" }
 
-deno_bench_util = { version = "0.177.0", path = "./bench_util" }
-deno_config = { version = "=0.39.3", features = ["workspace", "sync"] }
-deno_lockfile = "=0.23.2"
+deno_bench_util = { version = "0.178.0", path = "./bench_util" }
+deno_config = { version = "=0.41.0", features = ["workspace", "sync"] }
+deno_lockfile = "=0.24.0"
 deno_media_type = { version = "0.2.0", features = ["module_specifier"] }
-deno_npm = "=0.26.0"
+deno_npm = "=0.27.0"
 deno_path_util = "=0.2.2"
 deno_permissions = { version = "0.43.0", path = "./runtime/permissions" }
 deno_runtime = { version = "0.192.0", path = "./runtime" }
-deno_semver = "=0.6.1"
+deno_semver = "=0.7.1"
 deno_terminal = "0.2.0"
-napi_sym = { version = "0.113.0", path = "./ext/napi/sym" }
+napi_sym = { version = "0.114.0", path = "./ext/napi/sym" }
 test_util = { package = "test_server", path = "./tests/util/server" }
 
 denokv_proto = "0.8.4"
@@ -69,29 +69,29 @@ denokv_remote = "0.8.4"
 denokv_sqlite = { default-features = false, version = "0.8.4" }
 
 # exts
-deno_broadcast_channel = { version = "0.177.0", path = "./ext/broadcast_channel" }
-deno_cache = { version = "0.115.0", path = "./ext/cache" }
-deno_canvas = { version = "0.52.0", path = "./ext/canvas" }
-deno_console = { version = "0.183.0", path = "./ext/console" }
-deno_cron = { version = "0.63.0", path = "./ext/cron" }
-deno_crypto = { version = "0.197.0", path = "./ext/crypto" }
-deno_fetch = { version = "0.207.0", path = "./ext/fetch" }
-deno_ffi = { version = "0.170.0", path = "./ext/ffi" }
-deno_fs = { version = "0.93.0", path = "./ext/fs" }
-deno_http = { version = "0.181.0", path = "./ext/http" }
-deno_io = { version = "0.93.0", path = "./ext/io" }
-deno_kv = { version = "0.91.0", path = "./ext/kv" }
-deno_napi = { version = "0.114.0", path = "./ext/napi" }
-deno_net = { version = "0.175.0", path = "./ext/net" }
-deno_node = { version = "0.120.0", path = "./ext/node" }
-deno_telemetry = { version = "0.5.0", path = "./ext/telemetry" }
-deno_tls = { version = "0.170.0", path = "./ext/tls" }
-deno_url = { version = "0.183.0", path = "./ext/url" }
-deno_web = { version = "0.214.0", path = "./ext/web" }
-deno_webgpu = { version = "0.150.0", path = "./ext/webgpu" }
-deno_webidl = { version = "0.183.0", path = "./ext/webidl" }
-deno_websocket = { version = "0.188.0", path = "./ext/websocket" }
-deno_webstorage = { version = "0.178.0", path = "./ext/webstorage" }
+deno_broadcast_channel = { version = "0.178.0", path = "./ext/broadcast_channel" }
+deno_cache = { version = "0.116.0", path = "./ext/cache" }
+deno_canvas = { version = "0.53.0", path = "./ext/canvas" }
+deno_console = { version = "0.184.0", path = "./ext/console" }
+deno_cron = { version = "0.64.0", path = "./ext/cron" }
+deno_crypto = { version = "0.198.0", path = "./ext/crypto" }
+deno_fetch = { version = "0.208.0", path = "./ext/fetch" }
+deno_ffi = { version = "0.171.0", path = "./ext/ffi" }
+deno_fs = { version = "0.94.0", path = "./ext/fs" }
+deno_http = { version = "0.182.0", path = "./ext/http" }
+deno_io = { version = "0.94.0", path = "./ext/io" }
+deno_kv = { version = "0.92.0", path = "./ext/kv" }
+deno_napi = { version = "0.115.0", path = "./ext/napi" }
+deno_net = { version = "0.176.0", path = "./ext/net" }
+deno_node = { version = "0.122.0", path = "./ext/node" }
+deno_telemetry = { version = "0.6.0", path = "./ext/telemetry" }
+deno_tls = { version = "0.171.0", path = "./ext/tls" }
+deno_url = { version = "0.184.0", path = "./ext/url" }
+deno_web = { version = "0.215.0", path = "./ext/web" }
+deno_webgpu = { version = "0.151.0", path = "./ext/webgpu" }
+deno_webidl = { version = "0.184.0", path = "./ext/webidl" }
+deno_websocket = { version = "0.189.0", path = "./ext/websocket" }
+deno_webstorage = { version = "0.179.0", path = "./ext/webstorage" }
 
 # resolvers
 deno_npm_cache = { version = "0.3.0", path = "./resolvers/npm_cache" }
@@ -108,7 +108,7 @@ boxed_error = "0.2.3"
 brotli = "6.0.0"
 bytes = "1.4.0"
 cache_control = "=0.2.0"
-capacity_builder = "0.1.3"
+capacity_builder = "0.5.0"
 cbc = { version = "=0.1.2", features = ["alloc"] }
 # Note: Do not use the "clock" feature of chrono, as it links us to CoreFoundation on macOS.
 # Instead use util::time::utc_now()
@@ -120,7 +120,7 @@ data-encoding = "2.3.3"
 data-url = "=0.3.1"
 deno_cache_dir = "=0.15.0"
 deno_error = "=0.5.2"
-deno_package_json = { version = "0.2.1", default-features = false }
+deno_package_json = { version = "0.3.0", default-features = false }
 deno_unsync = "0.4.2"
 dlopen2 = "0.6.1"
 ecb = "=0.1.2"
@@ -149,7 +149,7 @@ indexmap = { version = "2", features = ["serde"] }
 ipnet = "2.3"
 jsonc-parser = { version = "=0.26.2", features = ["serde"] }
 lazy-regex = "3"
-libc = "0.2.126"
+libc = "0.2.168"
 libz-sys = { version = "1.1.20", default-features = false }
 log = { version = "0.4.20", features = ["kv"] }
 lsp-types = "=0.97.0" # used by tower-lsp and "proposed" feature is unstable in patch releases
@@ -202,7 +202,7 @@ tokio-metrics = { version = "0.3.0", features = ["rt"] }
 tokio-rustls = { version = "0.26.0", default-features = false, features = ["ring", "tls12"] }
 tokio-socks = "0.5.1"
 tokio-util = "0.7.4"
-tower = { version = "0.4.13", default-features = false, features = ["util"] }
+tower = { version = "0.5.2", default-features = false, features = ["retry", "util"] }
 tower-http = { version = "0.6.1", features = ["decompression-br", "decompression-gzip"] }
 tower-lsp = { package = "deno_tower_lsp", version = "0.1.0", features = ["proposed"] }
 tower-service = "0.3.2"
@@ -2,7 +2,7 @@
 
 [package]
 name = "deno_bench_util"
-version = "0.177.0"
+version = "0.178.0"
 authors.workspace = true
 edition.workspace = true
 license.workspace = true
@@ -74,7 +74,7 @@ deno_config.workspace = true
 deno_core = { workspace = true, features = ["include_js_files_for_snapshotting"] }
 deno_doc = { version = "=0.161.3", features = ["rust", "comrak"] }
 deno_error.workspace = true
-deno_graph = { version = "=0.86.3" }
+deno_graph = { version = "=0.86.5" }
 deno_lint = { version = "=0.68.2", features = ["docs"] }
 deno_lockfile.workspace = true
 deno_npm.workspace = true
@@ -31,6 +31,7 @@ use deno_npm::resolution::ValidSerializedNpmResolutionSnapshot;
 use deno_npm::NpmSystemInfo;
 use deno_path_util::normalize_path;
 use deno_semver::npm::NpmPackageReqReference;
+use deno_semver::StackString;
 use deno_telemetry::OtelConfig;
 use deno_telemetry::OtelRuntimeConfig;
 use import_map::resolve_import_map_value_from_specifier;
@@ -992,24 +993,24 @@ impl CliOptions {
     // https://nodejs.org/api/process.html
     match target.as_str() {
       "aarch64-apple-darwin" => NpmSystemInfo {
-        os: "darwin".to_string(),
-        cpu: "arm64".to_string(),
+        os: "darwin".into(),
+        cpu: "arm64".into(),
       },
       "aarch64-unknown-linux-gnu" => NpmSystemInfo {
-        os: "linux".to_string(),
-        cpu: "arm64".to_string(),
+        os: "linux".into(),
+        cpu: "arm64".into(),
       },
       "x86_64-apple-darwin" => NpmSystemInfo {
-        os: "darwin".to_string(),
-        cpu: "x64".to_string(),
+        os: "darwin".into(),
+        cpu: "x64".into(),
       },
       "x86_64-unknown-linux-gnu" => NpmSystemInfo {
-        os: "linux".to_string(),
-        cpu: "x64".to_string(),
+        os: "linux".into(),
+        cpu: "x64".into(),
       },
       "x86_64-pc-windows-msvc" => NpmSystemInfo {
-        os: "win32".to_string(),
-        cpu: "x64".to_string(),
+        os: "win32".into(),
+        cpu: "x64".into(),
       },
       value => {
         log::warn!(
@@ -1363,9 +1364,9 @@ impl CliOptions {
 
     Ok(DenoLintConfig {
       default_jsx_factory: (!transpile_options.jsx_automatic)
-        .then(|| transpile_options.jsx_factory.clone()),
+        .then_some(transpile_options.jsx_factory),
       default_jsx_fragment_factory: (!transpile_options.jsx_automatic)
-        .then(|| transpile_options.jsx_fragment_factory.clone()),
+        .then_some(transpile_options.jsx_fragment_factory),
     })
   }
 
@@ -1946,15 +1947,17 @@ pub fn has_flag_env_var(name: &str) -> bool {
 pub fn npm_pkg_req_ref_to_binary_command(
   req_ref: &NpmPackageReqReference,
 ) -> String {
-  let binary_name = req_ref.sub_path().unwrap_or(req_ref.req().name.as_str());
-  binary_name.to_string()
+  req_ref
+    .sub_path()
+    .map(|s| s.to_string())
+    .unwrap_or_else(|| req_ref.req().name.to_string())
 }
 
 pub fn config_to_deno_graph_workspace_member(
   config: &ConfigFile,
 ) -> Result<deno_graph::WorkspaceMember, AnyError> {
-  let name = match &config.json.name {
-    Some(name) => name.clone(),
+  let name: StackString = match &config.json.name {
+    Some(name) => name.as_str().into(),
     None => bail!("Missing 'name' field in config file."),
   };
   let version = match &config.json.version {
@@ -11,19 +11,20 @@ use deno_package_json::PackageJsonDepValueParseError;
 use deno_package_json::PackageJsonDepWorkspaceReq;
 use deno_semver::npm::NpmPackageReqReference;
 use deno_semver::package::PackageReq;
+use deno_semver::StackString;
 use deno_semver::VersionReq;
 use thiserror::Error;
 
 #[derive(Debug)]
 pub struct InstallNpmRemotePkg {
-  pub alias: Option<String>,
+  pub alias: Option<StackString>,
   pub base_dir: PathBuf,
   pub req: PackageReq,
 }
 
 #[derive(Debug)]
 pub struct InstallNpmWorkspacePkg {
-  pub alias: Option<String>,
+  pub alias: Option<StackString>,
   pub target_dir: PathBuf,
 }
 
@@ -31,7 +32,7 @@ pub struct InstallNpmWorkspacePkg {
 #[error("Failed to install '{}'\n    at {}", alias, location)]
 pub struct PackageJsonDepValueParseWithLocationError {
   pub location: Url,
-  pub alias: String,
+  pub alias: StackString,
   #[source]
   pub source: PackageJsonDepValueParseError,
 }
@@ -100,10 +101,8 @@ impl NpmInstallDepsProvider {
         let mut pkg_pkgs = Vec::with_capacity(
           deps.dependencies.len() + deps.dev_dependencies.len(),
         );
-        for (alias, dep) in deps
-          .dependencies
-          .into_iter()
-          .chain(deps.dev_dependencies.into_iter())
+        for (alias, dep) in
+          deps.dependencies.iter().chain(deps.dev_dependencies.iter())
         {
           let dep = match dep {
            Ok(dep) => dep,
@@ -111,8 +110,8 @@ impl NpmInstallDepsProvider {
              pkg_json_dep_errors.push(
                PackageJsonDepValueParseWithLocationError {
                  location: pkg_json.specifier(),
-                 alias,
-                 source: err,
+                 alias: alias.clone(),
+                 source: err.clone(),
                },
              );
              continue;
@@ -121,28 +120,28 @@ impl NpmInstallDepsProvider {
          match dep {
            PackageJsonDepValue::Req(pkg_req) => {
              let workspace_pkg = workspace_npm_pkgs.iter().find(|pkg| {
-               pkg.matches_req(&pkg_req)
+               pkg.matches_req(pkg_req)
                // do not resolve to the current package
                && pkg.pkg_json.path != pkg_json.path
              });
 
              if let Some(pkg) = workspace_pkg {
                workspace_pkgs.push(InstallNpmWorkspacePkg {
-                 alias: Some(alias),
+                 alias: Some(alias.clone()),
                  target_dir: pkg.pkg_json.dir_path().to_path_buf(),
                });
              } else {
                pkg_pkgs.push(InstallNpmRemotePkg {
-                 alias: Some(alias),
+                 alias: Some(alias.clone()),
                  base_dir: pkg_json.dir_path().to_path_buf(),
-                 req: pkg_req,
+                 req: pkg_req.clone(),
                });
              }
            }
            PackageJsonDepValue::Workspace(workspace_version_req) => {
              let version_req = match workspace_version_req {
                PackageJsonDepWorkspaceReq::VersionReq(version_req) => {
-                 version_req
+                 version_req.clone()
                }
                PackageJsonDepWorkspaceReq::Tilde
                | PackageJsonDepWorkspaceReq::Caret => {
@@ -150,10 +149,10 @@ impl NpmInstallDepsProvider {
                }
              };
              if let Some(pkg) = workspace_npm_pkgs.iter().find(|pkg| {
-               pkg.matches_name_and_version_req(&alias, &version_req)
+               pkg.matches_name_and_version_req(alias, &version_req)
              }) {
                workspace_pkgs.push(InstallNpmWorkspacePkg {
-                 alias: Some(alias),
+                 alias: Some(alias.clone()),
                  target_dir: pkg.pkg_json.dir_path().to_path_buf(),
                });
              }
cli/emit.rs (35 changes)

@@ -5,6 +5,7 @@ use crate::cache::FastInsecureHasher;
 use crate::cache::ParsedSourceCache;
 use crate::resolver::CjsTracker;
 
+use deno_ast::EmittedSourceText;
 use deno_ast::ModuleKind;
 use deno_ast::SourceMapOption;
 use deno_ast::SourceRange;
@@ -132,6 +133,7 @@ impl Emitter {
             &transpile_and_emit_options.0,
             &transpile_and_emit_options.1,
           )
+          .map(|r| r.text)
         }
       })
       .await
@@ -166,7 +168,8 @@ impl Emitter {
           source.clone(),
           &self.transpile_and_emit_options.0,
           &self.transpile_and_emit_options.1,
-        )?;
+        )?
+        .text;
         helper.post_emit_parsed_source(
           specifier,
          &transpiled_source,
@@ -177,6 +180,31 @@ impl Emitter {
     }
   }
 
+  pub fn emit_parsed_source_for_deno_compile(
+    &self,
+    specifier: &ModuleSpecifier,
+    media_type: MediaType,
+    module_kind: deno_ast::ModuleKind,
+    source: &Arc<str>,
+  ) -> Result<(String, String), AnyError> {
+    let mut emit_options = self.transpile_and_emit_options.1.clone();
+    emit_options.inline_sources = false;
+    emit_options.source_map = SourceMapOption::Separate;
+    // strip off the path to have more deterministic builds as we don't care
+    // about the source name because we manually provide the source map to v8
+    emit_options.source_map_base = Some(deno_path_util::url_parent(specifier));
+    let source = EmitParsedSourceHelper::transpile(
+      &self.parsed_source_cache,
+      specifier,
+      media_type,
+      module_kind,
+      source.clone(),
+      &self.transpile_and_emit_options.0,
+      &emit_options,
+    )?;
+    Ok((source.text, source.source_map.unwrap()))
+  }
+
   /// Expects a file URL, panics otherwise.
   pub async fn load_and_emit_for_hmr(
     &self,
@@ -282,7 +310,7 @@ impl<'a> EmitParsedSourceHelper<'a> {
     source: Arc<str>,
     transpile_options: &deno_ast::TranspileOptions,
     emit_options: &deno_ast::EmitOptions,
-  ) -> Result<String, AnyError> {
+  ) -> Result<EmittedSourceText, AnyError> {
     // nothing else needs the parsed source at this point, so remove from
     // the cache in order to not transpile owned
     let parsed_source = parsed_source_cache
@@ -302,8 +330,7 @@ impl<'a> EmitParsedSourceHelper<'a> {
         source
       }
     };
-    debug_assert!(transpiled_source.source_map.is_none());
-    Ok(transpiled_source.text)
+    Ok(transpiled_source)
   }
 
   pub fn post_emit_parsed_source(
@@ -52,6 +52,7 @@ use deno_runtime::deno_node;
 use deno_runtime::deno_permissions::PermissionsContainer;
 use deno_semver::jsr::JsrDepPackageReq;
 use deno_semver::package::PackageNv;
+use deno_semver::SmallStackString;
 use import_map::ImportMapError;
 use node_resolver::InNpmPackageChecker;
 use std::collections::HashSet;
@@ -680,7 +681,7 @@ impl ModuleGraphBuilder {
       for (from, to) in graph.packages.mappings() {
         lockfile.insert_package_specifier(
           JsrDepPackageReq::jsr(from.clone()),
-          to.version.to_string(),
+          to.version.to_custom_string::<SmallStackString>(),
         );
       }
     }
@@ -145,9 +145,7 @@ impl HttpClient {
   }
 
   pub fn get(&self, url: Url) -> Result<RequestBuilder, http::Error> {
-    let body = http_body_util::Empty::new()
-      .map_err(|never| match never {})
-      .boxed();
+    let body = deno_fetch::ReqBody::empty();
     let mut req = http::Request::new(body);
     *req.uri_mut() = url.as_str().parse()?;
     Ok(RequestBuilder {
@@ -179,9 +177,7 @@ impl HttpClient {
     S: serde::Serialize,
   {
     let json = deno_core::serde_json::to_vec(ser)?;
-    let body = http_body_util::Full::new(json.into())
-      .map_err(|never| match never {})
-      .boxed();
+    let body = deno_fetch::ReqBody::full(json.into());
     let builder = self.post(url, body)?;
     Ok(builder.header(
       http::header::CONTENT_TYPE,
@@ -194,9 +190,7 @@ impl HttpClient {
     url: &Url,
     headers: HeaderMap,
   ) -> Result<http::Response<ResBody>, SendError> {
-    let body = http_body_util::Empty::new()
-      .map_err(|never| match never {})
-      .boxed();
+    let body = deno_fetch::ReqBody::empty();
     let mut request = http::Request::new(body);
     *request.uri_mut() = http::Uri::try_from(url.as_str())?;
     *request.headers_mut() = headers;
cli/js/40_lint.js (new file, 1097 lines)
File diff suppressed because it is too large.

cli/js/40_lint_selector.js (new file, 1014 lines)
File diff suppressed because it is too large.

cli/js/40_lint_types.d.ts (vendored, new file, 132 lines)
@@ -0,0 +1,132 @@
+// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
+
+export interface NodeFacade {
+  type: string;
+  range: [number, number];
+  [key: string]: unknown;
+}
+
+export interface AstContext {
+  buf: Uint8Array;
+  strTable: Map<number, string>;
+  strTableOffset: number;
+  rootOffset: number;
+  nodes: Map<number, NodeFacade>;
+  strByType: number[];
+  strByProp: number[];
+  typeByStr: Map<string, number>;
+  propByStr: Map<string, number>;
+  matcher: MatchContext;
+}
+
+// TODO(@marvinhagemeister) Remove once we land "official" types
+export interface RuleContext {
+  id: string;
+}
+
+// TODO(@marvinhagemeister) Remove once we land "official" types
+export interface LintRule {
+  create(ctx: RuleContext): Record<string, (node: unknown) => void>;
+  destroy?(ctx: RuleContext): void;
+}
+
+// TODO(@marvinhagemeister) Remove once we land "official" types
+export interface LintPlugin {
+  name: string;
+  rules: Record<string, LintRule>;
+}
+
+export interface LintState {
+  plugins: LintPlugin[];
+  installedPlugins: Set<string>;
+}
+
+export type VisitorFn = (node: unknown) => void;
+
+export interface CompiledVisitor {
+  matcher: (ctx: MatchContext, offset: number) => boolean;
+  info: { enter: VisitorFn; exit: VisitorFn };
+}
+
+export interface AttrExists {
+  type: 3;
+  prop: number[];
+}
+
+export interface AttrBin {
+  type: 4;
+  prop: number[];
+  op: number;
+  // deno-lint-ignore no-explicit-any
+  value: any;
+}
+
+export type AttrSelector = AttrExists | AttrBin;
+
+export interface ElemSelector {
+  type: 1;
+  wildcard: boolean;
+  elem: number;
+}
+
+export interface PseudoNthChild {
+  type: 5;
+  op: string | null;
+  step: number;
+  stepOffset: number;
+  of: Selector | null;
+  repeat: boolean;
+}
+
+export interface PseudoHas {
+  type: 6;
+  selectors: Selector[];
+}
+export interface PseudoNot {
+  type: 7;
+  selectors: Selector[];
+}
+export interface PseudoFirstChild {
+  type: 8;
+}
+export interface PseudoLastChild {
+  type: 9;
+}
+
+export interface Relation {
+  type: 2;
+  op: number;
+}
+
+export type Selector = Array<
+  | ElemSelector
+  | Relation
+  | AttrExists
+  | AttrBin
+  | PseudoNthChild
+  | PseudoNot
+  | PseudoHas
+  | PseudoFirstChild
+  | PseudoLastChild
+>;
+
+export interface SelectorParseCtx {
+  root: Selector;
+  current: Selector;
+}
+
+export interface MatchContext {
+  getFirstChild(id: number): number;
+  getLastChild(id: number): number;
+  getSiblings(id: number): number[];
+  getParent(id: number): number;
+  getType(id: number): number;
+  hasAttrPath(id: number, propIds: number[], idx: number): boolean;
+  getAttrPathValue(id: number, propIds: number[], idx: number): unknown;
+}
+
+export type NextFn = (ctx: MatchContext, id: number) => boolean;
+export type MatcherFn = (ctx: MatchContext, id: number) => boolean;
+export type TransformFn = (value: string) => number;
+
+export {};
@@ -36,6 +36,8 @@ use deno_semver::package::PackageNv;
 use deno_semver::package::PackageNvReference;
 use deno_semver::package::PackageReq;
 use deno_semver::package::PackageReqReference;
+use deno_semver::SmallStackString;
+use deno_semver::StackString;
 use deno_semver::Version;
 use import_map::ImportMap;
 use node_resolver::NodeResolutionKind;
@@ -278,9 +280,16 @@ impl<'a> TsResponseImportMapper<'a> {
     {
       let mut segments = jsr_path.split('/');
       let name = if jsr_path.starts_with('@') {
-        format!("{}/{}", segments.next()?, segments.next()?)
+        let scope = segments.next()?;
+        let name = segments.next()?;
+        capacity_builder::StringBuilder::<StackString>::build(|builder| {
+          builder.append(scope);
+          builder.append("/");
+          builder.append(name);
+        })
+        .unwrap()
       } else {
-        segments.next()?.to_string()
+        StackString::from(segments.next()?)
       };
       let version = Version::parse_standard(segments.next()?).ok()?;
       let nv = PackageNv { name, version };
@@ -290,7 +299,9 @@ impl<'a> TsResponseImportMapper<'a> {
         &path,
         Some(&self.file_referrer),
       )?;
-      let sub_path = (export != ".").then_some(export);
+      let sub_path = (export != ".")
+        .then_some(export)
+        .map(SmallStackString::from_string);
       let mut req = None;
       req = req.or_else(|| {
         let import_map = self.maybe_import_map?;
@@ -603,18 +614,24 @@ fn try_reverse_map_package_json_exports(
 /// For a set of tsc changes, scan them for any that contain something that looks
 /// like an import and rewrite the import specifier to include the extension
 pub fn fix_ts_import_changes(
-  referrer: &ModuleSpecifier,
-  resolution_mode: ResolutionMode,
   changes: &[tsc::FileTextChanges],
   language_server: &language_server::Inner,
 ) -> Result<Vec<tsc::FileTextChanges>, AnyError> {
-  let import_mapper = language_server.get_ts_response_import_mapper(referrer);
   let mut r = Vec::new();
   for change in changes {
+    let Ok(referrer) = ModuleSpecifier::parse(&change.file_name) else {
+      continue;
+    };
+    let referrer_doc = language_server.get_asset_or_document(&referrer).ok();
+    let resolution_mode = referrer_doc
+      .as_ref()
+      .map(|d| d.resolution_mode())
+      .unwrap_or(ResolutionMode::Import);
+    let import_mapper =
+      language_server.get_ts_response_import_mapper(&referrer);
     let mut text_changes = Vec::new();
     for text_change in &change.text_changes {
       let lines = text_change.new_text.split('\n');
 
       let new_lines: Vec<String> = lines
        .map(|line| {
          // This assumes that there's only one import per line.
@@ -622,7 +639,7 @@ pub fn fix_ts_import_changes(
          let specifier =
            captures.iter().skip(1).find_map(|s| s).unwrap().as_str();
          if let Some(new_specifier) = import_mapper
-           .check_unresolved_specifier(specifier, referrer, resolution_mode)
+           .check_unresolved_specifier(specifier, &referrer, resolution_mode)
          {
            line.replace(specifier, &new_specifier)
          } else {
@@ -251,6 +251,13 @@ impl AssetOrDocument {
   pub fn document_lsp_version(&self) -> Option<i32> {
     self.document().and_then(|d| d.maybe_lsp_version())
   }
+
+  pub fn resolution_mode(&self) -> ResolutionMode {
+    match self {
+      AssetOrDocument::Asset(_) => ResolutionMode::Import,
+      AssetOrDocument::Document(d) => d.resolution_mode(),
+    }
+  }
 }
 
 type ModuleResult = Result<deno_graph::JsModule, deno_graph::ModuleGraphError>;
@@ -18,6 +18,7 @@ use deno_graph::ModuleSpecifier;
 use deno_semver::jsr::JsrPackageReqReference;
 use deno_semver::package::PackageNv;
 use deno_semver::package::PackageReq;
+use deno_semver::StackString;
 use deno_semver::Version;
 use serde::Deserialize;
 use std::collections::HashMap;
@@ -33,8 +34,8 @@ pub struct JsrCacheResolver {
   /// The `module_graph` fields of the version infos should be forcibly absent.
   /// It can be large and we don't want to store it.
   info_by_nv: DashMap<PackageNv, Option<Arc<JsrPackageVersionInfo>>>,
-  info_by_name: DashMap<String, Option<Arc<JsrPackageInfo>>>,
-  workspace_scope_by_name: HashMap<String, ModuleSpecifier>,
+  info_by_name: DashMap<StackString, Option<Arc<JsrPackageInfo>>>,
+  workspace_scope_by_name: HashMap<StackString, ModuleSpecifier>,
   cache: Arc<dyn HttpCache>,
 }
@@ -59,7 +60,7 @@ impl JsrCacheResolver {
         continue;
       };
       let nv = PackageNv {
-        name: jsr_pkg_config.name.clone(),
+        name: jsr_pkg_config.name.as_str().into(),
         version: version.clone(),
       };
       info_by_name.insert(
@@ -125,8 +126,8 @@ impl JsrCacheResolver {
       return nv.value().clone();
     }
     let maybe_get_nv = || {
-      let name = req.name.clone();
-      let package_info = self.package_info(&name)?;
+      let name = &req.name;
+      let package_info = self.package_info(name)?;
       // Find the first matching version of the package which is cached.
       let mut versions = package_info.versions.keys().collect::<Vec<_>>();
       versions.sort();
@@ -144,7 +145,10 @@ impl JsrCacheResolver {
          self.package_version_info(&nv).is_some()
        })
        .cloned()?;
-      Some(PackageNv { name, version })
+      Some(PackageNv {
+        name: name.clone(),
+        version,
+      })
     };
     let nv = maybe_get_nv();
     self.nv_by_req.insert(req.clone(), nv.clone());
@@ -216,7 +220,10 @@ impl JsrCacheResolver {
     None
   }
 
-  pub fn package_info(&self, name: &str) -> Option<Arc<JsrPackageInfo>> {
+  pub fn package_info(
+    &self,
+    name: &StackString,
+  ) -> Option<Arc<JsrPackageInfo>> {
     if let Some(info) = self.info_by_name.get(name) {
       return info.value().clone();
     }
@@ -226,7 +233,7 @@ impl JsrCacheResolver {
       serde_json::from_slice::<JsrPackageInfo>(&meta_bytes).ok()
     };
     let info = read_cached_package_info().map(Arc::new);
-    self.info_by_name.insert(name.to_string(), info.clone());
+    self.info_by_name.insert(name.clone(), info.clone());
     info
   }
@@ -1855,20 +1855,12 @@ impl Inner {
       }
 
       let changes = if code_action_data.fix_id == "fixMissingImport" {
-        fix_ts_import_changes(
-          &code_action_data.specifier,
-          maybe_asset_or_doc
-            .as_ref()
-            .and_then(|d| d.document())
-            .map(|d| d.resolution_mode())
-            .unwrap_or(ResolutionMode::Import),
-          &combined_code_actions.changes,
-          self,
-        )
-        .map_err(|err| {
-          error!("Unable to remap changes: {:#}", err);
-          LspError::internal_error()
-        })?
+        fix_ts_import_changes(&combined_code_actions.changes, self).map_err(
+          |err| {
+            error!("Unable to remap changes: {:#}", err);
+            LspError::internal_error()
+          },
+        )?
       } else {
        combined_code_actions.changes
      };
@@ -1912,20 +1904,16 @@ impl Inner {
          asset_or_doc.scope().cloned(),
        )
        .await?;
-      if kind_suffix == ".rewrite.function.returnType" {
-        refactor_edit_info.edits = fix_ts_import_changes(
-          &action_data.specifier,
-          asset_or_doc
-            .document()
-            .map(|d| d.resolution_mode())
-            .unwrap_or(ResolutionMode::Import),
-          &refactor_edit_info.edits,
-          self,
-        )
-        .map_err(|err| {
-          error!("Unable to remap changes: {:#}", err);
-          LspError::internal_error()
-        })?
+      if kind_suffix == ".rewrite.function.returnType"
+        || kind_suffix == ".move.newFile"
+      {
+        refactor_edit_info.edits =
+          fix_ts_import_changes(&refactor_edit_info.edits, self).map_err(
+            |err| {
+              error!("Unable to remap changes: {:#}", err);
+              LspError::internal_error()
+            },
+          )?
      }
      code_action.edit = refactor_edit_info.to_workspace_edit(self)?;
      code_action
@@ -3793,7 +3781,7 @@ impl Inner {
      for (name, command) in scripts {
        result.push(TaskDefinition {
          name: name.clone(),
-          command: command.clone(),
+          command: Some(command.clone()),
          source_uri: url_to_uri(&package_json.specifier())
            .map_err(|_| LspError::internal_error())?,
        });
@@ -14,7 +14,7 @@ pub const LATEST_DIAGNOSTIC_BATCH_INDEX: &str =
 #[serde(rename_all = "camelCase")]
 pub struct TaskDefinition {
   pub name: String,
-  pub command: String,
+  pub command: Option<String>,
   pub source_uri: lsp::Uri,
 }
 
@@ -67,7 +67,9 @@ pub mod tests {
       &self,
       nv: &PackageNv,
     ) -> Result<Arc<Vec<String>>, AnyError> {
-      let Some(exports_by_version) = self.package_versions.get(&nv.name) else {
+      let Some(exports_by_version) =
+        self.package_versions.get(nv.name.as_str())
+      else {
        return Err(anyhow!("Package not found."));
      };
      let Some(exports) = exports_by_version.get(&nv.version) else {
@@ -996,7 +996,7 @@ impl<TGraphContainer: ModuleGraphContainer> ModuleLoader
     std::future::ready(()).boxed_local()
   }
 
-  fn get_source_map(&self, file_name: &str) -> Option<Vec<u8>> {
+  fn get_source_map(&self, file_name: &str) -> Option<Cow<[u8]>> {
     let specifier = resolve_url(file_name).ok()?;
     match specifier.scheme() {
       // we should only be looking for emits for schemes that denote external
@@ -1008,7 +1008,7 @@ impl<TGraphContainer: ModuleGraphContainer> ModuleLoader
       .0
       .load_prepared_module_for_source_map_sync(&specifier)
       .ok()??;
-    source_map_from_code(source.code.as_bytes())
+    source_map_from_code(source.code.as_bytes()).map(Cow::Owned)
   }
 
   fn get_source_mapped_source_line(
@@ -560,11 +560,11 @@ impl ManagedCliNpmResolver {
     &self,
   ) -> Result<(), Box<PackageJsonDepValueParseWithLocationError>> {
     for err in self.npm_install_deps_provider.pkg_json_dep_errors() {
-      match &err.source {
-        deno_package_json::PackageJsonDepValueParseError::VersionReq(_) => {
+      match err.source.as_kind() {
+        deno_package_json::PackageJsonDepValueParseErrorKind::VersionReq(_) => {
          return Err(Box::new(err.clone()));
        }
-        deno_package_json::PackageJsonDepValueParseError::Unsupported {
+        deno_package_json::PackageJsonDepValueParseErrorKind::Unsupported {
          ..
        } => {
          // only warn for this one
@@ -4,6 +4,7 @@ use std::collections::HashMap;
 use std::collections::HashSet;
 use std::sync::Arc;
 
+use capacity_builder::StringBuilder;
 use deno_core::error::AnyError;
 use deno_lockfile::NpmPackageDependencyLockfileInfo;
 use deno_lockfile::NpmPackageLockfileInfo;
@@ -24,6 +25,7 @@ use deno_npm::NpmSystemInfo;
 use deno_semver::jsr::JsrDepPackageReq;
 use deno_semver::package::PackageNv;
 use deno_semver::package::PackageReq;
+use deno_semver::SmallStackString;
 use deno_semver::VersionReq;
 
 use crate::args::CliLockfile;
@@ -336,7 +338,13 @@ fn populate_lockfile_from_snapshot(
     let id = &snapshot.resolve_package_from_deno_module(nv).unwrap().id;
     lockfile.insert_package_specifier(
       JsrDepPackageReq::npm(package_req.clone()),
-      format!("{}{}", id.nv.version, id.peer_deps_serialized()),
+      {
+        StringBuilder::<SmallStackString>::build(|builder| {
+          builder.append(&id.nv.version);
+          builder.append(&id.peer_dependencies);
+        })
+        .unwrap()
+      },
     );
   }
   for package in snapshot.all_packages_for_every_system() {
@@ -28,8 +28,10 @@ fn default_bin_name(package: &NpmResolutionPackage) -> &str {
     .id
     .nv
     .name
+    .as_str()
     .rsplit_once('/')
-    .map_or(package.id.nv.name.as_str(), |(_, name)| name)
+    .map(|(_, name)| name)
+    .unwrap_or(package.id.nv.name.as_str())
 }
 
 pub fn warn_missing_entrypoint(
@@ -38,6 +38,7 @@ use deno_resolver::npm::normalize_pkg_name_for_node_modules_deno_folder;
use deno_runtime::deno_fs;
use deno_runtime::deno_node::NodePermissions;
use deno_semver::package::PackageNv;
use deno_semver::StackString;
use node_resolver::errors::PackageFolderResolveError;
use node_resolver::errors::PackageFolderResolveIoError;
use node_resolver::errors::PackageNotFoundError;
@@ -355,8 +356,10 @@ async fn sync_resolution_with_fs(
  let package_partitions =
    snapshot.all_system_packages_partitioned(system_info);
  let mut cache_futures = FuturesUnordered::new();
  let mut newest_packages_by_name: HashMap<&String, &NpmResolutionPackage> =
    HashMap::with_capacity(package_partitions.packages.len());
  let mut newest_packages_by_name: HashMap<
    &StackString,
    &NpmResolutionPackage,
  > = HashMap::with_capacity(package_partitions.packages.len());
  let bin_entries = Rc::new(RefCell::new(bin_entries::BinEntries::new()));
  let mut lifecycle_scripts =
    super::common::lifecycle_scripts::LifecycleScripts::new(
@@ -536,7 +539,7 @@ async fn sync_resolution_with_fs(
    }
  }

  let mut found_names: HashMap<&String, &PackageNv> = HashMap::new();
  let mut found_names: HashMap<&StackString, &PackageNv> = HashMap::new();

  // set of node_modules in workspace packages that we've already ensured exist
  let mut existing_child_node_modules_dirs: HashSet<PathBuf> = HashSet::new();
@@ -1012,10 +1015,10 @@ fn get_package_folder_id_from_folder_name(
) -> Option<NpmPackageCacheFolderId> {
  let folder_name = folder_name.replace('+', "/");
  let (name, ending) = folder_name.rsplit_once('@')?;
  let name = if let Some(encoded_name) = name.strip_prefix('_') {
    mixed_case_package_name_decode(encoded_name)?
  let name: StackString = if let Some(encoded_name) = name.strip_prefix('_') {
    StackString::from_string(mixed_case_package_name_decode(encoded_name)?)
  } else {
    name.to_string()
    name.into()
  };
  let (raw_version, copy_index) = match ending.split_once('_') {
    Some((raw_version, copy_index)) => {
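The folder name packs the package name (with '/' encoded as '+'), the version, and an optional copy index into one path segment. A sketch of the same parsing steps on plain strings, leaving out the mixed-case decoding branch (example inputs are hypothetical):

    // Parse "<name with '/' as '+'>@<version>[_<copy index>]".
    fn parse_folder_name(folder_name: &str) -> Option<(String, String, u64)> {
      let folder_name = folder_name.replace('+', "/");
      let (name, ending) = folder_name.rsplit_once('@')?;
      let (version, copy_index) = match ending.split_once('_') {
        Some((version, copy_index)) => (version, copy_index.parse().ok()?),
        None => (ending, 0),
      };
      Some((name.to_string(), version.to_string(), copy_index))
    }

    fn main() {
      assert_eq!(
        parse_folder_name("@denotest+foo@1.0.0_2"),
        Some(("@denotest/foo".to_string(), "1.0.0".to_string(), 2))
      );
    }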
34
cli/ops/lint.rs
Normal file
@@ -0,0 +1,34 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.

use deno_ast::MediaType;
use deno_ast::ModuleSpecifier;
use deno_core::error::generic_error;
use deno_core::error::AnyError;
use deno_core::op2;

use crate::tools::lint;

deno_core::extension!(deno_lint, ops = [op_lint_create_serialized_ast,],);

#[op2]
#[buffer]
fn op_lint_create_serialized_ast(
  #[string] file_name: &str,
  #[string] source: String,
) -> Result<Vec<u8>, AnyError> {
  let file_text = deno_ast::strip_bom(source);
  let path = std::env::current_dir()?.join(file_name);
  let specifier = ModuleSpecifier::from_file_path(&path).map_err(|_| {
    generic_error(format!("Failed to parse path as URL: {}", path.display()))
  })?;
  let media_type = MediaType::from_specifier(&specifier);
  let parsed_source = deno_ast::parse_program(deno_ast::ParseParams {
    specifier,
    text: file_text.into(),
    media_type,
    capture_tokens: false,
    scope_analysis: false,
    maybe_syntax: None,
  })?;
  Ok(lint::serialize_ast_to_buffer(&parsed_source))
}
@@ -2,4 +2,5 @@

pub mod bench;
pub mod jupyter;
pub mod lint;
pub mod testing;
@@ -446,7 +446,6 @@
      },
      "command": {
        "type": "string",
        "required": true,
        "description": "The task to execute"
      },
      "dependencies": {
@@ -91,6 +91,7 @@ use super::serialization::DenoCompileModuleData;
use super::serialization::DeserializedDataSection;
use super::serialization::RemoteModulesStore;
use super::serialization::RemoteModulesStoreBuilder;
use super::serialization::SourceMapStore;
use super::virtual_fs::output_vfs;
use super::virtual_fs::BuiltVfs;
use super::virtual_fs::FileBackedVfs;
@@ -98,6 +99,7 @@ use super::virtual_fs::VfsBuilder;
use super::virtual_fs::VfsFileSubDataKind;
use super::virtual_fs::VfsRoot;
use super::virtual_fs::VirtualDirectory;
use super::virtual_fs::VirtualDirectoryEntries;
use super::virtual_fs::WindowsSystemRootablePath;

pub static DENO_COMPILE_GLOBAL_NODE_MODULES_DIR_NAME: &str =
@@ -203,17 +205,24 @@ pub struct Metadata {
  pub otel_config: OtelConfig,
}

#[allow(clippy::too_many_arguments)]
fn write_binary_bytes(
  mut file_writer: File,
  original_bin: Vec<u8>,
  metadata: &Metadata,
  npm_snapshot: Option<SerializedNpmResolutionSnapshot>,
  remote_modules: &RemoteModulesStoreBuilder,
  source_map_store: &SourceMapStore,
  vfs: &BuiltVfs,
  compile_flags: &CompileFlags,
) -> Result<(), AnyError> {
  let data_section_bytes =
    serialize_binary_data_section(metadata, npm_snapshot, remote_modules, vfs)
  let data_section_bytes = serialize_binary_data_section(
    metadata,
    npm_snapshot,
    remote_modules,
    source_map_store,
    vfs,
  )
  .context("Serializing binary data section.")?;

  let target = compile_flags.resolve_target();
@@ -256,6 +265,7 @@ pub struct StandaloneData {
  pub modules: StandaloneModules,
  pub npm_snapshot: Option<ValidSerializedNpmResolutionSnapshot>,
  pub root_path: PathBuf,
  pub source_maps: SourceMapStore,
  pub vfs: Arc<FileBackedVfs>,
}

@@ -283,13 +293,12 @@ impl StandaloneModules {
  pub fn read<'a>(
    &'a self,
    specifier: &'a ModuleSpecifier,
    kind: VfsFileSubDataKind,
  ) -> Result<Option<DenoCompileModuleData<'a>>, AnyError> {
    if specifier.scheme() == "file" {
      let path = deno_path_util::url_to_file_path(specifier)?;
      let bytes = match self.vfs.file_entry(&path) {
        Ok(entry) => self
          .vfs
          .read_file_all(entry, VfsFileSubDataKind::ModuleGraph)?,
        Ok(entry) => self.vfs.read_file_all(entry, kind)?,
        Err(err) if err.kind() == ErrorKind::NotFound => {
          match RealFs.read_file_sync(&path, None) {
            Ok(bytes) => bytes,
@@ -307,7 +316,18 @@ impl StandaloneModules {
        data: bytes,
      }))
    } else {
      self.remote_modules.read(specifier)
      self.remote_modules.read(specifier).map(|maybe_entry| {
        maybe_entry.map(|entry| DenoCompileModuleData {
          media_type: entry.media_type,
          specifier: entry.specifier,
          data: match kind {
            VfsFileSubDataKind::Raw => entry.data,
            VfsFileSubDataKind::ModuleGraph => {
              entry.transpiled_data.unwrap_or(entry.data)
            }
          },
        })
      })
    }
  }
}
@@ -328,7 +348,8 @@ pub fn extract_standalone(
    mut metadata,
    npm_snapshot,
    remote_modules,
    mut vfs_dir,
    source_maps,
    vfs_root_entries,
    vfs_files_data,
  } = match deserialize_binary_data_section(data)? {
    Some(data_section) => data_section,
@@ -351,11 +372,12 @@ pub fn extract_standalone(
    metadata.argv.push(arg.into_string().unwrap());
  }
  let vfs = {
    // align the name of the directory with the root dir
    vfs_dir.name = root_path.file_name().unwrap().to_string_lossy().to_string();

    let fs_root = VfsRoot {
      dir: vfs_dir,
      dir: VirtualDirectory {
        // align the name of the directory with the root dir
        name: root_path.file_name().unwrap().to_string_lossy().to_string(),
        entries: vfs_root_entries,
      },
      root_path: root_path.clone(),
      start_file_offset: 0,
    };
@@ -372,6 +394,7 @@ pub fn extract_standalone(
    },
    npm_snapshot,
    root_path,
    source_maps,
    vfs,
  }))
}
@@ -451,7 +474,7 @@ impl<'a> DenoCompileBinaryWriter<'a> {
        )
      }
    }
    self.write_standalone_binary(options, original_binary).await
    self.write_standalone_binary(options, original_binary)
  }

  async fn get_base_binary(
@@ -554,7 +577,7 @@ impl<'a> DenoCompileBinaryWriter<'a> {
  /// This function creates a standalone deno binary by appending a bundle
  /// and magic trailer to the currently executing binary.
  #[allow(clippy::too_many_arguments)]
  async fn write_standalone_binary(
  fn write_standalone_binary(
    &self,
    options: WriteBinOptions<'_>,
    original_bin: Vec<u8>,
@@ -616,71 +639,81 @@ impl<'a> DenoCompileBinaryWriter<'a> {
        .with_context(|| format!("Including {}", path.display()))?;
    }
    let mut remote_modules_store = RemoteModulesStoreBuilder::default();
    let mut code_cache_key_hasher = if self.cli_options.code_cache_enabled() {
      Some(FastInsecureHasher::new_deno_versioned())
    } else {
      None
    };
    let mut source_maps = Vec::with_capacity(graph.specifiers_count());
    // todo(dsherret): transpile in parallel
    for module in graph.modules() {
      if module.specifier().scheme() == "data" {
        continue; // don't store data urls as an entry as they're in the code
      }
      if let Some(hasher) = &mut code_cache_key_hasher {
        if let Some(source) = module.source() {
          hasher.write(module.specifier().as_str().as_bytes());
          hasher.write(source.as_bytes());
        }
      }
      let (maybe_source, media_type) = match module {
      let (maybe_original_source, maybe_transpiled, media_type) = match module {
        deno_graph::Module::Js(m) => {
          let source = if m.media_type.is_emittable() {
          let original_bytes = m.source.as_bytes().to_vec();
          let maybe_transpiled = if m.media_type.is_emittable() {
            let is_cjs = self.cjs_tracker.is_cjs_with_known_is_script(
              &m.specifier,
              m.media_type,
              m.is_script,
            )?;
            let module_kind = ModuleKind::from_is_cjs(is_cjs);
            let source = self
              .emitter
              .emit_parsed_source(
            let (source, source_map) =
              self.emitter.emit_parsed_source_for_deno_compile(
                &m.specifier,
                m.media_type,
                module_kind,
                &m.source,
              )
              .await?;
            source.into_bytes()
              )?;
            if source != m.source.as_ref() {
              source_maps.push((&m.specifier, source_map));
              Some(source.into_bytes())
          } else {
            m.source.as_bytes().to_vec()
              None
            }
          } else {
            None
          };
          (Some(source), m.media_type)
          (Some(original_bytes), maybe_transpiled, m.media_type)
        }
        deno_graph::Module::Json(m) => {
          (Some(m.source.as_bytes().to_vec()), m.media_type)
          (Some(m.source.as_bytes().to_vec()), None, m.media_type)
        }
        deno_graph::Module::Wasm(m) => {
          (Some(m.source.to_vec()), MediaType::Wasm)
          (Some(m.source.to_vec()), None, MediaType::Wasm)
        }
        deno_graph::Module::Npm(_)
        | deno_graph::Module::Node(_)
        | deno_graph::Module::External(_) => (None, MediaType::Unknown),
        | deno_graph::Module::External(_) => (None, None, MediaType::Unknown),
      };
      if let Some(original_source) = maybe_original_source {
        if module.specifier().scheme() == "file" {
          let file_path = deno_path_util::url_to_file_path(module.specifier())?;
          vfs
            .add_file_with_data(
              &file_path,
              match maybe_source {
                Some(source) => source,
                None => RealFs.read_file_sync(&file_path, None)?.into_owned(),
              },
              original_source,
              VfsFileSubDataKind::Raw,
            )
            .with_context(|| {
              format!("Failed adding '{}'", file_path.display())
            })?;
          if let Some(transpiled_source) = maybe_transpiled {
            vfs
              .add_file_with_data(
                &file_path,
                transpiled_source,
                VfsFileSubDataKind::ModuleGraph,
              )
              .with_context(|| {
                format!("Failed adding '{}'", file_path.display())
              })?;
        } else if let Some(source) = maybe_source {
          remote_modules_store.add(module.specifier(), media_type, source);
          }
        } else {
          remote_modules_store.add(
            module.specifier(),
            media_type,
            original_source,
            maybe_transpiled,
          );
        }
      }
    }
    remote_modules_store.add_redirects(&graph.redirects);
@@ -713,6 +746,28 @@ impl<'a> DenoCompileBinaryWriter<'a> {
      None => StandaloneRelativeFileBaseUrl::WindowsSystemRoot,
    };

    let code_cache_key = if self.cli_options.code_cache_enabled() {
      let mut hasher = FastInsecureHasher::new_deno_versioned();
      for module in graph.modules() {
        if let Some(source) = module.source() {
          hasher
            .write(root_dir_url.specifier_key(module.specifier()).as_bytes());
          hasher.write(source.as_bytes());
        }
      }
      Some(hasher.finish())
    } else {
      None
    };

    let mut source_map_store = SourceMapStore::with_capacity(source_maps.len());
    for (specifier, source_map) in source_maps {
      source_map_store.add(
        Cow::Owned(root_dir_url.specifier_key(specifier).into_owned()),
        Cow::Owned(source_map.into_bytes()),
      );
    }

    let node_modules = match self.npm_resolver.as_inner() {
      InnerCliNpmResolverRef::Managed(_) => {
        npm_snapshot.as_ref().map(|_| NodeModules::Managed {
@@ -765,7 +820,7 @@ impl<'a> DenoCompileBinaryWriter<'a> {
    let metadata = Metadata {
      argv: compile_flags.args.clone(),
      seed: self.cli_options.seed(),
      code_cache_key: code_cache_key_hasher.map(|h| h.finish()),
      code_cache_key,
      location: self.cli_options.location_flag().clone(),
      permissions: self.cli_options.permission_flags().clone(),
      v8_flags: self.cli_options.v8_flags().clone(),
@@ -832,6 +887,7 @@ impl<'a> DenoCompileBinaryWriter<'a> {
      &metadata,
      npm_snapshot.map(|s| s.into_serialized()),
      &remote_modules_store,
      &source_map_store,
      &vfs,
      compile_flags,
    )
@@ -926,10 +982,10 @@ impl<'a> DenoCompileBinaryWriter<'a> {
    root_dir.name = DENO_COMPILE_GLOBAL_NODE_MODULES_DIR_NAME.to_string();
    let mut new_entries = Vec::with_capacity(root_dir.entries.len());
    let mut localhost_entries = IndexMap::new();
    for entry in std::mem::take(&mut root_dir.entries) {
    for entry in root_dir.entries.take_inner() {
      match entry {
        VfsEntry::Dir(dir) => {
          for entry in dir.entries {
        VfsEntry::Dir(mut dir) => {
          for entry in dir.entries.take_inner() {
            log::debug!("Flattening {} into node_modules", entry.name());
            if let Some(existing) =
              localhost_entries.insert(entry.name().to_string(), entry)
@@ -948,11 +1004,11 @@ impl<'a> DenoCompileBinaryWriter<'a> {
    }
    new_entries.push(VfsEntry::Dir(VirtualDirectory {
      name: "localhost".to_string(),
      entries: localhost_entries.into_iter().map(|(_, v)| v).collect(),
      entries: VirtualDirectoryEntries::new(
        localhost_entries.into_iter().map(|(_, v)| v).collect(),
      ),
    }));
    // needs to be sorted by name
    new_entries.sort_by(|a, b| a.name().cmp(b.name()));
    root_dir.entries = new_entries;
    root_dir.entries = VirtualDirectoryEntries::new(new_entries);

    // it's better to not expose the user's cache directory, so take it out
    // of there
@@ -960,10 +1016,7 @@ impl<'a> DenoCompileBinaryWriter<'a> {
    let parent_dir = vfs.get_dir_mut(parent).unwrap();
    let index = parent_dir
      .entries
      .iter()
      .position(|entry| {
        entry.name() == DENO_COMPILE_GLOBAL_NODE_MODULES_DIR_NAME
      })
      .binary_search(DENO_COMPILE_GLOBAL_NODE_MODULES_DIR_NAME)
      .unwrap();
    let npm_global_cache_dir_entry = parent_dir.entries.remove(index);

@@ -973,11 +1026,7 @@ impl<'a> DenoCompileBinaryWriter<'a> {
      Cow::Borrowed(DENO_COMPILE_GLOBAL_NODE_MODULES_DIR_NAME);
    for ancestor in parent.ancestors() {
      let dir = vfs.get_dir_mut(ancestor).unwrap();
      if let Some(index) = dir
        .entries
        .iter()
        .position(|entry| entry.name() == last_name)
      {
      if let Ok(index) = dir.entries.binary_search(&last_name) {
        dir.entries.remove(index);
      }
      last_name = Cow::Owned(dir.name.clone());
@@ -988,7 +1037,7 @@ impl<'a> DenoCompileBinaryWriter<'a> {

    // now build the vfs and add the global cache dir entry there
    let mut built_vfs = vfs.build();
    built_vfs.root.insert_entry(npm_global_cache_dir_entry);
    built_vfs.entries.insert(npm_global_cache_dir_entry);
    built_vfs
  }
  InnerCliNpmResolverRef::Byonm(_) => vfs.build(),
@@ -55,6 +55,7 @@ use node_resolver::errors::ClosestPkgJsonError;
use node_resolver::NodeResolutionKind;
use node_resolver::ResolutionMode;
use serialization::DenoCompileModuleSource;
use serialization::SourceMapStore;
use std::borrow::Cow;
use std::rc::Rc;
use std::sync::Arc;
@@ -122,6 +123,7 @@ struct SharedModuleLoaderState {
  npm_module_loader: Arc<NpmModuleLoader>,
  npm_req_resolver: Arc<CliNpmReqResolver>,
  npm_resolver: Arc<dyn CliNpmResolver>,
  source_maps: SourceMapStore,
  vfs: Arc<FileBackedVfs>,
  workspace_resolver: WorkspaceResolver,
}
@@ -396,7 +398,11 @@ impl ModuleLoader for EmbeddedModuleLoader {
      );
    }

    match self.shared.modules.read(original_specifier) {
    match self
      .shared
      .modules
      .read(original_specifier, VfsFileSubDataKind::ModuleGraph)
    {
      Ok(Some(module)) => {
        let media_type = module.media_type;
        let (module_specifier, module_type, module_source) =
@@ -495,6 +501,45 @@ impl ModuleLoader for EmbeddedModuleLoader {
    }
    std::future::ready(()).boxed_local()
  }

  fn get_source_map(&self, file_name: &str) -> Option<Cow<[u8]>> {
    if file_name.starts_with("file:///") {
      let url =
        deno_path_util::url_from_directory_path(self.shared.vfs.root()).ok()?;
      let file_url = ModuleSpecifier::parse(file_name).ok()?;
      let relative_path = url.make_relative(&file_url)?;
      self.shared.source_maps.get(&relative_path)
    } else {
      self.shared.source_maps.get(file_name)
    }
    .map(Cow::Borrowed)
  }

  fn get_source_mapped_source_line(
    &self,
    file_name: &str,
    line_number: usize,
  ) -> Option<String> {
    let specifier = ModuleSpecifier::parse(file_name).ok()?;
    let data = self
      .shared
      .modules
      .read(&specifier, VfsFileSubDataKind::Raw)
      .ok()??;

    let source = String::from_utf8_lossy(&data.data);
    // Do NOT use .lines(): it skips the terminating empty line.
    // (due to internally using .split_terminator() instead of .split())
    let lines: Vec<&str> = source.split('\n').collect();
    if line_number >= lines.len() {
      Some(format!(
        "{} Couldn't format source line: Line {} is out of bounds (source may have changed at runtime)",
        crate::colors::yellow("Warning"), line_number + 1,
      ))
    } else {
      Some(lines[line_number].to_string())
    }
  }
}

impl NodeRequireLoader for EmbeddedModuleLoader {
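The comment in `get_source_mapped_source_line` is the reason for preferring `split('\n')`: `str::lines` drops the empty segment after a trailing newline, so the last addressable line index would differ. A quick demonstration:

    fn main() {
      let source = "first\nsecond\n";
      // .lines() swallows the empty line after the final '\n'
      assert_eq!(source.lines().count(), 2);
      // .split('\n') keeps it, so index 2 (the empty third line) stays valid
      let lines: Vec<&str> = source.split('\n').collect();
      assert_eq!(lines.len(), 3);
      assert_eq!(lines[2], "");
    }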
@@ -590,6 +635,7 @@ pub async fn run(data: StandaloneData) -> Result<i32, AnyError> {
    modules,
    npm_snapshot,
    root_path,
    source_maps,
    vfs,
  } = data;
  let deno_dir_provider = Arc::new(DenoDirProvider::new(None));
@@ -841,6 +887,7 @@ pub async fn run(data: StandaloneData) -> Result<i32, AnyError> {
    )),
    npm_resolver: npm_resolver.clone(),
    npm_req_resolver,
    source_maps,
    vfs,
    workspace_resolver,
  }),
@@ -6,6 +6,8 @@ use std::collections::BTreeMap;
use std::collections::HashMap;
use std::io::Write;

use capacity_builder::BytesAppendable;
use deno_ast::swc::common::source_map;
use deno_ast::MediaType;
use deno_core::anyhow::bail;
use deno_core::anyhow::Context;
@@ -20,12 +22,15 @@ use deno_npm::resolution::SerializedNpmResolutionSnapshotPackage;
use deno_npm::resolution::ValidSerializedNpmResolutionSnapshot;
use deno_npm::NpmPackageId;
use deno_semver::package::PackageReq;
use deno_semver::StackString;
use indexmap::IndexMap;

use crate::standalone::virtual_fs::VirtualDirectory;

use super::binary::Metadata;
use super::virtual_fs::BuiltVfs;
use super::virtual_fs::VfsBuilder;
use super::virtual_fs::VirtualDirectoryEntries;

const MAGIC_BYTES: &[u8; 8] = b"d3n0l4nd";

@@ -33,21 +38,22 @@ const MAGIC_BYTES: &[u8; 8] = b"d3n0l4nd";
/// * d3n0l4nd
/// * <metadata_len><metadata>
/// * <npm_snapshot_len><npm_snapshot>
/// * <remote_modules_len><remote_modules>
/// * <remote_modules>
/// * <vfs_headers_len><vfs_headers>
/// * <vfs_file_data_len><vfs_file_data>
/// * <source_map_data>
/// * d3n0l4nd
pub fn serialize_binary_data_section(
  metadata: &Metadata,
  npm_snapshot: Option<SerializedNpmResolutionSnapshot>,
  remote_modules: &RemoteModulesStoreBuilder,
  source_map_store: &SourceMapStore,
  vfs: &BuiltVfs,
) -> Result<Vec<u8>, AnyError> {
  let metadata = serde_json::to_string(metadata)?;
  let npm_snapshot =
    npm_snapshot.map(serialize_npm_snapshot).unwrap_or_default();
  let remote_modules_len = Cell::new(0_u64);
  let serialized_vfs = serde_json::to_string(&vfs.root)?;
  let serialized_vfs = serde_json::to_string(&vfs.entries)?;

  let bytes = capacity_builder::BytesBuilder::build(|builder| {
    builder.append(MAGIC_BYTES);
@@ -63,10 +69,7 @@ pub fn serialize_binary_data_section(
    }
    // 3. Remote modules
    {
      builder.append_le(remote_modules_len.get()); // this will be properly initialized on the second pass
      let start_index = builder.len();
      remote_modules.write(builder);
      remote_modules_len.set((builder.len() - start_index) as u64);
    }
    // 4. VFS
    {
@@ -78,6 +81,16 @@ pub fn serialize_binary_data_section(
        builder.append(file);
      }
    }
    // 5. Source maps
    {
      builder.append_le(source_map_store.data.len() as u32);
      for (specifier, source_map) in &source_map_store.data {
        builder.append_le(specifier.len() as u32);
        builder.append(specifier);
        builder.append_le(source_map.len() as u32);
        builder.append(source_map.as_ref());
      }
    }

    // write the magic bytes at the end so we can use it
    // to make sure we've deserialized correctly
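Each source-map entry above is framed as two u32-length-prefixed byte strings: the specifier, then the map bytes. A self-contained sketch of that framing, assuming little-endian prefixes as the `append_le` calls suggest:

    // Write one entry: <u32 len><specifier bytes><u32 len><source map bytes>.
    fn write_entry(out: &mut Vec<u8>, specifier: &str, source_map: &[u8]) {
      out.extend_from_slice(&(specifier.len() as u32).to_le_bytes());
      out.extend_from_slice(specifier.as_bytes());
      out.extend_from_slice(&(source_map.len() as u32).to_le_bytes());
      out.extend_from_slice(source_map);
    }

    // Read it back, returning the remaining input for the next entry.
    fn read_entry(input: &[u8]) -> Option<(&[u8], (&[u8], &[u8]))> {
      fn take(input: &[u8]) -> Option<(&[u8], &[u8])> {
        let len = u32::from_le_bytes(input.get(..4)?.try_into().ok()?) as usize;
        let rest = &input[4..];
        Some((rest.get(..len)?, rest.get(len..)?))
      }
      let (specifier, rest) = take(input)?;
      let (map, rest) = take(rest)?;
      Some((rest, (specifier, map)))
    }

    fn main() {
      let mut buf = Vec::new();
      write_entry(&mut buf, "main.ts", b"{\"version\":3}");
      let (rest, (spec, map)) = read_entry(&buf).unwrap();
      assert!(rest.is_empty());
      assert_eq!(spec, &b"main.ts"[..]);
      assert_eq!(map, &b"{\"version\":3}"[..]);
    }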
@@ -91,19 +104,14 @@ pub struct DeserializedDataSection {
  pub metadata: Metadata,
  pub npm_snapshot: Option<ValidSerializedNpmResolutionSnapshot>,
  pub remote_modules: RemoteModulesStore,
  pub vfs_dir: VirtualDirectory,
  pub source_maps: SourceMapStore,
  pub vfs_root_entries: VirtualDirectoryEntries,
  pub vfs_files_data: &'static [u8],
}

pub fn deserialize_binary_data_section(
  data: &'static [u8],
) -> Result<Option<DeserializedDataSection>, AnyError> {
  fn read_bytes_with_len(input: &[u8]) -> Result<(&[u8], &[u8]), AnyError> {
    let (input, len) = read_u64(input)?;
    let (input, data) = read_bytes(input, len as usize)?;
    Ok((input, data))
  }

  fn read_magic_bytes(input: &[u8]) -> Result<(&[u8], bool), AnyError> {
    if input.len() < MAGIC_BYTES.len() {
      bail!("Unexpected end of data. Could not find magic bytes.");
@@ -115,34 +123,51 @@ pub fn deserialize_binary_data_section(
    Ok((input, true))
  }

  #[allow(clippy::type_complexity)]
  fn read_source_map_entry(
    input: &[u8],
  ) -> Result<(&[u8], (Cow<str>, &[u8])), AnyError> {
    let (input, specifier) = read_string_lossy(input)?;
    let (input, source_map) = read_bytes_with_u32_len(input)?;
    Ok((input, (specifier, source_map)))
  }

  let (input, found) = read_magic_bytes(data)?;
  if !found {
    return Ok(None);
  }

  // 1. Metadata
  let (input, data) = read_bytes_with_len(input).context("reading metadata")?;
  let (input, data) =
    read_bytes_with_u64_len(input).context("reading metadata")?;
  let metadata: Metadata =
    serde_json::from_slice(data).context("deserializing metadata")?;
  // 2. Npm snapshot
  let (input, data) =
    read_bytes_with_len(input).context("reading npm snapshot")?;
    read_bytes_with_u64_len(input).context("reading npm snapshot")?;
  let npm_snapshot = if data.is_empty() {
    None
  } else {
    Some(deserialize_npm_snapshot(data).context("deserializing npm snapshot")?)
  };
  // 3. Remote modules
  let (input, data) =
    read_bytes_with_len(input).context("reading remote modules data")?;
  let remote_modules =
    RemoteModulesStore::build(data).context("deserializing remote modules")?;
  let (input, remote_modules) =
    RemoteModulesStore::build(input).context("deserializing remote modules")?;
  // 4. VFS
  let (input, data) = read_bytes_with_len(input).context("vfs")?;
  let vfs_dir: VirtualDirectory =
  let (input, data) = read_bytes_with_u64_len(input).context("vfs")?;
  let vfs_root_entries: VirtualDirectoryEntries =
    serde_json::from_slice(data).context("deserializing vfs data")?;
  let (input, vfs_files_data) =
    read_bytes_with_len(input).context("reading vfs files data")?;
    read_bytes_with_u64_len(input).context("reading vfs files data")?;
  // 5. Source maps
  let (mut input, source_map_data_len) = read_u32_as_usize(input)?;
  let mut source_maps = SourceMapStore::with_capacity(source_map_data_len);
  for _ in 0..source_map_data_len {
    let (current_input, (specifier, source_map)) =
      read_source_map_entry(input)?;
    input = current_input;
    source_maps.add(specifier, Cow::Borrowed(source_map));
  }

  // finally ensure we read the magic bytes at the end
  let (_input, found) = read_magic_bytes(input)?;
@@ -154,7 +179,8 @@ pub fn deserialize_binary_data_section(
    metadata,
    npm_snapshot,
    remote_modules,
    vfs_dir,
    source_maps,
    vfs_root_entries,
    vfs_files_data,
  }))
}
@@ -162,19 +188,31 @@ pub fn deserialize_binary_data_section(
#[derive(Default)]
pub struct RemoteModulesStoreBuilder {
  specifiers: Vec<(String, u64)>,
  data: Vec<(MediaType, Vec<u8>)>,
  data: Vec<(MediaType, Vec<u8>, Option<Vec<u8>>)>,
  data_byte_len: u64,
  redirects: Vec<(String, String)>,
  redirects_len: u64,
}

impl RemoteModulesStoreBuilder {
  pub fn add(&mut self, specifier: &Url, media_type: MediaType, data: Vec<u8>) {
  pub fn add(
    &mut self,
    specifier: &Url,
    media_type: MediaType,
    data: Vec<u8>,
    maybe_transpiled: Option<Vec<u8>>,
  ) {
    log::debug!("Adding '{}' ({})", specifier, media_type);
    let specifier = specifier.to_string();
    self.specifiers.push((specifier, self.data_byte_len));
    self.data_byte_len += 1 + 8 + data.len() as u64; // media type (1 byte), data length (8 bytes), data
    self.data.push((media_type, data));
    let maybe_transpiled_len = match &maybe_transpiled {
      // data length (4 bytes), data
      Some(data) => 4 + data.len() as u64,
      None => 0,
    };
    // media type (1 byte), data length (4 bytes), data, has transpiled (1 byte), transpiled length
    self.data_byte_len += 1 + 4 + data.len() as u64 + 1 + maybe_transpiled_len;
    self.data.push((media_type, data, maybe_transpiled));
  }

  pub fn add_redirects(&mut self, redirects: &BTreeMap<Url, Url>) {
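The `data_byte_len` bookkeeping above matches what `write` emits per module entry: 1 byte of media type, a 4-byte length plus the data, a 1-byte transpiled flag, and, when present, a 4-byte length plus the transpiled bytes. A worked check of that arithmetic:

    // Byte length of one serialized module entry, per the comment above.
    fn entry_byte_len(data_len: u64, transpiled_len: Option<u64>) -> u64 {
      let transpiled = match transpiled_len {
        Some(len) => 4 + len, // u32 length prefix + transpiled bytes
        None => 0,
      };
      1 + 4 + data_len + 1 + transpiled
    }

    fn main() {
      // 100 bytes of source, no transpiled output: 1 + 4 + 100 + 1
      assert_eq!(entry_byte_len(100, None), 106);
      // plus an 80-byte transpile: 1 + 4 + 100 + 1 + 4 + 80
      assert_eq!(entry_byte_len(100, Some(80)), 190);
    }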
@@ -188,12 +226,15 @@ impl RemoteModulesStoreBuilder {
    }
  }

  fn write<'a>(&'a self, builder: &mut capacity_builder::BytesBuilder<'a>) {
  fn write<'a, TBytes: capacity_builder::BytesType>(
    &'a self,
    builder: &mut capacity_builder::BytesBuilder<'a, TBytes>,
  ) {
    builder.append_le(self.specifiers.len() as u32);
    builder.append_le(self.redirects.len() as u32);
    for (specifier, offset) in &self.specifiers {
      builder.append_le(specifier.len() as u32);
      builder.append(specifier.as_bytes());
      builder.append(specifier);
      builder.append_le(*offset);
    }
    for (from, to) in &self.redirects {
@@ -202,10 +243,32 @@ impl RemoteModulesStoreBuilder {
      builder.append_le(to.len() as u32);
      builder.append(to);
    }
    for (media_type, data) in &self.data {
    builder.append_le(
      self
        .data
        .iter()
        .map(|(_, data, maybe_transpiled)| {
          1 + 4
            + (data.len() as u64)
            + 1
            + match maybe_transpiled {
              Some(transpiled) => 4 + (transpiled.len() as u64),
              None => 0,
            }
        })
        .sum::<u64>(),
    );
    for (media_type, data, maybe_transpiled) in &self.data {
      builder.append(serialize_media_type(*media_type));
      builder.append_le(data.len() as u64);
      builder.append_le(data.len() as u32);
      builder.append(data);
      if let Some(transpiled) = maybe_transpiled {
        builder.append(1);
        builder.append_le(transpiled.len() as u32);
        builder.append(transpiled);
      } else {
        builder.append(0);
      }
    }
  }
}
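The optional transpiled source becomes a 1-byte presence flag followed by a u32-length-prefixed payload; the matching reader (in the `read` hunk further down) rejects any flag other than 0 or 1. A sketch of the round trip under the same little-endian assumption:

    fn write_optional(out: &mut Vec<u8>, transpiled: Option<&[u8]>) {
      match transpiled {
        Some(bytes) => {
          out.push(1); // has-transpiled flag
          out.extend_from_slice(&(bytes.len() as u32).to_le_bytes());
          out.extend_from_slice(bytes);
        }
        None => out.push(0),
      }
    }

    fn read_optional(input: &[u8]) -> Result<(&[u8], Option<&[u8]>), String> {
      match input.split_first() {
        Some((&0, rest)) => Ok((rest, None)),
        Some((&1, rest)) => {
          let len =
            u32::from_le_bytes(rest.get(..4).ok_or("eof")?.try_into().unwrap())
              as usize;
          let bytes = rest.get(4..4 + len).ok_or("eof")?;
          Ok((&rest[4 + len..], Some(bytes)))
        }
        Some((&flag, _)) => Err(format!("invalid transpiled data flag: {flag}")),
        None => Err("eof".to_string()),
      }
    }

    fn main() {
      let mut buf = Vec::new();
      write_optional(&mut buf, Some(b"var x = 1;"));
      let (rest, decoded) = read_optional(&buf).unwrap();
      assert!(rest.is_empty());
      assert_eq!(decoded, Some(&b"var x = 1;"[..]));
    }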
@@ -234,6 +297,30 @@ impl DenoCompileModuleSource {
  }
}

pub struct SourceMapStore {
  data: IndexMap<Cow<'static, str>, Cow<'static, [u8]>>,
}

impl SourceMapStore {
  pub fn with_capacity(capacity: usize) -> Self {
    Self {
      data: IndexMap::with_capacity(capacity),
    }
  }

  pub fn add(
    &mut self,
    specifier: Cow<'static, str>,
    source_map: Cow<'static, [u8]>,
  ) {
    self.data.insert(specifier, source_map);
  }

  pub fn get(&self, specifier: &str) -> Option<&[u8]> {
    self.data.get(specifier).map(|v| v.as_ref())
  }
}

pub struct DenoCompileModuleData<'a> {
  pub specifier: &'a Url,
  pub media_type: MediaType,
@@ -280,6 +367,13 @@ impl<'a> DenoCompileModuleData<'a> {
  }
}

pub struct RemoteModuleEntry<'a> {
  pub specifier: &'a Url,
  pub media_type: MediaType,
  pub data: Cow<'static, [u8]>,
  pub transpiled_data: Option<Cow<'static, [u8]>>,
}

enum RemoteModulesStoreSpecifierValue {
  Data(usize),
  Redirect(Url),
@@ -291,7 +385,7 @@ pub struct RemoteModulesStore {
}

impl RemoteModulesStore {
  fn build(data: &'static [u8]) -> Result<Self, AnyError> {
  fn build(input: &'static [u8]) -> Result<(&'static [u8], Self), AnyError> {
    fn read_specifier(input: &[u8]) -> Result<(&[u8], (Url, u64)), AnyError> {
      let (input, specifier) = read_string_lossy(input)?;
      let specifier = Url::parse(&specifier)?;
@@ -334,12 +428,16 @@ impl RemoteModulesStore {
      Ok((input, specifiers))
    }

    let (files_data, specifiers) = read_headers(data)?;
    let (input, specifiers) = read_headers(input)?;
    let (input, files_data) = read_bytes_with_u64_len(input)?;

    Ok(Self {
    Ok((
      input,
      Self {
        specifiers,
        files_data,
    })
      },
    ))
  }

  pub fn resolve_specifier<'a>(
@@ -370,7 +468,7 @@ impl RemoteModulesStore {
  pub fn read<'a>(
    &'a self,
    original_specifier: &'a Url,
  ) -> Result<Option<DenoCompileModuleData<'a>>, AnyError> {
  ) -> Result<Option<RemoteModuleEntry<'a>>, AnyError> {
    let mut count = 0;
    let mut specifier = original_specifier;
    loop {
@@ -386,12 +484,25 @@ impl RemoteModulesStore {
          let input = &self.files_data[*offset..];
          let (input, media_type_byte) = read_bytes(input, 1)?;
          let media_type = deserialize_media_type(media_type_byte[0])?;
          let (input, len) = read_u64(input)?;
          let (_input, data) = read_bytes(input, len as usize)?;
          return Ok(Some(DenoCompileModuleData {
          let (input, data) = read_bytes_with_u32_len(input)?;
          check_has_len(input, 1)?;
          let (input, has_transpiled) = (&input[1..], input[0]);
          let (_, transpiled_data) = match has_transpiled {
            0 => (input, None),
            1 => {
              let (input, data) = read_bytes_with_u32_len(input)?;
              (input, Some(data))
            }
            value => bail!(
              "Invalid transpiled data flag: {}. Compiled data is corrupt.",
              value
            ),
          };
          return Ok(Some(RemoteModuleEntry {
            specifier,
            media_type,
            data: Cow::Borrowed(data),
            transpiled_data: transpiled_data.map(Cow::Borrowed),
          }));
        }
        None => {
@@ -475,12 +586,13 @@ fn deserialize_npm_snapshot(
  #[allow(clippy::needless_lifetimes)] // clippy bug
  fn parse_package_dep<'a>(
    id_to_npm_id: &'a impl Fn(usize) -> Result<NpmPackageId, AnyError>,
  ) -> impl Fn(&[u8]) -> Result<(&[u8], (String, NpmPackageId)), AnyError> + 'a
  ) -> impl Fn(&[u8]) -> Result<(&[u8], (StackString, NpmPackageId)), AnyError> + 'a
  {
    |input| {
      let (input, req) = read_string_lossy(input)?;
      let (input, id) = read_u32_as_usize(input)?;
      Ok((input, (req.into_owned(), id_to_npm_id(id)?)))
      let req = StackString::from_cow(req);
      Ok((input, (req, id_to_npm_id(id)?)))
    }
  }

@@ -630,17 +742,34 @@ fn parse_vec_n_times_with_index<TResult>(
  Ok((input, results))
}

fn read_bytes_with_u64_len(input: &[u8]) -> Result<(&[u8], &[u8]), AnyError> {
  let (input, len) = read_u64(input)?;
  let (input, data) = read_bytes(input, len as usize)?;
  Ok((input, data))
}

fn read_bytes_with_u32_len(input: &[u8]) -> Result<(&[u8], &[u8]), AnyError> {
  let (input, len) = read_u32_as_usize(input)?;
  let (input, data) = read_bytes(input, len)?;
  Ok((input, data))
}

fn read_bytes(input: &[u8], len: usize) -> Result<(&[u8], &[u8]), AnyError> {
  if input.len() < len {
    bail!("Unexpected end of data.",);
  }
  check_has_len(input, len)?;
  let (len_bytes, input) = input.split_at(len);
  Ok((input, len_bytes))
}

#[inline(always)]
fn check_has_len(input: &[u8], len: usize) -> Result<(), AnyError> {
  if input.len() < len {
    bail!("Unexpected end of data.");
  }
  Ok(())
}

fn read_string_lossy(input: &[u8]) -> Result<(&[u8], Cow<str>), AnyError> {
  let (input, str_len) = read_u32_as_usize(input)?;
  let (input, data_bytes) = read_bytes(input, str_len)?;
  let (input, data_bytes) = read_bytes_with_u32_len(input)?;
  Ok((input, String::from_utf8_lossy(data_bytes)))
}

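All these readers share one convention: take the input slice, return the remaining input together with the parsed value, so successive reads thread `input` through like a tiny parser combinator. A self-contained illustration of the shape:

    // Each step returns (rest, value) so parses compose left to right.
    fn read_u8(input: &[u8]) -> Option<(&[u8], u8)> {
      let (&byte, rest) = input.split_first()?;
      Some((rest, byte))
    }

    fn read_bytes(input: &[u8], len: usize) -> Option<(&[u8], &[u8])> {
      if input.len() < len {
        return None;
      }
      let (bytes, rest) = input.split_at(len);
      Some((rest, bytes))
    }

    fn main() {
      let data = [3, b'a', b'b', b'c', 7];
      let (input, len) = read_u8(&data).unwrap();
      let (input, text) = read_bytes(input, len as usize).unwrap();
      let (input, trailing) = read_u8(input).unwrap();
      assert_eq!(text, &b"abc"[..]);
      assert_eq!(trailing, 7);
      assert!(input.is_empty());
    }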
@@ -67,7 +67,7 @@ impl WindowsSystemRootablePath {
#[derive(Debug)]
pub struct BuiltVfs {
  pub root_path: WindowsSystemRootablePath,
  pub root: VirtualDirectory,
  pub entries: VirtualDirectoryEntries,
  pub files: Vec<Vec<u8>>,
}

@@ -95,7 +95,7 @@ impl VfsBuilder {
    Self {
      executable_root: VirtualDirectory {
        name: "/".to_string(),
        entries: Vec::new(),
        entries: Default::default(),
      },
      files: Vec::new(),
      current_offset: 0,
@@ -208,23 +208,20 @@ impl VfsBuilder {
        continue;
      }
      let name = component.as_os_str().to_string_lossy();
      let index = match current_dir
        .entries
        .binary_search_by(|e| e.name().cmp(&name))
      {
      let index = match current_dir.entries.binary_search(&name) {
        Ok(index) => index,
        Err(insert_index) => {
          current_dir.entries.insert(
          current_dir.entries.0.insert(
            insert_index,
            VfsEntry::Dir(VirtualDirectory {
              name: name.to_string(),
              entries: Vec::new(),
              entries: Default::default(),
            }),
          );
          insert_index
        }
      };
      match &mut current_dir.entries[index] {
      match &mut current_dir.entries.0[index] {
        VfsEntry::Dir(dir) => {
          current_dir = dir;
        }
@@ -248,14 +245,8 @@ impl VfsBuilder {
        continue;
      }
      let name = component.as_os_str().to_string_lossy();
      let index = match current_dir
        .entries
        .binary_search_by(|e| e.name().cmp(&name))
      {
        Ok(index) => index,
        Err(_) => return None,
      };
      match &mut current_dir.entries[index] {
      let entry = current_dir.entries.get_mut_by_name(&name)?;
      match entry {
        VfsEntry::Dir(dir) => {
          current_dir = dir;
        }
@@ -320,9 +311,9 @@ impl VfsBuilder {
      offset,
      len: data.len() as u64,
    };
    match dir.entries.binary_search_by(|e| e.name().cmp(&name)) {
    match dir.entries.binary_search(&name) {
      Ok(index) => {
        let entry = &mut dir.entries[index];
        let entry = &mut dir.entries.0[index];
        match entry {
          VfsEntry::File(virtual_file) => match sub_data_kind {
            VfsFileSubDataKind::Raw => {
@@ -336,7 +327,7 @@ impl VfsBuilder {
        }
      }
      Err(insert_index) => {
        dir.entries.insert(
        dir.entries.0.insert(
          insert_index,
          VfsEntry::File(VirtualFile {
            name: name.to_string(),
@@ -384,10 +375,10 @@ impl VfsBuilder {
    let target = normalize_path(path.parent().unwrap().join(&target));
    let dir = self.add_dir_raw(path.parent().unwrap());
    let name = path.file_name().unwrap().to_string_lossy();
    match dir.entries.binary_search_by(|e| e.name().cmp(&name)) {
    match dir.entries.binary_search(&name) {
      Ok(_) => {} // previously inserted
      Err(insert_index) => {
        dir.entries.insert(
        dir.entries.0.insert(
          insert_index,
          VfsEntry::Symlink(VirtualSymlink {
            name: name.to_string(),
@@ -426,7 +417,7 @@ impl VfsBuilder {
    dir: &mut VirtualDirectory,
    parts: &[String],
  ) {
    for entry in &mut dir.entries {
    for entry in &mut dir.entries.0 {
      match entry {
        VfsEntry::Dir(dir) => {
          strip_prefix_from_symlinks(dir, parts);
@@ -454,13 +445,13 @@ impl VfsBuilder {
      if self.min_root_dir.as_ref() == Some(&current_path) {
        break;
      }
      match &current_dir.entries[0] {
      match &current_dir.entries.0[0] {
        VfsEntry::Dir(dir) => {
          if dir.name == DENO_COMPILE_GLOBAL_NODE_MODULES_DIR_NAME {
            // special directory we want to maintain
            break;
          }
          match current_dir.entries.remove(0) {
          match current_dir.entries.0.remove(0) {
            VfsEntry::Dir(dir) => {
              current_path =
                WindowsSystemRootablePath::Path(current_path.join(&dir.name));
@@ -480,7 +471,7 @@ impl VfsBuilder {
    }
    BuiltVfs {
      root_path: current_path,
      root: current_dir,
      entries: current_dir.entries,
      files: self.files,
    }
  }
@@ -506,7 +497,7 @@ pub fn output_vfs(vfs: &BuiltVfs, executable_name: &str) {
    return; // no need to compute if won't output
  }

  if vfs.root.entries.is_empty() {
  if vfs.entries.is_empty() {
    return; // nothing to output
  }

@@ -696,7 +687,7 @@ fn vfs_as_display_tree(

  fn dir_size(dir: &VirtualDirectory, seen_offsets: &mut HashSet<u64>) -> Size {
    let mut size = Size::default();
    for entry in &dir.entries {
    for entry in dir.entries.iter() {
      match entry {
        VfsEntry::Dir(virtual_directory) => {
          size = size + dir_size(virtual_directory, seen_offsets);
@@ -760,15 +751,10 @@ fn vfs_as_display_tree(

  fn include_all_entries<'a>(
    dir_path: &WindowsSystemRootablePath,
    vfs_dir: &'a VirtualDirectory,
    entries: &'a VirtualDirectoryEntries,
    seen_offsets: &mut HashSet<u64>,
  ) -> Vec<DirEntryOutput<'a>> {
    if vfs_dir.name == DENO_COMPILE_GLOBAL_NODE_MODULES_DIR_NAME {
      return show_global_node_modules_dir(vfs_dir, seen_offsets);
    }

    vfs_dir
      .entries
    entries
      .iter()
      .map(|entry| DirEntryOutput {
        name: Cow::Borrowed(entry.name()),
@@ -826,10 +812,12 @@ fn vfs_as_display_tree(
    } else {
      EntryOutput::Subset(children)
    }
  } else if vfs_dir.name == DENO_COMPILE_GLOBAL_NODE_MODULES_DIR_NAME {
    EntryOutput::Subset(show_global_node_modules_dir(vfs_dir, seen_offsets))
  } else {
    EntryOutput::Subset(include_all_entries(
      &WindowsSystemRootablePath::Path(dir),
      vfs_dir,
      &vfs_dir.entries,
      seen_offsets,
    ))
  }
@@ -839,7 +827,7 @@ fn vfs_as_display_tree(
  // user might not have context about what's being shown
  let mut seen_offsets = HashSet::with_capacity(vfs.files.len());
  let mut child_entries =
    include_all_entries(&vfs.root_path, &vfs.root, &mut seen_offsets);
    include_all_entries(&vfs.root_path, &vfs.entries, &mut seen_offsets);
  for child_entry in &mut child_entries {
    child_entry.collapse_leaf_nodes();
  }
@@ -961,27 +949,70 @@ impl VfsEntry {
  }
}

#[derive(Debug, Default, Serialize, Deserialize)]
pub struct VirtualDirectoryEntries(Vec<VfsEntry>);

impl VirtualDirectoryEntries {
  pub fn new(mut entries: Vec<VfsEntry>) -> Self {
    // needs to be sorted by name
    entries.sort_by(|a, b| a.name().cmp(b.name()));
    Self(entries)
  }

  pub fn take_inner(&mut self) -> Vec<VfsEntry> {
    std::mem::take(&mut self.0)
  }

  pub fn is_empty(&self) -> bool {
    self.0.is_empty()
  }

  pub fn len(&self) -> usize {
    self.0.len()
  }

  pub fn get_by_name(&self, name: &str) -> Option<&VfsEntry> {
    self.binary_search(name).ok().map(|index| &self.0[index])
  }

  pub fn get_mut_by_name(&mut self, name: &str) -> Option<&mut VfsEntry> {
    self
      .binary_search(name)
      .ok()
      .map(|index| &mut self.0[index])
  }

  pub fn binary_search(&self, name: &str) -> Result<usize, usize> {
    self.0.binary_search_by(|e| e.name().cmp(name))
  }

  pub fn insert(&mut self, entry: VfsEntry) {
    match self.binary_search(entry.name()) {
      Ok(index) => {
        self.0[index] = entry;
      }
      Err(insert_index) => {
        self.0.insert(insert_index, entry);
      }
    }
  }

  pub fn remove(&mut self, index: usize) -> VfsEntry {
    self.0.remove(index)
  }

  pub fn iter(&self) -> std::slice::Iter<'_, VfsEntry> {
    self.0.iter()
  }
}

#[derive(Debug, Serialize, Deserialize)]
pub struct VirtualDirectory {
  #[serde(rename = "n")]
  pub name: String,
  // should be sorted by name
  #[serde(rename = "e")]
  pub entries: Vec<VfsEntry>,
}

impl VirtualDirectory {
  pub fn insert_entry(&mut self, entry: VfsEntry) {
    let name = entry.name();
    match self.entries.binary_search_by(|e| e.name().cmp(name)) {
      Ok(index) => {
        self.entries[index] = entry;
      }
      Err(insert_index) => {
        self.entries.insert(insert_index, entry);
      }
    }
  }
  pub entries: VirtualDirectoryEntries,
}

#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
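`VirtualDirectoryEntries` exists to defend one invariant: entries stay sorted by name, so `binary_search` is both the lookup and the insertion-point finder. The same pattern on a plain `Vec<String>`:

    // Insert-or-replace into a name-sorted vec, mirroring
    // VirtualDirectoryEntries::insert above.
    fn insert_sorted(entries: &mut Vec<String>, entry: String) {
      match entries.binary_search(&entry) {
        Ok(index) => entries[index] = entry, // already present: replace
        Err(insert_index) => entries.insert(insert_index, entry), // keep order
      }
    }

    fn main() {
      let mut entries = Vec::new();
      for name in ["node_modules", "app", "deps", "app"] {
        insert_sorted(&mut entries, name.to_string());
      }
      assert_eq!(entries, ["app", "deps", "node_modules"]);
    }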
@@ -1136,20 +1167,13 @@ impl VfsRoot {
        }
      };
      let component = component.to_string_lossy();
      match current_dir
      current_entry = current_dir
        .entries
        .binary_search_by(|e| e.name().cmp(&component))
      {
        Ok(index) => {
          current_entry = current_dir.entries[index].as_ref();
        }
        Err(_) => {
          return Err(std::io::Error::new(
            std::io::ErrorKind::NotFound,
            "path not found",
          ));
        }
      }
        .get_by_name(&component)
        .ok_or_else(|| {
          std::io::Error::new(std::io::ErrorKind::NotFound, "path not found")
        })?
        .as_ref();
    }

    Ok((final_path, current_entry))
@@ -1706,7 +1730,10 @@ mod test {
    FileBackedVfs::new(
      Cow::Owned(data),
      VfsRoot {
        dir: vfs.root,
        dir: VirtualDirectory {
          name: "".to_string(),
          entries: vfs.entries,
        },
        root_path: dest_path.to_path_buf(),
        start_file_offset: 0,
      },
@@ -198,7 +198,7 @@ pub struct CoverageReport {
fn generate_coverage_report(
  script_coverage: &cdp::ScriptCoverage,
  script_source: String,
  maybe_source_map: &Option<Vec<u8>>,
  maybe_source_map: Option<&[u8]>,
  output: &Option<PathBuf>,
) -> CoverageReport {
  let maybe_source_map = maybe_source_map
@@ -625,7 +625,7 @@ pub fn cover_files(
    let coverage_report = generate_coverage_report(
      &script_coverage,
      runtime_code.as_str().to_owned(),
      &source_map,
      source_map.as_deref(),
      &out_mode,
    );

@@ -343,14 +343,14 @@ impl deno_doc::html::HrefResolver for DocResolver {
        let name = &res.req().name;
        Some((
          format!("https://www.npmjs.com/package/{name}"),
          name.to_owned(),
          name.to_string(),
        ))
      }
      "jsr" => {
        let res =
          deno_semver::jsr::JsrPackageReqReference::from_str(module).ok()?;
        let name = &res.req().name;
        Some((format!("https://jsr.io/{name}"), name.to_owned()))
        Some((format!("https://jsr.io/{name}"), name.to_string()))
      }
      _ => None,
    }
@@ -278,8 +278,10 @@ fn add_npm_packages_to_json(
    });
    if let Some(pkg) = maybe_package {
      if let Some(module) = module.as_object_mut() {
        module
          .insert("npmPackage".to_string(), pkg.id.as_serialized().into());
        module.insert(
          "npmPackage".to_string(),
          pkg.id.as_serialized().into_string().into(),
        );
      }
    }
  }
@@ -296,7 +298,7 @@ fn add_npm_packages_to_json(
      {
        dep.insert(
          "npmPackage".to_string(),
          pkg.id.as_serialized().into(),
          pkg.id.as_serialized().into_string().into(),
        );
      }
    }
@@ -324,19 +326,19 @@ fn add_npm_packages_to_json(
  let mut json_packages = serde_json::Map::with_capacity(sorted_packages.len());
  for pkg in sorted_packages {
    let mut kv = serde_json::Map::new();
    kv.insert("name".to_string(), pkg.id.nv.name.clone().into());
    kv.insert("name".to_string(), pkg.id.nv.name.to_string().into());
    kv.insert("version".to_string(), pkg.id.nv.version.to_string().into());
    let mut deps = pkg.dependencies.values().collect::<Vec<_>>();
    deps.sort();
    let deps = deps
      .into_iter()
      .map(|id| serde_json::Value::String(id.as_serialized()))
      .map(|id| serde_json::Value::String(id.as_serialized().into_string()))
      .collect::<Vec<_>>();
    kv.insert("dependencies".to_string(), deps.into());
    let registry_url = npmrc.get_registry_url(&pkg.id.nv.name);
    kv.insert("registryUrl".to_string(), registry_url.to_string().into());

    json_packages.insert(pkg.id.as_serialized(), kv.into());
    json_packages.insert(pkg.id.as_serialized().into_string(), kv.into());
  }

  json.insert("npmPackages".to_string(), json_packages.into());
@@ -549,7 +551,7 @@ impl<'a> GraphDisplayContext<'a> {
      None => Specifier(module.specifier().clone()),
    };
    let was_seen = !self.seen.insert(match &package_or_specifier {
      Package(package) => package.id.as_serialized(),
      Package(package) => package.id.as_serialized().into_string(),
      Specifier(specifier) => specifier.to_string(),
    });
    let header_text = if was_seen {
@@ -631,7 +633,8 @@ impl<'a> GraphDisplayContext<'a> {
    ));
    if let Some(package) = self.npm_info.packages.get(dep_id) {
      if !package.dependencies.is_empty() {
        let was_seen = !self.seen.insert(package.id.as_serialized());
        let was_seen =
          !self.seen.insert(package.id.as_serialized().into_string());
        if was_seen {
          child.text = format!("{} {}", child.text, colors::gray("*"));
        } else {
@@ -161,11 +161,11 @@ pub async fn infer_name_from_url(
    let npm_ref = npm_ref.into_inner();
    if let Some(sub_path) = npm_ref.sub_path {
      if !sub_path.contains('/') {
        return Some(sub_path);
        return Some(sub_path.to_string());
      }
    }
    if !npm_ref.req.name.contains('/') {
      return Some(npm_ref.req.name);
      return Some(npm_ref.req.name.into_string());
    }
    return None;
  }
518
cli/tools/lint/ast_buffer/buffer.rs
Normal file
518
cli/tools/lint/ast_buffer/buffer.rs
Normal file
|
@ -0,0 +1,518 @@
|
|||
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
|
||||
|
||||
use std::fmt::Display;
|
||||
|
||||
use deno_ast::swc::common::Span;
|
||||
use deno_ast::swc::common::DUMMY_SP;
|
||||
use indexmap::IndexMap;
|
||||
|
||||
/// Each property has this flag to mark what kind of value it holds-
|
||||
/// Plain objects and arrays are not supported yet, but could be easily
|
||||
/// added if needed.
|
||||
#[derive(Debug, PartialEq)]
|
||||
pub enum PropFlags {
|
||||
Ref,
|
||||
RefArr,
|
||||
String,
|
||||
Bool,
|
||||
Null,
|
||||
Undefined,
|
||||
}
|
||||
|
||||
impl From<PropFlags> for u8 {
|
||||
fn from(m: PropFlags) -> u8 {
|
||||
m as u8
|
||||
}
|
||||
}
|
||||
|
||||
impl TryFrom<u8> for PropFlags {
|
||||
type Error = &'static str;
|
||||
|
||||
fn try_from(value: u8) -> Result<Self, Self::Error> {
|
||||
match value {
|
||||
0 => Ok(PropFlags::Ref),
|
||||
1 => Ok(PropFlags::RefArr),
|
||||
2 => Ok(PropFlags::String),
|
||||
3 => Ok(PropFlags::Bool),
|
||||
4 => Ok(PropFlags::Null),
|
||||
5 => Ok(PropFlags::Undefined),
|
||||
_ => Err("Unknown Prop flag"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const MASK_U32_1: u32 = 0b11111111_00000000_00000000_00000000;
|
||||
const MASK_U32_2: u32 = 0b00000000_11111111_00000000_00000000;
|
||||
const MASK_U32_3: u32 = 0b00000000_00000000_11111111_00000000;
|
||||
const MASK_U32_4: u32 = 0b00000000_00000000_00000000_11111111;
|
||||
|
||||
// TODO: There is probably a native Rust function to do this.
|
||||
pub fn append_u32(result: &mut Vec<u8>, value: u32) {
|
||||
let v1: u8 = ((value & MASK_U32_1) >> 24) as u8;
|
||||
let v2: u8 = ((value & MASK_U32_2) >> 16) as u8;
|
||||
let v3: u8 = ((value & MASK_U32_3) >> 8) as u8;
|
||||
let v4: u8 = (value & MASK_U32_4) as u8;
|
||||
|
||||
result.push(v1);
|
||||
result.push(v2);
|
||||
result.push(v3);
|
||||
result.push(v4);
|
||||
}
|
||||
|
||||
pub fn append_usize(result: &mut Vec<u8>, value: usize) {
|
||||
let raw = u32::try_from(value).unwrap();
|
||||
append_u32(result, raw);
|
||||
}
|
||||
|
||||
pub fn write_usize(result: &mut [u8], value: usize, idx: usize) {
|
||||
let raw = u32::try_from(value).unwrap();
|
||||
|
||||
let v1: u8 = ((raw & MASK_U32_1) >> 24) as u8;
|
||||
let v2: u8 = ((raw & MASK_U32_2) >> 16) as u8;
|
||||
let v3: u8 = ((raw & MASK_U32_3) >> 8) as u8;
|
||||
let v4: u8 = (raw & MASK_U32_4) as u8;
|
||||
|
||||
result[idx] = v1;
|
||||
result[idx + 1] = v2;
|
||||
result[idx + 2] = v3;
|
||||
result[idx + 3] = v4;
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct StringTable {
|
||||
id: usize,
|
||||
table: IndexMap<String, usize>,
|
||||
}
|
||||
|
||||
impl StringTable {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
id: 0,
|
||||
table: IndexMap::new(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn insert(&mut self, s: &str) -> usize {
|
||||
if let Some(id) = self.table.get(s) {
|
||||
return *id;
|
||||
}
|
||||
|
||||
let id = self.id;
|
||||
self.id += 1;
|
||||
self.table.insert(s.to_string(), id);
|
||||
id
|
||||
}
|
||||
|
||||
pub fn serialize(&mut self) -> Vec<u8> {
|
||||
let mut result: Vec<u8> = vec![];
|
||||
append_u32(&mut result, self.table.len() as u32);
|
||||
|
||||
// Assume that it's sorted by id
|
||||
for (s, _id) in &self.table {
|
||||
let bytes = s.as_bytes();
|
||||
append_u32(&mut result, bytes.len() as u32);
|
||||
result.append(&mut bytes.to_vec());
|
||||
}
|
||||
|
||||
result
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq)]
|
||||
pub struct NodeRef(pub usize);
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct BoolPos(pub usize);
|
||||
#[derive(Debug)]
|
||||
pub struct FieldPos(pub usize);
|
||||
#[derive(Debug)]
|
||||
pub struct FieldArrPos(pub usize);
|
||||
#[derive(Debug)]
|
||||
pub struct StrPos(pub usize);
|
||||
#[derive(Debug)]
|
||||
pub struct UndefPos(pub usize);
|
||||
#[derive(Debug)]
|
||||
pub struct NullPos(pub usize);
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum NodePos {
|
||||
Bool(BoolPos),
|
||||
#[allow(dead_code)]
|
||||
Field(FieldPos),
|
||||
#[allow(dead_code)]
|
||||
FieldArr(FieldArrPos),
|
||||
Str(StrPos),
|
||||
Undef(UndefPos),
|
||||
#[allow(dead_code)]
|
||||
Null(NullPos),
|
||||
}
|
||||
|
||||
pub trait AstBufSerializer<K, P>
|
||||
where
|
||||
K: Into<u8> + Display,
|
||||
P: Into<u8> + Display,
|
||||
{
|
||||
fn header(
|
||||
&mut self,
|
||||
kind: K,
|
||||
parent: NodeRef,
|
||||
span: &Span,
|
||||
prop_count: usize,
|
||||
) -> NodeRef;
|
||||
fn ref_field(&mut self, prop: P) -> FieldPos;
|
||||
fn ref_vec_field(&mut self, prop: P, len: usize) -> FieldArrPos;
|
||||
fn str_field(&mut self, prop: P) -> StrPos;
|
||||
fn bool_field(&mut self, prop: P) -> BoolPos;
|
||||
fn undefined_field(&mut self, prop: P) -> UndefPos;
|
||||
#[allow(dead_code)]
|
||||
fn null_field(&mut self, prop: P) -> NullPos;
|
||||
|
||||
fn write_ref(&mut self, pos: FieldPos, value: NodeRef);
|
||||
fn write_maybe_ref(&mut self, pos: FieldPos, value: Option<NodeRef>);
|
||||
fn write_refs(&mut self, pos: FieldArrPos, value: Vec<NodeRef>);
|
||||
fn write_str(&mut self, pos: StrPos, value: &str);
|
||||
fn write_bool(&mut self, pos: BoolPos, value: bool);
|
||||
|
||||
fn serialize(&mut self) -> Vec<u8>;
|
||||
}

#[derive(Debug)]
pub struct SerializeCtx {
  buf: Vec<u8>,
  start_buf: NodeRef,
  str_table: StringTable,
  kind_map: Vec<usize>,
  prop_map: Vec<usize>,
}

/// This is the internal context used to allocate and fill the buffer. The point
/// is to be able to write absolute offsets directly in place.
///
/// The typical workflow is to reserve all necessary space for the current
/// node with placeholders for the offsets of the child nodes. Once child
/// nodes have been traversed, we know their offsets and can replace the
/// placeholder values with the actual ones.
impl SerializeCtx {
  pub fn new(kind_len: u8, prop_len: u8) -> Self {
    let kind_size = kind_len as usize;
    let prop_size = prop_len as usize;
    let mut ctx = Self {
      start_buf: NodeRef(0),
      buf: vec![],
      str_table: StringTable::new(),
      kind_map: vec![0; kind_size + 1],
      prop_map: vec![0; prop_size + 1],
    };

    ctx.str_table.insert("");

    // Placeholder node is always 0
    ctx.append_node(0, NodeRef(0), &DUMMY_SP, 0);
    ctx.kind_map[0] = 0;
    ctx.start_buf = NodeRef(ctx.buf.len());

    // Insert default props that are always present
    let type_str = ctx.str_table.insert("type");
    let parent_str = ctx.str_table.insert("parent");
    let range_str = ctx.str_table.insert("range");
    let length_str = ctx.str_table.insert("length");

    // These values are expected to be in this order on the JS side
    ctx.prop_map[0] = type_str;
    ctx.prop_map[1] = parent_str;
    ctx.prop_map[2] = range_str;
    ctx.prop_map[3] = length_str;

    ctx
  }

  /// Allocate a field's header
  fn field_header<P>(&mut self, prop: P, prop_flags: PropFlags) -> usize
  where
    P: Into<u8> + Display + Clone,
  {
    let offset = self.buf.len();

    let n: u8 = prop.clone().into();
    self.buf.push(n);

    if let Some(v) = self.prop_map.get::<usize>(n.into()) {
      if *v == 0 {
        let id = self.str_table.insert(&format!("{prop}"));
        self.prop_map[n as usize] = id;
      }
    }

    let flags: u8 = prop_flags.into();
    self.buf.push(flags);

    offset
  }

  /// Allocate a field header followed by a placeholder value.
  fn field<P>(&mut self, prop: P, prop_flags: PropFlags) -> usize
  where
    P: Into<u8> + Display + Clone,
  {
    let offset = self.field_header(prop, prop_flags);

    append_usize(&mut self.buf, 0);

    offset
  }

  fn append_node(
    &mut self,
    kind: u8,
    parent: NodeRef,
    span: &Span,
    prop_count: usize,
  ) -> NodeRef {
    let offset = self.buf.len();

    // Node type fits in a u8
    self.buf.push(kind);

    // Offset to the parent node. Will be 0 if none exists
    append_usize(&mut self.buf, parent.0);

    // Span, the start and end location of this node
    append_u32(&mut self.buf, span.lo.0);
    append_u32(&mut self.buf, span.hi.0);

    // No node has more than 10 properties
    debug_assert!(prop_count < 10);
    self.buf.push(prop_count as u8);

    NodeRef(offset)
  }

  /// Allocate the node header. It's always the same for every node.
  /// <type u8>
  /// <parent offset u32>
  /// <span lo u32>
  /// <span high u32>
  /// <property count u8> (There is no node with more than 10 properties)
  pub fn header<N>(
    &mut self,
    kind: N,
    parent: NodeRef,
    span: &Span,
    prop_count: usize,
  ) -> NodeRef
  where
    N: Into<u8> + Display + Clone,
  {
    let n: u8 = kind.clone().into();

    if let Some(v) = self.kind_map.get::<usize>(n.into()) {
      if *v == 0 {
        let id = self.str_table.insert(&format!("{kind}"));
        self.kind_map[n as usize] = id;
      }
    }

    self.append_node(n, parent, span, prop_count)
  }

  /// Allocate a reference property that will hold the offset of
  /// another node.
  pub fn ref_field<P>(&mut self, prop: P) -> usize
  where
    P: Into<u8> + Display + Clone,
  {
    self.field(prop, PropFlags::Ref)
  }

  /// Allocate a property that is a vec of node offsets pointing to other
  /// nodes.
  pub fn ref_vec_field<P>(&mut self, prop: P, len: usize) -> usize
  where
    P: Into<u8> + Display + Clone,
  {
    let offset = self.field(prop, PropFlags::RefArr);

    for _ in 0..len {
      append_u32(&mut self.buf, 0);
    }

    offset
  }

  /// Allocate a property representing a string. Strings are deduplicated
  /// in the message and the property will only contain the string id.
  pub fn str_field<P>(&mut self, prop: P) -> usize
  where
    P: Into<u8> + Display + Clone,
  {
    self.field(prop, PropFlags::String)
  }

  /// Allocate a bool field
  pub fn bool_field<P>(&mut self, prop: P) -> usize
  where
    P: Into<u8> + Display + Clone,
  {
    let offset = self.field_header(prop, PropFlags::Bool);
    self.buf.push(0);
    offset
  }

  /// Allocate an undefined field
  pub fn undefined_field<P>(&mut self, prop: P) -> usize
  where
    P: Into<u8> + Display + Clone,
  {
    self.field_header(prop, PropFlags::Undefined)
  }

  /// Allocate a null field
  #[allow(dead_code)]
  pub fn null_field<P>(&mut self, prop: P) -> usize
  where
    P: Into<u8> + Display + Clone,
  {
    self.field_header(prop, PropFlags::Null)
  }

  /// Replace the placeholder of a reference field with the actual offset
  /// to the node we want to point to.
  pub fn write_ref(&mut self, field_offset: usize, value: NodeRef) {
    #[cfg(debug_assertions)]
    {
      let value_kind = self.buf[field_offset + 1];
      if PropFlags::try_from(value_kind).unwrap() != PropFlags::Ref {
        panic!("Trying to write a ref into a non-ref field")
      }
    }

    write_usize(&mut self.buf, value.0, field_offset + 2);
  }

  /// Helper for writing optional node offsets
  pub fn write_maybe_ref(
    &mut self,
    field_offset: usize,
    value: Option<NodeRef>,
  ) {
    #[cfg(debug_assertions)]
    {
      let value_kind = self.buf[field_offset + 1];
      if PropFlags::try_from(value_kind).unwrap() != PropFlags::Ref {
        panic!("Trying to write a ref into a non-ref field")
      }
    }

    let ref_value = if let Some(v) = value { v } else { NodeRef(0) };
    write_usize(&mut self.buf, ref_value.0, field_offset + 2);
  }

  /// Write a vec of node offsets into the property. The necessary space
  /// has been reserved earlier.
  pub fn write_refs(&mut self, field_offset: usize, value: Vec<NodeRef>) {
    #[cfg(debug_assertions)]
    {
      let value_kind = self.buf[field_offset + 1];
      if PropFlags::try_from(value_kind).unwrap() != PropFlags::RefArr {
        panic!("Trying to write a ref into a non-ref array field")
      }
    }

    let mut offset = field_offset + 2;
    write_usize(&mut self.buf, value.len(), offset);
    offset += 4;

    for item in value {
      write_usize(&mut self.buf, item.0, offset);
      offset += 4;
    }
  }

  /// Store the string in our string table and save the id of the string
  /// in the current field.
  pub fn write_str(&mut self, field_offset: usize, value: &str) {
    #[cfg(debug_assertions)]
    {
      let value_kind = self.buf[field_offset + 1];
      if PropFlags::try_from(value_kind).unwrap() != PropFlags::String {
        panic!("Trying to write a string into a non-string field")
      }
    }

    let id = self.str_table.insert(value);
    write_usize(&mut self.buf, id, field_offset + 2);
  }

  /// Write a bool to a field.
  pub fn write_bool(&mut self, field_offset: usize, value: bool) {
    #[cfg(debug_assertions)]
    {
      let value_kind = self.buf[field_offset + 1];
      if PropFlags::try_from(value_kind).unwrap() != PropFlags::Bool {
        panic!("Trying to write a bool into a non-bool field")
      }
    }

    self.buf[field_offset + 2] = if value { 1 } else { 0 };
  }

  /// Serialize all information we have into a buffer that can be sent to JS.
  /// It has the following structure:
  ///
  /// <...ast>
  /// <string table>
  /// <node kind map> <- node kind id maps to string id
  /// <node prop map> <- node property id maps to string id
  /// <offset kind map>
  /// <offset prop map>
  /// <offset str table>
  pub fn serialize(&mut self) -> Vec<u8> {
    let mut buf: Vec<u8> = vec![];

    // The buffer starts with the serialized AST first, because that
    // contains absolute offsets. By putting this at the start of the
    // message we don't have to waste time updating any offsets.
    buf.append(&mut self.buf);

    // Next follows the string table. We'll keep track of the offset
    // in the message of where the string table begins
    let offset_str_table = buf.len();

    // Serialize string table
    buf.append(&mut self.str_table.serialize());

    // Next, serialize the mappings of kind -> string of encountered
    // nodes in the AST. We use this additional lookup table to compress
    // the message so that we can save space by using a u8. All nodes of
    // JS, TS and JSX together are <200
    let offset_kind_map = buf.len();

    // Write the total number of entries in the kind -> str mapping table
    // TODO: make this a u8
    append_usize(&mut buf, self.kind_map.len());
    for v in &self.kind_map {
      append_usize(&mut buf, *v);
    }

    // Store offset to prop -> string map. It's the same as with node kind
    // as the total number of properties is <120 which allows us to store it
    // as u8.
    let offset_prop_map = buf.len();
    // Write the total number of entries in the prop -> str mapping table
    append_usize(&mut buf, self.prop_map.len());
    for v in &self.prop_map {
      append_usize(&mut buf, *v);
    }

    // Putting offsets of relevant parts of the buffer at the end. This
    // allows us to hop to the relevant part by merely looking at the last
    // four values in the message. Each value represents an offset into the
    // buffer.
    append_usize(&mut buf, offset_kind_map);
    append_usize(&mut buf, offset_prop_map);
    append_usize(&mut buf, offset_str_table);
    append_usize(&mut buf, self.start_buf.0);

    buf
  }
}
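To make that layout concrete, here is a minimal decoding sketch. It is illustrative only (not part of this commit) and assumes `append_usize` writes little-endian `u32` values, which matches the 4-byte stride used by `write_refs` above; each node header is then 14 bytes (`kind u8`, `parent u32`, `span lo u32`, `span hi u32`, `prop count u8`).

// Hypothetical consumer-side helper, not part of this commit.
struct MessageOffsets {
  kind_map: usize,
  prop_map: usize,
  str_table: usize,
  first_node: usize,
}

// Reads the four trailing offsets that `serialize()` appends last.
fn read_message_offsets(buf: &[u8]) -> MessageOffsets {
  let read_u32 = |pos: usize| -> usize {
    u32::from_le_bytes([buf[pos], buf[pos + 1], buf[pos + 2], buf[pos + 3]])
      as usize
  };
  // Assumes each offset was written as a little-endian u32.
  let tail = buf.len() - 16;
  MessageOffsets {
    kind_map: read_u32(tail),
    prop_map: read_u32(tail + 4),
    str_table: read_u32(tail + 8),
    first_node: read_u32(tail + 12),
  }
}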
13  cli/tools/lint/ast_buffer/mod.rs  Normal file
@@ -0,0 +1,13 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.

use deno_ast::ParsedSource;
use swc::serialize_swc_to_buffer;

mod buffer;
mod swc;
mod ts_estree;

pub fn serialize_ast_to_buffer(parsed_source: &ParsedSource) -> Vec<u8> {
  // TODO: We could support multiple languages here
  serialize_swc_to_buffer(parsed_source)
}
3018  cli/tools/lint/ast_buffer/swc.rs  Normal file
File diff suppressed because it is too large

515  cli/tools/lint/ast_buffer/ts_estree.rs  Normal file
@@ -0,0 +1,515 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.

use std::fmt;
use std::fmt::Debug;
use std::fmt::Display;

use deno_ast::swc::common::Span;

use super::buffer::AstBufSerializer;
use super::buffer::BoolPos;
use super::buffer::FieldArrPos;
use super::buffer::FieldPos;
use super::buffer::NodeRef;
use super::buffer::NullPos;
use super::buffer::SerializeCtx;
use super::buffer::StrPos;
use super::buffer::UndefPos;

#[derive(Debug, Clone, PartialEq)]
pub enum AstNode {
  // First node must always be the empty/invalid node
  Invalid,
  // Typically the
  Program,

  // Module declarations
  ExportAllDeclaration,
  ExportDefaultDeclaration,
  ExportNamedDeclaration,
  ImportDeclaration,
  TsExportAssignment,
  TsImportEquals,
  TsNamespaceExport,

  // Decls
  ClassDeclaration,
  FunctionDeclaration,
  TSEnumDeclaration,
  TSInterface,
  TsModule,
  TsTypeAlias,
  Using,
  VariableDeclaration,

  // Statements
  BlockStatement,
  BreakStatement,
  ContinueStatement,
  DebuggerStatement,
  DoWhileStatement,
  EmptyStatement,
  ExpressionStatement,
  ForInStatement,
  ForOfStatement,
  ForStatement,
  IfStatement,
  LabeledStatement,
  ReturnStatement,
  SwitchCase,
  SwitchStatement,
  ThrowStatement,
  TryStatement,
  WhileStatement,
  WithStatement,

  // Expressions
  ArrayExpression,
  ArrowFunctionExpression,
  AssignmentExpression,
  AwaitExpression,
  BinaryExpression,
  CallExpression,
  ChainExpression,
  ClassExpression,
  ConditionalExpression,
  FunctionExpression,
  Identifier,
  ImportExpression,
  LogicalExpression,
  MemberExpression,
  MetaProp,
  NewExpression,
  ObjectExpression,
  PrivateIdentifier,
  SequenceExpression,
  Super,
  TaggedTemplateExpression,
  TemplateLiteral,
  ThisExpression,
  TSAsExpression,
  TsConstAssertion,
  TsInstantiation,
  TSNonNullExpression,
  TSSatisfiesExpression,
  TSTypeAssertion,
  UnaryExpression,
  UpdateExpression,
  YieldExpression,

  // TODO: TSEsTree uses a single literal node
  // Literals
  StringLiteral,
  Bool,
  Null,
  NumericLiteral,
  BigIntLiteral,
  RegExpLiteral,

  EmptyExpr,
  SpreadElement,
  Property,
  VariableDeclarator,
  CatchClause,
  RestElement,
  ExportSpecifier,
  TemplateElement,
  MethodDefinition,
  ClassBody,

  // Patterns
  ArrayPattern,
  AssignmentPattern,
  ObjectPattern,

  // JSX
  JSXAttribute,
  JSXClosingElement,
  JSXClosingFragment,
  JSXElement,
  JSXEmptyExpression,
  JSXExpressionContainer,
  JSXFragment,
  JSXIdentifier,
  JSXMemberExpression,
  JSXNamespacedName,
  JSXOpeningElement,
  JSXOpeningFragment,
  JSXSpreadAttribute,
  JSXSpreadChild,
  JSXText,

  TSTypeAnnotation,
  TSTypeParameterDeclaration,
  TSTypeParameter,
  TSTypeParameterInstantiation,
  TSEnumMember,
  TSInterfaceBody,
  TSInterfaceHeritage,
  TSTypeReference,
  TSThisType,
  TSLiteralType,
  TSInferType,
  TSConditionalType,
  TSUnionType,
  TSIntersectionType,
  TSMappedType,
  TSTypeQuery,
  TSTupleType,
  TSNamedTupleMember,
  TSFunctionType,
  TsCallSignatureDeclaration,
  TSPropertySignature,
  TSMethodSignature,
  TSIndexSignature,
  TSIndexedAccessType,
  TSTypeOperator,
  TSTypePredicate,
  TSImportType,
  TSRestType,
  TSArrayType,
  TSClassImplements,

  TSAnyKeyword,
  TSBigIntKeyword,
  TSBooleanKeyword,
  TSIntrinsicKeyword,
  TSNeverKeyword,
  TSNullKeyword,
  TSNumberKeyword,
  TSObjectKeyword,
  TSStringKeyword,
  TSSymbolKeyword,
  TSUndefinedKeyword,
  TSUnknownKeyword,
  TSVoidKeyword,
  TSEnumBody, // Last value is used for max value
}

impl Display for AstNode {
  fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
    Debug::fmt(self, f)
  }
}

impl From<AstNode> for u8 {
  fn from(m: AstNode) -> u8 {
    m as u8
  }
}

#[derive(Debug, Clone)]
pub enum AstProp {
  // Base, these three must be in sync with JS. The
  // order here for these 3 fields is important.
  Type,
  Parent,
  Range,
  Length, // Not used in AST, but can be used in attr selectors

  // Starting from here the order doesn't matter.
  // Following are all possible AST node properties.
  Abstract,
  Accessibility,
  Alternate,
  Argument,
  Arguments,
  Asserts,
  Async,
  Attributes,
  Await,
  Block,
  Body,
  Callee,
  Cases,
  Children,
  CheckType,
  ClosingElement,
  ClosingFragment,
  Computed,
  Consequent,
  Const,
  Constraint,
  Cooked,
  Declaration,
  Declarations,
  Declare,
  Default,
  Definite,
  Delegate,
  Discriminant,
  Elements,
  ElementType,
  ElementTypes,
  ExprName,
  Expression,
  Expressions,
  Exported,
  Extends,
  ExtendsType,
  FalseType,
  Finalizer,
  Flags,
  Generator,
  Handler,
  Id,
  In,
  IndexType,
  Init,
  Initializer,
  Implements,
  Key,
  Kind,
  Label,
  Left,
  Literal,
  Local,
  Members,
  Meta,
  Method,
  Name,
  Namespace,
  NameType,
  Object,
  ObjectType,
  OpeningElement,
  OpeningFragment,
  Operator,
  Optional,
  Out,
  Param,
  ParameterName,
  Params,
  Pattern,
  Prefix,
  Properties,
  Property,
  Qualifier,
  Quasi,
  Quasis,
  Raw,
  Readonly,
  ReturnType,
  Right,
  SelfClosing,
  Shorthand,
  Source,
  SourceType,
  Specifiers,
  Static,
  SuperClass,
  SuperTypeArguments,
  Tag,
  Tail,
  Test,
  TrueType,
  TypeAnnotation,
  TypeArguments,
  TypeName,
  TypeParameter,
  TypeParameters,
  Types,
  Update,
  Value, // Last value is used for max value
}

// TODO: Feels like there should be an easier way to iterate over an
// enum in Rust and lowercase the first letter.
impl Display for AstProp {
  fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
    let s = match self {
      AstProp::Parent => "parent",
      AstProp::Range => "range",
      AstProp::Type => "type",
      AstProp::Length => "length",
      AstProp::Abstract => "abstract",
      AstProp::Accessibility => "accessibility",
      AstProp::Alternate => "alternate",
      AstProp::Argument => "argument",
      AstProp::Arguments => "arguments",
      AstProp::Asserts => "asserts",
      AstProp::Async => "async",
      AstProp::Attributes => "attributes",
      AstProp::Await => "await",
      AstProp::Block => "block",
      AstProp::Body => "body",
      AstProp::Callee => "callee",
      AstProp::Cases => "cases",
      AstProp::Children => "children",
      AstProp::CheckType => "checkType",
      AstProp::ClosingElement => "closingElement",
      AstProp::ClosingFragment => "closingFragment",
      AstProp::Computed => "computed",
      AstProp::Consequent => "consequent",
      AstProp::Const => "const",
      AstProp::Constraint => "constraint",
      AstProp::Cooked => "cooked",
      AstProp::Declaration => "declaration",
      AstProp::Declarations => "declarations",
      AstProp::Declare => "declare",
      AstProp::Default => "default",
      AstProp::Definite => "definite",
      AstProp::Delegate => "delegate",
      AstProp::Discriminant => "discriminant",
      AstProp::Elements => "elements",
      AstProp::ElementType => "elementType",
      AstProp::ElementTypes => "elementTypes",
      AstProp::ExprName => "exprName",
      AstProp::Expression => "expression",
      AstProp::Expressions => "expressions",
      AstProp::Exported => "exported",
      AstProp::Extends => "extends",
      AstProp::ExtendsType => "extendsType",
      AstProp::FalseType => "falseType",
      AstProp::Finalizer => "finalizer",
      AstProp::Flags => "flags",
      AstProp::Generator => "generator",
      AstProp::Handler => "handler",
      AstProp::Id => "id",
      AstProp::In => "in",
      AstProp::IndexType => "indexType",
      AstProp::Init => "init",
      AstProp::Initializer => "initializer",
      AstProp::Implements => "implements",
      AstProp::Key => "key",
      AstProp::Kind => "kind",
      AstProp::Label => "label",
      AstProp::Left => "left",
      AstProp::Literal => "literal",
      AstProp::Local => "local",
      AstProp::Members => "members",
      AstProp::Meta => "meta",
      AstProp::Method => "method",
      AstProp::Name => "name",
      AstProp::Namespace => "namespace",
      AstProp::NameType => "nameType",
      AstProp::Object => "object",
      AstProp::ObjectType => "objectType",
      AstProp::OpeningElement => "openingElement",
      AstProp::OpeningFragment => "openingFragment",
      AstProp::Operator => "operator",
      AstProp::Optional => "optional",
      AstProp::Out => "out",
      AstProp::Param => "param",
      AstProp::ParameterName => "parameterName",
      AstProp::Params => "params",
      AstProp::Pattern => "pattern",
      AstProp::Prefix => "prefix",
      AstProp::Properties => "properties",
      AstProp::Property => "property",
      AstProp::Qualifier => "qualifier",
      AstProp::Quasi => "quasi",
      AstProp::Quasis => "quasis",
      AstProp::Raw => "raw",
      AstProp::Readonly => "readonly",
      AstProp::ReturnType => "returnType",
      AstProp::Right => "right",
      AstProp::SelfClosing => "selfClosing",
      AstProp::Shorthand => "shorthand",
      AstProp::Source => "source",
      AstProp::SourceType => "sourceType",
      AstProp::Specifiers => "specifiers",
      AstProp::Static => "static",
      AstProp::SuperClass => "superClass",
      AstProp::SuperTypeArguments => "superTypeArguments",
      AstProp::Tag => "tag",
      AstProp::Tail => "tail",
      AstProp::Test => "test",
      AstProp::TrueType => "trueType",
      AstProp::TypeAnnotation => "typeAnnotation",
      AstProp::TypeArguments => "typeArguments",
      AstProp::TypeName => "typeName",
      AstProp::TypeParameter => "typeParameter",
      AstProp::TypeParameters => "typeParameters",
      AstProp::Types => "types",
      AstProp::Update => "update",
      AstProp::Value => "value",
    };

    write!(f, "{}", s)
  }
}

impl From<AstProp> for u8 {
  fn from(m: AstProp) -> u8 {
    m as u8
  }
}

pub struct TsEsTreeBuilder {
  ctx: SerializeCtx,
}

// TODO: Add a builder API to make it easier to convert from different source
// ast formats.
impl TsEsTreeBuilder {
  pub fn new() -> Self {
    // Max values
    // TODO: Maybe there is a rust macro to grab the last enum value?
    let kind_count: u8 = AstNode::TSEnumBody.into();
    let prop_count: u8 = AstProp::Value.into();
    Self {
      ctx: SerializeCtx::new(kind_count, prop_count),
    }
  }
}

impl AstBufSerializer<AstNode, AstProp> for TsEsTreeBuilder {
  fn header(
    &mut self,
    kind: AstNode,
    parent: NodeRef,
    span: &Span,
    prop_count: usize,
  ) -> NodeRef {
    self.ctx.header(kind, parent, span, prop_count)
  }

  fn ref_field(&mut self, prop: AstProp) -> FieldPos {
    FieldPos(self.ctx.ref_field(prop))
  }

  fn ref_vec_field(&mut self, prop: AstProp, len: usize) -> FieldArrPos {
    FieldArrPos(self.ctx.ref_vec_field(prop, len))
  }

  fn str_field(&mut self, prop: AstProp) -> StrPos {
    StrPos(self.ctx.str_field(prop))
  }

  fn bool_field(&mut self, prop: AstProp) -> BoolPos {
    BoolPos(self.ctx.bool_field(prop))
  }

  fn undefined_field(&mut self, prop: AstProp) -> UndefPos {
    UndefPos(self.ctx.undefined_field(prop))
  }

  fn null_field(&mut self, prop: AstProp) -> NullPos {
    NullPos(self.ctx.null_field(prop))
  }

  fn write_ref(&mut self, pos: FieldPos, value: NodeRef) {
    self.ctx.write_ref(pos.0, value);
  }

  fn write_maybe_ref(&mut self, pos: FieldPos, value: Option<NodeRef>) {
    self.ctx.write_maybe_ref(pos.0, value);
  }

  fn write_refs(&mut self, pos: FieldArrPos, value: Vec<NodeRef>) {
    self.ctx.write_refs(pos.0, value);
  }

  fn write_str(&mut self, pos: StrPos, value: &str) {
    self.ctx.write_str(pos.0, value);
  }

  fn write_bool(&mut self, pos: BoolPos, value: bool) {
    self.ctx.write_bool(pos.0, value);
  }

  fn serialize(&mut self) -> Vec<u8> {
    self.ctx.serialize()
  }
}
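A rough usage sketch of the builder follows. It is illustrative only; the real driver lives in the suppressed swc.rs diff above, and the `span` argument here is assumed to come from a parsed source. The pattern is the reserve-then-patch workflow described in buffer.rs: allocate the header and field placeholders first, then write the real offsets once the children are serialized.

// Hypothetical caller, not part of this commit.
fn example(span: &Span) -> Vec<u8> {
  let mut builder = TsEsTreeBuilder::new();

  // Reserve the program node and a placeholder for its `body` field.
  let program = builder.header(AstNode::Program, NodeRef(0), span, 1);
  let body_pos = builder.ref_vec_field(AstProp::Body, 1);

  // Serialize a child, then patch the reserved slot with its offset.
  let stmt = builder.header(AstNode::EmptyStatement, program, span, 0);
  builder.write_refs(body_pos, vec![stmt]);

  builder.serialize()
}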

@@ -51,10 +51,13 @@ use crate::util::fs::canonicalize_path;
use crate::util::path::is_script_ext;
use crate::util::sync::AtomicFlag;

mod ast_buffer;
mod linter;
mod reporters;
mod rules;

// TODO(bartlomieju): remove once we wire plugins through the CLI linter
pub use ast_buffer::serialize_ast_to_buffer;
pub use linter::CliLinter;
pub use linter::CliLinterOptions;
pub use rules::collect_no_slow_type_diagnostics;

@@ -26,6 +26,7 @@ use deno_core::serde_json;
use deno_core::serde_json::json;
use deno_core::serde_json::Value;
use deno_core::url::Url;
use deno_runtime::deno_fetch;
use deno_terminal::colors;
use http_body_util::BodyExt;
use serde::Deserialize;

@@ -911,9 +912,7 @@ async fn publish_package(
    package.config
  );

  let body = http_body_util::Full::new(package.tarball.bytes.clone())
    .map_err(|never| match never {})
    .boxed();
  let body = deno_fetch::ReqBody::full(package.tarball.bytes.clone());
  let response = http_client
    .post(url.parse()?, body)?
    .header(

@@ -15,6 +15,7 @@ use deno_semver::jsr::JsrPackageReqReference;
use deno_semver::npm::NpmPackageReqReference;
use deno_semver::package::PackageNv;
use deno_semver::package::PackageReq;
use deno_semver::StackString;
use deno_semver::Version;
use deno_semver::VersionReq;
use deps::KeyPath;

@@ -283,7 +284,7 @@ fn package_json_dependency_entry(
    (npm_package.into(), selected.version_req)
  } else {
    (
      selected.import_name,
      selected.import_name.into_string(),
      format!("npm:{}@{}", npm_package, selected.version_req),
    )
  }

@@ -292,7 +293,7 @@ fn package_json_dependency_entry(
    let scope_replaced = jsr_package.replace('/', "__");
    let version_req =
      format!("npm:@jsr/{scope_replaced}@{}", selected.version_req);
    (selected.import_name, version_req)
    (selected.import_name.into_string(), version_req)
  } else {
    (selected.package_name, selected.version_req)
  }

@@ -549,10 +550,10 @@ pub async fn add(
}

struct SelectedPackage {
  import_name: String,
  import_name: StackString,
  package_name: String,
  version_req: String,
  selected_version: String,
  selected_version: StackString,
}

enum NotFoundHelp {

@@ -683,7 +684,7 @@ async fn find_package_and_select_version_for_req(
      import_name: add_package_req.alias,
      package_name: prefixed_name,
      version_req: format!("{}{}", range_symbol, &nv.version),
      selected_version: nv.version.to_string(),
      selected_version: nv.version.to_custom_string::<StackString>(),
    }))
}

@@ -705,7 +706,7 @@ enum AddRmPackageReqValue {

#[derive(Debug, PartialEq, Eq)]
pub struct AddRmPackageReq {
  alias: String,
  alias: StackString,
  value: AddRmPackageReqValue,
}

@@ -753,7 +754,11 @@ impl AddRmPackageReq {
          return Ok(Err(PackageReq::from_str(entry_text)?));
        }

        (maybe_prefix.unwrap(), Some(alias.to_string()), entry_text)
        (
          maybe_prefix.unwrap(),
          Some(StackString::from(alias)),
          entry_text,
        )
      }
      None => return Ok(Err(PackageReq::from_str(entry_text)?)),
    },

@@ -765,7 +770,7 @@ impl AddRmPackageReq {
        JsrPackageReqReference::from_str(&format!("jsr:{}", entry_text))?;
      let package_req = req_ref.into_inner().req;
      Ok(Ok(AddRmPackageReq {
        alias: maybe_alias.unwrap_or_else(|| package_req.name.to_string()),
        alias: maybe_alias.unwrap_or_else(|| package_req.name.clone()),
        value: AddRmPackageReqValue::Jsr(package_req),
      }))
    }

@@ -785,7 +790,7 @@ impl AddRmPackageReq {
        );
      }
      Ok(Ok(AddRmPackageReq {
        alias: maybe_alias.unwrap_or_else(|| package_req.name.to_string()),
        alias: maybe_alias.unwrap_or_else(|| package_req.name.clone()),
        value: AddRmPackageReqValue::Npm(package_req),
      }))
    }

@@ -878,14 +883,14 @@ mod test {
    assert_eq!(
      AddRmPackageReq::parse("jsr:foo").unwrap().unwrap(),
      AddRmPackageReq {
        alias: "foo".to_string(),
        alias: "foo".into(),
        value: AddRmPackageReqValue::Jsr(PackageReq::from_str("foo").unwrap())
      }
    );
    assert_eq!(
      AddRmPackageReq::parse("alias@jsr:foo").unwrap().unwrap(),
      AddRmPackageReq {
        alias: "alias".to_string(),
        alias: "alias".into(),
        value: AddRmPackageReqValue::Jsr(PackageReq::from_str("foo").unwrap())
      }
    );

@@ -894,7 +899,7 @@ mod test {
      .unwrap()
      .unwrap(),
      AddRmPackageReq {
        alias: "@alias/pkg".to_string(),
        alias: "@alias/pkg".into(),
        value: AddRmPackageReqValue::Npm(
          PackageReq::from_str("foo@latest").unwrap()
        )

@@ -905,7 +910,7 @@ mod test {
      .unwrap()
      .unwrap(),
      AddRmPackageReq {
        alias: "@alias/pkg".to_string(),
        alias: "@alias/pkg".into(),
        value: AddRmPackageReqValue::Jsr(PackageReq::from_str("foo").unwrap())
      }
    );

@@ -914,7 +919,7 @@ mod test {
      .unwrap()
      .unwrap(),
      AddRmPackageReq {
        alias: "alias".to_string(),
        alias: "alias".into(),
        value: AddRmPackageReqValue::Jsr(
          PackageReq::from_str("foo@^1.5.0").unwrap()
        )

@@ -27,6 +27,7 @@ use deno_semver::npm::NpmPackageReqReference;
use deno_semver::package::PackageNv;
use deno_semver::package::PackageReq;
use deno_semver::package::PackageReqReference;
use deno_semver::StackString;
use deno_semver::Version;
use deno_semver::VersionReq;
use import_map::ImportMap;

@@ -139,13 +140,7 @@ pub enum KeyPart {
  Scopes,
  Dependencies,
  DevDependencies,
  String(String),
}

impl From<String> for KeyPart {
  fn from(value: String) -> Self {
    KeyPart::String(value)
  }
  String(StackString),
}

impl From<PackageJsonDepKind> for KeyPart {

@@ -164,7 +159,7 @@ impl KeyPart {
      KeyPart::Scopes => "scopes",
      KeyPart::Dependencies => "dependencies",
      KeyPart::DevDependencies => "devDependencies",
      KeyPart::String(s) => s,
      KeyPart::String(s) => s.as_str(),
    }
  }
}

@@ -217,12 +212,12 @@ fn import_map_entries(
    .chain(import_map.scopes().flat_map(|scope| {
      let path = KeyPath::from_parts([
        KeyPart::Scopes,
        scope.raw_key.to_string().into(),
        KeyPart::String(scope.raw_key.into()),
      ]);

      scope.imports.entries().map(move |entry| {
        let mut full_path = path.clone();
        full_path.push(KeyPart::String(entry.raw_key.to_string()));
        full_path.push(KeyPart::String(entry.raw_key.into()));
        (full_path, entry)
      })
    }))

@@ -338,7 +333,7 @@ fn add_deps_from_package_json(
  package_json: &PackageJsonRc,
  mut filter: impl DepFilter,
  package_dep_kind: PackageJsonDepKind,
  package_json_deps: PackageJsonDepsMap,
  package_json_deps: &PackageJsonDepsMap,
  deps: &mut Vec<Dep>,
) {
  for (k, v) in package_json_deps {

@@ -353,7 +348,7 @@ fn add_deps_from_package_json(
      deno_package_json::PackageJsonDepValue::Req(req) => {
        let alias = k.as_str();
        let alias = (alias != req.name).then(|| alias.to_string());
        if !filter.should_include(alias.as_deref(), &req, DepKind::Npm) {
        if !filter.should_include(alias.as_deref(), req, DepKind::Npm) {
          continue;
        }
        let id = DepId(deps.len());

@@ -362,9 +357,12 @@ fn add_deps_from_package_json(
          kind: DepKind::Npm,
          location: DepLocation::PackageJson(
            package_json.clone(),
            KeyPath::from_parts([package_dep_kind.into(), k.into()]),
            KeyPath::from_parts([
              package_dep_kind.into(),
              KeyPart::String(k.clone()),
            ]),
          ),
          req,
          req: req.clone(),
          alias,
        })
      }

@@ -377,14 +375,14 @@ fn add_deps_from_package_json(
    package_json,
    filter,
    PackageJsonDepKind::Normal,
    package_json_deps.dependencies,
    &package_json_deps.dependencies,
    deps,
  );
  iterate(
    package_json,
    filter,
    PackageJsonDepKind::Dev,
    package_json_deps.dev_dependencies,
    &package_json_deps.dev_dependencies,
    deps,
  );
}

@@ -8,6 +8,7 @@ use deno_core::anyhow::bail;
use deno_core::error::AnyError;
use deno_semver::package::PackageNv;
use deno_semver::package::PackageReq;
use deno_semver::StackString;
use deno_semver::VersionReq;
use deno_terminal::colors;

@@ -31,7 +32,7 @@ struct OutdatedPackage {
  latest: String,
  semver_compatible: String,
  current: String,
  name: String,
  name: StackString,
}

#[allow(clippy::print_stdout)]

@@ -231,7 +231,7 @@ pub async fn execute_script(
      &Url::from_directory_path(cli_options.initial_cwd()).unwrap(),
      "",
      &TaskDefinition {
        command: task_flags.task.as_ref().unwrap().to_string(),
        command: Some(task_flags.task.as_ref().unwrap().to_string()),
        dependencies: vec![],
        description: None,
      },

@@ -448,6 +448,16 @@ impl<'a> TaskRunner<'a> {
    kill_signal: KillSignal,
    argv: &'a [String],
  ) -> Result<i32, deno_core::anyhow::Error> {
    let Some(command) = &definition.command else {
      log::info!(
        "{} {} {}",
        colors::green("Task"),
        colors::cyan(task_name),
        colors::gray("(no command)")
      );
      return Ok(0);
    };

    if let Some(npm_resolver) = self.npm_resolver.as_managed() {
      npm_resolver.ensure_top_level_package_json_install().await?;
      npm_resolver

@@ -469,7 +479,7 @@ impl<'a> TaskRunner<'a> {
    self
      .run_single(RunSingleOptions {
        task_name,
        script: &definition.command,
        script: command,
        cwd: &cwd,
        custom_commands,
        kill_signal,

@@ -837,7 +847,7 @@ fn print_available_tasks(
          is_deno: false,
          name: name.to_string(),
          task: deno_config::deno_json::TaskDefinition {
            command: script.to_string(),
            command: Some(script.to_string()),
            dependencies: vec![],
            description: None,
          },

@@ -873,11 +883,13 @@ fn print_available_tasks(
      )?;
    }
  }
  if let Some(command) = &desc.task.command {
    writeln!(
      writer,
      "    {}",
      strip_ansi_codes_and_escape_control_chars(&desc.task.command)
      strip_ansi_codes_and_escape_control_chars(command)
    )?;
  };
  if !desc.task.dependencies.is_empty() {
    let dependencies = desc
      .task

@@ -616,6 +616,7 @@ async fn configure_main_worker(
    WorkerExecutionMode::Test,
    specifier.clone(),
    permissions_container,
    vec![ops::testing::deno_test::init_ops(worker_sender.sender)],
    vec![
      ops::testing::deno_test::init_ops(worker_sender.sender),
      ops::lint::deno_lint::init_ops(),
    ],
    Stdio {
      stdin: StdioPipe::inherit(),
      stdout: StdioPipe::file(worker_sender.stdout),

@@ -21,6 +21,7 @@ use deno_core::anyhow::Context;
use deno_core::error::AnyError;
use deno_core::unsync::spawn;
use deno_core::url::Url;
use deno_semver::SmallStackString;
use deno_semver::Version;
use once_cell::sync::Lazy;
use std::borrow::Cow;

@@ -255,7 +256,7 @@ async fn print_release_notes(
  let is_deno_2_rc = new_semver.major == 2
    && new_semver.minor == 0
    && new_semver.patch == 0
    && new_semver.pre.first() == Some(&"rc".to_string());
    && new_semver.pre.first().map(|s| s.as_str()) == Some("rc");

  if is_deno_2_rc || is_switching_from_deno1_to_deno2 {
    log::info!(

@@ -674,7 +675,7 @@ impl RequestedVersion {
    );
  };

  if semver.pre.contains(&"rc".to_string()) {
  if semver.pre.contains(&SmallStackString::from_static("rc")) {
    (ReleaseChannel::Rc, passed_version)
  } else {
    (ReleaseChannel::Stable, passed_version)

@@ -41,6 +41,13 @@ delete Object.prototype.__proto__;
  "listen",
  "listenDatagram",
  "openKv",
  "connectQuic",
  "listenQuic",
  "QuicBidirectionalStream",
  "QuicConn",
  "QuicListener",
  "QuicReceiveStream",
  "QuicSendStream",
]);
const unstableMsgSuggestion =
  "If not, try changing the 'lib' compiler option to include 'deno.unstable' " +

@@ -140,23 +140,23 @@ mod tests {
  #[test]
  fn test_source_map_from_code() {
    let to_string =
      |bytes: Vec<u8>| -> String { String::from_utf8(bytes).unwrap() };
      |bytes: Vec<u8>| -> String { String::from_utf8(bytes.to_vec()).unwrap() };
    assert_eq!(
      source_map_from_code(
        b"test\n//# sourceMappingURL=data:application/json;base64,dGVzdGluZ3Rlc3Rpbmc=",
        b"test\n//# sourceMappingURL=data:application/json;base64,dGVzdGluZ3Rlc3Rpbmc="
      ).map(to_string),
      Some("testingtesting".to_string())
    );
    assert_eq!(
      source_map_from_code(
        b"test\n//# sourceMappingURL=data:application/json;base64,dGVzdGluZ3Rlc3Rpbmc=\n \n",
        b"test\n//# sourceMappingURL=data:application/json;base64,dGVzdGluZ3Rlc3Rpbmc=\n \n"
      ).map(to_string),
      Some("testingtesting".to_string())
    );
    assert_eq!(
      source_map_from_code(
        b"test\n//# sourceMappingURL=data:application/json;base64,dGVzdGluZ3Rlc3Rpbmc=\n test\n",
      ),
        b"test\n//# sourceMappingURL=data:application/json;base64,dGVzdGluZ3Rlc3Rpbmc=\n test\n"
      ).map(to_string),
      None
    );
    assert_eq!(

@@ -164,7 +164,7 @@ mod tests {
      b"\"use strict\";

throw new Error(\"Hello world!\");
//# sourceMappingURL=data:application/json;base64,{",
//# sourceMappingURL=data:application/json;base64,{"
      ),
      None
    );

@@ -612,6 +612,7 @@ impl CliMainWorkerFactory {
      serve_port: shared.options.serve_port,
      serve_host: shared.options.serve_host.clone(),
      otel_config: shared.otel_config.clone(),
      close_on_idle: true,
    },
    extensions: custom_extensions,
    startup_snapshot: crate::js::deno_isolate_init(),

@@ -655,7 +656,10 @@ impl CliMainWorkerFactory {
      "40_test_common.js",
      "40_test.js",
      "40_bench.js",
      "40_jupyter.js"
      "40_jupyter.js",
      // TODO(bartlomieju): probably shouldn't include these files here?
      "40_lint_selector.js",
      "40_lint.js"
    );
  }

@@ -812,6 +816,7 @@ fn create_web_worker_callback(
      serve_port: shared.options.serve_port,
      serve_host: shared.options.serve_host.clone(),
      otel_config: shared.otel_config.clone(),
      close_on_idle: args.close_on_idle,
    },
    extensions: vec![],
    startup_snapshot: crate::js::deno_isolate_init(),
@@ -2,7 +2,7 @@

[package]
name = "deno_broadcast_channel"
version = "0.177.0"
version = "0.178.0"
authors.workspace = true
edition.workspace = true
license.workspace = true

2  ext/cache/Cargo.toml  vendored
@@ -2,7 +2,7 @@

[package]
name = "deno_cache"
version = "0.115.0"
version = "0.116.0"
authors.workspace = true
edition.workspace = true
license.workspace = true

@@ -2,7 +2,7 @@

[package]
name = "deno_canvas"
version = "0.52.0"
version = "0.53.0"
authors.workspace = true
edition.workspace = true
license.workspace = true

@@ -2,7 +2,7 @@

[package]
name = "deno_console"
version = "0.183.0"
version = "0.184.0"
authors.workspace = true
edition.workspace = true
license.workspace = true

@@ -2,7 +2,7 @@

[package]
name = "deno_cron"
version = "0.63.0"
version = "0.64.0"
authors.workspace = true
edition.workspace = true
license.workspace = true

@@ -2,7 +2,7 @@

[package]
name = "deno_crypto"
version = "0.197.0"
version = "0.198.0"
authors.workspace = true
edition.workspace = true
license.workspace = true

@@ -13,6 +13,7 @@

import { core, primordials } from "ext:core/mod.js";
const {
  BadResourcePrototype,
  isAnyArrayBuffer,
  isArrayBuffer,
  isStringObject,

@@ -26,6 +27,7 @@ const {
  JSONParse,
  ObjectDefineProperties,
  ObjectPrototypeIsPrototypeOf,
  PromisePrototypeCatch,
  TypedArrayPrototypeGetBuffer,
  TypedArrayPrototypeGetByteLength,
  TypedArrayPrototypeGetByteOffset,

@@ -160,7 +162,18 @@ class InnerBody {
      )
    ) {
      readableStreamThrowIfErrored(this.stream);
      return readableStreamCollectIntoUint8Array(this.stream);
      return PromisePrototypeCatch(
        readableStreamCollectIntoUint8Array(this.stream),
        (e) => {
          if (ObjectPrototypeIsPrototypeOf(BadResourcePrototype, e)) {
            // TODO(kt3k): We probably like to pass e as `cause` if BadResource supports it.
            throw new e.constructor(
              "Cannot read body as underlying resource unavailable",
            );
          }
          throw e;
        },
      );
    } else {
      this.streamOrStatic.consumed = true;
      return this.streamOrStatic.body;

@@ -2,7 +2,7 @@

[package]
name = "deno_fetch"
version = "0.207.0"
version = "0.208.0"
authors.workspace = true
edition.workspace = true
license.workspace = true

@@ -23,6 +23,7 @@ deno_permissions.workspace = true
deno_tls.workspace = true
dyn-clone = "1"
error_reporter = "1"
h2.workspace = true
hickory-resolver.workspace = true
http.workspace = true
http-body-util.workspace = true

190  ext/fetch/lib.rs
@@ -10,6 +10,7 @@ use std::borrow::Cow;
use std::cell::RefCell;
use std::cmp::min;
use std::convert::From;
use std::future;
use std::path::Path;
use std::path::PathBuf;
use std::pin::Pin;

@@ -66,6 +67,7 @@ use http::header::USER_AGENT;
use http::Extensions;
use http::Method;
use http::Uri;
use http_body_util::combinators::BoxBody;
use http_body_util::BodyExt;
use hyper::body::Frame;
use hyper_util::client::legacy::connect::HttpConnector;

@@ -75,6 +77,7 @@ use hyper_util::rt::TokioExecutor;
use hyper_util::rt::TokioTimer;
use serde::Deserialize;
use serde::Serialize;
use tower::retry;
use tower::ServiceExt;
use tower_http::decompression::Decompression;

@@ -476,9 +479,7 @@ where
      // If a body is passed, we use it, and don't return a body for streaming.
      con_len = Some(data.len() as u64);

      http_body_util::Full::new(data.to_vec().into())
        .map_err(|never| match never {})
        .boxed()
      ReqBody::full(data.to_vec().into())
    }
    (_, Some(resource)) => {
      let resource = state

@@ -491,7 +492,7 @@ where
        }
        _ => {}
      }
      ReqBody::new(ResourceToBodyAdapter::new(resource))
      ReqBody::streaming(ResourceToBodyAdapter::new(resource))
    }
    (None, None) => unreachable!(),
  }

@@ -501,9 +502,7 @@ where
    if matches!(method, Method::POST | Method::PUT) {
      con_len = Some(0);
    }
    http_body_util::Empty::new()
      .map_err(|never| match never {})
      .boxed()
    ReqBody::empty()
  };

  let mut request = http::Request::new(body);

@@ -1066,7 +1065,8 @@ pub fn create_http_client(
  }

  let pooled_client = builder.build(connector);
  let decompress = Decompression::new(pooled_client).gzip(true).br(true);
  let retry_client = retry::Retry::new(FetchRetry, pooled_client);
  let decompress = Decompression::new(retry_client).gzip(true).br(true);

  Ok(Client {
    inner: decompress,

@@ -1083,7 +1083,12 @@ pub fn op_utf8_to_byte_string(#[string] input: String) -> ByteString {

#[derive(Clone, Debug)]
pub struct Client {
  inner: Decompression<hyper_util::client::legacy::Client<Connector, ReqBody>>,
  inner: Decompression<
    retry::Retry<
      FetchRetry,
      hyper_util::client::legacy::Client<Connector, ReqBody>,
    >,
  >,
  // Used to check whether to include a proxy-authorization header
  proxies: Arc<proxy::Proxies>,
  user_agent: HeaderValue,

@@ -1174,10 +1179,70 @@ impl Client {
  }
}

pub type ReqBody =
  http_body_util::combinators::BoxBody<Bytes, deno_core::error::AnyError>;
pub type ResBody =
  http_body_util::combinators::BoxBody<Bytes, deno_core::error::AnyError>;
// This is a custom enum to allow the retry policy to clone the variants that could be retried.
pub enum ReqBody {
  Full(http_body_util::Full<Bytes>),
  Empty(http_body_util::Empty<Bytes>),
  Streaming(BoxBody<Bytes, deno_core::error::AnyError>),
}

pub type ResBody = BoxBody<Bytes, deno_core::error::AnyError>;

impl ReqBody {
  pub fn full(bytes: Bytes) -> Self {
    ReqBody::Full(http_body_util::Full::new(bytes))
  }

  pub fn empty() -> Self {
    ReqBody::Empty(http_body_util::Empty::new())
  }

  pub fn streaming<B>(body: B) -> Self
  where
    B: hyper::body::Body<Data = Bytes, Error = deno_core::error::AnyError>
      + Send
      + Sync
      + 'static,
  {
    ReqBody::Streaming(BoxBody::new(body))
  }
}

impl hyper::body::Body for ReqBody {
  type Data = Bytes;
  type Error = deno_core::error::AnyError;

  fn poll_frame(
    mut self: Pin<&mut Self>,
    cx: &mut Context<'_>,
  ) -> Poll<Option<Result<Frame<Self::Data>, Self::Error>>> {
    match &mut *self {
      ReqBody::Full(ref mut b) => {
        Pin::new(b).poll_frame(cx).map_err(|never| match never {})
      }
      ReqBody::Empty(ref mut b) => {
        Pin::new(b).poll_frame(cx).map_err(|never| match never {})
      }
      ReqBody::Streaming(ref mut b) => Pin::new(b).poll_frame(cx),
    }
  }

  fn is_end_stream(&self) -> bool {
    match self {
      ReqBody::Full(ref b) => b.is_end_stream(),
      ReqBody::Empty(ref b) => b.is_end_stream(),
      ReqBody::Streaming(ref b) => b.is_end_stream(),
    }
  }

  fn size_hint(&self) -> hyper::body::SizeHint {
    match self {
      ReqBody::Full(ref b) => b.size_hint(),
      ReqBody::Empty(ref b) => b.size_hint(),
      ReqBody::Streaming(ref b) => b.size_hint(),
    }
  }
}

/// Copied from https://github.com/seanmonstar/reqwest/blob/b9d62a0323d96f11672a61a17bf8849baec00275/src/async_impl/request.rs#L572
/// Check the request URL for a "username:password" type authority, and if

@@ -1214,3 +1279,102 @@ pub fn extract_authority(url: &mut Url) -> Option<(String, Option<String>)> {
fn op_fetch_promise_is_settled(promise: v8::Local<v8::Promise>) -> bool {
  promise.state() != v8::PromiseState::Pending
}

/// Deno.fetch's retry policy.
#[derive(Clone, Debug)]
struct FetchRetry;

/// Marker extension that a request has been retried once.
#[derive(Clone, Debug)]
struct Retried;

impl<ResBody, E>
  retry::Policy<http::Request<ReqBody>, http::Response<ResBody>, E>
  for FetchRetry
where
  E: std::error::Error + 'static,
{
  /// Don't delay retries.
  type Future = future::Ready<()>;

  fn retry(
    &mut self,
    req: &mut http::Request<ReqBody>,
    result: &mut Result<http::Response<ResBody>, E>,
  ) -> Option<Self::Future> {
    if req.extensions().get::<Retried>().is_some() {
      // only retry once
      return None;
    }

    match result {
      Ok(..) => {
        // never retry a Response
        None
      }
      Err(err) => {
        if is_error_retryable(&*err) {
          req.extensions_mut().insert(Retried);
          Some(future::ready(()))
        } else {
          None
        }
      }
    }
  }

  fn clone_request(
    &mut self,
    req: &http::Request<ReqBody>,
  ) -> Option<http::Request<ReqBody>> {
    let body = match req.body() {
      ReqBody::Full(b) => ReqBody::Full(b.clone()),
      ReqBody::Empty(b) => ReqBody::Empty(*b),
      ReqBody::Streaming(..) => return None,
    };

    let mut clone = http::Request::new(body);
    *clone.method_mut() = req.method().clone();
    *clone.uri_mut() = req.uri().clone();
    *clone.headers_mut() = req.headers().clone();
    *clone.extensions_mut() = req.extensions().clone();
    Some(clone)
  }
}

fn is_error_retryable(err: &(dyn std::error::Error + 'static)) -> bool {
  // Note: hyper doesn't promise it will always be this h2 version. Keep up to date.
  if let Some(err) = find_source::<h2::Error>(err) {
    // They sent us a graceful shutdown, try with a new connection!
    if err.is_go_away()
      && err.is_remote()
      && err.reason() == Some(h2::Reason::NO_ERROR)
    {
      return true;
    }

    // REFUSED_STREAM was sent from the server, which is safe to retry.
    // https://www.rfc-editor.org/rfc/rfc9113.html#section-8.7-3.2
    if err.is_reset()
      && err.is_remote()
      && err.reason() == Some(h2::Reason::REFUSED_STREAM)
    {
      return true;
    }
  }

  false
}

fn find_source<'a, E: std::error::Error + 'static>(
  err: &'a (dyn std::error::Error + 'static),
) -> Option<&'a E> {
  let mut err = Some(err);
  while let Some(src) = err {
    if let Some(found) = src.downcast_ref::<E>() {
      return Some(found);
    }
    err = src.source();
  }
  None
}
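For illustration, `find_source` walks the `source()` chain until a downcast to the requested error type succeeds. A minimal sketch with a hypothetical wrapper error (not part of this commit):

// Hypothetical error type used only to demonstrate the chain walk.
#[derive(Debug)]
struct Wrapper(std::io::Error);

impl std::fmt::Display for Wrapper {
  fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
    write!(f, "wrapper: {}", self.0)
  }
}

impl std::error::Error for Wrapper {
  fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
    Some(&self.0)
  }
}

fn demo() {
  let err = Wrapper(std::io::Error::new(
    std::io::ErrorKind::ConnectionReset,
    "reset",
  ));
  // `Wrapper` itself is not an `io::Error`, but its `source()` is, so
  // `find_source` locates it one level down the chain.
  assert!(find_source::<std::io::Error>(&err).is_some());
}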
|
||||
|
|
|
@ -133,11 +133,7 @@ async fn rust_test_client_with_resolver(
|
|||
|
||||
let req = http::Request::builder()
|
||||
.uri(format!("https://{}/foo", src_addr))
|
||||
.body(
|
||||
http_body_util::Empty::new()
|
||||
.map_err(|err| match err {})
|
||||
.boxed(),
|
||||
)
|
||||
.body(crate::ReqBody::empty())
|
||||
.unwrap();
|
||||
let resp = client.send(req).await.unwrap();
|
||||
assert_eq!(resp.status(), http::StatusCode::OK);
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
|
||||
[package]
|
||||
name = "deno_ffi"
|
||||
version = "0.170.0"
|
||||
version = "0.171.0"
|
||||
authors.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
|
|
|
@ -77,6 +77,7 @@ const {
|
|||
Error,
|
||||
Function,
|
||||
MathTrunc,
|
||||
Number,
|
||||
ObjectEntries,
|
||||
ObjectDefineProperty,
|
||||
ObjectPrototypeIsPrototypeOf,
|
||||
|
@ -373,12 +374,12 @@ function parseFileInfo(response) {
|
|||
isDirectory: response.isDirectory,
|
||||
isSymlink: response.isSymlink,
|
||||
size: response.size,
|
||||
mtime: response.mtimeSet === true ? new Date(response.mtime) : null,
|
||||
atime: response.atimeSet === true ? new Date(response.atime) : null,
|
||||
mtime: response.mtimeSet === true ? new Date(Number(response.mtime)) : null,
|
||||
atime: response.atimeSet === true ? new Date(Number(response.atime)) : null,
|
||||
birthtime: response.birthtimeSet === true
|
||||
? new Date(response.birthtime)
|
||||
: null,
|
||||
ctime: response.ctimeSet === true ? new Date(response.ctime) : null,
|
||||
ctime: response.ctimeSet === true ? new Date(Number(response.ctime)) : null,
|
||||
dev: response.dev,
|
||||
mode: response.mode,
|
||||
ino: unix ? response.ino : null,
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
|
||||
[package]
|
||||
name = "deno_fs"
|
||||
version = "0.93.0"
|
||||
version = "0.94.0"
|
||||
authors.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
|
||||
[package]
|
||||
name = "deno_http"
|
||||
version = "0.181.0"
|
||||
version = "0.182.0"
|
||||
authors.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
|
||||
[package]
|
||||
name = "deno_io"
|
||||
version = "0.93.0"
|
||||
version = "0.94.0"
|
||||
authors.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
|
||||
[package]
|
||||
name = "deno_kv"
|
||||
version = "0.91.0"
|
||||
version = "0.92.0"
|
||||
authors.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
|
|
|
@ -122,9 +122,7 @@ impl RemoteTransport for FetchClient {
|
|||
headers: http::HeaderMap,
|
||||
body: Bytes,
|
||||
) -> Result<(Url, http::StatusCode, Self::Response), anyhow::Error> {
|
||||
let body = http_body_util::Full::new(body)
|
||||
.map_err(|never| match never {})
|
||||
.boxed();
|
||||
let body = deno_fetch::ReqBody::full(body);
|
||||
let mut req = http::Request::new(body);
|
||||
*req.method_mut() = http::Method::POST;
|
||||
*req.uri_mut() = url.as_str().parse()?;
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
|
||||
[package]
|
||||
name = "deno_napi"
|
||||
version = "0.114.0"
|
||||
version = "0.115.0"
|
||||
authors.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
|
||||
[package]
|
||||
name = "napi_sym"
|
||||
version = "0.113.0"
|
||||
version = "0.114.0"
|
||||
authors.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
|
|
367
ext/net/03_quic.js
Normal file
367
ext/net/03_quic.js
Normal file
|
@@ -0,0 +1,367 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
import { core, primordials } from "ext:core/mod.js";
import {
  op_quic_accept,
  op_quic_accept_bi,
  op_quic_accept_incoming,
  op_quic_accept_uni,
  op_quic_close_connection,
  op_quic_close_endpoint,
  op_quic_connect,
  op_quic_connection_closed,
  op_quic_connection_get_protocol,
  op_quic_connection_get_remote_addr,
  op_quic_endpoint_get_addr,
  op_quic_get_send_stream_priority,
  op_quic_incoming_accept,
  op_quic_incoming_ignore,
  op_quic_incoming_local_ip,
  op_quic_incoming_refuse,
  op_quic_incoming_remote_addr,
  op_quic_incoming_remote_addr_validated,
  op_quic_listen,
  op_quic_max_datagram_size,
  op_quic_open_bi,
  op_quic_open_uni,
  op_quic_read_datagram,
  op_quic_send_datagram,
  op_quic_set_send_stream_priority,
} from "ext:core/ops";
import {
  getWritableStreamResourceBacking,
  ReadableStream,
  readableStreamForRid,
  WritableStream,
  writableStreamForRid,
} from "ext:deno_web/06_streams.js";
import { loadTlsKeyPair } from "ext:deno_net/02_tls.js";
const {
  BadResourcePrototype,
} = core;
const {
  Uint8Array,
  TypedArrayPrototypeSubarray,
  SymbolAsyncIterator,
  SafePromisePrototypeFinally,
  ObjectPrototypeIsPrototypeOf,
} = primordials;

class QuicSendStream extends WritableStream {
  get sendOrder() {
    return op_quic_get_send_stream_priority(
      getWritableStreamResourceBacking(this).rid,
    );
  }

  set sendOrder(p) {
    op_quic_set_send_stream_priority(
      getWritableStreamResourceBacking(this).rid,
      p,
    );
  }
}

class QuicReceiveStream extends ReadableStream {}

function readableStream(rid, closed) {
  // stream can be indirectly closed by closing connection.
  SafePromisePrototypeFinally(closed, () => {
    core.tryClose(rid);
  });
  return readableStreamForRid(rid, true, QuicReceiveStream);
}

function writableStream(rid, closed) {
  // stream can be indirectly closed by closing connection.
  SafePromisePrototypeFinally(closed, () => {
    core.tryClose(rid);
  });
  return writableStreamForRid(rid, true, QuicSendStream);
}

class QuicBidirectionalStream {
  #readable;
  #writable;

  constructor(txRid, rxRid, closed) {
    this.#readable = readableStream(rxRid, closed);
    this.#writable = writableStream(txRid, closed);
  }

  get readable() {
    return this.#readable;
  }

  get writable() {
    return this.#writable;
  }
}

async function* bidiStream(conn, closed) {
  try {
    while (true) {
      const r = await op_quic_accept_bi(conn);
      yield new QuicBidirectionalStream(r[0], r[1], closed);
    }
  } catch (error) {
    if (ObjectPrototypeIsPrototypeOf(BadResourcePrototype, error)) {
      return;
    }
    throw error;
  }
}

async function* uniStream(conn, closed) {
  try {
    while (true) {
      const uniRid = await op_quic_accept_uni(conn);
      yield readableStream(uniRid, closed);
    }
  } catch (error) {
    if (ObjectPrototypeIsPrototypeOf(BadResourcePrototype, error)) {
      return;
    }
    throw error;
  }
}

class QuicConn {
  #resource;
  #bidiStream = null;
  #uniStream = null;
  #closed;

  constructor(resource) {
    this.#resource = resource;

    this.#closed = op_quic_connection_closed(this.#resource);
    core.unrefOpPromise(this.#closed);
  }

  get protocol() {
    return op_quic_connection_get_protocol(this.#resource);
  }

  get remoteAddr() {
    return op_quic_connection_get_remote_addr(this.#resource);
  }

  async createBidirectionalStream(
    { sendOrder, waitUntilAvailable } = { __proto__: null },
  ) {
    const { 0: txRid, 1: rxRid } = await op_quic_open_bi(
      this.#resource,
      waitUntilAvailable ?? false,
    );
    if (sendOrder !== null && sendOrder !== undefined) {
      op_quic_set_send_stream_priority(txRid, sendOrder);
    }
    return new QuicBidirectionalStream(txRid, rxRid, this.#closed);
  }

  async createUnidirectionalStream(
    { sendOrder, waitUntilAvailable } = { __proto__: null },
  ) {
    const rid = await op_quic_open_uni(
      this.#resource,
      waitUntilAvailable ?? false,
    );
    if (sendOrder !== null && sendOrder !== undefined) {
      op_quic_set_send_stream_priority(rid, sendOrder);
    }
    return writableStream(rid, this.#closed);
  }

  get incomingBidirectionalStreams() {
    if (this.#bidiStream === null) {
      this.#bidiStream = ReadableStream.from(
        bidiStream(this.#resource, this.#closed),
      );
    }
    return this.#bidiStream;
  }

  get incomingUnidirectionalStreams() {
    if (this.#uniStream === null) {
      this.#uniStream = ReadableStream.from(
        uniStream(this.#resource, this.#closed),
      );
    }
    return this.#uniStream;
  }

  get maxDatagramSize() {
    return op_quic_max_datagram_size(this.#resource);
  }

  async readDatagram(p) {
    const view = p || new Uint8Array(this.maxDatagramSize);
    const nread = await op_quic_read_datagram(this.#resource, view);
    return TypedArrayPrototypeSubarray(view, 0, nread);
  }

  async sendDatagram(data) {
    await op_quic_send_datagram(this.#resource, data);
  }

  get closed() {
    core.refOpPromise(this.#closed);
    return this.#closed;
  }

  close({ closeCode, reason }) {
    op_quic_close_connection(this.#resource, closeCode, reason);
  }
}

class QuicIncoming {
  #incoming;

  constructor(incoming) {
    this.#incoming = incoming;
  }

  get localIp() {
    return op_quic_incoming_local_ip(this.#incoming);
  }

  get remoteAddr() {
    return op_quic_incoming_remote_addr(this.#incoming);
  }

  get remoteAddressValidated() {
    return op_quic_incoming_remote_addr_validated(this.#incoming);
  }

  async accept() {
    const conn = await op_quic_incoming_accept(this.#incoming);
    return new QuicConn(conn);
  }

  refuse() {
    op_quic_incoming_refuse(this.#incoming);
  }

  ignore() {
    op_quic_incoming_ignore(this.#incoming);
  }
}

class QuicListener {
  #endpoint;

  constructor(endpoint) {
    this.#endpoint = endpoint;
  }

  get addr() {
    return op_quic_endpoint_get_addr(this.#endpoint);
  }

  async accept() {
    const conn = await op_quic_accept(this.#endpoint);
    return new QuicConn(conn);
  }

  async incoming() {
    const incoming = await op_quic_accept_incoming(this.#endpoint);
    return new QuicIncoming(incoming);
  }

  async next() {
    let conn;
    try {
      conn = await this.accept();
    } catch (error) {
      if (ObjectPrototypeIsPrototypeOf(BadResourcePrototype, error)) {
        return { value: undefined, done: true };
      }
      throw error;
    }
    return { value: conn, done: false };
  }

  [SymbolAsyncIterator]() {
    return this;
  }

  close({ closeCode, reason }) {
    op_quic_close_endpoint(this.#endpoint, closeCode, reason);
  }
}

async function listenQuic(
  {
    hostname,
    port,
    cert,
    key,
    alpnProtocols,
    keepAliveInterval,
    maxIdleTimeout,
    maxConcurrentBidirectionalStreams,
    maxConcurrentUnidirectionalStreams,
  },
) {
  hostname = hostname || "0.0.0.0";
  const keyPair = loadTlsKeyPair("Deno.listenQuic", { cert, key });
  const endpoint = await op_quic_listen(
    { hostname, port },
    { alpnProtocols },
    {
      keepAliveInterval,
      maxIdleTimeout,
      maxConcurrentBidirectionalStreams,
      maxConcurrentUnidirectionalStreams,
    },
    keyPair,
  );
  return new QuicListener(endpoint);
}

async function connectQuic(
  {
    hostname,
    port,
    serverName,
    caCerts,
    cert,
    key,
    alpnProtocols,
    keepAliveInterval,
    maxIdleTimeout,
    maxConcurrentBidirectionalStreams,
    maxConcurrentUnidirectionalStreams,
    congestionControl,
  },
) {
  const keyPair = loadTlsKeyPair("Deno.connectQuic", { cert, key });
  const conn = await op_quic_connect(
    { hostname, port },
    {
      caCerts,
      alpnProtocols,
      serverName,
    },
    {
      keepAliveInterval,
      maxIdleTimeout,
      maxConcurrentBidirectionalStreams,
      maxConcurrentUnidirectionalStreams,
      congestionControl,
    },
    keyPair,
  );
  return new QuicConn(conn);
}

export {
  connectQuic,
  listenQuic,
  QuicBidirectionalStream,
  QuicConn,
  QuicIncoming,
  QuicListener,
  QuicReceiveStream,
  QuicSendStream,
};
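Editor's note: the module above wires QUIC endpoints, connections, and streams onto web streams. A minimal usage sketch of how the two halves compose, assuming the unstable `Deno.listenQuic`/`Deno.connectQuic` API from this file; the `cert`/`key` PEM strings and the `"echo"` ALPN value are placeholders:

```ts
// Sketch only: requires real PEM material in place of the "..." placeholders.
const listener = await Deno.listenQuic({
  port: 4443,
  cert: "...", // placeholder certificate chain (PEM)
  key: "...", // placeholder private key (PEM)
  alpnProtocols: ["echo"],
});

// Server side: echo every bidirectional stream opened by a peer.
(async () => {
  for await (const conn of listener) {
    for await (const stream of conn.incomingBidirectionalStreams) {
      stream.readable.pipeTo(stream.writable).catch(() => {});
    }
  }
})();

// Client side: open a stream and write to it. A real client would pass
// caCerts when the server certificate is self-signed.
const conn = await Deno.connectQuic({
  hostname: "localhost",
  port: 4443,
  alpnProtocols: ["echo"],
});
const bidi = await conn.createBidirectionalStream();
const writer = bidi.writable.getWriter();
await writer.write(new TextEncoder().encode("hello"));
await writer.close();
```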
@@ -2,7 +2,7 @@
 [package]
 name = "deno_net"
-version = "0.175.0"
+version = "0.176.0"
 authors.workspace = true
 edition.workspace = true
 license.workspace = true
@@ -20,6 +20,7 @@ deno_tls.workspace = true
 hickory-proto = "0.25.0-alpha.4"
 hickory-resolver.workspace = true
 pin-project.workspace = true
+quinn = { version = "0.11.6", default-features = false, features = ["runtime-tokio", "rustls", "ring"] }
 rustls-tokio-stream.workspace = true
 serde.workspace = true
 socket2.workspace = true
ext/net/lib.deno_net.d.ts (vendored, 288 additions)
@@ -450,5 +450,293 @@ declare namespace Deno {
    options?: StartTlsOptions,
  ): Promise<TlsConn>;

  /**
   * **UNSTABLE**: New API, yet to be vetted.
   * @experimental
   * @category Network
   */
  export interface QuicTransportOptions {
    /** Period of inactivity before sending a keep-alive packet. Keep-alive
     * packets prevent an inactive but otherwise healthy connection from timing
     * out. Only one side of any given connection needs keep-alive enabled for
     * the connection to be preserved.
     * @default {undefined}
     */
    keepAliveInterval?: number;
    /** Maximum duration of inactivity to accept before timing out the
     * connection. The true idle timeout is the minimum of this and the peer’s
     * own max idle timeout.
     * @default {undefined}
     */
    maxIdleTimeout?: number;
    /** Maximum number of incoming bidirectional streams that may be open
     * concurrently.
     * @default {100}
     */
    maxConcurrentBidirectionalStreams?: number;
    /** Maximum number of incoming unidirectional streams that may be open
     * concurrently.
     * @default {100}
     */
    maxConcurrentUnidirectionalStreams?: number;
  }

  /**
   * **UNSTABLE**: New API, yet to be vetted.
   * @experimental
   * @category Network
   */
  export interface ListenQuicOptions extends QuicTransportOptions {
    /** The port to listen on. */
    port: number;
    /**
     * A literal IP address or host name that can be resolved to an IP address.
     * @default {"0.0.0.0"}
     */
    hostname?: string;
    /** Server private key in PEM format */
    key: string;
    /** Cert chain in PEM format */
    cert: string;
    /** Application-Layer Protocol Negotiation (ALPN) protocols to announce to
     * the client. QUIC requires the use of ALPN.
     */
    alpnProtocols: string[];
  }

  /**
   * **UNSTABLE**: New API, yet to be vetted.
   * Listen announces on the local transport address over QUIC.
   *
   * ```ts
   * const lstnr = await Deno.listenQuic({ port: 443, cert: "...", key: "...", alpnProtocols: ["h3"] });
   * ```
   *
   * Requires `allow-net` permission.
   *
   * @experimental
   * @tags allow-net
   * @category Network
   */
  export function listenQuic(options: ListenQuicOptions): Promise<QuicListener>;

  /**
   * **UNSTABLE**: New API, yet to be vetted.
   * @experimental
   * @category Network
   */
  export interface ConnectQuicOptions extends QuicTransportOptions {
    /** The port to connect to. */
    port: number;
    /** A literal IP address or host name that can be resolved to an IP address. */
    hostname: string;
    /** The name used for validating the certificate provided by the server. If
     * not provided, defaults to `hostname`. */
    serverName?: string | undefined;
    /** Application-Layer Protocol Negotiation (ALPN) protocols supported by
     * the client. QUIC requires the use of ALPN.
     */
    alpnProtocols: string[];
    /** A list of root certificates that will be used in addition to the
     * default root certificates to verify the peer's certificate.
     *
     * Must be in PEM format. */
    caCerts?: string[];
    /**
     * The congestion control algorithm used when sending data over this connection.
     */
    congestionControl?: "throughput" | "low-latency";
  }

  /**
   * **UNSTABLE**: New API, yet to be vetted.
   * Establishes a secure connection over QUIC using a hostname and port. The
   * certificate is optional and if not included Mozilla's root certificates
   * will be used. See also https://github.com/ctz/webpki-roots for specifics.
   *
   * ```ts
   * const caCert = await Deno.readTextFile("./certs/my_custom_root_CA.pem");
   * const conn1 = await Deno.connectQuic({ hostname: "example.com", port: 443, alpnProtocols: ["h3"] });
   * const conn2 = await Deno.connectQuic({ caCerts: [caCert], hostname: "example.com", port: 443, alpnProtocols: ["h3"] });
   * ```
   *
   * Requires `allow-net` permission.
   *
   * @experimental
   * @tags allow-net
   * @category Network
   */
  export function connectQuic(options: ConnectQuicOptions): Promise<QuicConn>;

  /**
   * **UNSTABLE**: New API, yet to be vetted.
   * @experimental
   * @category Network
   */
  export interface QuicCloseInfo {
    /** A number representing the error code for the error. */
    closeCode: number;
    /** A string representing the reason for closing the connection. */
    reason: string;
  }

  /**
   * **UNSTABLE**: New API, yet to be vetted.
   * An incoming connection for which the server has not yet begun its part of the handshake.
   *
   * @experimental
   * @category Network
   */
  export interface QuicIncoming {
    /**
     * The local IP address which was used when the peer established the connection.
     */
    readonly localIp: string;

    /**
     * The peer’s UDP address.
     */
    readonly remoteAddr: NetAddr;

    /**
     * Whether the socket address that is initiating this connection has proven that they can receive traffic.
     */
    readonly remoteAddressValidated: boolean;

    /**
     * Accept this incoming connection.
     */
    accept(): Promise<QuicConn>;

    /**
     * Refuse this incoming connection.
     */
    refuse(): void;

    /**
     * Ignore this incoming connection attempt, not sending any packet in response.
     */
    ignore(): void;
  }

  /**
   * **UNSTABLE**: New API, yet to be vetted.
   * Specialized listener that accepts QUIC connections.
   *
   * @experimental
   * @category Network
   */
  export interface QuicListener extends AsyncIterable<QuicConn> {
    /** Return the address of the `QuicListener`. */
    readonly addr: NetAddr;

    /** Waits for and resolves to the next connection to the `QuicListener`. */
    accept(): Promise<QuicConn>;

    /** Waits for and resolves to the next incoming request to the `QuicListener`. */
    incoming(): Promise<QuicIncoming>;

    /** Closes the listener. Any pending accept promises will be rejected
     * with errors. */
    close(info: QuicCloseInfo): void;

    [Symbol.asyncIterator](): AsyncIterableIterator<QuicConn>;
  }

  /**
   * **UNSTABLE**: New API, yet to be vetted.
   *
   * @experimental
   * @category Network
   */
  export interface QuicSendStreamOptions {
    /** Indicates the send priority of this stream relative to other streams for
     * which the value has been set.
     * @default {undefined}
     */
    sendOrder?: number;
    /** Wait until there is sufficient flow credit to create the stream.
     * @default {false}
     */
    waitUntilAvailable?: boolean;
  }

  /**
   * **UNSTABLE**: New API, yet to be vetted.
   *
   * @experimental
   * @category Network
   */
  export interface QuicConn {
    /** Closes the connection. Any pending accept promises will be rejected
     * with errors. */
    close(info: QuicCloseInfo): void;
    /** Opens and returns a bidirectional stream. */
    createBidirectionalStream(
      options?: QuicSendStreamOptions,
    ): Promise<QuicBidirectionalStream>;
    /** Opens and returns a unidirectional stream. */
    createUnidirectionalStream(
      options?: QuicSendStreamOptions,
    ): Promise<QuicSendStream>;
    /** Send a datagram. The provided data cannot be larger than
     * `maxDatagramSize`. */
    sendDatagram(data: Uint8Array): Promise<void>;
    /** Receive a datagram. If no buffer is provided, one will be allocated.
     * The size of the provided buffer should be at least `maxDatagramSize`. */
    readDatagram(buffer?: Uint8Array): Promise<Uint8Array>;

    /** Return the remote address for the connection. Clients may change
     * addresses at will, for example when switching to a cellular internet
     * connection.
     */
    readonly remoteAddr: NetAddr;
    /** The negotiated ALPN protocol, if provided. */
    readonly protocol: string | undefined;
    /** Returns a promise that resolves when the connection is closed. */
    readonly closed: Promise<QuicCloseInfo>;
    /** A stream of bidirectional streams opened by the peer. */
    readonly incomingBidirectionalStreams: ReadableStream<
      QuicBidirectionalStream
    >;
    /** A stream of unidirectional streams opened by the peer. */
    readonly incomingUnidirectionalStreams: ReadableStream<QuicReceiveStream>;
    /** The maximum size of a datagram that can be sent. */
    readonly maxDatagramSize: number;
  }

  /**
   * **UNSTABLE**: New API, yet to be vetted.
   *
   * @experimental
   * @category Network
   */
  export interface QuicBidirectionalStream {
    /** Returns a QuicReceiveStream instance that can be used to read incoming data. */
    readonly readable: QuicReceiveStream;
    /** Returns a QuicSendStream instance that can be used to write outgoing data. */
    readonly writable: QuicSendStream;
  }

  /**
   * **UNSTABLE**: New API, yet to be vetted.
   *
   * @experimental
   * @category Network
   */
  export interface QuicSendStream extends WritableStream<Uint8Array> {
    /** Indicates the send priority of this stream relative to other streams for
     * which the value has been set. */
    sendOrder: number;
  }

  /**
   * **UNSTABLE**: New API, yet to be vetted.
   *
   * @experimental
   * @category Network
   */
  export interface QuicReceiveStream extends ReadableStream<Uint8Array> {}

  export {}; // only export exports
}
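Editor's note: the datagram half of the `QuicConn` interface declared above is fire-and-forget messaging bounded by `maxDatagramSize`. A hedged usage sketch; the hostname and ALPN value are illustrative:

```ts
const conn = await Deno.connectQuic({
  hostname: "example.com", // illustrative peer
  port: 443,
  alpnProtocols: ["h3"],
});
if (conn.maxDatagramSize > 0) {
  await conn.sendDatagram(new TextEncoder().encode("ping"));
  // With no buffer argument, readDatagram allocates one of maxDatagramSize.
  const reply = await conn.readDatagram();
  console.log(new TextDecoder().decode(reply));
}
conn.close({ closeCode: 0, reason: "done" });
```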
@@ -5,6 +5,7 @@ pub mod ops;
 pub mod ops_tls;
 #[cfg(unix)]
 pub mod ops_unix;
+mod quic;
 pub mod raw;
 pub mod resolve_addr;
 pub mod tcp;
@@ -158,8 +159,34 @@ deno_core::extension!(deno_net,
     ops_unix::op_node_unstable_net_listen_unixpacket<P>,
     ops_unix::op_net_recv_unixpacket,
     ops_unix::op_net_send_unixpacket<P>,
+
+    quic::op_quic_accept,
+    quic::op_quic_accept_bi,
+    quic::op_quic_accept_incoming,
+    quic::op_quic_accept_uni,
+    quic::op_quic_close_connection,
+    quic::op_quic_close_endpoint,
+    quic::op_quic_connection_closed,
+    quic::op_quic_connection_get_protocol,
+    quic::op_quic_connection_get_remote_addr,
+    quic::op_quic_connect<P>,
+    quic::op_quic_endpoint_get_addr,
+    quic::op_quic_get_send_stream_priority,
+    quic::op_quic_incoming_accept,
+    quic::op_quic_incoming_refuse,
+    quic::op_quic_incoming_ignore,
+    quic::op_quic_incoming_local_ip,
+    quic::op_quic_incoming_remote_addr,
+    quic::op_quic_incoming_remote_addr_validated,
+    quic::op_quic_listen<P>,
+    quic::op_quic_max_datagram_size,
+    quic::op_quic_open_bi,
+    quic::op_quic_open_uni,
+    quic::op_quic_read_datagram,
+    quic::op_quic_send_datagram,
+    quic::op_quic_set_send_stream_priority,
   ],
-  esm = [ "01_net.js", "02_tls.js" ],
+  esm = [ "01_net.js", "02_tls.js", "03_quic.js" ],
   options = {
     root_cert_store_provider: Option<Arc<dyn RootCertStoreProvider>>,
     unsafely_ignore_certificate_errors: Option<Vec<String>>,
ext/net/quic.rs (new file, 660 lines)
@@ -0,0 +1,660 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.

use crate::resolve_addr::resolve_addr;
use crate::DefaultTlsOptions;
use crate::NetPermissions;
use crate::UnsafelyIgnoreCertificateErrors;
use deno_core::error::bad_resource;
use deno_core::error::generic_error;
use deno_core::error::AnyError;
use deno_core::futures::task::noop_waker_ref;
use deno_core::op2;
use deno_core::AsyncRefCell;
use deno_core::AsyncResult;
use deno_core::BufView;
use deno_core::GarbageCollected;
use deno_core::JsBuffer;
use deno_core::OpState;
use deno_core::RcRef;
use deno_core::Resource;
use deno_core::ResourceId;
use deno_core::WriteOutcome;
use deno_tls::create_client_config;
use deno_tls::SocketUse;
use deno_tls::TlsKeys;
use deno_tls::TlsKeysHolder;
use quinn::crypto::rustls::QuicClientConfig;
use quinn::crypto::rustls::QuicServerConfig;
use serde::Deserialize;
use serde::Serialize;
use std::borrow::Cow;
use std::cell::RefCell;
use std::future::Future;
use std::net::IpAddr;
use std::net::Ipv4Addr;
use std::net::Ipv6Addr;
use std::net::SocketAddrV4;
use std::net::SocketAddrV6;
use std::pin::pin;
use std::rc::Rc;
use std::sync::Arc;
use std::task::Context;
use std::task::Poll;
use std::time::Duration;

#[derive(Debug, Deserialize, Serialize)]
struct Addr {
  hostname: String,
  port: u16,
}

#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct ListenArgs {
  alpn_protocols: Option<Vec<String>>,
}

#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct TransportConfig {
  keep_alive_interval: Option<u64>,
  max_idle_timeout: Option<u64>,
  max_concurrent_bidirectional_streams: Option<u32>,
  max_concurrent_unidirectional_streams: Option<u32>,
  preferred_address_v4: Option<SocketAddrV4>,
  preferred_address_v6: Option<SocketAddrV6>,
  congestion_control: Option<String>,
}

impl TryInto<quinn::TransportConfig> for TransportConfig {
  type Error = AnyError;

  fn try_into(self) -> Result<quinn::TransportConfig, AnyError> {
    let mut cfg = quinn::TransportConfig::default();

    if let Some(interval) = self.keep_alive_interval {
      cfg.keep_alive_interval(Some(Duration::from_millis(interval)));
    }

    if let Some(timeout) = self.max_idle_timeout {
      cfg.max_idle_timeout(Some(Duration::from_millis(timeout).try_into()?));
    }

    if let Some(max) = self.max_concurrent_bidirectional_streams {
      cfg.max_concurrent_bidi_streams(max.into());
    }

    if let Some(max) = self.max_concurrent_unidirectional_streams {
      cfg.max_concurrent_uni_streams(max.into());
    }

    if let Some(v) = self.congestion_control {
      let controller: Option<
        Arc<dyn quinn::congestion::ControllerFactory + Send + Sync + 'static>,
      > = match v.as_str() {
        "low-latency" => {
          Some(Arc::new(quinn::congestion::BbrConfig::default()))
        }
        "throughput" => {
          Some(Arc::new(quinn::congestion::CubicConfig::default()))
        }
        _ => None,
      };
      if let Some(controller) = controller {
        cfg.congestion_controller_factory(controller);
      }
    }

    Ok(cfg)
  }
}

struct EndpointResource(quinn::Endpoint, Arc<QuicServerConfig>);

impl GarbageCollected for EndpointResource {}

#[op2(async)]
#[cppgc]
pub(crate) async fn op_quic_listen<NP>(
  state: Rc<RefCell<OpState>>,
  #[serde] addr: Addr,
  #[serde] args: ListenArgs,
  #[serde] transport_config: TransportConfig,
  #[cppgc] keys: &TlsKeysHolder,
) -> Result<EndpointResource, AnyError>
where
  NP: NetPermissions + 'static,
{
  state
    .borrow_mut()
    .borrow_mut::<NP>()
    .check_net(&(&addr.hostname, Some(addr.port)), "Deno.listenQuic()")?;

  let addr = resolve_addr(&addr.hostname, addr.port)
    .await?
    .next()
    .ok_or_else(|| generic_error("No resolved address found"))?;

  let TlsKeys::Static(deno_tls::TlsKey(cert, key)) = keys.take() else {
    unreachable!()
  };

  let mut crypto =
    quinn::rustls::ServerConfig::builder_with_protocol_versions(&[
      &quinn::rustls::version::TLS13,
    ])
    .with_no_client_auth()
    .with_single_cert(cert.clone(), key.clone_key())?;

  if let Some(alpn_protocols) = args.alpn_protocols {
    crypto.alpn_protocols = alpn_protocols
      .into_iter()
      .map(|alpn| alpn.into_bytes())
      .collect();
  }

  let server_config = Arc::new(QuicServerConfig::try_from(crypto)?);
  let mut config = quinn::ServerConfig::with_crypto(server_config.clone());
  config.preferred_address_v4(transport_config.preferred_address_v4);
  config.preferred_address_v6(transport_config.preferred_address_v6);
  config.transport_config(Arc::new(transport_config.try_into()?));
  let endpoint = quinn::Endpoint::server(config, addr)?;

  Ok(EndpointResource(endpoint, server_config))
}

#[op2]
#[serde]
pub(crate) fn op_quic_endpoint_get_addr(
  #[cppgc] endpoint: &EndpointResource,
) -> Result<Addr, AnyError> {
  let addr = endpoint.0.local_addr()?;
  let addr = Addr {
    hostname: format!("{}", addr.ip()),
    port: addr.port(),
  };
  Ok(addr)
}

#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
struct CloseInfo {
  close_code: u64,
  reason: String,
}

#[op2(fast)]
pub(crate) fn op_quic_close_endpoint(
  #[cppgc] endpoint: &EndpointResource,
  #[bigint] close_code: u64,
  #[string] reason: String,
) -> Result<(), AnyError> {
  endpoint
    .0
    .close(quinn::VarInt::from_u64(close_code)?, reason.as_bytes());
  Ok(())
}

struct ConnectionResource(quinn::Connection);

impl GarbageCollected for ConnectionResource {}

#[op2(async)]
#[cppgc]
pub(crate) async fn op_quic_accept(
  #[cppgc] endpoint: &EndpointResource,
) -> Result<ConnectionResource, AnyError> {
  match endpoint.0.accept().await {
    Some(incoming) => {
      let conn = incoming.accept()?.await?;
      Ok(ConnectionResource(conn))
    }
    None => Err(bad_resource("QuicListener is closed")),
  }
}

struct IncomingResource(
  RefCell<Option<quinn::Incoming>>,
  Arc<QuicServerConfig>,
);

impl GarbageCollected for IncomingResource {}

#[op2(async)]
#[cppgc]
pub(crate) async fn op_quic_accept_incoming(
  #[cppgc] endpoint: &EndpointResource,
) -> Result<IncomingResource, AnyError> {
  match endpoint.0.accept().await {
    Some(incoming) => Ok(IncomingResource(
      RefCell::new(Some(incoming)),
      endpoint.1.clone(),
    )),
    None => Err(bad_resource("QuicListener is closed")),
  }
}

#[op2]
#[string]
pub(crate) fn op_quic_incoming_local_ip(
  #[cppgc] incoming_resource: &IncomingResource,
) -> Result<Option<String>, AnyError> {
  let Some(incoming) = incoming_resource.0.borrow_mut().take() else {
    return Err(bad_resource("QuicIncoming already used"));
  };
  Ok(incoming.local_ip().map(|ip| ip.to_string()))
}

#[op2]
#[serde]
pub(crate) fn op_quic_incoming_remote_addr(
  #[cppgc] incoming_resource: &IncomingResource,
) -> Result<Addr, AnyError> {
  let Some(incoming) = incoming_resource.0.borrow_mut().take() else {
    return Err(bad_resource("QuicIncoming already used"));
  };
  let addr = incoming.remote_address();
  Ok(Addr {
    hostname: format!("{}", addr.ip()),
    port: addr.port(),
  })
}

#[op2(fast)]
pub(crate) fn op_quic_incoming_remote_addr_validated(
  #[cppgc] incoming_resource: &IncomingResource,
) -> Result<bool, AnyError> {
  let Some(incoming) = incoming_resource.0.borrow_mut().take() else {
    return Err(bad_resource("QuicIncoming already used"));
  };
  Ok(incoming.remote_address_validated())
}

#[op2(async)]
#[cppgc]
pub(crate) async fn op_quic_incoming_accept(
  #[cppgc] incoming_resource: &IncomingResource,
  #[serde] transport_config: Option<TransportConfig>,
) -> Result<ConnectionResource, AnyError> {
  let Some(incoming) = incoming_resource.0.borrow_mut().take() else {
    return Err(bad_resource("QuicIncoming already used"));
  };
  let conn = match transport_config {
    Some(transport_config) => {
      let mut config =
        quinn::ServerConfig::with_crypto(incoming_resource.1.clone());
      config.preferred_address_v4(transport_config.preferred_address_v4);
      config.preferred_address_v6(transport_config.preferred_address_v6);
      config.transport_config(Arc::new(transport_config.try_into()?));
      incoming.accept_with(Arc::new(config))?.await?
    }
    None => incoming.accept()?.await?,
  };
  Ok(ConnectionResource(conn))
}

#[op2]
#[serde]
pub(crate) fn op_quic_incoming_refuse(
  #[cppgc] incoming: &IncomingResource,
) -> Result<(), AnyError> {
  let Some(incoming) = incoming.0.borrow_mut().take() else {
    return Err(bad_resource("QuicIncoming already used"));
  };
  incoming.refuse();
  Ok(())
}

#[op2]
#[serde]
pub(crate) fn op_quic_incoming_ignore(
  #[cppgc] incoming: &IncomingResource,
) -> Result<(), AnyError> {
  let Some(incoming) = incoming.0.borrow_mut().take() else {
    return Err(bad_resource("QuicIncoming already used"));
  };
  incoming.ignore();
  Ok(())
}

#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct ConnectArgs {
  ca_certs: Option<Vec<String>>,
  alpn_protocols: Option<Vec<String>>,
  server_name: Option<String>,
}

#[op2(async)]
#[cppgc]
pub(crate) async fn op_quic_connect<NP>(
  state: Rc<RefCell<OpState>>,
  #[serde] addr: Addr,
  #[serde] args: ConnectArgs,
  #[serde] transport_config: TransportConfig,
  #[cppgc] key_pair: &TlsKeysHolder,
) -> Result<ConnectionResource, AnyError>
where
  NP: NetPermissions + 'static,
{
  state
    .borrow_mut()
    .borrow_mut::<NP>()
    .check_net(&(&addr.hostname, Some(addr.port)), "Deno.connectQuic()")?;

  let sock_addr = resolve_addr(&addr.hostname, addr.port)
    .await?
    .next()
    .ok_or_else(|| generic_error("No resolved address found"))?;

  let root_cert_store = state
    .borrow()
    .borrow::<DefaultTlsOptions>()
    .root_cert_store()?;

  let unsafely_ignore_certificate_errors = state
    .borrow()
    .try_borrow::<UnsafelyIgnoreCertificateErrors>()
    .and_then(|it| it.0.clone());

  let ca_certs = args
    .ca_certs
    .unwrap_or_default()
    .into_iter()
    .map(|s| s.into_bytes())
    .collect::<Vec<_>>();

  let mut tls_config = create_client_config(
    root_cert_store,
    ca_certs,
    unsafely_ignore_certificate_errors,
    key_pair.take(),
    SocketUse::GeneralSsl,
  )?;

  if let Some(alpn_protocols) = args.alpn_protocols {
    tls_config.alpn_protocols =
      alpn_protocols.into_iter().map(|s| s.into_bytes()).collect();
  }

  let client_config = QuicClientConfig::try_from(tls_config)?;
  let mut client_config = quinn::ClientConfig::new(Arc::new(client_config));
  client_config.transport_config(Arc::new(transport_config.try_into()?));

  let local_addr = match sock_addr.ip() {
    IpAddr::V4(_) => IpAddr::from(Ipv4Addr::new(0, 0, 0, 0)),
    IpAddr::V6(_) => IpAddr::from(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0)),
  };

  let conn = quinn::Endpoint::client((local_addr, 0).into())?
    .connect_with(
      client_config,
      sock_addr,
      &args.server_name.unwrap_or(addr.hostname),
    )?
    .await?;

  Ok(ConnectionResource(conn))
}

#[op2]
#[string]
pub(crate) fn op_quic_connection_get_protocol(
  #[cppgc] connection: &ConnectionResource,
) -> Option<String> {
  connection
    .0
    .handshake_data()
    .and_then(|h| h.downcast::<quinn::crypto::rustls::HandshakeData>().ok())
    .and_then(|h| h.protocol)
    .map(|p| String::from_utf8_lossy(&p).into_owned())
}

#[op2]
#[serde]
pub(crate) fn op_quic_connection_get_remote_addr(
  #[cppgc] connection: &ConnectionResource,
) -> Result<Addr, AnyError> {
  let addr = connection.0.remote_address();
  Ok(Addr {
    hostname: format!("{}", addr.ip()),
    port: addr.port(),
  })
}

#[op2(fast)]
pub(crate) fn op_quic_close_connection(
  #[cppgc] connection: &ConnectionResource,
  #[bigint] close_code: u64,
  #[string] reason: String,
) -> Result<(), AnyError> {
  connection
    .0
    .close(quinn::VarInt::from_u64(close_code)?, reason.as_bytes());
  Ok(())
}

#[op2(async)]
#[serde]
pub(crate) async fn op_quic_connection_closed(
  #[cppgc] connection: &ConnectionResource,
) -> Result<CloseInfo, AnyError> {
  let e = connection.0.closed().await;
  match e {
    quinn::ConnectionError::LocallyClosed => Ok(CloseInfo {
      close_code: 0,
      reason: "".into(),
    }),
    quinn::ConnectionError::ApplicationClosed(i) => Ok(CloseInfo {
      close_code: i.error_code.into(),
      reason: String::from_utf8_lossy(&i.reason).into_owned(),
    }),
    e => Err(e.into()),
  }
}

struct SendStreamResource(AsyncRefCell<quinn::SendStream>);

impl SendStreamResource {
  fn new(stream: quinn::SendStream) -> Self {
    Self(AsyncRefCell::new(stream))
  }
}

impl Resource for SendStreamResource {
  fn name(&self) -> Cow<str> {
    "quicSendStream".into()
  }

  fn write(self: Rc<Self>, view: BufView) -> AsyncResult<WriteOutcome> {
    Box::pin(async move {
      let mut r = RcRef::map(self, |r| &r.0).borrow_mut().await;
      let nwritten = r.write(&view).await?;
      Ok(WriteOutcome::Partial { nwritten, view })
    })
  }
}

struct RecvStreamResource(AsyncRefCell<quinn::RecvStream>);

impl RecvStreamResource {
  fn new(stream: quinn::RecvStream) -> Self {
    Self(AsyncRefCell::new(stream))
  }
}

impl Resource for RecvStreamResource {
  fn name(&self) -> Cow<str> {
    "quicReceiveStream".into()
  }

  fn read(self: Rc<Self>, limit: usize) -> AsyncResult<BufView> {
    Box::pin(async move {
      let mut r = RcRef::map(self, |r| &r.0).borrow_mut().await;
      let mut data = vec![0; limit];
      let nread = r.read(&mut data).await?.unwrap_or(0);
      data.truncate(nread);
      Ok(BufView::from(data))
    })
  }
}

#[op2(async)]
#[serde]
pub(crate) async fn op_quic_accept_bi(
  #[cppgc] connection: &ConnectionResource,
  state: Rc<RefCell<OpState>>,
) -> Result<(ResourceId, ResourceId), AnyError> {
  match connection.0.accept_bi().await {
    Ok((tx, rx)) => {
      let mut state = state.borrow_mut();
      let tx_rid = state.resource_table.add(SendStreamResource::new(tx));
      let rx_rid = state.resource_table.add(RecvStreamResource::new(rx));
      Ok((tx_rid, rx_rid))
    }
    Err(e) => match e {
      quinn::ConnectionError::LocallyClosed
      | quinn::ConnectionError::ApplicationClosed(..) => {
        Err(bad_resource("QuicConn is closed"))
      }
      _ => Err(e.into()),
    },
  }
}

#[op2(async)]
#[serde]
pub(crate) async fn op_quic_open_bi(
  #[cppgc] connection: &ConnectionResource,
  state: Rc<RefCell<OpState>>,
  wait_for_available: bool,
) -> Result<(ResourceId, ResourceId), AnyError> {
  let (tx, rx) = if wait_for_available {
    connection.0.open_bi().await?
  } else {
    let waker = noop_waker_ref();
    let mut cx = Context::from_waker(waker);
    match pin!(connection.0.open_bi()).poll(&mut cx) {
      Poll::Ready(r) => r?,
      Poll::Pending => {
        return Err(generic_error("Connection has reached the maximum number of outgoing concurrent bidirectional streams"));
      }
    }
  };
  let mut state = state.borrow_mut();
  let tx_rid = state.resource_table.add(SendStreamResource::new(tx));
  let rx_rid = state.resource_table.add(RecvStreamResource::new(rx));
  Ok((tx_rid, rx_rid))
}

#[op2(async)]
#[serde]
pub(crate) async fn op_quic_accept_uni(
  #[cppgc] connection: &ConnectionResource,
  state: Rc<RefCell<OpState>>,
) -> Result<ResourceId, AnyError> {
  match connection.0.accept_uni().await {
    Ok(rx) => {
      let rid = state
        .borrow_mut()
        .resource_table
        .add(RecvStreamResource::new(rx));
      Ok(rid)
    }
    Err(e) => match e {
      quinn::ConnectionError::LocallyClosed
      | quinn::ConnectionError::ApplicationClosed(..) => {
        Err(bad_resource("QuicConn is closed"))
      }
      _ => Err(e.into()),
    },
  }
}

#[op2(async)]
#[serde]
pub(crate) async fn op_quic_open_uni(
  #[cppgc] connection: &ConnectionResource,
  state: Rc<RefCell<OpState>>,
  wait_for_available: bool,
) -> Result<ResourceId, AnyError> {
  let tx = if wait_for_available {
    connection.0.open_uni().await?
  } else {
    let waker = noop_waker_ref();
    let mut cx = Context::from_waker(waker);
    match pin!(connection.0.open_uni()).poll(&mut cx) {
      Poll::Ready(r) => r?,
      Poll::Pending => {
        return Err(generic_error("Connection has reached the maximum number of outgoing concurrent unidirectional streams"));
      }
    }
  };
  let rid = state
    .borrow_mut()
    .resource_table
    .add(SendStreamResource::new(tx));
  Ok(rid)
}

#[op2(async)]
pub(crate) async fn op_quic_send_datagram(
  #[cppgc] connection: &ConnectionResource,
  #[buffer] buf: JsBuffer,
) -> Result<(), AnyError> {
  connection.0.send_datagram_wait(buf.to_vec().into()).await?;
  Ok(())
}

#[op2(async)]
pub(crate) async fn op_quic_read_datagram(
  #[cppgc] connection: &ConnectionResource,
  #[buffer] mut buf: JsBuffer,
) -> Result<u32, AnyError> {
  let data = connection.0.read_datagram().await?;
  buf[0..data.len()].copy_from_slice(&data);
  Ok(data.len() as _)
}

#[op2(fast)]
pub(crate) fn op_quic_max_datagram_size(
  #[cppgc] connection: &ConnectionResource,
) -> Result<u32, AnyError> {
  Ok(connection.0.max_datagram_size().unwrap_or(0) as _)
}

#[op2(fast)]
pub(crate) fn op_quic_get_send_stream_priority(
  state: Rc<RefCell<OpState>>,
  #[smi] rid: ResourceId,
) -> Result<i32, AnyError> {
  let resource = state
    .borrow()
    .resource_table
    .get::<SendStreamResource>(rid)?;
  let r = RcRef::map(resource, |r| &r.0).try_borrow();
  match r {
    Some(s) => Ok(s.priority()?),
    None => Err(generic_error("Unable to get priority")),
  }
}

#[op2(fast)]
pub(crate) fn op_quic_set_send_stream_priority(
  state: Rc<RefCell<OpState>>,
  #[smi] rid: ResourceId,
  priority: i32,
) -> Result<(), AnyError> {
  let resource = state
    .borrow()
    .resource_table
    .get::<SendStreamResource>(rid)?;
  let r = RcRef::map(resource, |r| &r.0).try_borrow();
  match r {
    Some(s) => {
      s.set_priority(priority)?;
      Ok(())
    }
    None => Err(generic_error("Unable to set priority")),
  }
}
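Editor's note: `op_quic_open_bi`/`op_quic_open_uni` above poll the open future once with a no-op waker, so that unless the caller opted in to waiting, hitting the peer's concurrent-stream limit fails fast instead of blocking. From JavaScript that surfaces through the `waitUntilAvailable` option; a sketch with an illustrative peer:

```ts
// Sketch assuming a peer that enforces a low bidirectional stream limit;
// hostname and ALPN value are illustrative.
const conn = await Deno.connectQuic({
  hostname: "example.com",
  port: 443,
  alpnProtocols: ["h3"],
});
try {
  await conn.createBidirectionalStream(); // rejects immediately at the limit
} catch {
  // Opt in to backpressure instead: resolves once flow credit is available.
  await conn.createBidirectionalStream({ waitUntilAvailable: true });
}
```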
@@ -2,7 +2,7 @@
 [package]
 name = "deno_node"
-version = "0.120.0"
+version = "0.122.0"
 authors.workspace = true
 edition.workspace = true
 license.workspace = true
@@ -73,12 +73,17 @@ pub fn cpu_info() -> Option<Vec<CpuInfo>> {
     cpu_speed = 2_400_000_000;
   }

+  extern "C" {
+    fn mach_host_self() -> std::ffi::c_uint;
+    static mut mach_task_self_: std::ffi::c_uint;
+  }
+
   let mut num_cpus: libc::natural_t = 0;
   let mut info: *mut libc::processor_cpu_load_info_data_t =
     std::ptr::null_mut();
   let mut msg_type: libc::mach_msg_type_number_t = 0;
   if libc::host_processor_info(
-    libc::mach_host_self(),
+    mach_host_self(),
     libc::PROCESSOR_CPU_LOAD_INFO,
     &mut num_cpus,
     &mut info as *mut _ as *mut libc::processor_info_array_t,

@@ -111,7 +116,7 @@ pub fn cpu_info() -> Option<Vec<CpuInfo>> {
   }

   libc::vm_deallocate(
-    libc::mach_task_self(),
+    mach_task_self_,
     info.as_ptr() as libc::vm_address_t,
     msg_type as _,
   );
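Editor's note: these mach bindings presumably back the CPU enumeration that the `node:os` compat layer exposes; the observable surface from user code would then be just (illustrative):

```ts
// Illustrative consumer of the cpu_info() data via the node:os compat layer.
import os from "node:os";

console.log(`${os.cpus().length} cores, first model: ${os.cpus()[0]?.model}`);
```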
@@ -67,9 +67,9 @@ generate_builtin_node_module_lists! {
   "process",
   "punycode",
   "querystring",
-  "repl",
   "readline",
   "readline/promises",
+  "repl",
   "stream",
   "stream/consumers",
   "stream/promises",

@@ -90,3 +90,10 @@ generate_builtin_node_module_lists! {
   "worker_threads",
   "zlib",
 }
+
+#[test]
+fn test_builtins_are_sorted() {
+  let mut builtins_list = SUPPORTED_BUILTIN_NODE_MODULES.to_vec();
+  builtins_list.sort();
+  assert_eq!(SUPPORTED_BUILTIN_NODE_MODULES, builtins_list);
+}
@@ -30,22 +30,28 @@ export function access(
   mode = getValidMode(mode, "access");
   const cb = makeCallback(callback);

-  Deno.lstat(path).then((info) => {
+  Deno.lstat(path).then(
+    (info) => {
       if (info.mode === null) {
         // If the file mode is unavailable, we pretend it has
         // the permission
         cb(null);
         return;
       }
-      const m = +mode || 0;
+      let m = +mode || 0;
       let fileMode = +info.mode || 0;
-      if (Deno.build.os !== "windows" && info.uid === Deno.uid()) {
+
+      if (Deno.build.os === "windows") {
+        m &= ~fs.X_OK; // Ignore the X_OK bit on Windows
+      } else if (info.uid === Deno.uid()) {
         // If the user is the owner of the file, then use the owner bits of
         // the file permission
         fileMode >>= 6;
       }
+
+      // TODO(kt3k): Also check the case when the user belong to the group
+      // of the file
+
       if ((m & fileMode) === m) {
         // all required flags exist
         cb(null);

@@ -59,7 +65,8 @@ export function access(
         e.code = "EACCES";
         cb(e);
       }
-  }, (err) => {
+    },
+    (err) => {
       if (err instanceof Deno.errors.NotFound) {
         // deno-lint-ignore no-explicit-any
         const e: any = new Error(

@@ -73,7 +80,8 @@ export function access(
       } else {
         cb(err);
       }
-  });
+    },
+  );
 }

 export const accessPromise = promisify(access) as (

@@ -91,9 +99,11 @@ export function accessSync(path: string | Buffer | URL, mode?: number) {
     // the permission
     return;
   }
-  const m = +mode! || 0;
+  let m = +mode! || 0;
   let fileMode = +info.mode! || 0;
-  if (Deno.build.os !== "windows" && info.uid === Deno.uid()) {
+  if (Deno.build.os === "windows") {
+    m &= ~fs.X_OK; // Ignore the X_OK bit on Windows
+  } else if (info.uid === Deno.uid()) {
     // If the user is the owner of the file, then use the owner bits of
     // the file permission
     fileMode >>= 6;
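Editor's note: the new branch masks off `X_OK` from the requested mode on Windows before comparing against the file's mode bits, so executability checks no longer fail spuriously there. A short usage sketch of the public API this change affects; the file path is illustrative:

```ts
// node:fs compat access check; on Windows the X_OK bit is now ignored.
import fs from "node:fs";

fs.access("./data.txt", fs.constants.R_OK | fs.constants.X_OK, (err) => {
  console.log(err ? "access denied" : "access ok");
});
```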
@@ -16,16 +16,24 @@ export function ftruncate(
     : undefined;
   const callback: CallbackWithError = typeof lenOrCallback === "function"
     ? lenOrCallback
-    : maybeCallback as CallbackWithError;
+    : (maybeCallback as CallbackWithError);

   if (!callback) throw new Error("No callback function supplied");

-  new FsFile(fd, Symbol.for("Deno.internal.FsFile")).truncate(len).then(
-    () => callback(null),
-    callback,
-  );
+  new FsFile(fd, Symbol.for("Deno.internal.FsFile"))
+    .truncate(len)
+    .then(() => callback(null), callback);
 }

 export function ftruncateSync(fd: number, len?: number) {
   new FsFile(fd, Symbol.for("Deno.internal.FsFile")).truncateSync(len);
 }
+
+export function ftruncatePromise(fd: number, len?: number): Promise<void> {
+  return new Promise((resolve, reject) => {
+    ftruncate(fd, len, (err) => {
+      if (err) reject(err);
+      else resolve();
+    });
+  });
+}
@@ -13,6 +13,7 @@ import {
   ReadOptions,
   TextOptionsArgument,
 } from "ext:deno_node/_fs/_fs_common.ts";
+import { ftruncatePromise } from "ext:deno_node/_fs/_fs_ftruncate.ts";
 import { core } from "ext:core/mod.js";

 interface WriteResult {

@@ -73,6 +74,10 @@ export class FileHandle extends EventEmitter {
     }
   }

+  truncate(len?: number): Promise<void> {
+    return fsCall(ftruncatePromise, this, len);
+  }
+
   readFile(
     opt?: TextOptionsArgument | BinaryOptionsArgument | FileOptionsArgument,
   ): Promise<string | Buffer> {

@@ -85,11 +90,7 @@ export class FileHandle extends EventEmitter {
     length: number,
     position: number,
   ): Promise<WriteResult>;
-  write(
-    str: string,
-    position: number,
-    encoding: string,
-  ): Promise<WriteResult>;
+  write(str: string, position: number, encoding: string): Promise<WriteResult>;
   write(
     bufferOrStr: Uint8Array | string,
     offsetOrPosition: number,

@@ -120,16 +121,10 @@
       const encoding = lengthOrEncoding;

       return new Promise((resolve, reject) => {
-        write(
-          this.fd,
-          str,
-          position,
-          encoding,
-          (err, bytesWritten, buffer) => {
-            if (err) reject(err);
-            else resolve({ buffer, bytesWritten });
-          },
-        );
+        write(this.fd, str, position, encoding, (err, bytesWritten, buffer) => {
+          if (err) reject(err);
+          else resolve({ buffer, bytesWritten });
+        });
       });
     }
   }
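Editor's note: with `ftruncatePromise` exported and wired into `FileHandle`, the promise-based handle API gains `truncate()`. A short usage sketch; the file path is illustrative:

```ts
import { open } from "node:fs/promises";

const handle = await open("./data.txt", "r+");
await handle.truncate(4); // keep only the first four bytes
await handle.close();
```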
@@ -21,7 +21,7 @@ import {
   nodeWorkerThreadCloseCb,
   refMessagePort,
   serializeJsMessageData,
-  unrefPollForMessages,
+  unrefParentPort,
 } from "ext:deno_web/13_message_port.js";
 import * as webidl from "ext:deno_webidl/00_webidl.js";
 import { notImplemented } from "ext:deno_node/_utils.ts";

@@ -451,10 +451,10 @@ internals.__initWorkerThreads = (
     parentPort.emit("close");
   });
   parentPort.unref = () => {
-    parentPort[unrefPollForMessages] = true;
+    parentPort[unrefParentPort] = true;
   };
   parentPort.ref = () => {
-    parentPort[unrefPollForMessages] = false;
+    parentPort[unrefParentPort] = false;
   };

   if (isWorkerThread) {
|
|||
|
||||
[package]
|
||||
name = "deno_telemetry"
|
||||
version = "0.5.0"
|
||||
version = "0.6.0"
|
||||
authors.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
|
|
|
@@ -2,7 +2,7 @@
 [package]
 name = "deno_tls"
-version = "0.170.0"
+version = "0.171.0"
 authors.workspace = true
 edition.workspace = true
 license.workspace = true
@@ -2,7 +2,7 @@
 [package]
 name = "deno_url"
-version = "0.183.0"
+version = "0.184.0"
 authors.workspace = true
 edition.workspace = true
 license.workspace = true
@@ -908,8 +908,8 @@ const _original = Symbol("[[original]]");
  * @param {boolean=} autoClose If the resource should be auto-closed when the stream closes. Defaults to true.
  * @returns {ReadableStream<Uint8Array>}
  */
-function readableStreamForRid(rid, autoClose = true) {
-  const stream = new ReadableStream(_brand);
+function readableStreamForRid(rid, autoClose = true, Super) {
+  const stream = new (Super ?? ReadableStream)(_brand);
   stream[_resourceBacking] = { rid, autoClose };

   const tryClose = () => {

@@ -1130,8 +1130,8 @@ async function readableStreamCollectIntoUint8Array(stream) {
  * @param {boolean=} autoClose If the resource should be auto-closed when the stream closes. Defaults to true.
  * @returns {ReadableStream<Uint8Array>}
  */
-function writableStreamForRid(rid, autoClose = true) {
-  const stream = new WritableStream(_brand);
+function writableStreamForRid(rid, autoClose = true, Super) {
+  const stream = new (Super ?? WritableStream)(_brand);
   stream[_resourceBacking] = { rid, autoClose };

   const tryClose = () => {
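Editor's note: the new optional `Super` parameter lets callers brand a resource-backed stream as a subclass (the QUIC module uses it for `QuicReceiveStream`/`QuicSendStream`). A self-contained sketch of the pattern, independent of the internal helper:

```ts
// Pattern sketch: a factory constructs either the base class or a
// caller-provided subclass, so `instanceof` checks see the subclass.
class QuicReceiveStream extends ReadableStream<Uint8Array> {}

function readableForResource(
  Super: typeof ReadableStream = ReadableStream,
): ReadableStream<Uint8Array> {
  return new Super({
    pull(controller) {
      controller.close(); // actual resource reads elided in this sketch
    },
  });
}

const s = readableForResource(QuicReceiveStream);
console.log(s instanceof QuicReceiveStream); // true
```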
@@ -102,8 +102,8 @@ const nodeWorkerThreadCloseCb = Symbol("nodeWorkerThreadCloseCb");
 const nodeWorkerThreadCloseCbInvoked = Symbol("nodeWorkerThreadCloseCbInvoked");
 export const refMessagePort = Symbol("refMessagePort");
 /** It is used by 99_main.js and worker_threads to
- * unref/ref on the global pollForMessages promise. */
-export const unrefPollForMessages = Symbol("unrefPollForMessages");
+ * unref/ref on the global message event handler count. */
+export const unrefParentPort = Symbol("unrefParentPort");

 /**
  * @param {number} id
@@ -2,7 +2,7 @@
 [package]
 name = "deno_web"
-version = "0.214.0"
+version = "0.215.0"
 authors.workspace = true
 edition.workspace = true
 license.workspace = true
@@ -2,7 +2,7 @@
 [package]
 name = "deno_webgpu"
-version = "0.150.0"
+version = "0.151.0"
 authors = ["the Deno authors"]
 edition.workspace = true
 license = "MIT"
@@ -2,7 +2,7 @@
 [package]
 name = "deno_webidl"
-version = "0.183.0"
+version = "0.184.0"
 authors.workspace = true
 edition.workspace = true
 license.workspace = true
@@ -2,7 +2,7 @@
 [package]
 name = "deno_websocket"
-version = "0.188.0"
+version = "0.189.0"
 authors.workspace = true
 edition.workspace = true
 license.workspace = true
@@ -2,7 +2,7 @@
 [package]
 name = "deno_webstorage"
-version = "0.178.0"
+version = "0.179.0"
 authors.workspace = true
 edition.workspace = true
 license.workspace = true
@@ -9,6 +9,7 @@ use deno_package_json::PackageJsonDepValue;
 use deno_package_json::PackageJsonRc;
 use deno_path_util::url_to_file_path;
 use deno_semver::package::PackageReq;
+use deno_semver::StackString;
 use deno_semver::Version;
 use node_resolver::env::NodeResolverEnv;
 use node_resolver::errors::PackageFolderResolveError;

@@ -30,7 +31,7 @@ use super::ResolvePkgFolderFromDenoReqError;
 #[derive(Debug, Error)]
 pub enum ByonmResolvePkgFolderFromDenoReqError {
   #[error("Could not find \"{}\" in a node_modules folder. Deno expects the node_modules/ directory to be up to date. Did you forget to run `deno install`?", .0)]
-  MissingAlias(String),
+  MissingAlias(StackString),
   #[error(transparent)]
   PackageJson(#[from] PackageJsonLoadError),
   #[error("Could not find a matching package for 'npm:{}' in the node_modules directory. Ensure you have all your JSR and npm dependencies listed in your deno.json or package.json, then run `deno install`. Alternatively, turn on auto-install by specifying `\"nodeModulesDir\": \"auto\"` in your deno.json file.", .0)]

@@ -177,16 +178,14 @@ impl<Fs: DenoResolverFs, TEnv: NodeResolverEnv> ByonmNpmResolver<Fs, TEnv> {
     &self,
     req: &PackageReq,
     referrer: &Url,
-  ) -> Result<Option<(PackageJsonRc, String)>, PackageJsonLoadError> {
+  ) -> Result<Option<(PackageJsonRc, StackString)>, PackageJsonLoadError> {
     fn resolve_alias_from_pkg_json(
       req: &PackageReq,
       pkg_json: &PackageJson,
-    ) -> Option<String> {
+    ) -> Option<StackString> {
       let deps = pkg_json.resolve_local_package_json_deps();
-      for (key, value) in deps
-        .dependencies
-        .into_iter()
-        .chain(deps.dev_dependencies.into_iter())
+      for (key, value) in
+        deps.dependencies.iter().chain(deps.dev_dependencies.iter())
       {
         if let Ok(value) = value {
           match value {

@@ -194,12 +193,14 @@
             if dep_req.name == req.name
               && dep_req.version_req.intersects(&req.version_req)
             {
-              return Some(key);
+              return Some(key.clone());
             }
           }
           PackageJsonDepValue::Workspace(_workspace) => {
-            if key == req.name && req.version_req.tag() == Some("workspace") {
-              return Some(key);
+            if key.as_str() == req.name
+              && req.version_req.tag() == Some("workspace")
+            {
+              return Some(key.clone());
             }
           }
         }

@@ -246,7 +247,7 @@
       if let Ok(Some(dep_pkg_json)) =
         self.load_pkg_json(&pkg_folder.join("package.json"))
       {
-        if dep_pkg_json.name.as_ref() == Some(&req.name) {
+        if dep_pkg_json.name.as_deref() == Some(req.name.as_str()) {
           let matches_req = dep_pkg_json
             .version
             .as_ref()
@ -162,7 +162,7 @@ impl<TCjsCodeAnalyzer: CjsCodeAnalyzer, TNodeResolverEnv: NodeResolverEnv>
|
|||
add_export(
|
||||
&mut source,
|
||||
export,
|
||||
&format!("mod[\"{}\"]", escape_for_double_quote_string(export)),
|
||||
&format!("mod[{}]", to_double_quote_string(export)),
|
||||
&mut temp_var_count,
|
||||
);
|
||||
}
|
||||
|
@@ -561,8 +561,8 @@ fn add_export(
       "const __deno_export_{temp_var_count}__ = {initializer};"
     ));
     source.push(format!(
-      "export {{ __deno_export_{temp_var_count}__ as \"{}\" }};",
-      escape_for_double_quote_string(name)
+      "export {{ __deno_export_{temp_var_count}__ as {} }};",
+      to_double_quote_string(name)
     ));
   } else {
     source.push(format!("export const {name} = {initializer};"));
@@ -620,14 +620,9 @@ fn not_found(path: &str, referrer: &Path) -> AnyError {
   std::io::Error::new(std::io::ErrorKind::NotFound, msg).into()
 }
 
-fn escape_for_double_quote_string(text: &str) -> Cow<str> {
-  // this should be rare, so doing a scan first before allocating is ok
-  if text.chars().any(|c| matches!(c, '"' | '\\')) {
-    // don't bother making this more complex for perf because it's rare
-    Cow::Owned(text.replace('\\', "\\\\").replace('"', "\\\""))
-  } else {
-    Cow::Borrowed(text)
-  }
+fn to_double_quote_string(text: &str) -> String {
+  // serde can handle this for us
+  serde_json::to_string(text).unwrap()
 }
 
 #[cfg(test)]
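
The replacement leans on serde_json to produce the quoted, escaped literal: serde_json::to_string on a &str yields a double-quoted JSON string, and a JSON string literal is also a valid double-quoted ECMAScript string for the inputs handled here. A quick standalone check of that assumption:

fn to_double_quote_string(text: &str) -> String {
  // serde_json escapes quotes, backslashes, and control characters for us.
  serde_json::to_string(text).unwrap()
}

fn main() {
  assert_eq!(to_double_quote_string("test"), "\"test\"");
  // Input a"b\c comes back as "a\"b\\c", already wrapped in double quotes.
  assert_eq!(to_double_quote_string("a\"b\\c"), "\"a\\\"b\\\\c\"");
}
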
@@ -665,4 +660,13 @@ mod tests {
       Some(("@some-package/core".to_string(), "./actions".to_string()))
     );
   }
+
+  #[test]
+  fn test_to_double_quote_string() {
+    assert_eq!(to_double_quote_string("test"), "\"test\"");
+    assert_eq!(
+      to_double_quote_string("\r\n\t\"test"),
+      "\"\\r\\n\\t\\\"test\""
+    );
+  }
 }

@@ -320,7 +320,6 @@ impl NodeJsErrorCoded for PackageJsonLoadError {
 impl NodeJsErrorCoded for ClosestPkgJsonError {
   fn code(&self) -> NodeJsErrorCode {
     match self.as_kind() {
-      ClosestPkgJsonErrorKind::CanonicalizingDir(e) => e.code(),
       ClosestPkgJsonErrorKind::Load(e) => e.code(),
     }
   }
@@ -331,26 +330,10 @@ pub struct ClosestPkgJsonError(pub Box<ClosestPkgJsonErrorKind>);
 
 #[derive(Debug, Error)]
 pub enum ClosestPkgJsonErrorKind {
-  #[error(transparent)]
-  CanonicalizingDir(#[from] CanonicalizingPkgJsonDirError),
   #[error(transparent)]
   Load(#[from] PackageJsonLoadError),
 }
-
-#[derive(Debug, Error)]
-#[error("[{}] Failed canonicalizing package.json directory '{}'.", self.code(), dir_path.display())]
-pub struct CanonicalizingPkgJsonDirError {
-  pub dir_path: PathBuf,
-  #[source]
-  pub source: std::io::Error,
-}
-
-impl NodeJsErrorCoded for CanonicalizingPkgJsonDirError {
-  fn code(&self) -> NodeJsErrorCode {
-    NodeJsErrorCode::ERR_MODULE_NOT_FOUND
-  }
-}
-
 // todo(https://github.com/denoland/deno_core/issues/810): make this a TypeError
 #[derive(Debug, Error)]
 #[error(

@@ -2,7 +2,6 @@
 
 use deno_package_json::PackageJson;
 use deno_package_json::PackageJsonRc;
-use deno_path_util::strip_unc_prefix;
 use std::cell::RefCell;
 use std::collections::HashMap;
 use std::io::ErrorKind;
@@ -11,7 +10,6 @@ use std::path::PathBuf;
 use url::Url;
 
 use crate::env::NodeResolverEnv;
-use crate::errors::CanonicalizingPkgJsonDirError;
 use crate::errors::ClosestPkgJsonError;
 use crate::errors::PackageJsonLoadError;
 
@@ -67,37 +65,8 @@ impl<TEnv: NodeResolverEnv> PackageJsonResolver<TEnv> {
     &self,
     file_path: &Path,
   ) -> Result<Option<PackageJsonRc>, ClosestPkgJsonError> {
-    // we use this for deno compile using byonm because the script paths
-    // won't be in virtual file system, but the package.json paths will be
-    fn canonicalize_first_ancestor_exists<TEnv: NodeResolverEnv>(
-      dir_path: &Path,
-      env: &TEnv,
-    ) -> Result<Option<PathBuf>, std::io::Error> {
-      for ancestor in dir_path.ancestors() {
-        match env.realpath_sync(ancestor) {
-          Ok(dir_path) => return Ok(Some(dir_path)),
-          Err(err) if err.kind() == std::io::ErrorKind::NotFound => {
-            // keep searching
-          }
-          Err(err) => return Err(err),
-        }
-      }
-      Ok(None)
-    }
-
     let parent_dir = file_path.parent().unwrap();
-    let Some(start_dir) = canonicalize_first_ancestor_exists(
-      parent_dir, &self.env,
-    )
-    .map_err(|source| CanonicalizingPkgJsonDirError {
-      dir_path: parent_dir.to_path_buf(),
-      source,
-    })?
-    else {
-      return Ok(None);
-    };
-    let start_dir = strip_unc_prefix(start_dir);
-    for current_dir in start_dir.ancestors() {
+    for current_dir in parent_dir.ancestors() {
       let package_json_path = current_dir.join("package.json");
       if let Some(pkg_json) = self.load_package_json(&package_json_path)? {
         return Ok(Some(pkg_json));

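
The rewritten lookup drops the realpath-canonicalization pre-pass and simply walks the ancestor chain of the file's parent directory. Path::ancestors from the standard library yields the path itself and then each parent up to the root, which gives the closest-package.json search order; a small illustration with a hypothetical path:

use std::path::Path;

fn main() {
  let dirs: Vec<_> = Path::new("/app/src/util")
    .ancestors()
    .map(|p| p.display().to_string())
    .collect();
  // The file's own directory is checked first, then each parent in turn.
  assert_eq!(dirs, ["/app/src/util", "/app/src", "/app", "/"]);
}
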
@@ -318,6 +318,8 @@ impl<TEnv: NodeResolverEnv> NodeResolver<TEnv> {
     resolution_mode: ResolutionMode,
     resolution_kind: NodeResolutionKind,
   ) -> Result<Url, PackageSubpathResolveError> {
+    // todo(dsherret): don't allocate a string here (maybe use an
+    // enum that says the subpath is not prefixed with a ./)
     let package_subpath = package_subpath
       .map(|s| format!("./{s}"))
       .unwrap_or_else(|| ".".to_string());

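
The added todo notes that this normalization allocates even in the common case. Restated standalone with the same semantics (the helper name normalize_subpath is illustrative, not from the source): a missing subpath resolves the package root ".", and anything else gains a "./" prefix before matching.

fn normalize_subpath(package_subpath: Option<&str>) -> String {
  package_subpath
    .map(|s| format!("./{s}"))
    .unwrap_or_else(|| ".".to_string())
}

fn main() {
  assert_eq!(normalize_subpath(None), ".");
  assert_eq!(normalize_subpath(Some("server.js")), "./server.js");
}
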
@@ -23,6 +23,7 @@ async-trait.workspace = true
 base64.workspace = true
 boxed_error.workspace = true
+deno_cache_dir.workspace = true
 deno_error.workspace = true
 deno_npm.workspace = true
 deno_semver.workspace = true
 deno_unsync = { workspace = true, features = ["tokio"] }

@@ -15,6 +15,7 @@ use deno_npm::npm_rc::ResolvedNpmRc;
 use deno_npm::registry::NpmPackageInfo;
 use deno_npm::NpmPackageCacheFolderId;
 use deno_semver::package::PackageNv;
+use deno_semver::StackString;
 use deno_semver::Version;
 use http::HeaderName;
 use http::HeaderValue;
@@ -260,7 +261,7 @@ impl<TEnv: NpmCacheEnv> NpmCache<TEnv> {
       .and_then(|cache_id| {
         Some(NpmPackageCacheFolderId {
           nv: PackageNv {
-            name: cache_id.name,
+            name: StackString::from_string(cache_id.name),
             version: Version::parse_from_npm(&cache_id.version).ok()?,
           },
           copy_index: cache_id.copy_index,

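
With cache folder ids now carrying StackString names, a PackageNv is assembled from an owned name plus an npm-style version parse. A hedged sketch of that construction, assuming only the deno_semver items imported above (PackageNv, StackString, Version):

use deno_semver::package::PackageNv;
use deno_semver::StackString;
use deno_semver::Version;

fn main() {
  let nv = PackageNv {
    name: StackString::from_string("preact".to_string()),
    version: Version::parse_from_npm("10.19.2").unwrap(),
  };
  // parse_from_npm is fallible, which is why the cache code uses .ok()?.
  assert_eq!(nv.version.to_string(), "10.19.2");
}
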
@@ -18,6 +18,7 @@ use deno_unsync::sync::MultiRuntimeAsyncValueCreator;
 use futures::future::LocalBoxFuture;
 use futures::FutureExt;
 use parking_lot::Mutex;
+use thiserror::Error;
 use url::Url;
 
 use crate::remote::maybe_auth_header_for_npm_registry;
@@ -28,6 +29,31 @@ use crate::NpmCacheSetting;
 type LoadResult = Result<FutureResult, Arc<AnyError>>;
 type LoadFuture = LocalBoxFuture<'static, LoadResult>;
 
+#[derive(Debug, Error)]
+#[error(transparent)]
+pub struct AnyhowJsError(pub AnyError);
+
+impl deno_error::JsErrorClass for AnyhowJsError {
+  fn get_class(&self) -> &'static str {
+    "generic"
+  }
+
+  fn get_message(&self) -> std::borrow::Cow<'static, str> {
+    self.0.to_string().into()
+  }
+
+  fn get_additional_properties(
+    &self,
+  ) -> Option<
+    Vec<(
+      std::borrow::Cow<'static, str>,
+      std::borrow::Cow<'static, str>,
+    )>,
+  > {
+    None
+  }
+}
+
 #[derive(Debug, Clone)]
 enum FutureResult {
   PackageNotExists,
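
AnyhowJsError is a thin newtype so an anyhow-style AnyError can travel where a deno_error::JsErrorClass is expected, always reporting the "generic" class and no extra properties. An illustrative use, assuming AnyError is the usual anyhow::Error alias:

fn demo() {
  use deno_error::JsErrorClass;
  // Wrap an arbitrary dynamic error, as the registry load path below does.
  let err = AnyhowJsError(anyhow::anyhow!("registry fetch failed"));
  assert_eq!(err.get_class(), "generic");
  assert_eq!(err.get_message(), "registry fetch failed");
}
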
@@ -157,9 +183,9 @@ impl<TEnv: NpmCacheEnv> RegistryInfoProvider<TEnv> {
       Ok(None) => Err(NpmRegistryPackageInfoLoadError::PackageNotExists {
         package_name: name.to_string(),
       }),
-      Err(err) => {
-        Err(NpmRegistryPackageInfoLoadError::LoadError(Arc::new(err)))
-      }
+      Err(err) => Err(NpmRegistryPackageInfoLoadError::LoadError(Arc::new(
+        AnyhowJsError(err),
+      ))),
     }
   }
 
Some files were not shown because too many files have changed in this diff.