mirror of
https://github.com/denoland/deno.git
synced 2025-01-21 04:52:26 -05:00
Merge branch 'main' into DENO_ROOT_INSTALL_Fix
This commit is contained in:
commit
e0ee36c297
530 changed files with 10849 additions and 3840 deletions
|
@ -31,6 +31,8 @@
|
|||
"cli/tsc/dts/lib.scripthost.d.ts",
|
||||
"cli/tsc/dts/lib.webworker*.d.ts",
|
||||
"cli/tsc/dts/typescript.d.ts",
|
||||
"cli/tools/doc/prism.css",
|
||||
"cli/tools/doc/prism.js",
|
||||
"ext/websocket/autobahn/reports",
|
||||
"gh-pages",
|
||||
"target",
|
||||
|
|
2
.github/workflows/ci.generate.ts
vendored
2
.github/workflows/ci.generate.ts
vendored
|
@ -5,7 +5,7 @@ import { stringify } from "jsr:@std/yaml@^0.221/stringify";
|
|||
// Bump this number when you want to purge the cache.
|
||||
// Note: the tools/release/01_bump_crate_versions.ts script will update this version
|
||||
// automatically via regex, so ensure that this line maintains this format.
|
||||
const cacheVersion = 25;
|
||||
const cacheVersion = 27;
|
||||
|
||||
const ubuntuX86Runner = "ubuntu-24.04";
|
||||
const ubuntuX86XlRunner = "ubuntu-24.04-xl";
|
||||
|
|
8
.github/workflows/ci.yml
vendored
8
.github/workflows/ci.yml
vendored
|
@ -361,8 +361,8 @@ jobs:
|
|||
path: |-
|
||||
~/.cargo/registry/index
|
||||
~/.cargo/registry/cache
|
||||
key: '25-cargo-home-${{ matrix.os }}-${{ matrix.arch }}-${{ hashFiles(''Cargo.lock'') }}'
|
||||
restore-keys: '25-cargo-home-${{ matrix.os }}-${{ matrix.arch }}'
|
||||
key: '27-cargo-home-${{ matrix.os }}-${{ matrix.arch }}-${{ hashFiles(''Cargo.lock'') }}'
|
||||
restore-keys: '27-cargo-home-${{ matrix.os }}-${{ matrix.arch }}'
|
||||
if: '!(matrix.skip)'
|
||||
- name: Restore cache build output (PR)
|
||||
uses: actions/cache/restore@v4
|
||||
|
@ -375,7 +375,7 @@ jobs:
|
|||
!./target/*/*.zip
|
||||
!./target/*/*.tar.gz
|
||||
key: never_saved
|
||||
restore-keys: '25-cargo-target-${{ matrix.os }}-${{ matrix.arch }}-${{ matrix.profile }}-${{ matrix.job }}-'
|
||||
restore-keys: '27-cargo-target-${{ matrix.os }}-${{ matrix.arch }}-${{ matrix.profile }}-${{ matrix.job }}-'
|
||||
- name: Apply and update mtime cache
|
||||
if: '!(matrix.skip) && (!startsWith(github.ref, ''refs/tags/''))'
|
||||
uses: ./.github/mtime_cache
|
||||
|
@ -685,7 +685,7 @@ jobs:
|
|||
!./target/*/*.zip
|
||||
!./target/*/*.sha256sum
|
||||
!./target/*/*.tar.gz
|
||||
key: '25-cargo-target-${{ matrix.os }}-${{ matrix.arch }}-${{ matrix.profile }}-${{ matrix.job }}-${{ github.sha }}'
|
||||
key: '27-cargo-target-${{ matrix.os }}-${{ matrix.arch }}-${{ matrix.profile }}-${{ matrix.job }}-${{ github.sha }}'
|
||||
publish-canary:
|
||||
name: publish canary
|
||||
runs-on: ubuntu-24.04
|
||||
|
|
701
Cargo.lock
generated
701
Cargo.lock
generated
File diff suppressed because it is too large
Load diff
64
Cargo.toml
64
Cargo.toml
|
@ -46,19 +46,19 @@ repository = "https://github.com/denoland/deno"
|
|||
|
||||
[workspace.dependencies]
|
||||
deno_ast = { version = "=0.43.3", features = ["transpiling"] }
|
||||
deno_core = { version = "0.319.0" }
|
||||
deno_core = { version = "0.322.0" }
|
||||
|
||||
deno_bench_util = { version = "0.171.0", path = "./bench_util" }
|
||||
deno_config = { version = "=0.38.2", features = ["workspace", "sync"] }
|
||||
deno_bench_util = { version = "0.173.0", path = "./bench_util" }
|
||||
deno_config = { version = "=0.39.2", features = ["workspace", "sync"] }
|
||||
deno_lockfile = "=0.23.1"
|
||||
deno_media_type = { version = "0.2.0", features = ["module_specifier"] }
|
||||
deno_npm = "=0.25.4"
|
||||
deno_path_util = "=0.2.1"
|
||||
deno_permissions = { version = "0.37.0", path = "./runtime/permissions" }
|
||||
deno_runtime = { version = "0.186.0", path = "./runtime" }
|
||||
deno_permissions = { version = "0.39.0", path = "./runtime/permissions" }
|
||||
deno_runtime = { version = "0.188.0", path = "./runtime" }
|
||||
deno_semver = "=0.5.16"
|
||||
deno_terminal = "0.2.0"
|
||||
napi_sym = { version = "0.107.0", path = "./ext/napi/sym" }
|
||||
napi_sym = { version = "0.109.0", path = "./ext/napi/sym" }
|
||||
test_util = { package = "test_server", path = "./tests/util/server" }
|
||||
|
||||
denokv_proto = "0.8.4"
|
||||
|
@ -67,32 +67,32 @@ denokv_remote = "0.8.4"
|
|||
denokv_sqlite = { default-features = false, version = "0.8.4" }
|
||||
|
||||
# exts
|
||||
deno_broadcast_channel = { version = "0.171.0", path = "./ext/broadcast_channel" }
|
||||
deno_cache = { version = "0.109.0", path = "./ext/cache" }
|
||||
deno_canvas = { version = "0.46.0", path = "./ext/canvas" }
|
||||
deno_console = { version = "0.177.0", path = "./ext/console" }
|
||||
deno_cron = { version = "0.57.0", path = "./ext/cron" }
|
||||
deno_crypto = { version = "0.191.0", path = "./ext/crypto" }
|
||||
deno_fetch = { version = "0.201.0", path = "./ext/fetch" }
|
||||
deno_ffi = { version = "0.164.0", path = "./ext/ffi" }
|
||||
deno_fs = { version = "0.87.0", path = "./ext/fs" }
|
||||
deno_http = { version = "0.175.0", path = "./ext/http" }
|
||||
deno_io = { version = "0.87.0", path = "./ext/io" }
|
||||
deno_kv = { version = "0.85.0", path = "./ext/kv" }
|
||||
deno_napi = { version = "0.108.0", path = "./ext/napi" }
|
||||
deno_net = { version = "0.169.0", path = "./ext/net" }
|
||||
deno_node = { version = "0.114.0", path = "./ext/node" }
|
||||
deno_tls = { version = "0.164.0", path = "./ext/tls" }
|
||||
deno_url = { version = "0.177.0", path = "./ext/url" }
|
||||
deno_web = { version = "0.208.0", path = "./ext/web" }
|
||||
deno_webgpu = { version = "0.144.0", path = "./ext/webgpu" }
|
||||
deno_webidl = { version = "0.177.0", path = "./ext/webidl" }
|
||||
deno_websocket = { version = "0.182.0", path = "./ext/websocket" }
|
||||
deno_webstorage = { version = "0.172.0", path = "./ext/webstorage" }
|
||||
deno_broadcast_channel = { version = "0.173.0", path = "./ext/broadcast_channel" }
|
||||
deno_cache = { version = "0.111.0", path = "./ext/cache" }
|
||||
deno_canvas = { version = "0.48.0", path = "./ext/canvas" }
|
||||
deno_console = { version = "0.179.0", path = "./ext/console" }
|
||||
deno_cron = { version = "0.59.0", path = "./ext/cron" }
|
||||
deno_crypto = { version = "0.193.0", path = "./ext/crypto" }
|
||||
deno_fetch = { version = "0.203.0", path = "./ext/fetch" }
|
||||
deno_ffi = { version = "0.166.0", path = "./ext/ffi" }
|
||||
deno_fs = { version = "0.89.0", path = "./ext/fs" }
|
||||
deno_http = { version = "0.177.0", path = "./ext/http" }
|
||||
deno_io = { version = "0.89.0", path = "./ext/io" }
|
||||
deno_kv = { version = "0.87.0", path = "./ext/kv" }
|
||||
deno_napi = { version = "0.110.0", path = "./ext/napi" }
|
||||
deno_net = { version = "0.171.0", path = "./ext/net" }
|
||||
deno_node = { version = "0.116.0", path = "./ext/node" }
|
||||
deno_tls = { version = "0.166.0", path = "./ext/tls" }
|
||||
deno_url = { version = "0.179.0", path = "./ext/url" }
|
||||
deno_web = { version = "0.210.0", path = "./ext/web" }
|
||||
deno_webgpu = { version = "0.146.0", path = "./ext/webgpu" }
|
||||
deno_webidl = { version = "0.179.0", path = "./ext/webidl" }
|
||||
deno_websocket = { version = "0.184.0", path = "./ext/websocket" }
|
||||
deno_webstorage = { version = "0.174.0", path = "./ext/webstorage" }
|
||||
|
||||
# resolvers
|
||||
deno_resolver = { version = "0.9.0", path = "./resolvers/deno" }
|
||||
node_resolver = { version = "0.16.0", path = "./resolvers/node" }
|
||||
deno_resolver = { version = "0.11.0", path = "./resolvers/deno" }
|
||||
node_resolver = { version = "0.18.0", path = "./resolvers/node" }
|
||||
|
||||
aes = "=0.8.3"
|
||||
anyhow = "1.0.57"
|
||||
|
@ -100,6 +100,7 @@ async-trait = "0.1.73"
|
|||
base32 = "=0.5.1"
|
||||
base64 = "0.21.7"
|
||||
bencher = "0.1"
|
||||
boxed_error = "0.2.2"
|
||||
brotli = "6.0.0"
|
||||
bytes = "1.4.0"
|
||||
cache_control = "=0.2.0"
|
||||
|
@ -199,8 +200,7 @@ tower-http = { version = "0.6.1", features = ["decompression-br", "decompression
|
|||
tower-lsp = { package = "deno_tower_lsp", version = "0.1.0", features = ["proposed"] }
|
||||
tower-service = "0.3.2"
|
||||
twox-hash = "=1.6.3"
|
||||
# Upgrading past 2.4.1 may cause WPT failures
|
||||
url = { version = "< 2.5.0", features = ["serde", "expose_internals"] }
|
||||
url = { version = "2.5", features = ["serde", "expose_internals"] }
|
||||
uuid = { version = "1.3.0", features = ["v4"] }
|
||||
webpki-root-certs = "0.26.5"
|
||||
webpki-roots = "0.26"
|
||||
|
|
80
Releases.md
80
Releases.md
|
@ -6,6 +6,86 @@ https://github.com/denoland/deno/releases
|
|||
We also have one-line install commands at:
|
||||
https://github.com/denoland/deno_install
|
||||
|
||||
### 2.1.1 / 2024.11.21
|
||||
|
||||
- docs(add): clarification to add command (#26968)
|
||||
- docs(doc): fix typo in doc subcommand help output (#26321)
|
||||
- fix(node): regression where ts files were sometimes resolved instead of js
|
||||
(#26971)
|
||||
- fix(task): ensure root config always looks up dependencies in root (#26959)
|
||||
- fix(watch): don't panic if there's no path provided (#26972)
|
||||
- fix: Buffer global in --unstable-node-globals (#26973)
|
||||
|
||||
### 2.1.0 / 2024.11.21
|
||||
|
||||
- feat(cli): add `--unstable-node-globals` flag (#26617)
|
||||
- feat(cli): support multiple env file argument (#26527)
|
||||
- feat(compile): ability to embed directory in executable (#26939)
|
||||
- feat(compile): ability to embed local data files (#26934)
|
||||
- feat(ext/fetch): Make fetch client parameters configurable (#26909)
|
||||
- feat(ext/fetch): allow embedders to use `hickory_dns_resolver` instead of
|
||||
default `GaiResolver` (#26740)
|
||||
- feat(ext/fs): add ctime to Deno.stats and use it in node compat layer (#24801)
|
||||
- feat(ext/http): Make http server parameters configurable (#26785)
|
||||
- feat(ext/node): perf_hooks.monitorEventLoopDelay() (#26905)
|
||||
- feat(fetch): accept async iterables for body (#26882)
|
||||
- feat(fmt): support SQL (#26750)
|
||||
- feat(info): show location for Web Cache (#26205)
|
||||
- feat(init): add --npm flag to initialize npm projects (#26896)
|
||||
- feat(jupyter): Add `Deno.jupyter.image` API (#26284)
|
||||
- feat(lint): Add checked files list to the JSON output(#26936)
|
||||
- feat(lsp): auto-imports with @deno-types directives (#26821)
|
||||
- feat(node): stabilize detecting if CJS via `"type": "commonjs"` in a
|
||||
package.json (#26439)
|
||||
- feat(permission): support suffix wildcards in `--allow-env` flag (#25255)
|
||||
- feat(publish): add `--set-version <version>` flag (#26141)
|
||||
- feat(runtime): remove public OTEL trace API (#26854)
|
||||
- feat(task): add --eval flag (#26943)
|
||||
- feat(task): dependencies (#26467)
|
||||
- feat(task): support object notation, remove support for JSDocs (#26886)
|
||||
- feat(task): workspace support with --filter and --recursive (#26949)
|
||||
- feat(watch): log which file changed on HMR or watch change (#25801)
|
||||
- feat: OpenTelemetry Tracing API and Exporting (#26710)
|
||||
- feat: Wasm module support (#26668)
|
||||
- feat: fmt and lint respect .gitignore file (#26897)
|
||||
- feat: permission stack traces in ops (#26938)
|
||||
- feat: subcommand to view and update outdated dependencies (#26942)
|
||||
- feat: upgrade V8 to 13.0 (#26851)
|
||||
- fix(cli): preserve comments in doc tests (#26828)
|
||||
- fix(cli): show prefix hint when installing a package globally (#26629)
|
||||
- fix(ext/cache): gracefully error when cache creation failed (#26895)
|
||||
- fix(ext/http): prefer brotli for `accept-encoding: gzip, deflate, br, zstd`
|
||||
(#26814)
|
||||
- fix(ext/node): New async setInterval function to improve the nodejs
|
||||
compatibility (#26703)
|
||||
- fix(ext/node): add autoSelectFamily option to net.createConnection (#26661)
|
||||
- fix(ext/node): handle `--allow-sys=inspector` (#26836)
|
||||
- fix(ext/node): increase tolerance for interval test (#26899)
|
||||
- fix(ext/node): process.getBuiltinModule (#26833)
|
||||
- fix(ext/node): use ERR_NOT_IMPLEMENTED for notImplemented (#26853)
|
||||
- fix(ext/node): zlib.crc32() (#26856)
|
||||
- fix(ext/webgpu): Create GPUQuerySet converter before usage (#26883)
|
||||
- fix(ext/websocket): initialize `error` attribute of WebSocket ErrorEvent
|
||||
(#26796)
|
||||
- fix(ext/webstorage): use error class for sqlite error case (#26806)
|
||||
- fix(fmt): error instead of panic on unstable format (#26859)
|
||||
- fix(fmt): formatting of .svelte files (#26948)
|
||||
- fix(install): percent encodings in interactive progress bar (#26600)
|
||||
- fix(install): re-setup bin entries after running lifecycle scripts (#26752)
|
||||
- fix(lockfile): track dependencies specified in TypeScript compiler options
|
||||
(#26551)
|
||||
- fix(lsp): ignore editor indent settings if deno.json is present (#26912)
|
||||
- fix(lsp): skip code action edits that can't be converted (#26831)
|
||||
- fix(node): handle resolving ".//<something>" in npm packages (#26920)
|
||||
- fix(node/crypto): support promisify on generateKeyPair (#26913)
|
||||
- fix(permissions): say to use --allow-run instead of --allow-all (#26842)
|
||||
- fix(publish): improve error message when missing exports (#26945)
|
||||
- fix: otel resiliency (#26857)
|
||||
- fix: update message for unsupported schemes with npm and jsr (#26884)
|
||||
- perf(compile): code cache (#26528)
|
||||
- perf(windows): delay load webgpu and some other dlls (#26917)
|
||||
- perf: use available system memory for v8 isolate memory limit (#26868)
|
||||
|
||||
### 2.0.6 / 2024.11.10
|
||||
|
||||
- feat(ext/http): abort event when request is cancelled (#26781)
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
|
||||
[package]
|
||||
name = "deno_bench_util"
|
||||
version = "0.171.0"
|
||||
version = "0.173.0"
|
||||
authors.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
|
||||
[package]
|
||||
name = "deno"
|
||||
version = "2.0.6"
|
||||
version = "2.1.1"
|
||||
authors.workspace = true
|
||||
default-run = "deno"
|
||||
edition.workspace = true
|
||||
|
@ -72,8 +72,8 @@ deno_ast = { workspace = true, features = ["bundler", "cjs", "codegen", "proposa
|
|||
deno_cache_dir.workspace = true
|
||||
deno_config.workspace = true
|
||||
deno_core = { workspace = true, features = ["include_js_files_for_snapshotting"] }
|
||||
deno_doc = { version = "0.156.0", default-features = false, features = ["rust", "html", "syntect"] }
|
||||
deno_graph = { version = "=0.84.1" }
|
||||
deno_doc = { version = "0.160.0", features = ["rust", "comrak"] }
|
||||
deno_graph = { version = "=0.85.0" }
|
||||
deno_lint = { version = "=0.68.0", features = ["docs"] }
|
||||
deno_lockfile.workspace = true
|
||||
deno_npm.workspace = true
|
||||
|
@ -129,7 +129,7 @@ libz-sys.workspace = true
|
|||
log = { workspace = true, features = ["serde"] }
|
||||
lsp-types.workspace = true
|
||||
malva = "=0.11.0"
|
||||
markup_fmt = "=0.15.0"
|
||||
markup_fmt = "=0.16.0"
|
||||
memmem.workspace = true
|
||||
monch.workspace = true
|
||||
notify.workspace = true
|
||||
|
@ -151,6 +151,8 @@ serde_repr.workspace = true
|
|||
sha2.workspace = true
|
||||
shell-escape = "=0.1.5"
|
||||
spki = { version = "0.7", features = ["pem"] }
|
||||
# NOTE(bartlomieju): using temporary fork for now, revert back to `sqlformat-rs` later
|
||||
sqlformat = { package = "deno_sqlformat", version = "0.3.2" }
|
||||
strsim = "0.11.1"
|
||||
tar.workspace = true
|
||||
tempfile.workspace = true
|
||||
|
|
|
@ -70,7 +70,41 @@ pub fn deno_json_deps(
|
|||
let values = imports_values(config.json.imports.as_ref())
|
||||
.into_iter()
|
||||
.chain(scope_values(config.json.scopes.as_ref()));
|
||||
values_to_set(values)
|
||||
let mut set = values_to_set(values);
|
||||
|
||||
if let Some(serde_json::Value::Object(compiler_options)) =
|
||||
&config.json.compiler_options
|
||||
{
|
||||
// add jsxImportSource
|
||||
if let Some(serde_json::Value::String(value)) =
|
||||
compiler_options.get("jsxImportSource")
|
||||
{
|
||||
if let Some(dep_req) = value_to_dep_req(value) {
|
||||
set.insert(dep_req);
|
||||
}
|
||||
}
|
||||
// add jsxImportSourceTypes
|
||||
if let Some(serde_json::Value::String(value)) =
|
||||
compiler_options.get("jsxImportSourceTypes")
|
||||
{
|
||||
if let Some(dep_req) = value_to_dep_req(value) {
|
||||
set.insert(dep_req);
|
||||
}
|
||||
}
|
||||
// add the dependencies in the types array
|
||||
if let Some(serde_json::Value::Array(types)) = compiler_options.get("types")
|
||||
{
|
||||
for value in types {
|
||||
if let serde_json::Value::String(value) = value {
|
||||
if let Some(dep_req) = value_to_dep_req(value) {
|
||||
set.insert(dep_req);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
set
|
||||
}
|
||||
|
||||
fn imports_values(value: Option<&serde_json::Value>) -> Vec<&String> {
|
||||
|
@ -98,15 +132,23 @@ fn values_to_set<'a>(
|
|||
) -> HashSet<JsrDepPackageReq> {
|
||||
let mut entries = HashSet::new();
|
||||
for value in values {
|
||||
if let Ok(req_ref) = JsrPackageReqReference::from_str(value) {
|
||||
entries.insert(JsrDepPackageReq::jsr(req_ref.into_inner().req));
|
||||
} else if let Ok(req_ref) = NpmPackageReqReference::from_str(value) {
|
||||
entries.insert(JsrDepPackageReq::npm(req_ref.into_inner().req));
|
||||
if let Some(dep_req) = value_to_dep_req(value) {
|
||||
entries.insert(dep_req);
|
||||
}
|
||||
}
|
||||
entries
|
||||
}
|
||||
|
||||
fn value_to_dep_req(value: &str) -> Option<JsrDepPackageReq> {
|
||||
if let Ok(req_ref) = JsrPackageReqReference::from_str(value) {
|
||||
Some(JsrDepPackageReq::jsr(req_ref.into_inner().req))
|
||||
} else if let Ok(req_ref) = NpmPackageReqReference::from_str(value) {
|
||||
Some(JsrDepPackageReq::npm(req_ref.into_inner().req))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
pub fn check_warn_tsconfig(ts_config: &TsConfigForEmit) {
|
||||
if let Some(ignored_options) = &ts_config.maybe_ignored_options {
|
||||
log::warn!("{}", ignored_options);
|
||||
|
|
File diff suppressed because it is too large
Load diff
|
@ -126,11 +126,7 @@ impl CliLockfile {
|
|||
maybe_deno_json: Option<&ConfigFile>,
|
||||
) -> HashSet<JsrDepPackageReq> {
|
||||
maybe_deno_json
|
||||
.map(|c| {
|
||||
crate::args::deno_json::deno_json_deps(c)
|
||||
.into_iter()
|
||||
.collect()
|
||||
})
|
||||
.map(crate::args::deno_json::deno_json_deps)
|
||||
.unwrap_or_default()
|
||||
}
|
||||
|
||||
|
|
|
@ -289,6 +289,7 @@ impl BenchOptions {
|
|||
#[derive(Clone, Debug, Default, PartialEq, Eq, Hash)]
|
||||
pub struct UnstableFmtOptions {
|
||||
pub component: bool,
|
||||
pub sql: bool,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
|
@ -322,6 +323,7 @@ impl FmtOptions {
|
|||
options: resolve_fmt_options(fmt_flags, fmt_config.options),
|
||||
unstable: UnstableFmtOptions {
|
||||
component: unstable.component || fmt_flags.unstable_component,
|
||||
sql: unstable.sql || fmt_flags.unstable_sql,
|
||||
},
|
||||
files: fmt_config.files,
|
||||
}
|
||||
|
@ -868,12 +870,8 @@ impl CliOptions {
|
|||
} else {
|
||||
&[]
|
||||
};
|
||||
let config_parse_options = deno_config::deno_json::ConfigParseOptions {
|
||||
include_task_comments: matches!(
|
||||
flags.subcommand,
|
||||
DenoSubcommand::Task(..)
|
||||
),
|
||||
};
|
||||
let config_parse_options =
|
||||
deno_config::deno_json::ConfigParseOptions::default();
|
||||
let discover_pkg_json = flags.config_flag != ConfigFlag::Disabled
|
||||
&& !flags.no_npm
|
||||
&& !has_flag_env_var("DENO_NO_PACKAGE_JSON");
|
||||
|
@ -1132,7 +1130,7 @@ impl CliOptions {
|
|||
self.flags.otel_config()
|
||||
}
|
||||
|
||||
pub fn env_file_name(&self) -> Option<&String> {
|
||||
pub fn env_file_name(&self) -> Option<&Vec<String>> {
|
||||
self.flags.env_file.as_ref()
|
||||
}
|
||||
|
||||
|
@ -1323,6 +1321,7 @@ impl CliOptions {
|
|||
let workspace = self.workspace();
|
||||
UnstableFmtOptions {
|
||||
component: workspace.has_unstable("fmt-component"),
|
||||
sql: workspace.has_unstable("fmt-sql"),
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1549,6 +1548,10 @@ impl CliOptions {
|
|||
}) => Url::parse(&flags.module_url)
|
||||
.ok()
|
||||
.map(|url| vec![Cow::Owned(url)]),
|
||||
DenoSubcommand::Doc(DocFlags {
|
||||
source_files: DocSourceFileFlag::Paths(paths),
|
||||
..
|
||||
}) => Some(files_to_urls(paths)),
|
||||
_ => None,
|
||||
})
|
||||
.unwrap_or_default();
|
||||
|
@ -1625,8 +1628,10 @@ impl CliOptions {
|
|||
DenoSubcommand::Install(_)
|
||||
| DenoSubcommand::Add(_)
|
||||
| DenoSubcommand::Remove(_)
|
||||
| DenoSubcommand::Init(_)
|
||||
| DenoSubcommand::Outdated(_)
|
||||
) {
|
||||
// For `deno install/add/remove` we want to force the managed resolver so it can set up `node_modules/` directory.
|
||||
// For `deno install/add/remove/init` we want to force the managed resolver so it can set up `node_modules/` directory.
|
||||
return false;
|
||||
}
|
||||
if self.node_modules_dir().ok().flatten().is_none()
|
||||
|
@ -1671,6 +1676,7 @@ impl CliOptions {
|
|||
"byonm",
|
||||
"bare-node-builtins",
|
||||
"fmt-component",
|
||||
"fmt-sql",
|
||||
])
|
||||
.collect();
|
||||
|
||||
|
@ -1908,6 +1914,10 @@ pub fn resolve_no_prompt(flags: &PermissionFlags) -> bool {
|
|||
flags.no_prompt || has_flag_env_var("DENO_NO_PROMPT")
|
||||
}
|
||||
|
||||
pub fn has_trace_permissions_enabled() -> bool {
|
||||
has_flag_env_var("DENO_TRACE_PERMISSIONS")
|
||||
}
|
||||
|
||||
pub fn has_flag_env_var(name: &str) -> bool {
|
||||
let value = env::var(name);
|
||||
matches!(value.as_ref().map(|s| s.as_str()), Ok("1"))
|
||||
|
@ -1939,19 +1949,22 @@ pub fn config_to_deno_graph_workspace_member(
|
|||
})
|
||||
}
|
||||
|
||||
fn load_env_variables_from_env_file(filename: Option<&String>) {
|
||||
let Some(env_file_name) = filename else {
|
||||
fn load_env_variables_from_env_file(filename: Option<&Vec<String>>) {
|
||||
let Some(env_file_names) = filename else {
|
||||
return;
|
||||
};
|
||||
match from_filename(env_file_name) {
|
||||
Ok(_) => (),
|
||||
Err(error) => {
|
||||
match error {
|
||||
|
||||
for env_file_name in env_file_names.iter().rev() {
|
||||
match from_filename(env_file_name) {
|
||||
Ok(_) => (),
|
||||
Err(error) => {
|
||||
match error {
|
||||
dotenvy::Error::LineParse(line, index)=> log::info!("{} Parsing failed within the specified environment file: {} at index: {} of the value: {}",colors::yellow("Warning"), env_file_name, index, line),
|
||||
dotenvy::Error::Io(_)=> log::info!("{} The `--env-file` flag was used, but the environment file specified '{}' was not found.",colors::yellow("Warning"),env_file_name),
|
||||
dotenvy::Error::EnvVar(_)=> log::info!("{} One or more of the environment variables isn't present or not unicode within the specified environment file: {}",colors::yellow("Warning"),env_file_name),
|
||||
_ => log::info!("{} Unknown failure occurred with the specified environment file: {}", colors::yellow("Warning"), env_file_name),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
18
cli/build.rs
18
cli/build.rs
|
@ -400,6 +400,24 @@ fn main() {
|
|||
println!("cargo:rustc-env=TARGET={}", env::var("TARGET").unwrap());
|
||||
println!("cargo:rustc-env=PROFILE={}", env::var("PROFILE").unwrap());
|
||||
|
||||
if cfg!(windows) {
|
||||
// these dls load slowly, so delay loading them
|
||||
let dlls = [
|
||||
// webgpu
|
||||
"d3dcompiler_47",
|
||||
"OPENGL32",
|
||||
// network related functions
|
||||
"iphlpapi",
|
||||
];
|
||||
for dll in dlls {
|
||||
println!("cargo:rustc-link-arg-bin=deno=/delayload:{dll}.dll");
|
||||
println!("cargo:rustc-link-arg-bin=denort=/delayload:{dll}.dll");
|
||||
}
|
||||
// enable delay loading
|
||||
println!("cargo:rustc-link-arg-bin=deno=delayimp.lib");
|
||||
println!("cargo:rustc-link-arg-bin=denort=delayimp.lib");
|
||||
}
|
||||
|
||||
let c = PathBuf::from(env::var_os("CARGO_MANIFEST_DIR").unwrap());
|
||||
let o = PathBuf::from(env::var_os("OUT_DIR").unwrap());
|
||||
|
||||
|
|
10
cli/cache/code_cache.rs
vendored
10
cli/cache/code_cache.rs
vendored
|
@ -1,10 +1,14 @@
|
|||
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use deno_ast::ModuleSpecifier;
|
||||
use deno_core::error::AnyError;
|
||||
use deno_runtime::code_cache;
|
||||
use deno_runtime::deno_webstorage::rusqlite::params;
|
||||
|
||||
use crate::worker::CliCodeCache;
|
||||
|
||||
use super::cache_db::CacheDB;
|
||||
use super::cache_db::CacheDBConfiguration;
|
||||
use super::cache_db::CacheDBHash;
|
||||
|
@ -82,6 +86,12 @@ impl CodeCache {
|
|||
}
|
||||
}
|
||||
|
||||
impl CliCodeCache for CodeCache {
|
||||
fn as_code_cache(self: Arc<Self>) -> Arc<dyn code_cache::CodeCache> {
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
impl code_cache::CodeCache for CodeCache {
|
||||
fn get_sync(
|
||||
&self,
|
||||
|
|
|
@ -38,6 +38,7 @@ fn get_module_graph_error_class(err: &ModuleGraphError) -> &'static str {
|
|||
ModuleGraphError::ModuleError(err) => match err {
|
||||
ModuleError::InvalidTypeAssertion { .. } => "SyntaxError",
|
||||
ModuleError::ParseErr(_, diagnostic) => get_diagnostic_class(diagnostic),
|
||||
ModuleError::WasmParseErr(..) => "SyntaxError",
|
||||
ModuleError::UnsupportedMediaType { .. }
|
||||
| ModuleError::UnsupportedImportAttributeType { .. } => "TypeError",
|
||||
ModuleError::Missing(_, _) | ModuleError::MissingDynamic(_, _) => {
|
||||
|
|
|
@ -884,6 +884,7 @@ impl CliFactory {
|
|||
let cli_options = self.cli_options()?;
|
||||
Ok(DenoCompileBinaryWriter::new(
|
||||
self.cjs_tracker()?,
|
||||
self.cli_options()?,
|
||||
self.deno_dir()?,
|
||||
self.emitter()?,
|
||||
self.file_fetcher()?,
|
||||
|
|
|
@ -177,6 +177,52 @@ function isCanvasLike(obj) {
|
|||
return obj !== null && typeof obj === "object" && "toDataURL" in obj;
|
||||
}
|
||||
|
||||
function isJpg(obj) {
|
||||
// Check if obj is a Uint8Array
|
||||
if (!(obj instanceof Uint8Array)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// JPG files start with the magic bytes FF D8
|
||||
if (obj.length < 2 || obj[0] !== 0xFF || obj[1] !== 0xD8) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// JPG files end with the magic bytes FF D9
|
||||
if (
|
||||
obj.length < 2 || obj[obj.length - 2] !== 0xFF ||
|
||||
obj[obj.length - 1] !== 0xD9
|
||||
) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
function isPng(obj) {
|
||||
// Check if obj is a Uint8Array
|
||||
if (!(obj instanceof Uint8Array)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// PNG files start with a specific 8-byte signature
|
||||
const pngSignature = [137, 80, 78, 71, 13, 10, 26, 10];
|
||||
|
||||
// Check if the array is at least as long as the signature
|
||||
if (obj.length < pngSignature.length) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Check each byte of the signature
|
||||
for (let i = 0; i < pngSignature.length; i++) {
|
||||
if (obj[i] !== pngSignature[i]) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/** Possible HTML and SVG Elements */
|
||||
function isSVGElementLike(obj) {
|
||||
return obj !== null && typeof obj === "object" && "outerHTML" in obj &&
|
||||
|
@ -233,6 +279,16 @@ async function format(obj) {
|
|||
if (isDataFrameLike(obj)) {
|
||||
return extractDataFrame(obj);
|
||||
}
|
||||
if (isJpg(obj)) {
|
||||
return {
|
||||
"image/jpeg": core.ops.op_base64_encode(obj),
|
||||
};
|
||||
}
|
||||
if (isPng(obj)) {
|
||||
return {
|
||||
"image/png": core.ops.op_base64_encode(obj),
|
||||
};
|
||||
}
|
||||
if (isSVGElementLike(obj)) {
|
||||
return {
|
||||
"image/svg+xml": obj.outerHTML,
|
||||
|
@ -314,6 +370,28 @@ const html = createTaggedTemplateDisplayable("text/html");
|
|||
*/
|
||||
const svg = createTaggedTemplateDisplayable("image/svg+xml");
|
||||
|
||||
function image(obj) {
|
||||
if (typeof obj === "string") {
|
||||
try {
|
||||
obj = Deno.readFileSync(obj);
|
||||
} catch {
|
||||
// pass
|
||||
}
|
||||
}
|
||||
|
||||
if (isJpg(obj)) {
|
||||
return makeDisplayable({ "image/jpeg": core.ops.op_base64_encode(obj) });
|
||||
}
|
||||
|
||||
if (isPng(obj)) {
|
||||
return makeDisplayable({ "image/png": core.ops.op_base64_encode(obj) });
|
||||
}
|
||||
|
||||
throw new TypeError(
|
||||
"Object is not a valid image or a path to an image. `Deno.jupyter.image` supports displaying JPG or PNG images.",
|
||||
);
|
||||
}
|
||||
|
||||
function isMediaBundle(obj) {
|
||||
if (obj == null || typeof obj !== "object" || Array.isArray(obj)) {
|
||||
return false;
|
||||
|
@ -465,6 +543,7 @@ function enableJupyter() {
|
|||
md,
|
||||
html,
|
||||
svg,
|
||||
image,
|
||||
$display,
|
||||
};
|
||||
}
|
||||
|
|
|
@ -883,8 +883,13 @@ impl FileSystemDocuments {
|
|||
let doc = if specifier.scheme() == "file" {
|
||||
let path = url_to_file_path(specifier).ok()?;
|
||||
let bytes = fs::read(path).ok()?;
|
||||
let content =
|
||||
deno_graph::source::decode_owned_source(specifier, bytes, None).ok()?;
|
||||
let content = bytes_to_content(
|
||||
specifier,
|
||||
MediaType::from_specifier(specifier),
|
||||
bytes,
|
||||
None,
|
||||
)
|
||||
.ok()?;
|
||||
Document::new(
|
||||
specifier.clone(),
|
||||
content.into(),
|
||||
|
@ -923,19 +928,24 @@ impl FileSystemDocuments {
|
|||
specifier,
|
||||
Some(&cached_file.metadata.headers),
|
||||
);
|
||||
let content = deno_graph::source::decode_owned_source(
|
||||
let media_type = resolve_media_type(
|
||||
specifier,
|
||||
Some(&cached_file.metadata.headers),
|
||||
None,
|
||||
);
|
||||
let content = bytes_to_content(
|
||||
specifier,
|
||||
media_type,
|
||||
cached_file.content,
|
||||
maybe_charset,
|
||||
)
|
||||
.ok()?;
|
||||
let maybe_headers = Some(cached_file.metadata.headers);
|
||||
Document::new(
|
||||
specifier.clone(),
|
||||
content.into(),
|
||||
None,
|
||||
None,
|
||||
maybe_headers,
|
||||
Some(cached_file.metadata.headers),
|
||||
is_cjs_resolver,
|
||||
resolver.clone(),
|
||||
config.clone(),
|
||||
|
@ -1706,6 +1716,24 @@ fn analyze_module(
|
|||
}
|
||||
}
|
||||
|
||||
fn bytes_to_content(
|
||||
specifier: &ModuleSpecifier,
|
||||
media_type: MediaType,
|
||||
bytes: Vec<u8>,
|
||||
maybe_charset: Option<&str>,
|
||||
) -> Result<String, AnyError> {
|
||||
if media_type == MediaType::Wasm {
|
||||
// we use the dts representation for Wasm modules
|
||||
Ok(deno_graph::source::wasm::wasm_module_to_dts(&bytes)?)
|
||||
} else {
|
||||
Ok(deno_graph::source::decode_owned_source(
|
||||
specifier,
|
||||
bytes,
|
||||
maybe_charset,
|
||||
)?)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
|
|
@ -1396,13 +1396,18 @@ impl Inner {
|
|||
.fmt_config_for_specifier(&specifier)
|
||||
.options
|
||||
.clone();
|
||||
fmt_options.use_tabs = Some(!params.options.insert_spaces);
|
||||
fmt_options.indent_width = Some(params.options.tab_size as u8);
|
||||
let config_data = self.config.tree.data_for_specifier(&specifier);
|
||||
if !config_data.is_some_and(|d| d.maybe_deno_json().is_some()) {
|
||||
fmt_options.use_tabs = Some(!params.options.insert_spaces);
|
||||
fmt_options.indent_width = Some(params.options.tab_size as u8);
|
||||
}
|
||||
let unstable_options = UnstableFmtOptions {
|
||||
component: config_data
|
||||
.map(|d| d.unstable.contains("fmt-component"))
|
||||
.unwrap_or(false),
|
||||
sql: config_data
|
||||
.map(|d| d.unstable.contains("fmt-sql"))
|
||||
.unwrap_or(false),
|
||||
};
|
||||
let document = document.clone();
|
||||
move || {
|
||||
|
@ -3632,9 +3637,8 @@ impl Inner {
|
|||
deno_json_cache: None,
|
||||
pkg_json_cache: None,
|
||||
workspace_cache: None,
|
||||
config_parse_options: deno_config::deno_json::ConfigParseOptions {
|
||||
include_task_comments: false,
|
||||
},
|
||||
config_parse_options:
|
||||
deno_config::deno_json::ConfigParseOptions::default(),
|
||||
additional_config_file_names: &[],
|
||||
discover_pkg_json: !has_flag_env_var("DENO_NO_PACKAGE_JSON"),
|
||||
maybe_vendor_override: if force_global_cache {
|
||||
|
|
|
@ -4454,11 +4454,7 @@ fn op_load<'s>(
|
|||
== NodeModuleKind::Cjs,
|
||||
})
|
||||
};
|
||||
|
||||
lsp_warn!("op_load {} {}", &specifier, maybe_load_response.is_some());
|
||||
|
||||
let serialized = serde_v8::to_v8(scope, maybe_load_response)?;
|
||||
|
||||
state.performance.measure(mark);
|
||||
Ok(serialized)
|
||||
}
|
||||
|
@ -5609,7 +5605,7 @@ mod tests {
|
|||
let (_tx, rx) = mpsc::unbounded_channel();
|
||||
let state =
|
||||
State::new(state_snapshot, Default::default(), Default::default(), rx);
|
||||
let mut op_state = OpState::new(None);
|
||||
let mut op_state = OpState::new(None, None);
|
||||
op_state.put(state);
|
||||
op_state
|
||||
}
|
||||
|
|
12
cli/main.rs
12
cli/main.rs
|
@ -144,9 +144,7 @@ async fn run_subcommand(flags: Arc<Flags>) -> Result<i32, AnyError> {
|
|||
}
|
||||
DenoSubcommand::Init(init_flags) => {
|
||||
spawn_subcommand(async {
|
||||
// make compiler happy since init_project is sync
|
||||
tokio::task::yield_now().await;
|
||||
tools::init::init_project(init_flags)
|
||||
tools::init::init_project(init_flags).await
|
||||
})
|
||||
}
|
||||
DenoSubcommand::Info(info_flags) => {
|
||||
|
@ -188,6 +186,11 @@ async fn run_subcommand(flags: Arc<Flags>) -> Result<i32, AnyError> {
|
|||
tools::lint::lint(flags, lint_flags).await
|
||||
}
|
||||
}),
|
||||
DenoSubcommand::Outdated(update_flags) => {
|
||||
spawn_subcommand(async move {
|
||||
tools::registry::outdated(flags, update_flags).await
|
||||
})
|
||||
}
|
||||
DenoSubcommand::Repl(repl_flags) => {
|
||||
spawn_subcommand(async move { tools::repl::run(flags, repl_flags).await })
|
||||
}
|
||||
|
@ -238,6 +241,9 @@ async fn run_subcommand(flags: Arc<Flags>) -> Result<i32, AnyError> {
|
|||
cwd: None,
|
||||
task: Some(run_flags.script.clone()),
|
||||
is_run: true,
|
||||
recursive: false,
|
||||
filter: None,
|
||||
eval: false,
|
||||
};
|
||||
new_flags.subcommand = DenoSubcommand::Task(task_flags.clone());
|
||||
let result = tools::task::execute_script(Arc::new(new_flags), task_flags.clone()).await;
|
||||
|
|
|
@ -66,6 +66,7 @@ use deno_graph::JsonModule;
|
|||
use deno_graph::Module;
|
||||
use deno_graph::ModuleGraph;
|
||||
use deno_graph::Resolution;
|
||||
use deno_graph::WasmModule;
|
||||
use deno_runtime::code_cache;
|
||||
use deno_runtime::deno_fs::FileSystem;
|
||||
use deno_runtime::deno_node::create_host_defined_options;
|
||||
|
@ -368,7 +369,9 @@ impl<TGraphContainer: ModuleGraphContainer>
|
|||
requested_module_type: RequestedModuleType,
|
||||
) -> Result<ModuleSource, AnyError> {
|
||||
let code_source = self.load_code_source(specifier, maybe_referrer).await?;
|
||||
let code = if self.shared.is_inspecting {
|
||||
let code = if self.shared.is_inspecting
|
||||
|| code_source.media_type == MediaType::Wasm
|
||||
{
|
||||
// we need the code with the source map in order for
|
||||
// it to work with --inspect or --inspect-brk
|
||||
code_source.code
|
||||
|
@ -378,6 +381,7 @@ impl<TGraphContainer: ModuleGraphContainer>
|
|||
};
|
||||
let module_type = match code_source.media_type {
|
||||
MediaType::Json => ModuleType::Json,
|
||||
MediaType::Wasm => ModuleType::Wasm,
|
||||
_ => ModuleType::JavaScript,
|
||||
};
|
||||
|
||||
|
@ -474,21 +478,6 @@ impl<TGraphContainer: ModuleGraphContainer>
|
|||
raw_specifier: &str,
|
||||
referrer: &ModuleSpecifier,
|
||||
) -> Result<ModuleSpecifier, AnyError> {
|
||||
if self.shared.in_npm_pkg_checker.in_npm_package(referrer) {
|
||||
return Ok(
|
||||
self
|
||||
.shared
|
||||
.node_resolver
|
||||
.resolve(
|
||||
raw_specifier,
|
||||
referrer,
|
||||
self.shared.cjs_tracker.get_referrer_kind(referrer),
|
||||
NodeResolutionMode::Execution,
|
||||
)?
|
||||
.into_url(),
|
||||
);
|
||||
}
|
||||
|
||||
let graph = self.graph_container.graph();
|
||||
let resolution = match graph.get(referrer) {
|
||||
Some(Module::Js(module)) => module
|
||||
|
@ -560,6 +549,7 @@ impl<TGraphContainer: ModuleGraphContainer>
|
|||
Some(Module::Node(module)) => module.specifier.clone(),
|
||||
Some(Module::Js(module)) => module.specifier.clone(),
|
||||
Some(Module::Json(module)) => module.specifier.clone(),
|
||||
Some(Module::Wasm(module)) => module.specifier.clone(),
|
||||
Some(Module::External(module)) => {
|
||||
node::resolve_specifier_into_node_modules(
|
||||
&module.specifier,
|
||||
|
@ -731,6 +721,13 @@ impl<TGraphContainer: ModuleGraphContainer>
|
|||
media_type: *media_type,
|
||||
})))
|
||||
}
|
||||
Some(deno_graph::Module::Wasm(WasmModule {
|
||||
source, specifier, ..
|
||||
})) => Ok(Some(CodeOrDeferredEmit::Code(ModuleCodeStringSource {
|
||||
code: ModuleSourceCode::Bytes(source.clone().into()),
|
||||
found_url: specifier.clone(),
|
||||
media_type: MediaType::Wasm,
|
||||
}))),
|
||||
Some(
|
||||
deno_graph::Module::External(_)
|
||||
| deno_graph::Module::Node(_)
|
||||
|
|
|
@ -500,7 +500,7 @@ impl ManagedCliNpmResolver {
|
|||
self.resolve_pkg_folder_from_pkg_id(&pkg_id)
|
||||
}
|
||||
|
||||
fn resolve_pkg_id_from_pkg_req(
|
||||
pub fn resolve_pkg_id_from_pkg_req(
|
||||
&self,
|
||||
req: &PackageReq,
|
||||
) -> Result<NpmPackageId, PackageReqNotFoundError> {
|
||||
|
|
|
@ -51,7 +51,7 @@ fn op_bench_get_origin(state: &mut OpState) -> String {
|
|||
#[derive(Clone)]
|
||||
struct PermissionsHolder(Uuid, PermissionsContainer);
|
||||
|
||||
#[op2]
|
||||
#[op2(stack_trace)]
|
||||
#[serde]
|
||||
pub fn op_pledge_test_permissions(
|
||||
state: &mut OpState,
|
||||
|
|
|
@ -46,7 +46,7 @@ deno_core::extension!(deno_test,
|
|||
#[derive(Clone)]
|
||||
struct PermissionsHolder(Uuid, PermissionsContainer);
|
||||
|
||||
#[op2]
|
||||
#[op2(stack_trace)]
|
||||
#[serde]
|
||||
pub fn op_pledge_test_permissions(
|
||||
state: &mut OpState,
|
||||
|
|
|
@ -431,8 +431,34 @@
|
|||
"type": "object",
|
||||
"patternProperties": {
|
||||
"^[A-Za-z][A-Za-z0-9_\\-:]*$": {
|
||||
"type": "string",
|
||||
"description": "Command to execute for this task name."
|
||||
"oneOf": [
|
||||
{
|
||||
"type": "string",
|
||||
"description": "Command to execute for this task name."
|
||||
},
|
||||
{
|
||||
"type": "object",
|
||||
"description": "A definition of a task to execute",
|
||||
"properties": {
|
||||
"description": {
|
||||
"type": "string",
|
||||
"description": "Description of a task that will be shown when running `deno task` without a task name"
|
||||
},
|
||||
"command": {
|
||||
"type": "string",
|
||||
"required": true,
|
||||
"description": "The task to execute"
|
||||
},
|
||||
"dependencies": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"description": "Tasks that should be executed before this task"
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"additionalProperties": false
|
||||
|
@ -531,6 +557,7 @@
|
|||
"ffi",
|
||||
"fs",
|
||||
"fmt-component",
|
||||
"fmt-sql",
|
||||
"http",
|
||||
"kv",
|
||||
"net",
|
||||
|
|
|
@ -64,6 +64,7 @@ use crate::args::NpmInstallDepsProvider;
|
|||
use crate::args::PermissionFlags;
|
||||
use crate::args::UnstableConfig;
|
||||
use crate::cache::DenoDir;
|
||||
use crate::cache::FastInsecureHasher;
|
||||
use crate::emit::Emitter;
|
||||
use crate::file_fetcher::FileFetcher;
|
||||
use crate::http_util::HttpClientProvider;
|
||||
|
@ -174,6 +175,7 @@ pub struct SerializedWorkspaceResolver {
|
|||
pub struct Metadata {
|
||||
pub argv: Vec<String>,
|
||||
pub seed: Option<u64>,
|
||||
pub code_cache_key: Option<u64>,
|
||||
pub permissions: PermissionFlags,
|
||||
pub location: Option<Url>,
|
||||
pub v8_flags: Vec<String>,
|
||||
|
@ -199,7 +201,8 @@ fn write_binary_bytes(
|
|||
compile_flags: &CompileFlags,
|
||||
) -> Result<(), AnyError> {
|
||||
let data_section_bytes =
|
||||
serialize_binary_data_section(metadata, npm_snapshot, remote_modules, vfs)?;
|
||||
serialize_binary_data_section(metadata, npm_snapshot, remote_modules, vfs)
|
||||
.context("Serializing binary data section.")?;
|
||||
|
||||
let target = compile_flags.resolve_target();
|
||||
if target.contains("linux") {
|
||||
|
@ -362,6 +365,7 @@ pub fn extract_standalone(
|
|||
|
||||
pub struct DenoCompileBinaryWriter<'a> {
|
||||
cjs_tracker: &'a CjsTracker,
|
||||
cli_options: &'a CliOptions,
|
||||
deno_dir: &'a DenoDir,
|
||||
emitter: &'a Emitter,
|
||||
file_fetcher: &'a FileFetcher,
|
||||
|
@ -375,6 +379,7 @@ impl<'a> DenoCompileBinaryWriter<'a> {
|
|||
#[allow(clippy::too_many_arguments)]
|
||||
pub fn new(
|
||||
cjs_tracker: &'a CjsTracker,
|
||||
cli_options: &'a CliOptions,
|
||||
deno_dir: &'a DenoDir,
|
||||
emitter: &'a Emitter,
|
||||
file_fetcher: &'a FileFetcher,
|
||||
|
@ -385,6 +390,7 @@ impl<'a> DenoCompileBinaryWriter<'a> {
|
|||
) -> Self {
|
||||
Self {
|
||||
cjs_tracker,
|
||||
cli_options,
|
||||
deno_dir,
|
||||
emitter,
|
||||
file_fetcher,
|
||||
|
@ -401,8 +407,8 @@ impl<'a> DenoCompileBinaryWriter<'a> {
|
|||
graph: &ModuleGraph,
|
||||
root_dir_url: StandaloneRelativeFileBaseUrl<'_>,
|
||||
entrypoint: &ModuleSpecifier,
|
||||
include_files: &[ModuleSpecifier],
|
||||
compile_flags: &CompileFlags,
|
||||
cli_options: &CliOptions,
|
||||
) -> Result<(), AnyError> {
|
||||
// Select base binary based on target
|
||||
let mut original_binary = self.get_base_binary(compile_flags).await?;
|
||||
|
@ -415,7 +421,8 @@ impl<'a> DenoCompileBinaryWriter<'a> {
|
|||
target,
|
||||
)
|
||||
}
|
||||
set_windows_binary_to_gui(&mut original_binary)?;
|
||||
set_windows_binary_to_gui(&mut original_binary)
|
||||
.context("Setting windows binary to GUI.")?;
|
||||
}
|
||||
if compile_flags.icon.is_some() {
|
||||
let target = compile_flags.resolve_target();
|
||||
|
@ -433,7 +440,7 @@ impl<'a> DenoCompileBinaryWriter<'a> {
|
|||
graph,
|
||||
root_dir_url,
|
||||
entrypoint,
|
||||
cli_options,
|
||||
include_files,
|
||||
compile_flags,
|
||||
)
|
||||
.await
|
||||
|
@ -476,10 +483,14 @@ impl<'a> DenoCompileBinaryWriter<'a> {
|
|||
if !binary_path.exists() {
|
||||
self
|
||||
.download_base_binary(&download_directory, &binary_path_suffix)
|
||||
.await?;
|
||||
.await
|
||||
.context("Setting up base binary.")?;
|
||||
}
|
||||
|
||||
let archive_data = std::fs::read(binary_path)?;
|
||||
let read_file = |path: &Path| -> Result<Vec<u8>, AnyError> {
|
||||
std::fs::read(path).with_context(|| format!("Reading {}", path.display()))
|
||||
};
|
||||
let archive_data = read_file(&binary_path)?;
|
||||
let temp_dir = tempfile::TempDir::new()?;
|
||||
let base_binary_path = archive::unpack_into_dir(archive::UnpackArgs {
|
||||
exe_name: "denort",
|
||||
|
@ -488,7 +499,7 @@ impl<'a> DenoCompileBinaryWriter<'a> {
|
|||
is_windows: target.contains("windows"),
|
||||
dest_path: temp_dir.path(),
|
||||
})?;
|
||||
let base_binary = std::fs::read(base_binary_path)?;
|
||||
let base_binary = read_file(&base_binary_path)?;
|
||||
drop(temp_dir); // delete the temp dir
|
||||
Ok(base_binary)
|
||||
}
|
||||
|
@ -516,15 +527,19 @@ impl<'a> DenoCompileBinaryWriter<'a> {
|
|||
let bytes = match maybe_bytes {
|
||||
Some(bytes) => bytes,
|
||||
None => {
|
||||
log::info!("Download could not be found, aborting");
|
||||
deno_runtime::exit(1);
|
||||
bail!("Download could not be found, aborting");
|
||||
}
|
||||
};
|
||||
|
||||
std::fs::create_dir_all(output_directory)?;
|
||||
let create_dir_all = |dir: &Path| {
|
||||
std::fs::create_dir_all(dir)
|
||||
.with_context(|| format!("Creating {}", dir.display()))
|
||||
};
|
||||
create_dir_all(output_directory)?;
|
||||
let output_path = output_directory.join(binary_path_suffix);
|
||||
std::fs::create_dir_all(output_path.parent().unwrap())?;
|
||||
tokio::fs::write(output_path, bytes).await?;
|
||||
create_dir_all(output_path.parent().unwrap())?;
|
||||
std::fs::write(&output_path, bytes)
|
||||
.with_context(|| format!("Writing {}", output_path.display()))?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
@ -538,76 +553,101 @@ impl<'a> DenoCompileBinaryWriter<'a> {
|
|||
graph: &ModuleGraph,
|
||||
root_dir_url: StandaloneRelativeFileBaseUrl<'_>,
|
||||
entrypoint: &ModuleSpecifier,
|
||||
cli_options: &CliOptions,
|
||||
include_files: &[ModuleSpecifier],
|
||||
compile_flags: &CompileFlags,
|
||||
) -> Result<(), AnyError> {
|
||||
let ca_data = match cli_options.ca_data() {
|
||||
let ca_data = match self.cli_options.ca_data() {
|
||||
Some(CaData::File(ca_file)) => Some(
|
||||
std::fs::read(ca_file)
|
||||
.with_context(|| format!("Reading: {ca_file}"))?,
|
||||
std::fs::read(ca_file).with_context(|| format!("Reading {ca_file}"))?,
|
||||
),
|
||||
Some(CaData::Bytes(bytes)) => Some(bytes.clone()),
|
||||
None => None,
|
||||
};
|
||||
let root_path = root_dir_url.inner().to_file_path().unwrap();
|
||||
let (maybe_npm_vfs, node_modules, npm_snapshot) = match self
|
||||
.npm_resolver
|
||||
.as_inner()
|
||||
{
|
||||
InnerCliNpmResolverRef::Managed(managed) => {
|
||||
let snapshot =
|
||||
managed.serialized_valid_snapshot_for_system(&self.npm_system_info);
|
||||
if !snapshot.as_serialized().packages.is_empty() {
|
||||
let npm_vfs_builder = self.build_npm_vfs(&root_path, cli_options)?;
|
||||
let (maybe_npm_vfs, node_modules, npm_snapshot) =
|
||||
match self.npm_resolver.as_inner() {
|
||||
InnerCliNpmResolverRef::Managed(managed) => {
|
||||
let snapshot =
|
||||
managed.serialized_valid_snapshot_for_system(&self.npm_system_info);
|
||||
if !snapshot.as_serialized().packages.is_empty() {
|
||||
let npm_vfs_builder = self
|
||||
.build_npm_vfs(&root_path)
|
||||
.context("Building npm vfs.")?;
|
||||
(
|
||||
Some(npm_vfs_builder),
|
||||
Some(NodeModules::Managed {
|
||||
node_modules_dir: self
|
||||
.npm_resolver
|
||||
.root_node_modules_path()
|
||||
.map(|path| {
|
||||
root_dir_url
|
||||
.specifier_key(
|
||||
&ModuleSpecifier::from_directory_path(path).unwrap(),
|
||||
)
|
||||
.into_owned()
|
||||
}),
|
||||
}),
|
||||
Some(snapshot),
|
||||
)
|
||||
} else {
|
||||
(None, None, None)
|
||||
}
|
||||
}
|
||||
InnerCliNpmResolverRef::Byonm(resolver) => {
|
||||
let npm_vfs_builder = self.build_npm_vfs(&root_path)?;
|
||||
(
|
||||
Some(npm_vfs_builder),
|
||||
Some(NodeModules::Managed {
|
||||
node_modules_dir: self.npm_resolver.root_node_modules_path().map(
|
||||
|path| {
|
||||
Some(NodeModules::Byonm {
|
||||
root_node_modules_dir: resolver.root_node_modules_path().map(
|
||||
|node_modules_dir| {
|
||||
root_dir_url
|
||||
.specifier_key(
|
||||
&ModuleSpecifier::from_directory_path(path).unwrap(),
|
||||
&ModuleSpecifier::from_directory_path(node_modules_dir)
|
||||
.unwrap(),
|
||||
)
|
||||
.into_owned()
|
||||
},
|
||||
),
|
||||
}),
|
||||
Some(snapshot),
|
||||
None,
|
||||
)
|
||||
} else {
|
||||
(None, None, None)
|
||||
}
|
||||
}
|
||||
InnerCliNpmResolverRef::Byonm(resolver) => {
|
||||
let npm_vfs_builder = self.build_npm_vfs(&root_path, cli_options)?;
|
||||
(
|
||||
Some(npm_vfs_builder),
|
||||
Some(NodeModules::Byonm {
|
||||
root_node_modules_dir: resolver.root_node_modules_path().map(
|
||||
|node_modules_dir| {
|
||||
root_dir_url
|
||||
.specifier_key(
|
||||
&ModuleSpecifier::from_directory_path(node_modules_dir)
|
||||
.unwrap(),
|
||||
)
|
||||
.into_owned()
|
||||
},
|
||||
),
|
||||
}),
|
||||
None,
|
||||
)
|
||||
}
|
||||
};
|
||||
};
|
||||
let mut vfs = if let Some(npm_vfs) = maybe_npm_vfs {
|
||||
npm_vfs
|
||||
} else {
|
||||
VfsBuilder::new(root_path.clone())?
|
||||
};
|
||||
for include_file in include_files {
|
||||
let path = deno_path_util::url_to_file_path(include_file)?;
|
||||
if path.is_dir() {
|
||||
// TODO(#26941): we should analyze if any of these are
|
||||
// modules in order to include their dependencies
|
||||
vfs
|
||||
.add_dir_recursive(&path)
|
||||
.with_context(|| format!("Including {}", path.display()))?;
|
||||
} else {
|
||||
vfs
|
||||
.add_file_at_path(&path)
|
||||
.with_context(|| format!("Including {}", path.display()))?;
|
||||
}
|
||||
}
|
||||
let mut remote_modules_store = RemoteModulesStoreBuilder::default();
|
||||
let mut code_cache_key_hasher = if self.cli_options.code_cache_enabled() {
|
||||
Some(FastInsecureHasher::new_deno_versioned())
|
||||
} else {
|
||||
None
|
||||
};
|
||||
for module in graph.modules() {
|
||||
if module.specifier().scheme() == "data" {
|
||||
continue; // don't store data urls as an entry as they're in the code
|
||||
}
|
||||
if let Some(hasher) = &mut code_cache_key_hasher {
|
||||
if let Some(source) = module.source() {
|
||||
hasher.write(module.specifier().as_str().as_bytes());
|
||||
hasher.write(source.as_bytes());
|
||||
}
|
||||
}
|
||||
let (maybe_source, media_type) = match module {
|
||||
deno_graph::Module::Js(m) => {
|
||||
let source = if m.media_type.is_emittable() {
|
||||
|
@ -635,6 +675,9 @@ impl<'a> DenoCompileBinaryWriter<'a> {
|
|||
deno_graph::Module::Json(m) => {
|
||||
(Some(m.source.as_bytes().to_vec()), m.media_type)
|
||||
}
|
||||
deno_graph::Module::Wasm(m) => {
|
||||
(Some(m.source.to_vec()), MediaType::Wasm)
|
||||
}
|
||||
deno_graph::Module::Npm(_)
|
||||
| deno_graph::Module::Node(_)
|
||||
| deno_graph::Module::External(_) => (None, MediaType::Unknown),
|
||||
|
@ -658,25 +701,33 @@ impl<'a> DenoCompileBinaryWriter<'a> {
|
|||
}
|
||||
remote_modules_store.add_redirects(&graph.redirects);
|
||||
|
||||
let env_vars_from_env_file = match cli_options.env_file_name() {
|
||||
Some(env_filename) => {
|
||||
log::info!("{} Environment variables from the file \"{}\" were embedded in the generated executable file", crate::colors::yellow("Warning"), env_filename);
|
||||
get_file_env_vars(env_filename.to_string())?
|
||||
let env_vars_from_env_file = match self.cli_options.env_file_name() {
|
||||
Some(env_filenames) => {
|
||||
let mut aggregated_env_vars = IndexMap::new();
|
||||
for env_filename in env_filenames.iter().rev() {
|
||||
log::info!("{} Environment variables from the file \"{}\" were embedded in the generated executable file", crate::colors::yellow("Warning"), env_filename);
|
||||
|
||||
let env_vars = get_file_env_vars(env_filename.to_string())?;
|
||||
aggregated_env_vars.extend(env_vars);
|
||||
}
|
||||
aggregated_env_vars
|
||||
}
|
||||
None => Default::default(),
|
||||
};
|
||||
|
||||
let metadata = Metadata {
|
||||
argv: compile_flags.args.clone(),
|
||||
seed: cli_options.seed(),
|
||||
location: cli_options.location_flag().clone(),
|
||||
permissions: cli_options.permission_flags().clone(),
|
||||
v8_flags: cli_options.v8_flags().clone(),
|
||||
unsafely_ignore_certificate_errors: cli_options
|
||||
seed: self.cli_options.seed(),
|
||||
code_cache_key: code_cache_key_hasher.map(|h| h.finish()),
|
||||
location: self.cli_options.location_flag().clone(),
|
||||
permissions: self.cli_options.permission_flags().clone(),
|
||||
v8_flags: self.cli_options.v8_flags().clone(),
|
||||
unsafely_ignore_certificate_errors: self
|
||||
.cli_options
|
||||
.unsafely_ignore_certificate_errors()
|
||||
.clone(),
|
||||
log_level: cli_options.log_level(),
|
||||
ca_stores: cli_options.ca_stores().clone(),
|
||||
log_level: self.cli_options.log_level(),
|
||||
ca_stores: self.cli_options.ca_stores().clone(),
|
||||
ca_data,
|
||||
env_vars_from_env_file,
|
||||
entrypoint_key: root_dir_url.specifier_key(entrypoint).into_owned(),
|
||||
|
@ -719,11 +770,11 @@ impl<'a> DenoCompileBinaryWriter<'a> {
|
|||
node_modules,
|
||||
unstable_config: UnstableConfig {
|
||||
legacy_flag_enabled: false,
|
||||
bare_node_builtins: cli_options.unstable_bare_node_builtins(),
|
||||
sloppy_imports: cli_options.unstable_sloppy_imports(),
|
||||
features: cli_options.unstable_features(),
|
||||
bare_node_builtins: self.cli_options.unstable_bare_node_builtins(),
|
||||
sloppy_imports: self.cli_options.unstable_sloppy_imports(),
|
||||
features: self.cli_options.unstable_features(),
|
||||
},
|
||||
otel_config: cli_options.otel_config(),
|
||||
otel_config: self.cli_options.otel_config(),
|
||||
};
|
||||
|
||||
write_binary_bytes(
|
||||
|
@ -735,13 +786,10 @@ impl<'a> DenoCompileBinaryWriter<'a> {
|
|||
vfs,
|
||||
compile_flags,
|
||||
)
|
||||
.context("Writing binary bytes")
|
||||
}
|
||||
|
||||
fn build_npm_vfs(
|
||||
&self,
|
||||
root_path: &Path,
|
||||
cli_options: &CliOptions,
|
||||
) -> Result<VfsBuilder, AnyError> {
|
||||
fn build_npm_vfs(&self, root_path: &Path) -> Result<VfsBuilder, AnyError> {
|
||||
fn maybe_warn_different_system(system_info: &NpmSystemInfo) {
|
||||
if system_info != &NpmSystemInfo::default() {
|
||||
log::warn!("{} The node_modules directory may be incompatible with the target system.", crate::colors::yellow("Warning"));
|
||||
|
@ -818,13 +866,18 @@ impl<'a> DenoCompileBinaryWriter<'a> {
|
|||
InnerCliNpmResolverRef::Byonm(_) => {
|
||||
maybe_warn_different_system(&self.npm_system_info);
|
||||
let mut builder = VfsBuilder::new(root_path.to_path_buf())?;
|
||||
for pkg_json in cli_options.workspace().package_jsons() {
|
||||
for pkg_json in self.cli_options.workspace().package_jsons() {
|
||||
builder.add_file_at_path(&pkg_json.path)?;
|
||||
}
|
||||
// traverse and add all the node_modules directories in the workspace
|
||||
let mut pending_dirs = VecDeque::new();
|
||||
pending_dirs.push_back(
|
||||
cli_options.workspace().root_dir().to_file_path().unwrap(),
|
||||
self
|
||||
.cli_options
|
||||
.workspace()
|
||||
.root_dir()
|
||||
.to_file_path()
|
||||
.unwrap(),
|
||||
);
|
||||
while let Some(pending_dir) = pending_dirs.pop_front() {
|
||||
let mut entries = fs::read_dir(&pending_dir)
|
||||
|
|
523
cli/standalone/code_cache.rs
Normal file
523
cli/standalone/code_cache.rs
Normal file
|
@ -0,0 +1,523 @@
|
|||
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
|
||||
|
||||
use std::collections::BTreeMap;
|
||||
use std::collections::HashMap;
|
||||
use std::io::BufReader;
|
||||
use std::io::BufWriter;
|
||||
use std::io::Read;
|
||||
use std::io::Write;
|
||||
use std::path::Path;
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
|
||||
use deno_ast::ModuleSpecifier;
|
||||
use deno_core::anyhow::bail;
|
||||
use deno_core::error::AnyError;
|
||||
use deno_core::parking_lot::Mutex;
|
||||
use deno_core::unsync::sync::AtomicFlag;
|
||||
use deno_runtime::code_cache::CodeCache;
|
||||
use deno_runtime::code_cache::CodeCacheType;
|
||||
|
||||
use crate::cache::FastInsecureHasher;
|
||||
use crate::util::path::get_atomic_file_path;
|
||||
use crate::worker::CliCodeCache;
|
||||
|
||||
enum CodeCacheStrategy {
|
||||
FirstRun(FirstRunCodeCacheStrategy),
|
||||
SubsequentRun(SubsequentRunCodeCacheStrategy),
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub struct DenoCompileCodeCacheEntry {
|
||||
pub source_hash: u64,
|
||||
pub data: Vec<u8>,
|
||||
}
|
||||
|
||||
pub struct DenoCompileCodeCache {
|
||||
strategy: CodeCacheStrategy,
|
||||
}
|
||||
|
||||
impl DenoCompileCodeCache {
|
||||
pub fn new(file_path: PathBuf, cache_key: u64) -> Self {
|
||||
// attempt to deserialize the cache data
|
||||
match deserialize(&file_path, cache_key) {
|
||||
Ok(data) => {
|
||||
log::debug!(
|
||||
"Loaded {} code cache entries from {}",
|
||||
data.len(),
|
||||
file_path.display()
|
||||
);
|
||||
Self {
|
||||
strategy: CodeCacheStrategy::SubsequentRun(
|
||||
SubsequentRunCodeCacheStrategy {
|
||||
is_finished: AtomicFlag::lowered(),
|
||||
data: Mutex::new(data),
|
||||
},
|
||||
),
|
||||
}
|
||||
}
|
||||
Err(err) => {
|
||||
log::debug!(
|
||||
"Failed to deserialize code cache from {}: {:#}",
|
||||
file_path.display(),
|
||||
err
|
||||
);
|
||||
Self {
|
||||
strategy: CodeCacheStrategy::FirstRun(FirstRunCodeCacheStrategy {
|
||||
cache_key,
|
||||
file_path,
|
||||
is_finished: AtomicFlag::lowered(),
|
||||
data: Mutex::new(FirstRunCodeCacheData {
|
||||
cache: HashMap::new(),
|
||||
add_count: 0,
|
||||
}),
|
||||
}),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl CodeCache for DenoCompileCodeCache {
|
||||
fn get_sync(
|
||||
&self,
|
||||
specifier: &ModuleSpecifier,
|
||||
code_cache_type: CodeCacheType,
|
||||
source_hash: u64,
|
||||
) -> Option<Vec<u8>> {
|
||||
match &self.strategy {
|
||||
CodeCacheStrategy::FirstRun(strategy) => {
|
||||
if !strategy.is_finished.is_raised() {
|
||||
// we keep track of how many times the cache is requested
|
||||
// then serialize the cache when we get that number of
|
||||
// "set" calls
|
||||
strategy.data.lock().add_count += 1;
|
||||
}
|
||||
None
|
||||
}
|
||||
CodeCacheStrategy::SubsequentRun(strategy) => {
|
||||
if strategy.is_finished.is_raised() {
|
||||
return None;
|
||||
}
|
||||
strategy.take_from_cache(specifier, code_cache_type, source_hash)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn set_sync(
|
||||
&self,
|
||||
specifier: ModuleSpecifier,
|
||||
code_cache_type: CodeCacheType,
|
||||
source_hash: u64,
|
||||
bytes: &[u8],
|
||||
) {
|
||||
match &self.strategy {
|
||||
CodeCacheStrategy::FirstRun(strategy) => {
|
||||
if strategy.is_finished.is_raised() {
|
||||
return;
|
||||
}
|
||||
|
||||
let data_to_serialize = {
|
||||
let mut data = strategy.data.lock();
|
||||
data.cache.insert(
|
||||
(specifier.to_string(), code_cache_type),
|
||||
DenoCompileCodeCacheEntry {
|
||||
source_hash,
|
||||
data: bytes.to_vec(),
|
||||
},
|
||||
);
|
||||
if data.add_count != 0 {
|
||||
data.add_count -= 1;
|
||||
}
|
||||
if data.add_count == 0 {
|
||||
// don't allow using the cache anymore
|
||||
strategy.is_finished.raise();
|
||||
if data.cache.is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(std::mem::take(&mut data.cache))
|
||||
}
|
||||
} else {
|
||||
None
|
||||
}
|
||||
};
|
||||
if let Some(cache_data) = &data_to_serialize {
|
||||
strategy.write_cache_data(cache_data);
|
||||
}
|
||||
}
|
||||
CodeCacheStrategy::SubsequentRun(_) => {
|
||||
// do nothing
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl CliCodeCache for DenoCompileCodeCache {
|
||||
fn enabled(&self) -> bool {
|
||||
match &self.strategy {
|
||||
CodeCacheStrategy::FirstRun(strategy) => {
|
||||
!strategy.is_finished.is_raised()
|
||||
}
|
||||
CodeCacheStrategy::SubsequentRun(strategy) => {
|
||||
!strategy.is_finished.is_raised()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn as_code_cache(self: Arc<Self>) -> Arc<dyn CodeCache> {
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
type CodeCacheKey = (String, CodeCacheType);
|
||||
|
||||
struct FirstRunCodeCacheData {
|
||||
cache: HashMap<CodeCacheKey, DenoCompileCodeCacheEntry>,
|
||||
add_count: usize,
|
||||
}
|
||||
|
||||
struct FirstRunCodeCacheStrategy {
|
||||
cache_key: u64,
|
||||
file_path: PathBuf,
|
||||
is_finished: AtomicFlag,
|
||||
data: Mutex<FirstRunCodeCacheData>,
|
||||
}
|
||||
|
||||
impl FirstRunCodeCacheStrategy {
|
||||
fn write_cache_data(
|
||||
&self,
|
||||
cache_data: &HashMap<CodeCacheKey, DenoCompileCodeCacheEntry>,
|
||||
) {
|
||||
let count = cache_data.len();
|
||||
let temp_file = get_atomic_file_path(&self.file_path);
|
||||
match serialize(&temp_file, self.cache_key, cache_data) {
|
||||
Ok(()) => {
|
||||
if let Err(err) = std::fs::rename(&temp_file, &self.file_path) {
|
||||
log::debug!("Failed to rename code cache: {}", err);
|
||||
let _ = std::fs::remove_file(&temp_file);
|
||||
} else {
|
||||
log::debug!("Serialized {} code cache entries", count);
|
||||
}
|
||||
}
|
||||
Err(err) => {
|
||||
let _ = std::fs::remove_file(&temp_file);
|
||||
log::debug!("Failed to serialize code cache: {}", err);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
struct SubsequentRunCodeCacheStrategy {
|
||||
is_finished: AtomicFlag,
|
||||
data: Mutex<HashMap<CodeCacheKey, DenoCompileCodeCacheEntry>>,
|
||||
}
|
||||
|
||||
impl SubsequentRunCodeCacheStrategy {
|
||||
fn take_from_cache(
|
||||
&self,
|
||||
specifier: &ModuleSpecifier,
|
||||
code_cache_type: CodeCacheType,
|
||||
source_hash: u64,
|
||||
) -> Option<Vec<u8>> {
|
||||
let mut data = self.data.lock();
|
||||
// todo(dsherret): how to avoid the clone here?
|
||||
let entry = data.remove(&(specifier.to_string(), code_cache_type))?;
|
||||
if entry.source_hash != source_hash {
|
||||
return None;
|
||||
}
|
||||
if data.is_empty() {
|
||||
self.is_finished.raise();
|
||||
}
|
||||
Some(entry.data)
|
||||
}
|
||||
}
|
||||
|
||||
/// File format:
|
||||
/// - <header>
|
||||
/// - <cache key>
|
||||
/// - <u32: number of entries>
|
||||
/// - <[entry length]> - u64 * number of entries
|
||||
/// - <[entry]>
|
||||
/// - <[u8]: entry data>
|
||||
/// - <String: specifier>
|
||||
/// - <u8>: code cache type
|
||||
/// - <u32: specifier length>
|
||||
/// - <u64: source hash>
|
||||
/// - <u64: entry data hash>
|
||||
fn serialize(
|
||||
file_path: &Path,
|
||||
cache_key: u64,
|
||||
cache: &HashMap<CodeCacheKey, DenoCompileCodeCacheEntry>,
|
||||
) -> Result<(), AnyError> {
|
||||
let cache_file = std::fs::OpenOptions::new()
|
||||
.create(true)
|
||||
.truncate(true)
|
||||
.write(true)
|
||||
.open(file_path)?;
|
||||
let mut writer = BufWriter::new(cache_file);
|
||||
serialize_with_writer(&mut writer, cache_key, cache)
|
||||
}
|
||||
|
||||
fn serialize_with_writer<T: Write>(
|
||||
writer: &mut BufWriter<T>,
|
||||
cache_key: u64,
|
||||
cache: &HashMap<CodeCacheKey, DenoCompileCodeCacheEntry>,
|
||||
) -> Result<(), AnyError> {
|
||||
// header
|
||||
writer.write_all(&cache_key.to_le_bytes())?;
|
||||
writer.write_all(&(cache.len() as u32).to_le_bytes())?;
|
||||
// lengths of each entry
|
||||
for ((specifier, _), entry) in cache {
|
||||
let len: u64 =
|
||||
entry.data.len() as u64 + specifier.len() as u64 + 1 + 4 + 8 + 8;
|
||||
writer.write_all(&len.to_le_bytes())?;
|
||||
}
|
||||
// entries
|
||||
for ((specifier, code_cache_type), entry) in cache {
|
||||
writer.write_all(&entry.data)?;
|
||||
writer.write_all(&[match code_cache_type {
|
||||
CodeCacheType::EsModule => 0,
|
||||
CodeCacheType::Script => 1,
|
||||
}])?;
|
||||
writer.write_all(specifier.as_bytes())?;
|
||||
writer.write_all(&(specifier.len() as u32).to_le_bytes())?;
|
||||
writer.write_all(&entry.source_hash.to_le_bytes())?;
|
||||
let hash: u64 = FastInsecureHasher::new_without_deno_version()
|
||||
.write(&entry.data)
|
||||
.finish();
|
||||
writer.write_all(&hash.to_le_bytes())?;
|
||||
}
|
||||
|
||||
writer.flush()?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn deserialize(
|
||||
file_path: &Path,
|
||||
expected_cache_key: u64,
|
||||
) -> Result<HashMap<CodeCacheKey, DenoCompileCodeCacheEntry>, AnyError> {
|
||||
let cache_file = std::fs::File::open(file_path)?;
|
||||
let mut reader = BufReader::new(cache_file);
|
||||
deserialize_with_reader(&mut reader, expected_cache_key)
|
||||
}
|
||||
|
||||
fn deserialize_with_reader<T: Read>(
|
||||
reader: &mut BufReader<T>,
|
||||
expected_cache_key: u64,
|
||||
) -> Result<HashMap<CodeCacheKey, DenoCompileCodeCacheEntry>, AnyError> {
|
||||
// it's very important to use this below so that a corrupt cache file
|
||||
// doesn't cause a memory allocation error
|
||||
fn new_vec_sized<T: Clone>(
|
||||
capacity: usize,
|
||||
default_value: T,
|
||||
) -> Result<Vec<T>, AnyError> {
|
||||
let mut vec = Vec::new();
|
||||
vec.try_reserve(capacity)?;
|
||||
vec.resize(capacity, default_value);
|
||||
Ok(vec)
|
||||
}
|
||||
|
||||
fn try_subtract(a: usize, b: usize) -> Result<usize, AnyError> {
|
||||
if a < b {
|
||||
bail!("Integer underflow");
|
||||
}
|
||||
Ok(a - b)
|
||||
}
|
||||
|
||||
let mut header_bytes = vec![0; 8 + 4];
|
||||
reader.read_exact(&mut header_bytes)?;
|
||||
let actual_cache_key = u64::from_le_bytes(header_bytes[..8].try_into()?);
|
||||
if actual_cache_key != expected_cache_key {
|
||||
// cache bust
|
||||
bail!("Cache key mismatch");
|
||||
}
|
||||
let len = u32::from_le_bytes(header_bytes[8..].try_into()?) as usize;
|
||||
// read the lengths for each entry found in the file
|
||||
let entry_len_bytes_capacity = len * 8;
|
||||
let mut entry_len_bytes = new_vec_sized(entry_len_bytes_capacity, 0)?;
|
||||
reader.read_exact(&mut entry_len_bytes)?;
|
||||
let mut lengths = Vec::new();
|
||||
lengths.try_reserve(len)?;
|
||||
for i in 0..len {
|
||||
let pos = i * 8;
|
||||
lengths.push(
|
||||
u64::from_le_bytes(entry_len_bytes[pos..pos + 8].try_into()?) as usize,
|
||||
);
|
||||
}
|
||||
|
||||
let mut map = HashMap::new();
|
||||
map.try_reserve(len)?;
|
||||
for len in lengths {
|
||||
let mut buffer = new_vec_sized(len, 0)?;
|
||||
reader.read_exact(&mut buffer)?;
|
||||
let entry_data_hash_start_pos = try_subtract(buffer.len(), 8)?;
|
||||
let expected_entry_data_hash =
|
||||
u64::from_le_bytes(buffer[entry_data_hash_start_pos..].try_into()?);
|
||||
let source_hash_start_pos = try_subtract(entry_data_hash_start_pos, 8)?;
|
||||
let source_hash = u64::from_le_bytes(
|
||||
buffer[source_hash_start_pos..entry_data_hash_start_pos].try_into()?,
|
||||
);
|
||||
let specifier_end_pos = try_subtract(source_hash_start_pos, 4)?;
|
||||
let specifier_len = u32::from_le_bytes(
|
||||
buffer[specifier_end_pos..source_hash_start_pos].try_into()?,
|
||||
) as usize;
|
||||
let specifier_start_pos = try_subtract(specifier_end_pos, specifier_len)?;
|
||||
let specifier = String::from_utf8(
|
||||
buffer[specifier_start_pos..specifier_end_pos].to_vec(),
|
||||
)?;
|
||||
let code_cache_type_pos = try_subtract(specifier_start_pos, 1)?;
|
||||
let code_cache_type = match buffer[code_cache_type_pos] {
|
||||
0 => CodeCacheType::EsModule,
|
||||
1 => CodeCacheType::Script,
|
||||
_ => bail!("Invalid code cache type"),
|
||||
};
|
||||
buffer.truncate(code_cache_type_pos);
|
||||
let actual_entry_data_hash: u64 =
|
||||
FastInsecureHasher::new_without_deno_version()
|
||||
.write(&buffer)
|
||||
.finish();
|
||||
if expected_entry_data_hash != actual_entry_data_hash {
|
||||
bail!("Hash mismatch.")
|
||||
}
|
||||
map.insert(
|
||||
(specifier, code_cache_type),
|
||||
DenoCompileCodeCacheEntry {
|
||||
source_hash,
|
||||
data: buffer,
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
Ok(map)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use test_util::TempDir;
|
||||
|
||||
use super::*;
|
||||
use std::fs::File;
|
||||
|
||||
#[test]
|
||||
fn serialize_deserialize() {
|
||||
let cache_key = 123456;
|
||||
let cache = {
|
||||
let mut cache = HashMap::new();
|
||||
cache.insert(
|
||||
("specifier1".to_string(), CodeCacheType::EsModule),
|
||||
DenoCompileCodeCacheEntry {
|
||||
source_hash: 1,
|
||||
data: vec![1, 2, 3],
|
||||
},
|
||||
);
|
||||
cache.insert(
|
||||
("specifier2".to_string(), CodeCacheType::EsModule),
|
||||
DenoCompileCodeCacheEntry {
|
||||
source_hash: 2,
|
||||
data: vec![4, 5, 6],
|
||||
},
|
||||
);
|
||||
cache.insert(
|
||||
("specifier2".to_string(), CodeCacheType::Script),
|
||||
DenoCompileCodeCacheEntry {
|
||||
source_hash: 2,
|
||||
data: vec![6, 5, 1],
|
||||
},
|
||||
);
|
||||
cache
|
||||
};
|
||||
let mut buffer = Vec::new();
|
||||
serialize_with_writer(&mut BufWriter::new(&mut buffer), cache_key, &cache)
|
||||
.unwrap();
|
||||
let deserialized =
|
||||
deserialize_with_reader(&mut BufReader::new(&buffer[..]), cache_key)
|
||||
.unwrap();
|
||||
assert_eq!(cache, deserialized);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn serialize_deserialize_empty() {
|
||||
let cache_key = 1234;
|
||||
let cache = HashMap::new();
|
||||
let mut buffer = Vec::new();
|
||||
serialize_with_writer(&mut BufWriter::new(&mut buffer), cache_key, &cache)
|
||||
.unwrap();
|
||||
let deserialized =
|
||||
deserialize_with_reader(&mut BufReader::new(&buffer[..]), cache_key)
|
||||
.unwrap();
|
||||
assert_eq!(cache, deserialized);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn serialize_deserialize_corrupt() {
|
||||
let buffer = "corrupttestingtestingtesting".as_bytes().to_vec();
|
||||
let err = deserialize_with_reader(&mut BufReader::new(&buffer[..]), 1234)
|
||||
.unwrap_err();
|
||||
assert_eq!(err.to_string(), "Cache key mismatch");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn code_cache() {
|
||||
let temp_dir = TempDir::new();
|
||||
let file_path = temp_dir.path().join("cache.bin").to_path_buf();
|
||||
let url1 = ModuleSpecifier::parse("https://deno.land/example1.js").unwrap();
|
||||
let url2 = ModuleSpecifier::parse("https://deno.land/example2.js").unwrap();
|
||||
// first run
|
||||
{
|
||||
let code_cache = DenoCompileCodeCache::new(file_path.clone(), 1234);
|
||||
assert!(code_cache
|
||||
.get_sync(&url1, CodeCacheType::EsModule, 0)
|
||||
.is_none());
|
||||
assert!(code_cache
|
||||
.get_sync(&url2, CodeCacheType::EsModule, 1)
|
||||
.is_none());
|
||||
assert!(code_cache.enabled());
|
||||
code_cache.set_sync(url1.clone(), CodeCacheType::EsModule, 0, &[1, 2, 3]);
|
||||
assert!(code_cache.enabled());
|
||||
assert!(!file_path.exists());
|
||||
code_cache.set_sync(url2.clone(), CodeCacheType::EsModule, 1, &[2, 1, 3]);
|
||||
assert!(file_path.exists()); // now the new code cache exists
|
||||
assert!(!code_cache.enabled()); // no longer enabled
|
||||
}
|
||||
// second run
|
||||
{
|
||||
let code_cache = DenoCompileCodeCache::new(file_path.clone(), 1234);
|
||||
assert!(code_cache.enabled());
|
||||
let result1 = code_cache
|
||||
.get_sync(&url1, CodeCacheType::EsModule, 0)
|
||||
.unwrap();
|
||||
assert!(code_cache.enabled());
|
||||
let result2 = code_cache
|
||||
.get_sync(&url2, CodeCacheType::EsModule, 1)
|
||||
.unwrap();
|
||||
assert!(!code_cache.enabled()); // no longer enabled
|
||||
assert_eq!(result1, vec![1, 2, 3]);
|
||||
assert_eq!(result2, vec![2, 1, 3]);
|
||||
}
|
||||
|
||||
// new cache key first run
|
||||
{
|
||||
let code_cache = DenoCompileCodeCache::new(file_path.clone(), 54321);
|
||||
assert!(code_cache
|
||||
.get_sync(&url1, CodeCacheType::EsModule, 0)
|
||||
.is_none());
|
||||
assert!(code_cache
|
||||
.get_sync(&url2, CodeCacheType::EsModule, 1)
|
||||
.is_none());
|
||||
code_cache.set_sync(url1.clone(), CodeCacheType::EsModule, 0, &[2, 2, 3]);
|
||||
code_cache.set_sync(url2.clone(), CodeCacheType::EsModule, 1, &[3, 2, 3]);
|
||||
}
|
||||
// new cache key second run
|
||||
{
|
||||
let code_cache = DenoCompileCodeCache::new(file_path.clone(), 54321);
|
||||
let result1 = code_cache
|
||||
.get_sync(&url1, CodeCacheType::EsModule, 0)
|
||||
.unwrap();
|
||||
assert_eq!(result1, vec![2, 2, 3]);
|
||||
assert!(code_cache
|
||||
.get_sync(&url2, CodeCacheType::EsModule, 5) // different hash will cause none
|
||||
.is_none());
|
||||
}
|
||||
}
|
||||
}
|
|
@ -7,6 +7,7 @@
|
|||
|
||||
use binary::StandaloneData;
|
||||
use binary::StandaloneModules;
|
||||
use code_cache::DenoCompileCodeCache;
|
||||
use deno_ast::MediaType;
|
||||
use deno_cache_dir::npm::NpmCacheDir;
|
||||
use deno_config::workspace::MappedResolution;
|
||||
|
@ -17,6 +18,7 @@ use deno_core::anyhow::Context;
|
|||
use deno_core::error::generic_error;
|
||||
use deno_core::error::type_error;
|
||||
use deno_core::error::AnyError;
|
||||
use deno_core::futures::future::LocalBoxFuture;
|
||||
use deno_core::futures::FutureExt;
|
||||
use deno_core::v8_set_flags;
|
||||
use deno_core::FastString;
|
||||
|
@ -27,6 +29,7 @@ use deno_core::ModuleSpecifier;
|
|||
use deno_core::ModuleType;
|
||||
use deno_core::RequestedModuleType;
|
||||
use deno_core::ResolutionKind;
|
||||
use deno_core::SourceCodeCacheInfo;
|
||||
use deno_npm::npm_rc::ResolvedNpmRc;
|
||||
use deno_package_json::PackageJsonDepValue;
|
||||
use deno_resolver::npm::NpmReqResolverOptions;
|
||||
|
@ -64,6 +67,7 @@ use crate::args::StorageKeyResolver;
|
|||
use crate::cache::Caches;
|
||||
use crate::cache::DenoCacheEnvFsAdapter;
|
||||
use crate::cache::DenoDirProvider;
|
||||
use crate::cache::FastInsecureHasher;
|
||||
use crate::cache::NodeAnalysisCache;
|
||||
use crate::cache::RealDenoCacheEnv;
|
||||
use crate::http_util::HttpClientProvider;
|
||||
|
@ -86,12 +90,14 @@ use crate::resolver::NpmModuleLoader;
|
|||
use crate::util::progress_bar::ProgressBar;
|
||||
use crate::util::progress_bar::ProgressBarStyle;
|
||||
use crate::util::v8::construct_v8_flags;
|
||||
use crate::worker::CliCodeCache;
|
||||
use crate::worker::CliMainWorkerFactory;
|
||||
use crate::worker::CliMainWorkerOptions;
|
||||
use crate::worker::CreateModuleLoaderResult;
|
||||
use crate::worker::ModuleLoaderFactory;
|
||||
|
||||
pub mod binary;
|
||||
mod code_cache;
|
||||
mod file_system;
|
||||
mod serialization;
|
||||
mod virtual_fs;
|
||||
|
@ -113,6 +119,35 @@ struct SharedModuleLoaderState {
|
|||
npm_req_resolver: Arc<CliNpmReqResolver>,
|
||||
npm_resolver: Arc<dyn CliNpmResolver>,
|
||||
workspace_resolver: WorkspaceResolver,
|
||||
code_cache: Option<Arc<dyn CliCodeCache>>,
|
||||
}
|
||||
|
||||
impl SharedModuleLoaderState {
|
||||
fn get_code_cache(
|
||||
&self,
|
||||
specifier: &ModuleSpecifier,
|
||||
source: &[u8],
|
||||
) -> Option<SourceCodeCacheInfo> {
|
||||
let Some(code_cache) = &self.code_cache else {
|
||||
return None;
|
||||
};
|
||||
if !code_cache.enabled() {
|
||||
return None;
|
||||
}
|
||||
// deno version is already included in the root cache key
|
||||
let hash = FastInsecureHasher::new_without_deno_version()
|
||||
.write_hashable(source)
|
||||
.finish();
|
||||
let data = code_cache.get_sync(
|
||||
specifier,
|
||||
deno_runtime::code_cache::CodeCacheType::EsModule,
|
||||
hash,
|
||||
);
|
||||
Some(SourceCodeCacheInfo {
|
||||
hash,
|
||||
data: data.map(Cow::Owned),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
|
@ -329,14 +364,19 @@ impl ModuleLoader for EmbeddedModuleLoader {
|
|||
}
|
||||
|
||||
if self.shared.node_resolver.in_npm_package(original_specifier) {
|
||||
let npm_module_loader = self.shared.npm_module_loader.clone();
|
||||
let shared = self.shared.clone();
|
||||
let original_specifier = original_specifier.clone();
|
||||
let maybe_referrer = maybe_referrer.cloned();
|
||||
return deno_core::ModuleLoadResponse::Async(
|
||||
async move {
|
||||
let code_source = npm_module_loader
|
||||
let code_source = shared
|
||||
.npm_module_loader
|
||||
.load(&original_specifier, maybe_referrer.as_ref())
|
||||
.await?;
|
||||
let code_cache_entry = shared.get_code_cache(
|
||||
&code_source.found_url,
|
||||
code_source.code.as_bytes(),
|
||||
);
|
||||
Ok(deno_core::ModuleSource::new_with_redirect(
|
||||
match code_source.media_type {
|
||||
MediaType::Json => ModuleType::Json,
|
||||
|
@ -345,7 +385,7 @@ impl ModuleLoader for EmbeddedModuleLoader {
|
|||
code_source.code,
|
||||
&original_specifier,
|
||||
&code_source.found_url,
|
||||
None,
|
||||
code_cache_entry,
|
||||
))
|
||||
}
|
||||
.boxed_local(),
|
||||
|
@ -398,25 +438,30 @@ impl ModuleLoader for EmbeddedModuleLoader {
|
|||
ModuleSourceCode::String(FastString::from_static(source))
|
||||
}
|
||||
};
|
||||
let code_cache_entry = shared
|
||||
.get_code_cache(&module_specifier, module_source.as_bytes());
|
||||
Ok(deno_core::ModuleSource::new_with_redirect(
|
||||
module_type,
|
||||
module_source,
|
||||
&original_specifier,
|
||||
&module_specifier,
|
||||
None,
|
||||
code_cache_entry,
|
||||
))
|
||||
}
|
||||
.boxed_local(),
|
||||
)
|
||||
} else {
|
||||
let module_source = module_source.into_for_v8();
|
||||
let code_cache_entry = self
|
||||
.shared
|
||||
.get_code_cache(module_specifier, module_source.as_bytes());
|
||||
deno_core::ModuleLoadResponse::Sync(Ok(
|
||||
deno_core::ModuleSource::new_with_redirect(
|
||||
module_type,
|
||||
module_source,
|
||||
original_specifier,
|
||||
module_specifier,
|
||||
None,
|
||||
code_cache_entry,
|
||||
),
|
||||
))
|
||||
}
|
||||
|
@ -429,6 +474,23 @@ impl ModuleLoader for EmbeddedModuleLoader {
|
|||
))),
|
||||
}
|
||||
}
|
||||
|
||||
fn code_cache_ready(
|
||||
&self,
|
||||
specifier: ModuleSpecifier,
|
||||
source_hash: u64,
|
||||
code_cache_data: &[u8],
|
||||
) -> LocalBoxFuture<'static, ()> {
|
||||
if let Some(code_cache) = &self.shared.code_cache {
|
||||
code_cache.set_sync(
|
||||
specifier,
|
||||
deno_runtime::code_cache::CodeCacheType::EsModule,
|
||||
source_hash,
|
||||
code_cache_data,
|
||||
);
|
||||
}
|
||||
std::future::ready(()).boxed_local()
|
||||
}
|
||||
}
|
||||
|
||||
impl NodeRequireLoader for EmbeddedModuleLoader {
|
||||
|
@ -739,6 +801,19 @@ pub async fn run(data: StandaloneData) -> Result<i32, AnyError> {
|
|||
metadata.workspace_resolver.pkg_json_resolution,
|
||||
)
|
||||
};
|
||||
let code_cache = match metadata.code_cache_key {
|
||||
Some(code_cache_key) => Some(Arc::new(DenoCompileCodeCache::new(
|
||||
root_path.with_file_name(format!(
|
||||
"{}.cache",
|
||||
root_path.file_name().unwrap().to_string_lossy()
|
||||
)),
|
||||
code_cache_key,
|
||||
)) as Arc<dyn CliCodeCache>),
|
||||
None => {
|
||||
log::debug!("Code cache disabled.");
|
||||
None
|
||||
}
|
||||
};
|
||||
let module_loader_factory = StandaloneModuleLoaderFactory {
|
||||
shared: Arc::new(SharedModuleLoaderState {
|
||||
cjs_tracker: cjs_tracker.clone(),
|
||||
|
@ -751,6 +826,7 @@ pub async fn run(data: StandaloneData) -> Result<i32, AnyError> {
|
|||
fs.clone(),
|
||||
node_code_translator,
|
||||
)),
|
||||
code_cache: code_cache.clone(),
|
||||
npm_resolver: npm_resolver.clone(),
|
||||
workspace_resolver,
|
||||
npm_req_resolver,
|
||||
|
@ -792,8 +868,7 @@ pub async fn run(data: StandaloneData) -> Result<i32, AnyError> {
|
|||
});
|
||||
let worker_factory = CliMainWorkerFactory::new(
|
||||
Arc::new(BlobStore::default()),
|
||||
// Code cache is not supported for standalone binary yet.
|
||||
None,
|
||||
code_cache,
|
||||
feature_checker,
|
||||
fs,
|
||||
None,
|
||||
|
|
|
@ -51,7 +51,8 @@ pub struct VfsBuilder {
|
|||
|
||||
impl VfsBuilder {
|
||||
pub fn new(root_path: PathBuf) -> Result<Self, AnyError> {
|
||||
let root_path = canonicalize_path(&root_path)?;
|
||||
let root_path = canonicalize_path(&root_path)
|
||||
.with_context(|| format!("Canonicalizing {}", root_path.display()))?;
|
||||
log::debug!("Building vfs with root '{}'", root_path.display());
|
||||
Ok(Self {
|
||||
root_dir: VirtualDirectory {
|
||||
|
@ -633,7 +634,7 @@ impl FileBackedVfsFile {
|
|||
}
|
||||
|
||||
fn read_to_buf(&self, buf: &mut [u8]) -> FsResult<usize> {
|
||||
let pos = {
|
||||
let read_pos = {
|
||||
let mut pos = self.pos.lock();
|
||||
let read_pos = *pos;
|
||||
// advance the position due to the read
|
||||
|
@ -642,12 +643,12 @@ impl FileBackedVfsFile {
|
|||
};
|
||||
self
|
||||
.vfs
|
||||
.read_file(&self.file, pos, buf)
|
||||
.read_file(&self.file, read_pos, buf)
|
||||
.map_err(|err| err.into())
|
||||
}
|
||||
|
||||
fn read_to_end(&self) -> FsResult<Vec<u8>> {
|
||||
let pos = {
|
||||
let read_pos = {
|
||||
let mut pos = self.pos.lock();
|
||||
let read_pos = *pos;
|
||||
// todo(dsherret): should this always set it to the end of the file?
|
||||
|
@ -657,12 +658,12 @@ impl FileBackedVfsFile {
|
|||
}
|
||||
read_pos
|
||||
};
|
||||
if pos > self.file.len {
|
||||
if read_pos > self.file.len {
|
||||
return Ok(Vec::new());
|
||||
}
|
||||
let size = (self.file.len - pos) as usize;
|
||||
let size = (self.file.len - read_pos) as usize;
|
||||
let mut buf = vec![0; size];
|
||||
self.vfs.read_file(&self.file, pos, &mut buf)?;
|
||||
self.vfs.read_file(&self.file, read_pos, &mut buf)?;
|
||||
Ok(buf)
|
||||
}
|
||||
}
|
||||
|
@ -892,8 +893,9 @@ impl FileBackedVfs {
|
|||
buf: &mut [u8],
|
||||
) -> std::io::Result<usize> {
|
||||
let read_range = self.get_read_range(file, pos, buf.len() as u64)?;
|
||||
buf.copy_from_slice(&self.vfs_data[read_range]);
|
||||
Ok(buf.len())
|
||||
let read_len = read_range.len();
|
||||
buf[..read_len].copy_from_slice(&self.vfs_data[read_range]);
|
||||
Ok(read_len)
|
||||
}
|
||||
|
||||
fn get_read_range(
|
||||
|
@ -902,15 +904,15 @@ impl FileBackedVfs {
|
|||
pos: u64,
|
||||
len: u64,
|
||||
) -> std::io::Result<Range<usize>> {
|
||||
let data = &self.vfs_data;
|
||||
let start = self.fs_root.start_file_offset + file.offset + pos;
|
||||
let end = start + len;
|
||||
if end > data.len() as u64 {
|
||||
if pos > file.len {
|
||||
return Err(std::io::Error::new(
|
||||
std::io::ErrorKind::UnexpectedEof,
|
||||
"unexpected EOF",
|
||||
));
|
||||
}
|
||||
let file_offset = self.fs_root.start_file_offset + file.offset;
|
||||
let start = file_offset + pos;
|
||||
let end = file_offset + std::cmp::min(pos + len, file.len);
|
||||
Ok(start as usize..end as usize)
|
||||
}
|
||||
|
||||
|
|
|
@ -483,20 +483,32 @@ fn resolve_execution_path_from_npx_shim(
|
|||
static SCRIPT_PATH_RE: Lazy<Regex> =
|
||||
lazy_regex::lazy_regex!(r#""\$basedir\/([^"]+)" "\$@""#);
|
||||
|
||||
if text.starts_with("#!/usr/bin/env node") {
|
||||
// launch this file itself because it's a JS file
|
||||
Some(file_path)
|
||||
} else {
|
||||
// Search for...
|
||||
// > "$basedir/../next/dist/bin/next" "$@"
|
||||
// ...which is what it will look like on Windows
|
||||
SCRIPT_PATH_RE
|
||||
.captures(text)
|
||||
.and_then(|c| c.get(1))
|
||||
.map(|relative_path| {
|
||||
file_path.parent().unwrap().join(relative_path.as_str())
|
||||
})
|
||||
let maybe_first_line = {
|
||||
let index = text.find("\n")?;
|
||||
Some(&text[0..index])
|
||||
};
|
||||
|
||||
if let Some(first_line) = maybe_first_line {
|
||||
// NOTE(bartlomieju): this is not perfect, but handle two most common scenarios
|
||||
// where Node is run without any args. If there are args then we use `NodeCommand`
|
||||
// struct.
|
||||
if first_line == "#!/usr/bin/env node"
|
||||
|| first_line == "#!/usr/bin/env -S node"
|
||||
{
|
||||
// launch this file itself because it's a JS file
|
||||
return Some(file_path);
|
||||
}
|
||||
}
|
||||
|
||||
// Search for...
|
||||
// > "$basedir/../next/dist/bin/next" "$@"
|
||||
// ...which is what it will look like on Windows
|
||||
SCRIPT_PATH_RE
|
||||
.captures(text)
|
||||
.and_then(|c| c.get(1))
|
||||
.map(|relative_path| {
|
||||
file_path.parent().unwrap().join(relative_path.as_str())
|
||||
})
|
||||
}
|
||||
|
||||
fn resolve_managed_npm_commands(
|
||||
|
@ -564,6 +576,16 @@ mod test {
|
|||
let unix_shim = r#"#!/usr/bin/env node
|
||||
"use strict";
|
||||
console.log('Hi!');
|
||||
"#;
|
||||
let path = PathBuf::from("/node_modules/.bin/example");
|
||||
assert_eq!(
|
||||
resolve_execution_path_from_npx_shim(path.clone(), unix_shim).unwrap(),
|
||||
path
|
||||
);
|
||||
// example shim on unix
|
||||
let unix_shim = r#"#!/usr/bin/env -S node
|
||||
"use strict";
|
||||
console.log('Hi!');
|
||||
"#;
|
||||
let path = PathBuf::from("/node_modules/.bin/example");
|
||||
assert_eq!(
|
||||
|
|
|
@ -486,6 +486,7 @@ pub async fn run_benchmarks_with_watch(
|
|||
),
|
||||
move |flags, watcher_communicator, changed_paths| {
|
||||
let bench_flags = bench_flags.clone();
|
||||
watcher_communicator.show_path_changed(changed_paths.clone());
|
||||
Ok(async move {
|
||||
let factory = CliFactory::from_flags_for_watcher(
|
||||
flags,
|
||||
|
|
|
@ -380,6 +380,11 @@ fn get_check_hash(
|
|||
hasher.write_str(module.specifier.as_str());
|
||||
hasher.write_str(&module.source);
|
||||
}
|
||||
Module::Wasm(module) => {
|
||||
has_file_to_type_check = true;
|
||||
hasher.write_str(module.specifier.as_str());
|
||||
hasher.write_str(&module.source_dts);
|
||||
}
|
||||
Module::External(module) => {
|
||||
hasher.write_str(module.specifier.as_str());
|
||||
}
|
||||
|
@ -437,6 +442,7 @@ fn get_tsc_roots(
|
|||
| MediaType::SourceMap
|
||||
| MediaType::Unknown => None,
|
||||
},
|
||||
Module::Wasm(module) => Some((module.specifier.clone(), MediaType::Dmts)),
|
||||
Module::External(_)
|
||||
| Module::Node(_)
|
||||
| Module::Npm(_)
|
||||
|
|
|
@ -7,6 +7,7 @@ use crate::factory::CliFactory;
|
|||
use crate::http_util::HttpClientProvider;
|
||||
use crate::standalone::binary::StandaloneRelativeFileBaseUrl;
|
||||
use crate::standalone::is_standalone_binary;
|
||||
use deno_ast::MediaType;
|
||||
use deno_ast::ModuleSpecifier;
|
||||
use deno_core::anyhow::bail;
|
||||
use deno_core::anyhow::Context;
|
||||
|
@ -31,15 +32,12 @@ pub async fn compile(
|
|||
let module_graph_creator = factory.module_graph_creator().await?;
|
||||
let binary_writer = factory.create_compile_binary_writer().await?;
|
||||
let http_client = factory.http_client_provider();
|
||||
let module_specifier = cli_options.resolve_main_module()?;
|
||||
let module_roots = {
|
||||
let mut vec = Vec::with_capacity(compile_flags.include.len() + 1);
|
||||
vec.push(module_specifier.clone());
|
||||
for side_module in &compile_flags.include {
|
||||
vec.push(resolve_url_or_path(side_module, cli_options.initial_cwd())?);
|
||||
}
|
||||
vec
|
||||
};
|
||||
let entrypoint = cli_options.resolve_main_module()?;
|
||||
let (module_roots, include_files) = get_module_roots_and_include_files(
|
||||
entrypoint,
|
||||
&compile_flags,
|
||||
cli_options.initial_cwd(),
|
||||
)?;
|
||||
|
||||
// this is not supported, so show a warning about it, but don't error in order
|
||||
// to allow someone to still run `deno compile` when this is in a deno.json
|
||||
|
@ -82,18 +80,22 @@ pub async fn compile(
|
|||
check_warn_tsconfig(&ts_config_for_emit);
|
||||
let root_dir_url = resolve_root_dir_from_specifiers(
|
||||
cli_options.workspace().root_dir(),
|
||||
graph.specifiers().map(|(s, _)| s).chain(
|
||||
cli_options
|
||||
.node_modules_dir_path()
|
||||
.and_then(|p| ModuleSpecifier::from_directory_path(p).ok())
|
||||
.iter(),
|
||||
),
|
||||
graph
|
||||
.specifiers()
|
||||
.map(|(s, _)| s)
|
||||
.chain(
|
||||
cli_options
|
||||
.node_modules_dir_path()
|
||||
.and_then(|p| ModuleSpecifier::from_directory_path(p).ok())
|
||||
.iter(),
|
||||
)
|
||||
.chain(include_files.iter()),
|
||||
);
|
||||
log::debug!("Binary root dir: {}", root_dir_url);
|
||||
log::info!(
|
||||
"{} {} to {}",
|
||||
colors::green("Compile"),
|
||||
module_specifier.to_string(),
|
||||
entrypoint,
|
||||
output_path.display(),
|
||||
);
|
||||
validate_output_path(&output_path)?;
|
||||
|
@ -118,9 +120,9 @@ pub async fn compile(
|
|||
file,
|
||||
&graph,
|
||||
StandaloneRelativeFileBaseUrl::from(&root_dir_url),
|
||||
module_specifier,
|
||||
entrypoint,
|
||||
&include_files,
|
||||
&compile_flags,
|
||||
cli_options,
|
||||
)
|
||||
.await
|
||||
.with_context(|| {
|
||||
|
@ -212,6 +214,48 @@ fn validate_output_path(output_path: &Path) -> Result<(), AnyError> {
|
|||
Ok(())
|
||||
}
|
||||
|
||||
fn get_module_roots_and_include_files(
|
||||
entrypoint: &ModuleSpecifier,
|
||||
compile_flags: &CompileFlags,
|
||||
initial_cwd: &Path,
|
||||
) -> Result<(Vec<ModuleSpecifier>, Vec<ModuleSpecifier>), AnyError> {
|
||||
fn is_module_graph_module(url: &ModuleSpecifier) -> bool {
|
||||
if url.scheme() != "file" {
|
||||
return true;
|
||||
}
|
||||
let media_type = MediaType::from_specifier(url);
|
||||
match media_type {
|
||||
MediaType::JavaScript
|
||||
| MediaType::Jsx
|
||||
| MediaType::Mjs
|
||||
| MediaType::Cjs
|
||||
| MediaType::TypeScript
|
||||
| MediaType::Mts
|
||||
| MediaType::Cts
|
||||
| MediaType::Dts
|
||||
| MediaType::Dmts
|
||||
| MediaType::Dcts
|
||||
| MediaType::Tsx
|
||||
| MediaType::Json
|
||||
| MediaType::Wasm => true,
|
||||
MediaType::Css | MediaType::SourceMap | MediaType::Unknown => false,
|
||||
}
|
||||
}
|
||||
|
||||
let mut module_roots = Vec::with_capacity(compile_flags.include.len() + 1);
|
||||
let mut include_files = Vec::with_capacity(compile_flags.include.len());
|
||||
module_roots.push(entrypoint.clone());
|
||||
for side_module in &compile_flags.include {
|
||||
let url = resolve_url_or_path(side_module, initial_cwd)?;
|
||||
if is_module_graph_module(&url) {
|
||||
module_roots.push(url);
|
||||
} else {
|
||||
include_files.push(url);
|
||||
}
|
||||
}
|
||||
Ok((module_roots, include_files))
|
||||
}
|
||||
|
||||
async fn resolve_compile_executable_output_path(
|
||||
http_client_provider: &HttpClientProvider,
|
||||
compile_flags: &CompileFlags,
|
||||
|
|
200
cli/tools/doc.rs
200
cli/tools/doc.rs
|
@ -21,6 +21,8 @@ use deno_core::error::AnyError;
|
|||
use deno_core::serde_json;
|
||||
use deno_doc as doc;
|
||||
use deno_doc::html::UrlResolveKind;
|
||||
use deno_doc::html::UsageComposer;
|
||||
use deno_doc::html::UsageComposerEntry;
|
||||
use deno_graph::source::NullFileSystem;
|
||||
use deno_graph::EsParser;
|
||||
use deno_graph::GraphKind;
|
||||
|
@ -35,6 +37,9 @@ use std::sync::Arc;
|
|||
|
||||
const JSON_SCHEMA_VERSION: u8 = 1;
|
||||
|
||||
const PRISM_CSS: &str = include_str!("./doc/prism.css");
|
||||
const PRISM_JS: &str = include_str!("./doc/prism.js");
|
||||
|
||||
async fn generate_doc_nodes_for_builtin_types(
|
||||
doc_flags: DocFlags,
|
||||
parser: &dyn EsParser,
|
||||
|
@ -312,10 +317,6 @@ impl deno_doc::html::HrefResolver for DocResolver {
|
|||
None
|
||||
}
|
||||
|
||||
fn resolve_usage(&self, current_resolve: UrlResolveKind) -> Option<String> {
|
||||
current_resolve.get_file().map(|file| file.path.to_string())
|
||||
}
|
||||
|
||||
fn resolve_source(&self, location: &deno_doc::Location) -> Option<String> {
|
||||
Some(location.filename.to_string())
|
||||
}
|
||||
|
@ -350,105 +351,30 @@ impl deno_doc::html::HrefResolver for DocResolver {
|
|||
}
|
||||
}
|
||||
|
||||
struct DenoDocResolver(bool);
|
||||
struct DocComposer;
|
||||
|
||||
impl deno_doc::html::HrefResolver for DenoDocResolver {
|
||||
fn resolve_path(
|
||||
impl UsageComposer for DocComposer {
|
||||
fn is_single_mode(&self) -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
fn compose(
|
||||
&self,
|
||||
current: UrlResolveKind,
|
||||
target: UrlResolveKind,
|
||||
) -> String {
|
||||
let path = deno_doc::html::href_path_resolve(current, target);
|
||||
if self.0 {
|
||||
if let Some(path) = path
|
||||
.strip_suffix("index.html")
|
||||
.or_else(|| path.strip_suffix(".html"))
|
||||
{
|
||||
return path.to_owned();
|
||||
}
|
||||
}
|
||||
|
||||
path
|
||||
}
|
||||
|
||||
fn resolve_global_symbol(&self, _symbol: &[String]) -> Option<String> {
|
||||
None
|
||||
}
|
||||
|
||||
fn resolve_import_href(
|
||||
&self,
|
||||
_symbol: &[String],
|
||||
_src: &str,
|
||||
) -> Option<String> {
|
||||
None
|
||||
}
|
||||
|
||||
fn resolve_usage(&self, _current_resolve: UrlResolveKind) -> Option<String> {
|
||||
None
|
||||
}
|
||||
|
||||
fn resolve_source(&self, _location: &deno_doc::Location) -> Option<String> {
|
||||
None
|
||||
}
|
||||
|
||||
fn resolve_external_jsdoc_module(
|
||||
&self,
|
||||
_module: &str,
|
||||
_symbol: Option<&str>,
|
||||
) -> Option<(String, String)> {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
struct NodeDocResolver(bool);
|
||||
|
||||
impl deno_doc::html::HrefResolver for NodeDocResolver {
|
||||
fn resolve_path(
|
||||
&self,
|
||||
current: UrlResolveKind,
|
||||
target: UrlResolveKind,
|
||||
) -> String {
|
||||
let path = deno_doc::html::href_path_resolve(current, target);
|
||||
if self.0 {
|
||||
if let Some(path) = path
|
||||
.strip_suffix("index.html")
|
||||
.or_else(|| path.strip_suffix(".html"))
|
||||
{
|
||||
return path.to_owned();
|
||||
}
|
||||
}
|
||||
|
||||
path
|
||||
}
|
||||
|
||||
fn resolve_global_symbol(&self, _symbol: &[String]) -> Option<String> {
|
||||
None
|
||||
}
|
||||
|
||||
fn resolve_import_href(
|
||||
&self,
|
||||
_symbol: &[String],
|
||||
_src: &str,
|
||||
) -> Option<String> {
|
||||
None
|
||||
}
|
||||
|
||||
fn resolve_usage(&self, current_resolve: UrlResolveKind) -> Option<String> {
|
||||
current_resolve: UrlResolveKind,
|
||||
usage_to_md: deno_doc::html::UsageToMd,
|
||||
) -> IndexMap<UsageComposerEntry, String> {
|
||||
current_resolve
|
||||
.get_file()
|
||||
.map(|file| format!("node:{}", file.path))
|
||||
}
|
||||
|
||||
fn resolve_source(&self, _location: &deno_doc::Location) -> Option<String> {
|
||||
None
|
||||
}
|
||||
|
||||
fn resolve_external_jsdoc_module(
|
||||
&self,
|
||||
_module: &str,
|
||||
_symbol: Option<&str>,
|
||||
) -> Option<(String, String)> {
|
||||
None
|
||||
.map(|current_file| {
|
||||
IndexMap::from([(
|
||||
UsageComposerEntry {
|
||||
name: "".to_string(),
|
||||
icon: None,
|
||||
},
|
||||
usage_to_md(current_file.path.as_str(), None),
|
||||
)])
|
||||
})
|
||||
.unwrap_or_default()
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -461,30 +387,10 @@ fn generate_docs_directory(
|
|||
let cwd = std::env::current_dir().context("Failed to get CWD")?;
|
||||
let output_dir_resolved = cwd.join(&html_options.output);
|
||||
|
||||
let internal_env = std::env::var("DENO_INTERNAL_HTML_DOCS").ok();
|
||||
|
||||
let href_resolver: Rc<dyn deno_doc::html::HrefResolver> = if internal_env
|
||||
.as_ref()
|
||||
.is_some_and(|internal_html_docs| internal_html_docs == "node")
|
||||
{
|
||||
Rc::new(NodeDocResolver(html_options.strip_trailing_html))
|
||||
} else if internal_env
|
||||
.as_ref()
|
||||
.is_some_and(|internal_html_docs| internal_html_docs == "deno")
|
||||
|| deno_ns.is_empty()
|
||||
{
|
||||
Rc::new(DenoDocResolver(html_options.strip_trailing_html))
|
||||
} else {
|
||||
Rc::new(DocResolver {
|
||||
deno_ns,
|
||||
strip_trailing_html: html_options.strip_trailing_html,
|
||||
})
|
||||
};
|
||||
|
||||
let category_docs =
|
||||
if let Some(category_docs_path) = &html_options.category_docs_path {
|
||||
let content = std::fs::read(category_docs_path)?;
|
||||
Some(deno_core::serde_json::from_slice(&content)?)
|
||||
Some(serde_json::from_slice(&content)?)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
@ -493,7 +399,7 @@ fn generate_docs_directory(
|
|||
&html_options.symbol_redirect_map_path
|
||||
{
|
||||
let content = std::fs::read(symbol_redirect_map_path)?;
|
||||
Some(deno_core::serde_json::from_slice(&content)?)
|
||||
Some(serde_json::from_slice(&content)?)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
@ -502,7 +408,7 @@ fn generate_docs_directory(
|
|||
&html_options.default_symbol_map_path
|
||||
{
|
||||
let content = std::fs::read(default_symbol_map_path)?;
|
||||
Some(deno_core::serde_json::from_slice(&content)?)
|
||||
Some(serde_json::from_slice(&content)?)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
@ -511,17 +417,59 @@ fn generate_docs_directory(
|
|||
package_name: html_options.name.clone(),
|
||||
main_entrypoint: None,
|
||||
rewrite_map,
|
||||
href_resolver,
|
||||
usage_composer: None,
|
||||
href_resolver: Rc::new(DocResolver {
|
||||
deno_ns,
|
||||
strip_trailing_html: html_options.strip_trailing_html,
|
||||
}),
|
||||
usage_composer: Rc::new(DocComposer),
|
||||
category_docs,
|
||||
disable_search: internal_env.is_some(),
|
||||
disable_search: false,
|
||||
symbol_redirect_map,
|
||||
default_symbol_map,
|
||||
markdown_renderer: deno_doc::html::comrak::create_renderer(
|
||||
None,
|
||||
Some(Box::new(|ammonia| {
|
||||
ammonia.add_allowed_classes(
|
||||
"code",
|
||||
&[
|
||||
"language-ts",
|
||||
"language-tsx",
|
||||
"language-typescript",
|
||||
"language-js",
|
||||
"language-jsx",
|
||||
"language-javascript",
|
||||
"language-bash",
|
||||
"language-shell",
|
||||
"language-md",
|
||||
"language-markdown",
|
||||
"language-rs",
|
||||
"language-rust",
|
||||
"language-html",
|
||||
"language-xml",
|
||||
"language-css",
|
||||
"language-json",
|
||||
"language-regex",
|
||||
"language-svg",
|
||||
],
|
||||
);
|
||||
})),
|
||||
None,
|
||||
),
|
||||
markdown_stripper: Rc::new(deno_doc::html::comrak::strip),
|
||||
head_inject: Some(Rc::new(|root| {
|
||||
format!(
|
||||
r#"<link href="{root}{}" rel="stylesheet" /><link href="{root}prism.css" rel="stylesheet" /><script src="{root}prism.js"></script>"#,
|
||||
deno_doc::html::comrak::COMRAK_STYLESHEET_FILENAME
|
||||
)
|
||||
})),
|
||||
};
|
||||
|
||||
let files = deno_doc::html::generate(options, doc_nodes_by_url)
|
||||
let mut files = deno_doc::html::generate(options, doc_nodes_by_url)
|
||||
.context("Failed to generate HTML documentation")?;
|
||||
|
||||
files.insert("prism.js".to_string(), PRISM_JS.to_string());
|
||||
files.insert("prism.css".to_string(), PRISM_CSS.to_string());
|
||||
|
||||
let path = &output_dir_resolved;
|
||||
let _ = std::fs::remove_dir_all(path);
|
||||
std::fs::create_dir(path)
|
||||
|
|
3
cli/tools/doc/prism.css
Normal file
3
cli/tools/doc/prism.css
Normal file
|
@ -0,0 +1,3 @@
|
|||
/* PrismJS 1.29.0
|
||||
https://prismjs.com/download.html#themes=prism&languages=markup+css+clike+javascript+bash+json+markdown+regex+rust+typescript */
|
||||
code[class*=language-],pre[class*=language-]{color:#000;background:0 0;text-shadow:0 1px #fff;font-family:Consolas,Monaco,'Andale Mono','Ubuntu Mono',monospace;font-size:1em;text-align:left;white-space:pre;word-spacing:normal;word-break:normal;word-wrap:normal;line-height:1.5;-moz-tab-size:4;-o-tab-size:4;tab-size:4;-webkit-hyphens:none;-moz-hyphens:none;-ms-hyphens:none;hyphens:none}code[class*=language-] ::-moz-selection,code[class*=language-]::-moz-selection,pre[class*=language-] ::-moz-selection,pre[class*=language-]::-moz-selection{text-shadow:none;background:#b3d4fc}code[class*=language-] ::selection,code[class*=language-]::selection,pre[class*=language-] ::selection,pre[class*=language-]::selection{text-shadow:none;background:#b3d4fc}@media print{code[class*=language-],pre[class*=language-]{text-shadow:none}}pre[class*=language-]{overflow:auto}:not(pre)>code[class*=language-],pre[class*=language-]{background:#f5f2f0}:not(pre)>code[class*=language-]{padding:.1em;border-radius:.3em;white-space:normal}.token.cdata,.token.comment,.token.doctype,.token.prolog{color:#708090}.token.punctuation{color:#999}.token.namespace{opacity:.7}.token.boolean,.token.constant,.token.deleted,.token.number,.token.property,.token.symbol,.token.tag{color:#905}.token.attr-name,.token.builtin,.token.char,.token.inserted,.token.selector,.token.string{color:#690}.language-css .token.string,.style .token.string,.token.entity,.token.operator,.token.url{color:#9a6e3a;background:hsla(0,0%,100%,.5)}.token.atrule,.token.attr-value,.token.keyword{color:#07a}.token.class-name,.token.function{color:#dd4a68}.token.important,.token.regex,.token.variable{color:#e90}.token.bold,.token.important{font-weight:700}.token.italic{font-style:italic}.token.entity{cursor:help}
|
15
cli/tools/doc/prism.js
Normal file
15
cli/tools/doc/prism.js
Normal file
File diff suppressed because one or more lines are too long
|
@ -83,6 +83,7 @@ pub async fn format(
|
|||
file_watcher::PrintConfig::new("Fmt", !watch_flags.no_clear_screen),
|
||||
move |flags, watcher_communicator, changed_paths| {
|
||||
let fmt_flags = fmt_flags.clone();
|
||||
watcher_communicator.show_path_changed(changed_paths.clone());
|
||||
Ok(async move {
|
||||
let factory = CliFactory::from_flags(flags);
|
||||
let cli_options = factory.cli_options()?;
|
||||
|
@ -227,6 +228,7 @@ fn collect_fmt_files(
|
|||
})
|
||||
.ignore_git_folder()
|
||||
.ignore_node_modules()
|
||||
.use_gitignore()
|
||||
.set_vendor_folder(cli_options.vendor_dir_path().map(ToOwned::to_owned))
|
||||
.collect_file_patterns(&deno_config::fs::RealDenoConfigFs, files)
|
||||
}
|
||||
|
@ -270,6 +272,7 @@ fn format_markdown(
|
|||
| "njk"
|
||||
| "yml"
|
||||
| "yaml"
|
||||
| "sql"
|
||||
) {
|
||||
// It's important to tell dprint proper file extension, otherwise
|
||||
// it might parse the file twice.
|
||||
|
@ -299,6 +302,13 @@ fn format_markdown(
|
|||
}
|
||||
}
|
||||
"yml" | "yaml" => format_yaml(text, fmt_options),
|
||||
"sql" => {
|
||||
if unstable_options.sql {
|
||||
format_sql(text, fmt_options)
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
let mut codeblock_config =
|
||||
get_resolved_typescript_config(fmt_options);
|
||||
|
@ -501,7 +511,52 @@ pub fn format_html(
|
|||
})
|
||||
}
|
||||
|
||||
/// Formats a single TS, TSX, JS, JSX, JSONC, JSON, MD, or IPYNB file.
|
||||
pub fn format_sql(
|
||||
file_text: &str,
|
||||
fmt_options: &FmtOptionsConfig,
|
||||
) -> Result<Option<String>, AnyError> {
|
||||
let ignore_file = file_text
|
||||
.lines()
|
||||
.take_while(|line| line.starts_with("--"))
|
||||
.any(|line| {
|
||||
line
|
||||
.strip_prefix("--")
|
||||
.unwrap()
|
||||
.trim()
|
||||
.starts_with("deno-fmt-ignore-file")
|
||||
});
|
||||
|
||||
if ignore_file {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
let mut formatted_str = sqlformat::format(
|
||||
file_text,
|
||||
&sqlformat::QueryParams::None,
|
||||
&sqlformat::FormatOptions {
|
||||
ignore_case_convert: None,
|
||||
indent: if fmt_options.use_tabs.unwrap_or_default() {
|
||||
sqlformat::Indent::Tabs
|
||||
} else {
|
||||
sqlformat::Indent::Spaces(fmt_options.indent_width.unwrap_or(2))
|
||||
},
|
||||
// leave one blank line between queries.
|
||||
lines_between_queries: 2,
|
||||
uppercase: Some(true),
|
||||
},
|
||||
);
|
||||
|
||||
// Add single new line to the end of file.
|
||||
formatted_str.push('\n');
|
||||
|
||||
Ok(if formatted_str == file_text {
|
||||
None
|
||||
} else {
|
||||
Some(formatted_str)
|
||||
})
|
||||
}
|
||||
|
||||
/// Formats a single TS, TSX, JS, JSX, JSONC, JSON, MD, IPYNB or SQL file.
|
||||
pub fn format_file(
|
||||
file_path: &Path,
|
||||
file_text: &str,
|
||||
|
@ -536,6 +591,13 @@ pub fn format_file(
|
|||
format_file(file_path, &file_text, fmt_options, unstable_options, None)
|
||||
},
|
||||
),
|
||||
"sql" => {
|
||||
if unstable_options.sql {
|
||||
format_sql(file_text, fmt_options)
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
let config = get_resolved_typescript_config(fmt_options);
|
||||
dprint_plugin_typescript::format_text(
|
||||
|
@ -1207,6 +1269,7 @@ fn is_supported_ext_fmt(path: &Path) -> bool {
|
|||
| "yml"
|
||||
| "yaml"
|
||||
| "ipynb"
|
||||
| "sql"
|
||||
)
|
||||
})
|
||||
}
|
||||
|
@ -1267,6 +1330,11 @@ mod test {
|
|||
assert!(is_supported_ext_fmt(Path::new("foo.yaml")));
|
||||
assert!(is_supported_ext_fmt(Path::new("foo.YaML")));
|
||||
assert!(is_supported_ext_fmt(Path::new("foo.ipynb")));
|
||||
assert!(is_supported_ext_fmt(Path::new("foo.sql")));
|
||||
assert!(is_supported_ext_fmt(Path::new("foo.Sql")));
|
||||
assert!(is_supported_ext_fmt(Path::new("foo.sQl")));
|
||||
assert!(is_supported_ext_fmt(Path::new("foo.sqL")));
|
||||
assert!(is_supported_ext_fmt(Path::new("foo.SQL")));
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
|
|
@ -126,6 +126,7 @@ fn print_cache_info(
|
|||
let registry_cache = dir.registries_folder_path();
|
||||
let mut origin_dir = dir.origin_data_folder_path();
|
||||
let deno_dir = dir.root_path_for_display().to_string();
|
||||
let web_cache_dir = crate::worker::get_cache_storage_dir();
|
||||
|
||||
if let Some(location) = &location {
|
||||
origin_dir =
|
||||
|
@ -143,6 +144,7 @@ fn print_cache_info(
|
|||
"typescriptCache": typescript_cache,
|
||||
"registryCache": registry_cache,
|
||||
"originStorage": origin_dir,
|
||||
"webCacheStorage": web_cache_dir,
|
||||
});
|
||||
|
||||
if location.is_some() {
|
||||
|
@ -177,6 +179,11 @@ fn print_cache_info(
|
|||
colors::bold("Origin storage:"),
|
||||
origin_dir.display()
|
||||
);
|
||||
println!(
|
||||
"{} {}",
|
||||
colors::bold("Web cache storage:"),
|
||||
web_cache_dir.display()
|
||||
);
|
||||
if location.is_some() {
|
||||
println!(
|
||||
"{} {}",
|
||||
|
@ -446,6 +453,7 @@ impl<'a> GraphDisplayContext<'a> {
|
|||
let maybe_cache_info = match root {
|
||||
Module::Js(module) => module.maybe_cache_info.as_ref(),
|
||||
Module::Json(module) => module.maybe_cache_info.as_ref(),
|
||||
Module::Wasm(module) => module.maybe_cache_info.as_ref(),
|
||||
Module::Node(_) | Module::Npm(_) | Module::External(_) => None,
|
||||
};
|
||||
if let Some(cache_info) = maybe_cache_info {
|
||||
|
@ -468,6 +476,7 @@ impl<'a> GraphDisplayContext<'a> {
|
|||
let size = match m {
|
||||
Module::Js(module) => module.size(),
|
||||
Module::Json(module) => module.size(),
|
||||
Module::Wasm(module) => module.size(),
|
||||
Module::Node(_) | Module::Npm(_) | Module::External(_) => 0,
|
||||
};
|
||||
size as f64
|
||||
|
@ -567,6 +576,7 @@ impl<'a> GraphDisplayContext<'a> {
|
|||
Specifier(_) => match module {
|
||||
Module::Js(module) => Some(module.size() as u64),
|
||||
Module::Json(module) => Some(module.size() as u64),
|
||||
Module::Wasm(module) => Some(module.size() as u64),
|
||||
Module::Node(_) | Module::Npm(_) | Module::External(_) => None,
|
||||
},
|
||||
};
|
||||
|
@ -580,8 +590,8 @@ impl<'a> GraphDisplayContext<'a> {
|
|||
Package(package) => {
|
||||
tree_node.children.extend(self.build_npm_deps(package));
|
||||
}
|
||||
Specifier(_) => {
|
||||
if let Some(module) = module.js() {
|
||||
Specifier(_) => match module {
|
||||
Module::Js(module) => {
|
||||
if let Some(types_dep) = &module.maybe_types_dependency {
|
||||
if let Some(child) =
|
||||
self.build_resolved_info(&types_dep.dependency, true)
|
||||
|
@ -593,7 +603,16 @@ impl<'a> GraphDisplayContext<'a> {
|
|||
tree_node.children.extend(self.build_dep_info(dep));
|
||||
}
|
||||
}
|
||||
}
|
||||
Module::Wasm(module) => {
|
||||
for dep in module.dependencies.values() {
|
||||
tree_node.children.extend(self.build_dep_info(dep));
|
||||
}
|
||||
}
|
||||
Module::Json(_)
|
||||
| Module::Npm(_)
|
||||
| Module::Node(_)
|
||||
| Module::External(_) => {}
|
||||
},
|
||||
}
|
||||
}
|
||||
tree_node
|
||||
|
@ -658,7 +677,7 @@ impl<'a> GraphDisplayContext<'a> {
|
|||
};
|
||||
self.build_error_msg(specifier, message.as_ref())
|
||||
}
|
||||
ModuleError::ParseErr(_, _) => {
|
||||
ModuleError::ParseErr(_, _) | ModuleError::WasmParseErr(_, _) => {
|
||||
self.build_error_msg(specifier, "(parsing error)")
|
||||
}
|
||||
ModuleError::UnsupportedImportAttributeType { .. } => {
|
||||
|
|
|
@ -1,15 +1,28 @@
|
|||
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
|
||||
|
||||
use crate::args::DenoSubcommand;
|
||||
use crate::args::Flags;
|
||||
use crate::args::InitFlags;
|
||||
use crate::args::PackagesAllowedScripts;
|
||||
use crate::args::PermissionFlags;
|
||||
use crate::args::RunFlags;
|
||||
use crate::colors;
|
||||
use color_print::cformat;
|
||||
use color_print::cstr;
|
||||
use deno_core::anyhow::Context;
|
||||
use deno_core::error::AnyError;
|
||||
use deno_core::serde_json::json;
|
||||
use deno_runtime::WorkerExecutionMode;
|
||||
use log::info;
|
||||
use std::io::IsTerminal;
|
||||
use std::io::Write;
|
||||
use std::path::Path;
|
||||
|
||||
pub fn init_project(init_flags: InitFlags) -> Result<(), AnyError> {
|
||||
pub async fn init_project(init_flags: InitFlags) -> Result<i32, AnyError> {
|
||||
if let Some(package) = &init_flags.package {
|
||||
return init_npm(package, init_flags.package_args).await;
|
||||
}
|
||||
|
||||
let cwd =
|
||||
std::env::current_dir().context("Can't read current working directory.")?;
|
||||
let dir = if let Some(dir) = &init_flags.dir {
|
||||
|
@ -235,7 +248,58 @@ Deno.test(function addTest() {
|
|||
info!(" {}", colors::gray("# Run the tests"));
|
||||
info!(" deno test");
|
||||
}
|
||||
Ok(())
|
||||
Ok(0)
|
||||
}
|
||||
|
||||
async fn init_npm(name: &str, args: Vec<String>) -> Result<i32, AnyError> {
|
||||
let script_name = format!("npm:create-{}", name);
|
||||
|
||||
fn print_manual_usage(script_name: &str, args: &[String]) -> i32 {
|
||||
log::info!("{}", cformat!("You can initialize project manually by running <u>deno run {} {}</> and applying desired permissions.", script_name, args.join(" ")));
|
||||
1
|
||||
}
|
||||
|
||||
if std::io::stdin().is_terminal() {
|
||||
log::info!(
|
||||
cstr!("⚠️ Do you fully trust <y>{}</> package? Deno will invoke code from it with all permissions. Do you want to continue? <p(245)>[y/n]</>"),
|
||||
script_name
|
||||
);
|
||||
loop {
|
||||
let _ = std::io::stdout().write(b"> ")?;
|
||||
std::io::stdout().flush()?;
|
||||
let mut answer = String::new();
|
||||
if std::io::stdin().read_line(&mut answer).is_ok() {
|
||||
let answer = answer.trim().to_ascii_lowercase();
|
||||
if answer != "y" {
|
||||
return Ok(print_manual_usage(&script_name, &args));
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
return Ok(print_manual_usage(&script_name, &args));
|
||||
}
|
||||
|
||||
let new_flags = Flags {
|
||||
permissions: PermissionFlags {
|
||||
allow_all: true,
|
||||
..Default::default()
|
||||
},
|
||||
allow_scripts: PackagesAllowedScripts::All,
|
||||
argv: args,
|
||||
subcommand: DenoSubcommand::Run(RunFlags {
|
||||
script: script_name,
|
||||
..Default::default()
|
||||
}),
|
||||
..Default::default()
|
||||
};
|
||||
crate::tools::run::run_script(
|
||||
WorkerExecutionMode::Run,
|
||||
new_flags.into(),
|
||||
None,
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
fn create_json_file(
|
||||
|
|
|
@ -3,6 +3,7 @@
|
|||
use crate::args::resolve_no_prompt;
|
||||
use crate::args::AddFlags;
|
||||
use crate::args::CaData;
|
||||
use crate::args::CacheSetting;
|
||||
use crate::args::ConfigFlag;
|
||||
use crate::args::Flags;
|
||||
use crate::args::InstallFlags;
|
||||
|
@ -13,8 +14,11 @@ use crate::args::TypeCheckMode;
|
|||
use crate::args::UninstallFlags;
|
||||
use crate::args::UninstallKind;
|
||||
use crate::factory::CliFactory;
|
||||
use crate::file_fetcher::FileFetcher;
|
||||
use crate::graph_container::ModuleGraphContainer;
|
||||
use crate::http_util::HttpClientProvider;
|
||||
use crate::jsr::JsrFetchResolver;
|
||||
use crate::npm::NpmFetchResolver;
|
||||
use crate::util::fs::canonicalize_path_maybe_not_exists;
|
||||
|
||||
use deno_core::anyhow::bail;
|
||||
|
@ -359,12 +363,54 @@ async fn install_global(
|
|||
) -> Result<(), AnyError> {
|
||||
// ensure the module is cached
|
||||
let factory = CliFactory::from_flags(flags.clone());
|
||||
|
||||
let cli_options = factory.cli_options()?;
|
||||
let http_client = factory.http_client_provider();
|
||||
let deps_http_cache = factory.global_http_cache()?;
|
||||
let mut deps_file_fetcher = FileFetcher::new(
|
||||
deps_http_cache.clone(),
|
||||
CacheSetting::ReloadAll,
|
||||
true,
|
||||
http_client.clone(),
|
||||
Default::default(),
|
||||
None,
|
||||
);
|
||||
|
||||
let npmrc = factory.cli_options().unwrap().npmrc();
|
||||
|
||||
deps_file_fetcher.set_download_log_level(log::Level::Trace);
|
||||
let deps_file_fetcher = Arc::new(deps_file_fetcher);
|
||||
let jsr_resolver = Arc::new(JsrFetchResolver::new(deps_file_fetcher.clone()));
|
||||
let npm_resolver = Arc::new(NpmFetchResolver::new(
|
||||
deps_file_fetcher.clone(),
|
||||
npmrc.clone(),
|
||||
));
|
||||
|
||||
let entry_text = install_flags_global.module_url.as_str();
|
||||
if !cli_options.initial_cwd().join(entry_text).exists() {
|
||||
// check for package requirement missing prefix
|
||||
if let Ok(Err(package_req)) =
|
||||
super::registry::AddRmPackageReq::parse(entry_text)
|
||||
{
|
||||
if jsr_resolver.req_to_nv(&package_req).await.is_some() {
|
||||
bail!(
|
||||
"{entry_text} is missing a prefix. Did you mean `{}`?",
|
||||
crate::colors::yellow(format!("deno install -g jsr:{package_req}"))
|
||||
);
|
||||
} else if npm_resolver.req_to_nv(&package_req).await.is_some() {
|
||||
bail!(
|
||||
"{entry_text} is missing a prefix. Did you mean `{}`?",
|
||||
crate::colors::yellow(format!("deno install -g npm:{package_req}"))
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
factory
|
||||
.main_module_graph_container()
|
||||
.await?
|
||||
.load_and_type_check_files(&[install_flags_global.module_url.clone()])
|
||||
.await?;
|
||||
let http_client = factory.http_client_provider();
|
||||
|
||||
// create the install shim
|
||||
create_install_shim(http_client, &flags, install_flags_global).await
|
||||
|
|
|
@ -80,6 +80,7 @@ pub async fn lint(
|
|||
file_watcher::PrintConfig::new("Lint", !watch_flags.no_clear_screen),
|
||||
move |flags, watcher_communicator, changed_paths| {
|
||||
let lint_flags = lint_flags.clone();
|
||||
watcher_communicator.show_path_changed(changed_paths.clone());
|
||||
Ok(async move {
|
||||
let factory = CliFactory::from_flags(flags);
|
||||
let cli_options = factory.cli_options()?;
|
||||
|
@ -435,6 +436,7 @@ fn collect_lint_files(
|
|||
})
|
||||
.ignore_git_folder()
|
||||
.ignore_node_modules()
|
||||
.use_gitignore()
|
||||
.set_vendor_folder(cli_options.vendor_dir_path().map(ToOwned::to_owned))
|
||||
.collect_file_patterns(&deno_config::fs::RealDenoConfigFs, files)
|
||||
}
|
||||
|
|
|
@ -175,6 +175,7 @@ struct JsonLintReporter {
|
|||
version: u8,
|
||||
diagnostics: Vec<JsonLintDiagnostic>,
|
||||
errors: Vec<LintError>,
|
||||
checked_files: Vec<String>,
|
||||
}
|
||||
|
||||
impl JsonLintReporter {
|
||||
|
@ -183,6 +184,7 @@ impl JsonLintReporter {
|
|||
version: JSON_SCHEMA_VERSION,
|
||||
diagnostics: Vec::new(),
|
||||
errors: Vec::new(),
|
||||
checked_files: Vec::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -209,6 +211,17 @@ impl LintReporter for JsonLintReporter {
|
|||
code: d.code().to_string(),
|
||||
hint: d.hint().map(|h| h.to_string()),
|
||||
});
|
||||
|
||||
let file_path = d
|
||||
.specifier
|
||||
.to_file_path()
|
||||
.unwrap()
|
||||
.to_string_lossy()
|
||||
.to_string();
|
||||
|
||||
if !self.checked_files.contains(&file_path) {
|
||||
self.checked_files.push(file_path);
|
||||
}
|
||||
}
|
||||
|
||||
fn visit_error(&mut self, file_path: &str, err: &AnyError) {
|
||||
|
@ -216,10 +229,15 @@ impl LintReporter for JsonLintReporter {
|
|||
file_path: file_path.to_string(),
|
||||
message: err.to_string(),
|
||||
});
|
||||
|
||||
if !self.checked_files.contains(&file_path.to_string()) {
|
||||
self.checked_files.push(file_path.to_string());
|
||||
}
|
||||
}
|
||||
|
||||
fn close(&mut self, _check_count: usize) {
|
||||
sort_diagnostics(&mut self.diagnostics);
|
||||
self.checked_files.sort();
|
||||
let json = serde_json::to_string_pretty(&self);
|
||||
#[allow(clippy::print_stdout)]
|
||||
{
|
||||
|
|
|
@ -12,6 +12,7 @@ use std::sync::Arc;
|
|||
use base64::prelude::BASE64_STANDARD;
|
||||
use base64::Engine;
|
||||
use deno_ast::ModuleSpecifier;
|
||||
use deno_config::deno_json::ConfigFile;
|
||||
use deno_config::workspace::JsrPackageConfig;
|
||||
use deno_config::workspace::PackageJsonDepResolution;
|
||||
use deno_config::workspace::Workspace;
|
||||
|
@ -67,8 +68,10 @@ use auth::get_auth_method;
|
|||
use auth::AuthMethod;
|
||||
pub use pm::add;
|
||||
pub use pm::cache_top_level_deps;
|
||||
pub use pm::outdated;
|
||||
pub use pm::remove;
|
||||
pub use pm::AddCommandName;
|
||||
pub use pm::AddRmPackageReq;
|
||||
use publish_order::PublishOrderGraph;
|
||||
use unfurl::SpecifierUnfurler;
|
||||
|
||||
|
@ -89,13 +92,14 @@ pub async fn publish(
|
|||
|
||||
let cli_options = cli_factory.cli_options()?;
|
||||
let directory_path = cli_options.initial_cwd();
|
||||
let publish_configs = cli_options.start_dir.jsr_packages_for_publish();
|
||||
let mut publish_configs = cli_options.start_dir.jsr_packages_for_publish();
|
||||
if publish_configs.is_empty() {
|
||||
match cli_options.start_dir.maybe_deno_json() {
|
||||
Some(deno_json) => {
|
||||
debug_assert!(!deno_json.is_package());
|
||||
error_missing_exports_field(deno_json)?;
|
||||
bail!(
|
||||
"Missing 'name', 'version' and 'exports' field in '{}'.",
|
||||
"Missing 'name' or 'exports' field in '{}'.",
|
||||
deno_json.specifier
|
||||
);
|
||||
}
|
||||
|
@ -107,6 +111,18 @@ pub async fn publish(
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(version) = &publish_flags.set_version {
|
||||
if publish_configs.len() > 1 {
|
||||
bail!("Cannot use --set-version when publishing a workspace. Change your cwd to an individual package instead.");
|
||||
}
|
||||
if let Some(publish_config) = publish_configs.get_mut(0) {
|
||||
let mut config_file = publish_config.config_file.as_ref().clone();
|
||||
config_file.json.version = Some(version.clone());
|
||||
publish_config.config_file = Arc::new(config_file);
|
||||
}
|
||||
}
|
||||
|
||||
let specifier_unfurler = Arc::new(SpecifierUnfurler::new(
|
||||
if cli_options.unstable_sloppy_imports() {
|
||||
Some(CliSloppyImportsResolver::new(SloppyImportsCachedFs::new(
|
||||
|
@ -403,43 +419,15 @@ impl PublishPreparer {
|
|||
graph: Arc<deno_graph::ModuleGraph>,
|
||||
diagnostics_collector: &PublishDiagnosticsCollector,
|
||||
) -> Result<Rc<PreparedPublishPackage>, AnyError> {
|
||||
static SUGGESTED_ENTRYPOINTS: [&str; 4] =
|
||||
["mod.ts", "mod.js", "index.ts", "index.js"];
|
||||
|
||||
let deno_json = &package.config_file;
|
||||
let config_path = deno_json.specifier.to_file_path().unwrap();
|
||||
let root_dir = config_path.parent().unwrap().to_path_buf();
|
||||
let Some(version) = deno_json.json.version.clone() else {
|
||||
bail!("{} is missing 'version' field", deno_json.specifier);
|
||||
};
|
||||
if deno_json.json.exports.is_none() {
|
||||
let mut suggested_entrypoint = None;
|
||||
|
||||
for entrypoint in SUGGESTED_ENTRYPOINTS {
|
||||
if root_dir.join(entrypoint).exists() {
|
||||
suggested_entrypoint = Some(entrypoint);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
let exports_content = format!(
|
||||
r#"{{
|
||||
"name": "{}",
|
||||
"version": "{}",
|
||||
"exports": "{}"
|
||||
}}"#,
|
||||
package.name,
|
||||
version,
|
||||
suggested_entrypoint.unwrap_or("<path_to_entrypoint>")
|
||||
);
|
||||
|
||||
bail!(
|
||||
"You did not specify an entrypoint to \"{}\" package in {}. Add `exports` mapping in the configuration file, eg:\n{}",
|
||||
package.name,
|
||||
deno_json.specifier,
|
||||
exports_content
|
||||
);
|
||||
}
|
||||
let version = deno_json.json.version.clone().ok_or_else(|| {
|
||||
deno_core::anyhow::anyhow!(
|
||||
"{} is missing 'version' field",
|
||||
deno_json.specifier
|
||||
)
|
||||
})?;
|
||||
let Some(name_no_at) = package.name.strip_prefix('@') else {
|
||||
bail!("Invalid package name, use '@<scope_name>/<package_name> format");
|
||||
};
|
||||
|
@ -1106,9 +1094,9 @@ fn collect_excluded_module_diagnostics(
|
|||
let graph_specifiers = graph
|
||||
.modules()
|
||||
.filter_map(|m| match m {
|
||||
deno_graph::Module::Js(_) | deno_graph::Module::Json(_) => {
|
||||
Some(m.specifier())
|
||||
}
|
||||
deno_graph::Module::Js(_)
|
||||
| deno_graph::Module::Json(_)
|
||||
| deno_graph::Module::Wasm(_) => Some(m.specifier()),
|
||||
deno_graph::Module::Npm(_)
|
||||
| deno_graph::Module::Node(_)
|
||||
| deno_graph::Module::External(_) => None,
|
||||
|
@ -1271,6 +1259,36 @@ fn has_license_file<'a>(
|
|||
})
|
||||
}
|
||||
|
||||
fn error_missing_exports_field(deno_json: &ConfigFile) -> Result<(), AnyError> {
|
||||
static SUGGESTED_ENTRYPOINTS: [&str; 4] =
|
||||
["mod.ts", "mod.js", "index.ts", "index.js"];
|
||||
let mut suggested_entrypoint = None;
|
||||
|
||||
for entrypoint in SUGGESTED_ENTRYPOINTS {
|
||||
if deno_json.dir_path().join(entrypoint).exists() {
|
||||
suggested_entrypoint = Some(entrypoint);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
let exports_content = format!(
|
||||
r#"{{
|
||||
"name": "{}",
|
||||
"version": "{}",
|
||||
"exports": "{}"
|
||||
}}"#,
|
||||
deno_json.json.name.as_deref().unwrap_or("@scope/name"),
|
||||
deno_json.json.name.as_deref().unwrap_or("0.0.0"),
|
||||
suggested_entrypoint.unwrap_or("<path_to_entrypoint>")
|
||||
);
|
||||
|
||||
bail!(
|
||||
"You did not specify an entrypoint in {}. Add `exports` mapping in the configuration file, eg:\n{}",
|
||||
deno_json.specifier,
|
||||
exports_content
|
||||
);
|
||||
}
|
||||
|
||||
#[allow(clippy::print_stderr)]
|
||||
fn ring_bell() {
|
||||
// ASCII code for the bell character.
|
||||
|
|
|
@ -16,6 +16,7 @@ use deno_semver::package::PackageNv;
|
|||
use deno_semver::package::PackageReq;
|
||||
use deno_semver::Version;
|
||||
use deno_semver::VersionReq;
|
||||
use deps::KeyPath;
|
||||
use jsonc_parser::cst::CstObject;
|
||||
use jsonc_parser::cst::CstObjectProp;
|
||||
use jsonc_parser::cst::CstRootNode;
|
||||
|
@ -32,10 +33,13 @@ use crate::jsr::JsrFetchResolver;
|
|||
use crate::npm::NpmFetchResolver;
|
||||
|
||||
mod cache_deps;
|
||||
pub(crate) mod deps;
|
||||
mod outdated;
|
||||
|
||||
pub use cache_deps::cache_top_level_deps;
|
||||
pub use outdated::outdated;
|
||||
|
||||
#[derive(Debug, Copy, Clone)]
|
||||
#[derive(Debug, Copy, Clone, Hash)]
|
||||
enum ConfigKind {
|
||||
DenoJson,
|
||||
PackageJson,
|
||||
|
@ -86,6 +90,28 @@ impl ConfigUpdater {
|
|||
self.cst.to_string()
|
||||
}
|
||||
|
||||
fn get_property_for_mutation(
|
||||
&mut self,
|
||||
key_path: &KeyPath,
|
||||
) -> Option<CstObjectProp> {
|
||||
let mut current_node = self.root_object.clone();
|
||||
|
||||
self.modified = true;
|
||||
|
||||
for (i, part) in key_path.parts.iter().enumerate() {
|
||||
let s = part.as_str();
|
||||
if i < key_path.parts.len().saturating_sub(1) {
|
||||
let object = current_node.object_value(s)?;
|
||||
current_node = object;
|
||||
} else {
|
||||
// last part
|
||||
return current_node.get(s);
|
||||
}
|
||||
}
|
||||
|
||||
None
|
||||
}
|
||||
|
||||
fn add(&mut self, selected: SelectedPackage, dev: bool) {
|
||||
fn insert_index(object: &CstObject, searching_name: &str) -> usize {
|
||||
object
|
||||
|
@ -679,7 +705,7 @@ enum AddRmPackageReqValue {
|
|||
}
|
||||
|
||||
#[derive(Debug, PartialEq, Eq)]
|
||||
struct AddRmPackageReq {
|
||||
pub struct AddRmPackageReq {
|
||||
alias: String,
|
||||
value: AddRmPackageReqValue,
|
||||
}
|
||||
|
@ -824,7 +850,7 @@ async fn npm_install_after_modification(
|
|||
flags: Arc<Flags>,
|
||||
// explicitly provided to prevent redownloading
|
||||
jsr_resolver: Option<Arc<crate::jsr::JsrFetchResolver>>,
|
||||
) -> Result<(), AnyError> {
|
||||
) -> Result<CliFactory, AnyError> {
|
||||
// clear the previously cached package.json from memory before reloading it
|
||||
node_resolver::PackageJsonThreadLocalCache::clear();
|
||||
|
||||
|
@ -842,7 +868,7 @@ async fn npm_install_after_modification(
|
|||
lockfile.write_if_changed()?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
Ok(cli_factory)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
|
|
|
@ -8,7 +8,7 @@ use crate::graph_container::ModuleGraphUpdatePermit;
|
|||
use deno_core::error::AnyError;
|
||||
use deno_core::futures::stream::FuturesUnordered;
|
||||
use deno_core::futures::StreamExt;
|
||||
use deno_semver::package::PackageReq;
|
||||
use deno_semver::jsr::JsrPackageReqReference;
|
||||
|
||||
pub async fn cache_top_level_deps(
|
||||
// todo(dsherret): don't pass the factory into this function. Instead use ctor deps
|
||||
|
@ -56,15 +56,20 @@ pub async fn cache_top_level_deps(
|
|||
match specifier.scheme() {
|
||||
"jsr" => {
|
||||
let specifier_str = specifier.as_str();
|
||||
let specifier_str =
|
||||
specifier_str.strip_prefix("jsr:").unwrap_or(specifier_str);
|
||||
if let Ok(req) = PackageReq::from_str(specifier_str) {
|
||||
if !seen_reqs.insert(req.clone()) {
|
||||
if let Ok(req) = JsrPackageReqReference::from_str(specifier_str) {
|
||||
if let Some(sub_path) = req.sub_path() {
|
||||
if sub_path.ends_with('/') {
|
||||
continue;
|
||||
}
|
||||
roots.push(specifier.clone());
|
||||
continue;
|
||||
}
|
||||
if !seen_reqs.insert(req.req().clone()) {
|
||||
continue;
|
||||
}
|
||||
let jsr_resolver = jsr_resolver.clone();
|
||||
info_futures.push(async move {
|
||||
if let Some(nv) = jsr_resolver.req_to_nv(&req).await {
|
||||
if let Some(nv) = jsr_resolver.req_to_nv(req.req()).await {
|
||||
if let Some(info) = jsr_resolver.package_version_info(&nv).await
|
||||
{
|
||||
return Some((specifier.clone(), info));
|
||||
|
|
964
cli/tools/registry/pm/deps.rs
Normal file
964
cli/tools/registry/pm/deps.rs
Normal file
|
@ -0,0 +1,964 @@
|
|||
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
|
||||
|
||||
use std::borrow::Cow;
|
||||
use std::collections::HashMap;
|
||||
use std::sync::atomic::AtomicBool;
|
||||
use std::sync::Arc;
|
||||
|
||||
use deno_ast::ModuleSpecifier;
|
||||
use deno_config::deno_json::ConfigFile;
|
||||
use deno_config::deno_json::ConfigFileRc;
|
||||
use deno_config::workspace::Workspace;
|
||||
use deno_config::workspace::WorkspaceDirectory;
|
||||
use deno_core::anyhow::bail;
|
||||
use deno_core::error::AnyError;
|
||||
use deno_core::futures::future::try_join;
|
||||
use deno_core::futures::stream::FuturesOrdered;
|
||||
use deno_core::futures::stream::FuturesUnordered;
|
||||
use deno_core::futures::FutureExt;
|
||||
use deno_core::futures::StreamExt;
|
||||
use deno_core::serde_json;
|
||||
use deno_graph::FillFromLockfileOptions;
|
||||
use deno_package_json::PackageJsonDepValue;
|
||||
use deno_package_json::PackageJsonDepValueParseError;
|
||||
use deno_package_json::PackageJsonRc;
|
||||
use deno_runtime::deno_permissions::PermissionsContainer;
|
||||
use deno_semver::jsr::JsrPackageReqReference;
|
||||
use deno_semver::npm::NpmPackageReqReference;
|
||||
use deno_semver::package::PackageNv;
|
||||
use deno_semver::package::PackageReq;
|
||||
use deno_semver::package::PackageReqReference;
|
||||
use deno_semver::VersionReq;
|
||||
use import_map::ImportMap;
|
||||
use import_map::ImportMapWithDiagnostics;
|
||||
use import_map::SpecifierMapEntry;
|
||||
use indexmap::IndexMap;
|
||||
use tokio::sync::Semaphore;
|
||||
|
||||
use crate::args::CliLockfile;
|
||||
use crate::graph_container::MainModuleGraphContainer;
|
||||
use crate::graph_container::ModuleGraphContainer;
|
||||
use crate::graph_container::ModuleGraphUpdatePermit;
|
||||
use crate::jsr::JsrFetchResolver;
|
||||
use crate::module_loader::ModuleLoadPreparer;
|
||||
use crate::npm::CliNpmResolver;
|
||||
use crate::npm::NpmFetchResolver;
|
||||
|
||||
use super::ConfigUpdater;
|
||||
|
||||
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
|
||||
pub enum ImportMapKind {
|
||||
Inline,
|
||||
Outline,
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub enum DepLocation {
|
||||
DenoJson(ConfigFileRc, KeyPath, ImportMapKind),
|
||||
PackageJson(PackageJsonRc, KeyPath),
|
||||
}
|
||||
|
||||
impl DepLocation {
|
||||
pub fn is_deno_json(&self) -> bool {
|
||||
matches!(self, DepLocation::DenoJson(..))
|
||||
}
|
||||
|
||||
pub fn file_path(&self) -> Cow<std::path::Path> {
|
||||
match self {
|
||||
DepLocation::DenoJson(arc, _, _) => {
|
||||
Cow::Owned(arc.specifier.to_file_path().unwrap())
|
||||
}
|
||||
DepLocation::PackageJson(arc, _) => Cow::Borrowed(arc.path.as_ref()),
|
||||
}
|
||||
}
|
||||
fn config_kind(&self) -> super::ConfigKind {
|
||||
match self {
|
||||
DepLocation::DenoJson(_, _, _) => super::ConfigKind::DenoJson,
|
||||
DepLocation::PackageJson(_, _) => super::ConfigKind::PackageJson,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
struct DebugAdapter<T>(T);
|
||||
|
||||
impl<'a> std::fmt::Debug for DebugAdapter<&'a ConfigFileRc> {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.debug_struct("ConfigFile")
|
||||
.field("specifier", &self.0.specifier)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
impl<'a> std::fmt::Debug for DebugAdapter<&'a PackageJsonRc> {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.debug_struct("PackageJson")
|
||||
.field("path", &self.0.path)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for DepLocation {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
DepLocation::DenoJson(arc, key_path, kind) => {
|
||||
let mut debug = f.debug_tuple("DenoJson");
|
||||
debug
|
||||
.field(&DebugAdapter(arc))
|
||||
.field(key_path)
|
||||
.field(kind)
|
||||
.finish()
|
||||
}
|
||||
DepLocation::PackageJson(arc, key_path) => {
|
||||
let mut debug = f.debug_tuple("PackageJson");
|
||||
debug.field(&DebugAdapter(arc)).field(key_path).finish()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
|
||||
pub enum DepKind {
|
||||
Jsr,
|
||||
Npm,
|
||||
}
|
||||
|
||||
impl DepKind {
|
||||
pub fn scheme(&self) -> &'static str {
|
||||
match self {
|
||||
DepKind::Npm => "npm",
|
||||
DepKind::Jsr => "jsr",
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub enum KeyPart {
|
||||
Imports,
|
||||
Scopes,
|
||||
Dependencies,
|
||||
DevDependencies,
|
||||
String(String),
|
||||
}
|
||||
|
||||
impl From<String> for KeyPart {
|
||||
fn from(value: String) -> Self {
|
||||
KeyPart::String(value)
|
||||
}
|
||||
}
|
||||
|
||||
/// Maps a package.json dependency section to the key part that addresses
/// it (`"dependencies"` vs `"devDependencies"`).
impl From<PackageJsonDepKind> for KeyPart {
  fn from(value: PackageJsonDepKind) -> Self {
    match value {
      PackageJsonDepKind::Normal => Self::Dependencies,
      PackageJsonDepKind::Dev => Self::DevDependencies,
    }
  }
}
|
||||
|
||||
impl KeyPart {
|
||||
pub fn as_str(&self) -> &str {
|
||||
match self {
|
||||
KeyPart::Imports => "imports",
|
||||
KeyPart::Scopes => "scopes",
|
||||
KeyPart::Dependencies => "dependencies",
|
||||
KeyPart::DevDependencies => "devDependencies",
|
||||
KeyPart::String(s) => s,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct KeyPath {
|
||||
pub parts: Vec<KeyPart>,
|
||||
}
|
||||
|
||||
impl KeyPath {
|
||||
fn from_parts(parts: impl IntoIterator<Item = KeyPart>) -> Self {
|
||||
Self {
|
||||
parts: parts.into_iter().collect(),
|
||||
}
|
||||
}
|
||||
fn last(&self) -> Option<&KeyPart> {
|
||||
self.parts.last()
|
||||
}
|
||||
fn push(&mut self, part: KeyPart) {
|
||||
self.parts.push(part)
|
||||
}
|
||||
}
|
||||
|
||||
/// A single dependency discovered in a deno.json or package.json.
#[derive(Clone, Debug)]
pub struct Dep {
  // the requested package and version constraint (e.g. `chalk@^5`)
  pub req: PackageReq,
  // which registry (npm or jsr) the request targets
  pub kind: DepKind,
  // where (which file and which key path) the dependency was declared
  pub location: DepLocation,
  #[allow(dead_code)]
  pub id: DepId,
  // alias key under which the package was mapped, when it differs from
  // the package name (e.g. `"foo": "npm:bar@1"`)
  #[allow(dead_code)]
  pub alias: Option<String>,
}
|
||||
|
||||
/// Iterates over every entry of an import map — both the top-level
/// `imports` and each scope's entries — yielding the key path that
/// addresses the entry together with the entry itself.
fn import_map_entries(
  import_map: &ImportMap,
) -> impl Iterator<Item = (KeyPath, SpecifierMapEntry<'_>)> {
  import_map
    .imports()
    .entries()
    .map(|entry| {
      (
        KeyPath::from_parts([
          KeyPart::Imports,
          KeyPart::String(entry.raw_key.into()),
        ]),
        entry,
      )
    })
    .chain(import_map.scopes().flat_map(|scope| {
      // every entry of this scope is prefixed with ["scopes", <scope key>]
      let path = KeyPath::from_parts([
        KeyPart::Scopes,
        scope.raw_key.to_string().into(),
      ]);

      // `move` so each inner iterator owns its copy of the scope prefix
      scope.imports.entries().map(move |entry| {
        let mut full_path = path.clone();
        full_path.push(KeyPart::String(entry.raw_key.to_string()));
        (full_path, entry)
      })
    }))
}
|
||||
|
||||
fn to_import_map_value_from_imports(
|
||||
deno_json: &ConfigFile,
|
||||
) -> serde_json::Value {
|
||||
let mut value = serde_json::Map::with_capacity(2);
|
||||
if let Some(imports) = &deno_json.json.imports {
|
||||
value.insert("imports".to_string(), imports.clone());
|
||||
}
|
||||
if let Some(scopes) = &deno_json.json.scopes {
|
||||
value.insert("scopes".to_string(), scopes.clone());
|
||||
}
|
||||
serde_json::Value::Object(value)
|
||||
}
|
||||
|
||||
fn deno_json_import_map(
|
||||
deno_json: &ConfigFile,
|
||||
) -> Result<Option<(ImportMapWithDiagnostics, ImportMapKind)>, AnyError> {
|
||||
let (value, kind) =
|
||||
if deno_json.json.imports.is_some() || deno_json.json.scopes.is_some() {
|
||||
(
|
||||
to_import_map_value_from_imports(deno_json),
|
||||
ImportMapKind::Inline,
|
||||
)
|
||||
} else {
|
||||
match deno_json.to_import_map_path()? {
|
||||
Some(path) => {
|
||||
let text = std::fs::read_to_string(&path)?;
|
||||
let value = serde_json::from_str(&text)?;
|
||||
(value, ImportMapKind::Outline)
|
||||
}
|
||||
None => return Ok(None),
|
||||
}
|
||||
};
|
||||
|
||||
import_map::parse_from_value(deno_json.specifier.clone(), value)
|
||||
.map_err(Into::into)
|
||||
.map(|import_map| Some((import_map, kind)))
|
||||
}
|
||||
|
||||
/// Distinguishes which package.json section a dependency was declared in.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum PackageJsonDepKind {
  // from the "dependencies" object
  Normal,
  // from the "devDependencies" object
  Dev,
}
|
||||
|
||||
/// Dependency name -> parse result of (which section it was declared in,
/// its parsed value); per-entry parse failures are preserved on the error
/// side instead of aborting the whole map.
type PackageJsonDeps = IndexMap<
  String,
  Result<
    (PackageJsonDepKind, PackageJsonDepValue),
    PackageJsonDepValueParseError,
  >,
>;
|
||||
|
||||
/// Resolve the package.json's dependencies.
// TODO(nathanwhit): Remove once we update deno_package_json with dev deps split out
fn resolve_local_package_json_deps(
  package_json: &PackageJsonRc,
) -> PackageJsonDeps {
  /// Gets the name and raw version constraint for a registry info or
  /// package.json dependency entry taking into account npm package aliases.
  fn parse_dep_entry_name_and_raw_version<'a>(
    key: &'a str,
    value: &'a str,
  ) -> (&'a str, &'a str) {
    if let Some(package_and_version) = value.strip_prefix("npm:") {
      if let Some((name, version)) = package_and_version.rsplit_once('@') {
        // if empty, then the name was scoped and there's no version
        if name.is_empty() {
          (package_and_version, "*")
        } else {
          (name, version)
        }
      } else {
        // e.g. "npm:some-package" with no version constraint at all
        (package_and_version, "*")
      }
    } else {
      // not aliased: the map key is the package name
      (key, value)
    }
  }

  /// Parses a single dependency value string into either a workspace
  /// reference or a concrete package requirement, rejecting schemes we
  /// cannot manage (file/git/http/https).
  fn parse_entry(
    key: &str,
    value: &str,
  ) -> Result<PackageJsonDepValue, PackageJsonDepValueParseError> {
    if let Some(workspace_key) = value.strip_prefix("workspace:") {
      let version_req = VersionReq::parse_from_npm(workspace_key)?;
      return Ok(PackageJsonDepValue::Workspace(version_req));
    }
    if value.starts_with("file:")
      || value.starts_with("git:")
      || value.starts_with("http:")
      || value.starts_with("https:")
    {
      return Err(PackageJsonDepValueParseError::Unsupported {
        scheme: value.split(':').next().unwrap().to_string(),
      });
    }
    let (name, version_req) = parse_dep_entry_name_and_raw_version(key, value);
    let result = VersionReq::parse_from_npm(version_req);
    match result {
      Ok(version_req) => Ok(PackageJsonDepValue::Req(PackageReq {
        name: name.to_string(),
        version_req,
      })),
      Err(err) => Err(PackageJsonDepValueParseError::VersionReq(err)),
    }
  }

  /// Parses each entry of `deps` into `result`; names already present are
  /// left untouched (first writer wins).
  fn insert_deps(
    deps: Option<&IndexMap<String, String>>,
    result: &mut PackageJsonDeps,
    kind: PackageJsonDepKind,
  ) {
    if let Some(deps) = deps {
      for (key, value) in deps {
        result.entry(key.to_string()).or_insert_with(|| {
          parse_entry(key, value).map(|entry| (kind, entry))
        });
      }
    }
  }

  let deps = package_json.dependencies.as_ref();
  let dev_deps = package_json.dev_dependencies.as_ref();
  let mut result = IndexMap::new();

  // favors the deps over dev_deps
  insert_deps(deps, &mut result, PackageJsonDepKind::Normal);
  insert_deps(dev_deps, &mut result, PackageJsonDepKind::Dev);

  result
}
|
||||
|
||||
/// Collects every npm/jsr dependency declared in a deno.json's import map
/// (inline or outline) into `deps`, skipping entries rejected by `filter`.
/// Parse failures are logged and skipped rather than aborting.
fn add_deps_from_deno_json(
  deno_json: &Arc<ConfigFile>,
  mut filter: impl DepFilter,
  deps: &mut Vec<Dep>,
) {
  let (import_map, import_map_kind) = match deno_json_import_map(deno_json) {
    Ok(Some((import_map, import_map_kind))) => (import_map, import_map_kind),
    Ok(None) => return,
    Err(e) => {
      log::warn!("failed to parse imports from {}: {e}", &deno_json.specifier);
      return;
    }
  };
  for (key_path, entry) in import_map_entries(&import_map.import_map) {
    let Some(value) = entry.value else { continue };
    // only npm: and jsr: specifiers are dependencies we can manage
    let kind = match value.scheme() {
      "npm" => DepKind::Npm,
      "jsr" => DepKind::Jsr,
      _ => continue,
    };
    let req = match parse_req_reference(value.as_str(), kind) {
      Ok(req) => req.req.clone(),
      Err(err) => {
        log::warn!("failed to parse package req \"{}\": {err}", value.as_str());
        continue;
      }
    };
    // a trailing `/` maps the package root; strip it when deriving the alias
    let alias: &str = key_path.last().unwrap().as_str().trim_end_matches('/');
    // only record an alias when the map key differs from the package name
    let alias = (alias != req.name).then(|| alias.to_string());
    if !filter.should_include(alias.as_deref(), &req, kind) {
      continue;
    }
    let id = DepId(deps.len());
    deps.push(Dep {
      location: DepLocation::DenoJson(
        deno_json.clone(),
        key_path,
        import_map_kind,
      ),
      kind,
      req,
      id,
      alias,
    });
  }
}
|
||||
|
||||
/// Collects npm dependencies declared in a package.json into `deps`,
/// skipping workspace references and entries rejected by `filter`.
/// Malformed entries are logged and skipped.
fn add_deps_from_package_json(
  package_json: &PackageJsonRc,
  mut filter: impl DepFilter,
  deps: &mut Vec<Dep>,
) {
  let package_json_deps = resolve_local_package_json_deps(package_json);
  for (k, v) in package_json_deps {
    let (package_dep_kind, v) = match v {
      Ok((k, v)) => (k, v),
      Err(e) => {
        log::warn!("bad package json dep value: {e}");
        continue;
      }
    };
    match v {
      deno_package_json::PackageJsonDepValue::Req(req) => {
        // only record an alias when the map key differs from the pkg name
        let alias = k.as_str();
        let alias = (alias != req.name).then(|| alias.to_string());
        if !filter.should_include(alias.as_deref(), &req, DepKind::Npm) {
          continue;
        }
        let id = DepId(deps.len());
        deps.push(Dep {
          id,
          kind: DepKind::Npm,
          location: DepLocation::PackageJson(
            package_json.clone(),
            KeyPath::from_parts([package_dep_kind.into(), k.into()]),
          ),
          req,
          alias,
        })
      }
      // workspace deps aren't versioned against a registry; skip them
      deno_package_json::PackageJsonDepValue::Workspace(_) => continue,
    }
  }
}
|
||||
|
||||
fn deps_from_workspace(
|
||||
workspace: &Arc<Workspace>,
|
||||
dep_filter: impl DepFilter,
|
||||
) -> Result<Vec<Dep>, AnyError> {
|
||||
let mut deps = Vec::with_capacity(256);
|
||||
for deno_json in workspace.deno_jsons() {
|
||||
add_deps_from_deno_json(deno_json, dep_filter, &mut deps);
|
||||
}
|
||||
for package_json in workspace.package_jsons() {
|
||||
add_deps_from_package_json(package_json, dep_filter, &mut deps);
|
||||
}
|
||||
|
||||
Ok(deps)
|
||||
}
|
||||
|
||||
/// Index of a [`Dep`] within a [`DepManager`]'s internal `deps` vec.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct DepId(usize);
|
||||
|
||||
/// A pending mutation to apply to config files on commit.
#[derive(Debug, Clone)]
pub enum Change {
  // set the dependency's version constraint to the given requirement
  Update(DepId, VersionReq),
}
|
||||
|
||||
pub trait DepFilter: Copy {
|
||||
fn should_include(
|
||||
&mut self,
|
||||
alias: Option<&str>,
|
||||
package_req: &PackageReq,
|
||||
dep_kind: DepKind,
|
||||
) -> bool;
|
||||
}
|
||||
|
||||
impl<T> DepFilter for T
|
||||
where
|
||||
T: FnMut(Option<&str>, &PackageReq, DepKind) -> bool + Copy,
|
||||
{
|
||||
fn should_include<'a>(
|
||||
&mut self,
|
||||
alias: Option<&'a str>,
|
||||
package_req: &'a PackageReq,
|
||||
dep_kind: DepKind,
|
||||
) -> bool {
|
||||
(*self)(alias, package_req, dep_kind)
|
||||
}
|
||||
}
|
||||
|
||||
/// Latest known versions of a dependency as reported by its registry.
#[derive(Clone, Debug)]
pub struct PackageLatestVersion {
  // newest version satisfying the currently-declared version constraint
  pub semver_compatible: Option<PackageNv>,
  // absolute newest version, ignoring the declared constraint
  pub latest: Option<PackageNv>,
}
|
||||
|
||||
/// Discovers, resolves and edits the dependencies of a workspace.
pub struct DepManager {
  deps: Vec<Dep>,
  // parallel to `deps`: currently-resolved version of each dep, if any
  resolved_versions: Vec<Option<PackageNv>>,
  // parallel to `deps`: registry-reported latest versions
  latest_versions: Vec<PackageLatestVersion>,

  // edits queued by `update_dep`, applied by `commit_changes`
  pending_changes: Vec<Change>,

  // set once `run_dependency_resolution` has completed successfully
  dependencies_resolved: AtomicBool,
  module_load_preparer: Arc<ModuleLoadPreparer>,
  // TODO(nathanwhit): probably shouldn't be pub
  pub(crate) jsr_fetch_resolver: Arc<JsrFetchResolver>,
  pub(crate) npm_fetch_resolver: Arc<NpmFetchResolver>,
  npm_resolver: Arc<dyn CliNpmResolver>,
  permissions_container: PermissionsContainer,
  main_module_graph_container: Arc<MainModuleGraphContainer>,
  lockfile: Option<Arc<CliLockfile>>,
}
|
||||
|
||||
/// Bundle of services required to construct a [`DepManager`]; each field
/// corresponds to the field of the same name on [`DepManager`].
pub struct DepManagerArgs {
  pub module_load_preparer: Arc<ModuleLoadPreparer>,
  pub jsr_fetch_resolver: Arc<JsrFetchResolver>,
  pub npm_fetch_resolver: Arc<NpmFetchResolver>,
  pub npm_resolver: Arc<dyn CliNpmResolver>,
  pub permissions_container: PermissionsContainer,
  pub main_module_graph_container: Arc<MainModuleGraphContainer>,
  pub lockfile: Option<Arc<CliLockfile>>,
}
|
||||
|
||||
impl DepManager {
  /// Rebuilds the manager with fresh resolution state after config files
  /// were modified, carrying over already-fetched latest versions.
  pub fn reloaded_after_modification(self, args: DepManagerArgs) -> Self {
    let mut new = Self::with_deps_args(self.deps, args);
    // reuse cached registry lookups; they don't change with local edits
    new.latest_versions = self.latest_versions;
    new
  }

  /// Constructs a manager over `deps` with empty resolution state.
  fn with_deps_args(deps: Vec<Dep>, args: DepManagerArgs) -> Self {
    let DepManagerArgs {
      module_load_preparer,
      jsr_fetch_resolver,
      npm_fetch_resolver,
      npm_resolver,
      permissions_container,
      main_module_graph_container,
      lockfile,
    } = args;
    Self {
      deps,
      resolved_versions: Vec::new(),
      latest_versions: Vec::new(),
      jsr_fetch_resolver,
      dependencies_resolved: AtomicBool::new(false),
      module_load_preparer,
      npm_fetch_resolver,
      npm_resolver,
      permissions_container,
      main_module_graph_container,
      lockfile,
      pending_changes: Vec::new(),
    }
  }

  /// Collects dependencies from a single workspace directory (its
  /// deno.json and/or package.json), without recursing into members.
  pub fn from_workspace_dir(
    workspace_dir: &Arc<WorkspaceDirectory>,
    dep_filter: impl DepFilter,
    args: DepManagerArgs,
  ) -> Result<Self, AnyError> {
    let mut deps = Vec::with_capacity(256);
    if let Some(deno_json) = workspace_dir.maybe_deno_json() {
      if deno_json.specifier.scheme() != "file" {
        bail!("remote deno.json files are not supported");
      }
      let path = deno_json.specifier.to_file_path().unwrap();
      // only use the deno.json if it actually lives in this directory
      if path.parent().unwrap() == workspace_dir.dir_path() {
        add_deps_from_deno_json(deno_json, dep_filter, &mut deps);
      }
    }
    if let Some(package_json) = workspace_dir.maybe_pkg_json() {
      add_deps_from_package_json(package_json, dep_filter, &mut deps);
    }

    Ok(Self::with_deps_args(deps, args))
  }

  /// Collects dependencies from every member of the workspace.
  pub fn from_workspace(
    workspace: &Arc<Workspace>,
    dep_filter: impl DepFilter,
    args: DepManagerArgs,
  ) -> Result<Self, AnyError> {
    let deps = deps_from_workspace(workspace, dep_filter)?;
    Ok(Self::with_deps_args(deps, args))
  }

  /// Ensures every tracked dependency resolves in the module graph / npm
  /// snapshot, installing and loading modules only when needed.
  /// Idempotent: a no-op once resolution has succeeded.
  async fn run_dependency_resolution(&self) -> Result<(), AnyError> {
    if self
      .dependencies_resolved
      .load(std::sync::atomic::Ordering::Relaxed)
    {
      return Ok(());
    }

    let mut graph_permit = self
      .main_module_graph_container
      .acquire_update_permit()
      .await;
    let graph = graph_permit.graph_mut();
    // populate the information from the lockfile
    if let Some(lockfile) = &self.lockfile {
      let lockfile = lockfile.lock();
      graph.fill_from_lockfile(FillFromLockfileOptions {
        redirects: lockfile
          .content
          .redirects
          .iter()
          .map(|(from, to)| (from.as_str(), to.as_str())),
        package_specifiers: lockfile
          .content
          .packages
          .specifiers
          .iter()
          .map(|(dep, id)| (dep, id.as_str())),
      });
    }

    let npm_resolver = self.npm_resolver.as_managed().unwrap();
    // fast path: everything already resolves, no install / load required
    if self.deps.iter().all(|dep| match dep.kind {
      DepKind::Npm => {
        npm_resolver.resolve_pkg_id_from_pkg_req(&dep.req).is_ok()
      }
      DepKind::Jsr => graph.packages.mappings().contains_key(&dep.req),
    }) {
      self
        .dependencies_resolved
        .store(true, std::sync::atomic::Ordering::Relaxed);
      graph_permit.commit();
      return Ok(());
    }

    npm_resolver.ensure_top_level_package_json_install().await?;
    let mut roots = Vec::new();
    let mut info_futures = FuturesUnordered::new();
    for dep in &self.deps {
      if dep.location.is_deno_json() {
        match dep.kind {
          DepKind::Npm => roots.push(
            ModuleSpecifier::parse(&format!("npm:/{}/", dep.req)).unwrap(),
          ),
          // jsr deps need version info fetched to enumerate their exports
          DepKind::Jsr => info_futures.push(async {
            if let Some(nv) = self.jsr_fetch_resolver.req_to_nv(&dep.req).await
            {
              if let Some(info) =
                self.jsr_fetch_resolver.package_version_info(&nv).await
              {
                let specifier =
                  ModuleSpecifier::parse(&format!("jsr:/{}/", dep.req))
                    .unwrap();
                return Some((specifier, info));
              }
            }
            None
          }),
        }
      }
    }

    // every export of each jsr package becomes a module-graph root
    while let Some(info_future) = info_futures.next().await {
      if let Some((specifier, info)) = info_future {
        let exports = info.exports();
        for (k, _) in exports {
          if let Ok(spec) = specifier.join(k) {
            roots.push(spec);
          }
        }
      }
    }

    self
      .module_load_preparer
      .prepare_module_load(
        graph,
        &roots,
        false,
        deno_config::deno_json::TsTypeLib::DenoWindow,
        self.permissions_container.clone(),
        None,
      )
      .await?;

    graph_permit.commit();

    Ok(())
  }

  /// Currently-resolved version for `id`, if resolution found one.
  pub fn resolved_version(&self, id: DepId) -> Option<&PackageNv> {
    self.resolved_versions[id.0].as_ref()
  }

  /// Fills `resolved_versions` (parallel to `deps`) from the module graph
  /// and the managed npm snapshot.
  pub async fn resolve_current_versions(&mut self) -> Result<(), AnyError> {
    self.run_dependency_resolution().await?;

    let graph = self.main_module_graph_container.graph();

    let mut resolved = Vec::with_capacity(self.deps.len());
    let snapshot = self.npm_resolver.as_managed().unwrap().snapshot();
    let resolved_npm = snapshot.package_reqs();
    let resolved_jsr = graph.packages.mappings();
    for dep in &self.deps {
      match dep.kind {
        DepKind::Npm => {
          let resolved_version = resolved_npm.get(&dep.req).cloned();
          resolved.push(resolved_version);
        }
        DepKind::Jsr => {
          let resolved_version = resolved_jsr.get(&dep.req).cloned();
          resolved.push(resolved_version)
        }
      }
    }

    self.resolved_versions = resolved;

    Ok(())
  }

  /// Queries the registries for the latest / latest-semver-compatible
  /// version of every dep with bounded concurrency, preserving dep order.
  /// Returns the cached result when already loaded.
  async fn load_latest_versions(
    &self,
  ) -> Result<Vec<PackageLatestVersion>, AnyError> {
    if self.latest_versions.len() == self.deps.len() {
      return Ok(self.latest_versions.clone());
    }
    let latest_tag_req = deno_semver::VersionReq::from_raw_text_and_inner(
      "latest".into(),
      deno_semver::RangeSetOrTag::Tag("latest".into()),
    );
    let mut latest_versions = Vec::with_capacity(self.deps.len());

    // limit the number of in-flight registry requests per registry
    let npm_sema = Semaphore::new(32);
    let jsr_sema = Semaphore::new(32);
    let mut futs = FuturesOrdered::new();

    for dep in &self.deps {
      match dep.kind {
        DepKind::Npm => futs.push_back(
          async {
            let semver_req = &dep.req;
            // npm: the `latest` dist-tag gives the newest release
            let latest_req = PackageReq {
              name: dep.req.name.clone(),
              version_req: latest_tag_req.clone(),
            };
            let _permit = npm_sema.acquire().await;
            let semver_compatible =
              self.npm_fetch_resolver.req_to_nv(semver_req).await;
            let latest = self.npm_fetch_resolver.req_to_nv(&latest_req).await;
            PackageLatestVersion {
              latest,
              semver_compatible,
            }
          }
          .boxed_local(),
        ),
        DepKind::Jsr => futs.push_back(
          async {
            let semver_req = &dep.req;
            // jsr: a wildcard req resolves to the newest version
            let latest_req = PackageReq {
              name: dep.req.name.clone(),
              version_req: deno_semver::WILDCARD_VERSION_REQ.clone(),
            };
            let _permit = jsr_sema.acquire().await;
            let semver_compatible =
              self.jsr_fetch_resolver.req_to_nv(semver_req).await;
            let latest = self.jsr_fetch_resolver.req_to_nv(&latest_req).await;
            PackageLatestVersion {
              latest,
              semver_compatible,
            }
          }
          .boxed_local(),
        ),
      }
    }
    // FuturesOrdered yields results in push order, matching `deps`
    while let Some(nv) = futs.next().await {
      latest_versions.push(nv);
    }

    Ok(latest_versions)
  }

  /// Resolves both the current and the latest versions of all deps.
  pub async fn resolve_versions(&mut self) -> Result<(), AnyError> {
    let (_, latest_versions) = try_join(
      self.run_dependency_resolution(),
      self.load_latest_versions(),
    )
    .await?;

    self.latest_versions = latest_versions;

    self.resolve_current_versions().await?;

    Ok(())
  }

  /// Iterates (dep id, resolved version, latest versions) triples;
  /// meaningful after `resolve_versions` has run.
  pub fn deps_with_resolved_latest_versions(
    &self,
  ) -> impl IntoIterator<Item = (DepId, Option<PackageNv>, PackageLatestVersion)> + '_
  {
    self
      .resolved_versions
      .iter()
      .zip(self.latest_versions.iter())
      .enumerate()
      .map(|(i, (resolved, latest))| {
        (DepId(i), resolved.clone(), latest.clone())
      })
  }

  /// Looks up a dependency by id; panics on an id from another manager.
  pub fn get_dep(&self, id: DepId) -> &Dep {
    &self.deps[id.0]
  }

  /// Queues a version-requirement change; applied by `commit_changes`.
  pub fn update_dep(&mut self, dep_id: DepId, new_version_req: VersionReq) {
    self
      .pending_changes
      .push(Change::Update(dep_id, new_version_req));
  }

  /// Applies all queued changes by editing the deno.json / package.json
  /// CSTs in memory, then committing each touched file once.
  pub fn commit_changes(&mut self) -> Result<(), AnyError> {
    let changes = std::mem::take(&mut self.pending_changes);
    // one updater per config file so multiple edits accumulate
    let mut config_updaters = HashMap::new();
    for change in changes {
      match change {
        Change::Update(dep_id, version_req) => {
          // TODO: move most of this to ConfigUpdater
          let dep = &mut self.deps[dep_id.0];
          dep.req.version_req = version_req.clone();
          match &dep.location {
            DepLocation::DenoJson(arc, key_path, import_map_kind) => {
              if matches!(import_map_kind, ImportMapKind::Outline) {
                // not supported
                continue;
              }
              let updater =
                get_or_create_updater(&mut config_updaters, &dep.location)?;

              let Some(property) = updater.get_property_for_mutation(key_path)
              else {
                log::warn!(
                  "failed to find property at path {key_path:?} for file {}",
                  arc.specifier
                );
                continue;
              };
              let Some(string_value) = cst_string_literal(&property) else {
                continue;
              };
              let mut req_reference = match dep.kind {
                DepKind::Npm => NpmPackageReqReference::from_str(&string_value)
                  .unwrap()
                  .into_inner(),
                DepKind::Jsr => JsrPackageReqReference::from_str(&string_value)
                  .unwrap()
                  .into_inner(),
              };
              req_reference.req.version_req = version_req;
              let mut new_value =
                format!("{}:{}", dep.kind.scheme(), req_reference);
              if string_value.ends_with('/') && !new_value.ends_with('/') {
                // the display impl for PackageReqReference maps `/` to the root
                // subpath, but for the import map the trailing `/` is significant
                new_value.push('/');
              }
              if string_value
                .trim_start_matches(format!("{}:", dep.kind.scheme()).as_str())
                .starts_with('/')
              {
                // this is gross
                new_value = new_value.replace(':', ":/");
              }
              property
                .set_value(jsonc_parser::cst::CstInputValue::String(new_value));
            }
            DepLocation::PackageJson(arc, key_path) => {
              let updater =
                get_or_create_updater(&mut config_updaters, &dep.location)?;
              let Some(property) = updater.get_property_for_mutation(key_path)
              else {
                log::warn!(
                  "failed to find property at path {key_path:?} for file {}",
                  arc.path.display()
                );
                continue;
              };
              let Some(string_value) = cst_string_literal(&property) else {
                continue;
              };
              let new_value = if string_value.starts_with("npm:") {
                // aliased
                let rest = string_value.trim_start_matches("npm:");
                let mut parts = rest.split('@');
                let first = parts.next().unwrap();
                if first.is_empty() {
                  // scoped package: the leading `@` made the first part empty
                  let scope_and_name = parts.next().unwrap();
                  format!("npm:@{scope_and_name}@{version_req}")
                } else {
                  format!("npm:{first}@{version_req}")
                }
              } else if string_value.contains(":") {
                bail!("Unexpected package json dependency string: \"{string_value}\" in {}", arc.path.display());
              } else {
                // plain version constraint; replace it wholesale
                version_req.to_string()
              };
              property
                .set_value(jsonc_parser::cst::CstInputValue::String(new_value));
            }
          }
        }
      }
    }

    for (_, updater) in config_updaters {
      updater.commit()?;
    }

    Ok(())
  }
}
|
||||
|
||||
fn get_or_create_updater<'a>(
|
||||
config_updaters: &'a mut HashMap<std::path::PathBuf, ConfigUpdater>,
|
||||
location: &DepLocation,
|
||||
) -> Result<&'a mut ConfigUpdater, AnyError> {
|
||||
match config_updaters.entry(location.file_path().into_owned()) {
|
||||
std::collections::hash_map::Entry::Occupied(occupied_entry) => {
|
||||
Ok(occupied_entry.into_mut())
|
||||
}
|
||||
std::collections::hash_map::Entry::Vacant(vacant_entry) => {
|
||||
let updater = ConfigUpdater::new(
|
||||
location.config_kind(),
|
||||
location.file_path().into_owned(),
|
||||
)?;
|
||||
Ok(vacant_entry.insert(updater))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn cst_string_literal(
|
||||
property: &jsonc_parser::cst::CstObjectProp,
|
||||
) -> Option<String> {
|
||||
// TODO(nathanwhit): ensure this unwrap is safe
|
||||
let value = property.value().unwrap();
|
||||
let Some(string) = value.as_string_lit() else {
|
||||
log::warn!("malformed entry");
|
||||
return None;
|
||||
};
|
||||
let Ok(string_value) = string.decoded_value() else {
|
||||
log::warn!("malformed string: {string:?}");
|
||||
return None;
|
||||
};
|
||||
Some(string_value)
|
||||
}
|
||||
|
||||
fn parse_req_reference(
|
||||
input: &str,
|
||||
kind: DepKind,
|
||||
) -> Result<
|
||||
PackageReqReference,
|
||||
deno_semver::package::PackageReqReferenceParseError,
|
||||
> {
|
||||
Ok(match kind {
|
||||
DepKind::Npm => NpmPackageReqReference::from_str(input)?.into_inner(),
|
||||
DepKind::Jsr => JsrPackageReqReference::from_str(input)?.into_inner(),
|
||||
})
|
||||
}
|
661
cli/tools/registry/pm/outdated.rs
Normal file
661
cli/tools/registry/pm/outdated.rs
Normal file
|
@ -0,0 +1,661 @@
|
|||
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
|
||||
|
||||
use std::collections::HashSet;
|
||||
use std::sync::Arc;
|
||||
|
||||
use deno_core::error::AnyError;
|
||||
use deno_semver::package::PackageNv;
|
||||
use deno_semver::package::PackageReq;
|
||||
use deno_semver::VersionReq;
|
||||
use deno_terminal::colors;
|
||||
|
||||
use crate::args::CacheSetting;
|
||||
use crate::args::CliOptions;
|
||||
use crate::args::Flags;
|
||||
use crate::args::OutdatedFlags;
|
||||
use crate::factory::CliFactory;
|
||||
use crate::file_fetcher::FileFetcher;
|
||||
use crate::jsr::JsrFetchResolver;
|
||||
use crate::npm::NpmFetchResolver;
|
||||
use crate::tools::registry::pm::deps::DepKind;
|
||||
|
||||
use super::deps::Dep;
|
||||
use super::deps::DepManager;
|
||||
use super::deps::DepManagerArgs;
|
||||
use super::deps::PackageLatestVersion;
|
||||
|
||||
/// Row data for the `deno outdated` table. The derived `Ord` is
/// lexicographic in field-declaration order, which drives the sort of the
/// printed table.
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
struct OutdatedPackage {
  kind: DepKind,
  latest: String,
  semver_compatible: String,
  current: String,
  name: String,
}
|
||||
|
||||
/// Renders `packages` as a box-drawing table on stdout with the columns
/// Package / Current / Update (newest semver-compatible) / Latest.
#[allow(clippy::print_stdout)]
fn print_outdated_table(packages: &[OutdatedPackage]) {
  const HEADINGS: &[&str] = &["Package", "Current", "Update", "Latest"];

  // measure the widest content of each column
  let mut longest_package = 0;
  let mut longest_current = 0;
  let mut longest_update = 0;
  let mut longest_latest = 0;

  for package in packages {
    let name_len = package.kind.scheme().len() + 1 + package.name.len();
    longest_package = longest_package.max(name_len);
    longest_current = longest_current.max(package.current.len());
    longest_update = longest_update.max(package.semver_compatible.len());
    longest_latest = longest_latest.max(package.latest.len());
  }

  // +2 leaves one space of padding on each side of the cell text
  let package_column_width = longest_package.max(HEADINGS[0].len()) + 2;
  let current_column_width = longest_current.max(HEADINGS[1].len()) + 2;
  let update_column_width = longest_update.max(HEADINGS[2].len()) + 2;
  let latest_column_width = longest_latest.max(HEADINGS[3].len()) + 2;

  let package_fill = "─".repeat(package_column_width);
  let current_fill = "─".repeat(current_column_width);
  let update_fill = "─".repeat(update_column_width);
  let latest_fill = "─".repeat(latest_column_width);

  // top border followed by the highlighted header row
  println!("┌{package_fill}┬{current_fill}┬{update_fill}┬{latest_fill}┐");
  println!(
    "│ {}{} │ {}{} │ {}{} │ {}{} │",
    colors::intense_blue(HEADINGS[0]),
    " ".repeat(package_column_width - 2 - HEADINGS[0].len()),
    colors::intense_blue(HEADINGS[1]),
    " ".repeat(current_column_width - 2 - HEADINGS[1].len()),
    colors::intense_blue(HEADINGS[2]),
    " ".repeat(update_column_width - 2 - HEADINGS[2].len()),
    colors::intense_blue(HEADINGS[3]),
    " ".repeat(latest_column_width - 2 - HEADINGS[3].len())
  );
  for package in packages {
    // separator line above every data row
    println!("├{package_fill}┼{current_fill}┼{update_fill}┼{latest_fill}┤",);

    print!(
      "│ {:<package_column_width$} ",
      format!("{}:{}", package.kind.scheme(), package.name),
      package_column_width = package_column_width - 2
    );
    print!(
      "│ {:<current_column_width$} ",
      package.current,
      current_column_width = current_column_width - 2
    );
    print!(
      "│ {:<update_column_width$} ",
      package.semver_compatible,
      update_column_width = update_column_width - 2
    );
    println!(
      "│ {:<latest_column_width$} │",
      package.latest,
      latest_column_width = latest_column_width - 2
    );
  }

  println!("└{package_fill}┴{current_fill}┴{update_fill}┴{latest_fill}┘",);
}
|
||||
|
||||
/// Computes and prints the set of outdated packages.
///
/// With `compatible` set, a dep counts as outdated only when a newer
/// semver-compatible release exists; otherwise the absolute latest release
/// is compared. Duplicate (kind, name, resolved version) triples are
/// reported once.
fn print_outdated(
  deps: &mut DepManager,
  compatible: bool,
) -> Result<(), AnyError> {
  let mut outdated = Vec::new();
  let mut seen = std::collections::BTreeSet::new();
  for (dep_id, resolved, latest_versions) in
    deps.deps_with_resolved_latest_versions()
  {
    let dep = deps.get_dep(dep_id);

    // deps that never resolved can't be compared
    let Some(resolved) = resolved else { continue };

    let latest = {
      let preferred = if compatible {
        &latest_versions.semver_compatible
      } else {
        &latest_versions.latest
      };
      if let Some(v) = preferred {
        v
      } else {
        continue;
      }
    };

    if latest > &resolved
      && seen.insert((dep.kind, dep.req.name.clone(), resolved.version.clone()))
    {
      outdated.push(OutdatedPackage {
        kind: dep.kind,
        name: dep.req.name.clone(),
        current: resolved.version.to_string(),
        latest: latest_versions
          .latest
          .map(|l| l.version.to_string())
          .unwrap_or_default(),
        semver_compatible: latest_versions
          .semver_compatible
          .map(|l| l.version.to_string())
          .unwrap_or_default(),
      })
    }
  }

  if !outdated.is_empty() {
    outdated.sort();
    print_outdated_table(&outdated);
  }

  Ok(())
}
|
||||
|
||||
/// Entrypoint for `deno outdated`: discovers workspace dependencies,
/// resolves their current and latest versions, then either prints a report
/// or updates the config files depending on the flags.
pub async fn outdated(
  flags: Arc<Flags>,
  update_flags: OutdatedFlags,
) -> Result<(), AnyError> {
  let factory = CliFactory::from_flags(flags.clone());
  let cli_options = factory.cli_options()?;
  let workspace = cli_options.workspace();
  let http_client = factory.http_client_provider();
  let deps_http_cache = factory.global_http_cache()?;
  // honor HTTP cache headers so registry metadata stays reasonably fresh
  let mut file_fetcher = FileFetcher::new(
    deps_http_cache.clone(),
    CacheSetting::RespectHeaders,
    true,
    http_client.clone(),
    Default::default(),
    None,
  );
  file_fetcher.set_download_log_level(log::Level::Trace);
  let file_fetcher = Arc::new(file_fetcher);
  let npm_fetch_resolver = Arc::new(NpmFetchResolver::new(
    file_fetcher.clone(),
    cli_options.npmrc().clone(),
  ));
  let jsr_fetch_resolver =
    Arc::new(JsrFetchResolver::new(file_fetcher.clone()));

  let args = dep_manager_args(
    &factory,
    cli_options,
    npm_fetch_resolver.clone(),
    jsr_fetch_resolver.clone(),
  )
  .await?;

  let filter_set = filter::FilterSet::from_filter_strings(
    update_flags.filters.iter().map(|s| s.as_str()),
  )?;

  // with no filters everything matches; otherwise match on alias or name
  let filter_fn = |alias: Option<&str>, req: &PackageReq, _: DepKind| {
    if filter_set.is_empty() {
      return true;
    }
    let name = alias.unwrap_or(&req.name);
    filter_set.matches(name)
  };
  let mut deps = if update_flags.recursive {
    super::deps::DepManager::from_workspace(workspace, filter_fn, args)?
  } else {
    super::deps::DepManager::from_workspace_dir(
      &cli_options.start_dir,
      filter_fn,
      args,
    )?
  };

  deps.resolve_versions().await?;

  match update_flags.kind {
    crate::args::OutdatedKind::Update { latest } => {
      update(deps, latest, &filter_set, flags).await?;
    }
    crate::args::OutdatedKind::PrintOutdated { compatible } => {
      print_outdated(&mut deps, compatible)?;
    }
  }

  Ok(())
}
|
||||
|
||||
fn choose_new_version_req(
|
||||
dep: &Dep,
|
||||
resolved: Option<&PackageNv>,
|
||||
latest_versions: &PackageLatestVersion,
|
||||
update_to_latest: bool,
|
||||
filter_set: &filter::FilterSet,
|
||||
) -> Option<VersionReq> {
|
||||
let explicit_version_req = filter_set
|
||||
.matching_filter(dep.alias.as_deref().unwrap_or(&dep.req.name))
|
||||
.version_spec()
|
||||
.cloned();
|
||||
|
||||
if let Some(version_req) = explicit_version_req {
|
||||
if let Some(resolved) = resolved {
|
||||
// todo(nathanwhit): handle tag
|
||||
if version_req.tag().is_none() && version_req.matches(&resolved.version) {
|
||||
return None;
|
||||
}
|
||||
}
|
||||
Some(version_req)
|
||||
} else {
|
||||
let preferred = if update_to_latest {
|
||||
latest_versions.latest.as_ref()?
|
||||
} else {
|
||||
latest_versions.semver_compatible.as_ref()?
|
||||
};
|
||||
if preferred.version <= resolved?.version {
|
||||
return None;
|
||||
}
|
||||
Some(
|
||||
VersionReq::parse_from_specifier(
|
||||
format!("^{}", preferred.version).as_str(),
|
||||
)
|
||||
.unwrap(),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
async fn update(
|
||||
mut deps: DepManager,
|
||||
update_to_latest: bool,
|
||||
filter_set: &filter::FilterSet,
|
||||
flags: Arc<Flags>,
|
||||
) -> Result<(), AnyError> {
|
||||
let mut updated = Vec::new();
|
||||
|
||||
for (dep_id, resolved, latest_versions) in deps
|
||||
.deps_with_resolved_latest_versions()
|
||||
.into_iter()
|
||||
.collect::<Vec<_>>()
|
||||
{
|
||||
let dep = deps.get_dep(dep_id);
|
||||
let new_version_req = choose_new_version_req(
|
||||
dep,
|
||||
resolved.as_ref(),
|
||||
&latest_versions,
|
||||
update_to_latest,
|
||||
filter_set,
|
||||
);
|
||||
let Some(new_version_req) = new_version_req else {
|
||||
continue;
|
||||
};
|
||||
|
||||
updated.push((
|
||||
dep_id,
|
||||
format!("{}:{}", dep.kind.scheme(), dep.req.name),
|
||||
deps.resolved_version(dep.id).cloned(),
|
||||
new_version_req.clone(),
|
||||
));
|
||||
|
||||
deps.update_dep(dep_id, new_version_req);
|
||||
}
|
||||
|
||||
deps.commit_changes()?;
|
||||
|
||||
if !updated.is_empty() {
|
||||
let factory = super::npm_install_after_modification(
|
||||
flags.clone(),
|
||||
Some(deps.jsr_fetch_resolver.clone()),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let mut updated_to_versions = HashSet::new();
|
||||
let cli_options = factory.cli_options()?;
|
||||
let args = dep_manager_args(
|
||||
&factory,
|
||||
cli_options,
|
||||
deps.npm_fetch_resolver.clone(),
|
||||
deps.jsr_fetch_resolver.clone(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let mut deps = deps.reloaded_after_modification(args);
|
||||
deps.resolve_current_versions().await?;
|
||||
for (dep_id, package_name, maybe_current_version, new_version_req) in
|
||||
updated
|
||||
{
|
||||
if let Some(nv) = deps.resolved_version(dep_id) {
|
||||
updated_to_versions.insert((
|
||||
package_name,
|
||||
maybe_current_version,
|
||||
nv.version.clone(),
|
||||
));
|
||||
} else {
|
||||
log::warn!(
|
||||
"Failed to resolve version for new version requirement: {} -> {}",
|
||||
package_name,
|
||||
new_version_req
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
log::info!(
|
||||
"Updated {} dependenc{}:",
|
||||
updated_to_versions.len(),
|
||||
if updated_to_versions.len() == 1 {
|
||||
"y"
|
||||
} else {
|
||||
"ies"
|
||||
}
|
||||
);
|
||||
let mut updated_to_versions =
|
||||
updated_to_versions.into_iter().collect::<Vec<_>>();
|
||||
updated_to_versions.sort_by(|(k, _, _), (k2, _, _)| k.cmp(k2));
|
||||
let max_name = updated_to_versions
|
||||
.iter()
|
||||
.map(|(name, _, _)| name.len())
|
||||
.max()
|
||||
.unwrap_or(0);
|
||||
let max_old = updated_to_versions
|
||||
.iter()
|
||||
.map(|(_, maybe_current, _)| {
|
||||
maybe_current
|
||||
.as_ref()
|
||||
.map(|v| v.version.to_string().len())
|
||||
.unwrap_or(0)
|
||||
})
|
||||
.max()
|
||||
.unwrap_or(0);
|
||||
let max_new = updated_to_versions
|
||||
.iter()
|
||||
.map(|(_, _, new_version)| new_version.to_string().len())
|
||||
.max()
|
||||
.unwrap_or(0);
|
||||
|
||||
for (package_name, maybe_current_version, new_version) in
|
||||
updated_to_versions
|
||||
{
|
||||
let current_version = if let Some(current_version) = maybe_current_version
|
||||
{
|
||||
current_version.version.to_string()
|
||||
} else {
|
||||
"".to_string()
|
||||
};
|
||||
|
||||
log::info!(
|
||||
" - {}{} {}{} -> {}{}",
|
||||
format!(
|
||||
"{}{}",
|
||||
colors::gray(package_name[0..4].to_string()),
|
||||
package_name[4..].to_string()
|
||||
),
|
||||
" ".repeat(max_name - package_name.len()),
|
||||
" ".repeat(max_old - current_version.len()),
|
||||
colors::gray(¤t_version),
|
||||
" ".repeat(max_new - new_version.to_string().len()),
|
||||
colors::green(&new_version),
|
||||
);
|
||||
}
|
||||
} else {
|
||||
log::info!(
|
||||
"All {}dependencies are up to date.",
|
||||
if filter_set.is_empty() {
|
||||
""
|
||||
} else {
|
||||
"matching "
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn dep_manager_args(
|
||||
factory: &CliFactory,
|
||||
cli_options: &CliOptions,
|
||||
npm_fetch_resolver: Arc<NpmFetchResolver>,
|
||||
jsr_fetch_resolver: Arc<JsrFetchResolver>,
|
||||
) -> Result<DepManagerArgs, AnyError> {
|
||||
Ok(DepManagerArgs {
|
||||
module_load_preparer: factory.module_load_preparer().await?.clone(),
|
||||
jsr_fetch_resolver,
|
||||
npm_fetch_resolver,
|
||||
npm_resolver: factory.npm_resolver().await?.clone(),
|
||||
permissions_container: factory.root_permissions_container()?.clone(),
|
||||
main_module_graph_container: factory
|
||||
.main_module_graph_container()
|
||||
.await?
|
||||
.clone(),
|
||||
lockfile: cli_options.maybe_lockfile().cloned(),
|
||||
})
|
||||
}
|
||||
|
||||
mod filter {
|
||||
use deno_core::anyhow::anyhow;
|
||||
use deno_core::anyhow::Context;
|
||||
use deno_core::error::AnyError;
|
||||
use deno_semver::VersionReq;
|
||||
|
||||
enum FilterKind {
|
||||
Exclude,
|
||||
Include,
|
||||
}
|
||||
pub struct Filter {
|
||||
kind: FilterKind,
|
||||
regex: regex::Regex,
|
||||
version_spec: Option<VersionReq>,
|
||||
}
|
||||
|
||||
fn pattern_to_regex(pattern: &str) -> Result<regex::Regex, AnyError> {
|
||||
let escaped = regex::escape(pattern);
|
||||
let unescaped_star = escaped.replace(r"\*", ".*");
|
||||
Ok(regex::Regex::new(&format!("^{}$", unescaped_star))?)
|
||||
}
|
||||
|
||||
impl Filter {
|
||||
pub fn version_spec(&self) -> Option<&VersionReq> {
|
||||
self.version_spec.as_ref()
|
||||
}
|
||||
pub fn from_str(input: &str) -> Result<Self, AnyError> {
|
||||
let (kind, first_idx) = if input.starts_with('!') {
|
||||
(FilterKind::Exclude, 1)
|
||||
} else {
|
||||
(FilterKind::Include, 0)
|
||||
};
|
||||
let s = &input[first_idx..];
|
||||
let (pattern, version_spec) =
|
||||
if let Some(scope_name) = s.strip_prefix('@') {
|
||||
if let Some(idx) = scope_name.find('@') {
|
||||
let (pattern, version_spec) = s.split_at(idx + 1);
|
||||
(
|
||||
pattern,
|
||||
Some(
|
||||
VersionReq::parse_from_specifier(
|
||||
version_spec.trim_start_matches('@'),
|
||||
)
|
||||
.with_context(|| format!("Invalid filter \"{input}\""))?,
|
||||
),
|
||||
)
|
||||
} else {
|
||||
(s, None)
|
||||
}
|
||||
} else {
|
||||
let mut parts = s.split('@');
|
||||
let Some(pattern) = parts.next() else {
|
||||
return Err(anyhow!("Invalid filter \"{input}\""));
|
||||
};
|
||||
(
|
||||
pattern,
|
||||
parts
|
||||
.next()
|
||||
.map(VersionReq::parse_from_specifier)
|
||||
.transpose()
|
||||
.with_context(|| format!("Invalid filter \"{input}\""))?,
|
||||
)
|
||||
};
|
||||
|
||||
Ok(Filter {
|
||||
kind,
|
||||
regex: pattern_to_regex(pattern)
|
||||
.with_context(|| format!("Invalid filter \"{input}\""))?,
|
||||
version_spec,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn matches(&self, name: &str) -> bool {
|
||||
self.regex.is_match(name)
|
||||
}
|
||||
}
|
||||
|
||||
pub struct FilterSet {
|
||||
filters: Vec<Filter>,
|
||||
has_exclude: bool,
|
||||
has_include: bool,
|
||||
}
|
||||
impl FilterSet {
|
||||
pub fn from_filter_strings<'a>(
|
||||
filter_strings: impl IntoIterator<Item = &'a str>,
|
||||
) -> Result<Self, AnyError> {
|
||||
let filters = filter_strings
|
||||
.into_iter()
|
||||
.map(Filter::from_str)
|
||||
.collect::<Result<Vec<_>, _>>()?;
|
||||
let has_exclude = filters
|
||||
.iter()
|
||||
.any(|f| matches!(f.kind, FilterKind::Exclude));
|
||||
let has_include = filters
|
||||
.iter()
|
||||
.any(|f| matches!(f.kind, FilterKind::Include));
|
||||
Ok(FilterSet {
|
||||
filters,
|
||||
has_exclude,
|
||||
has_include,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.filters.is_empty()
|
||||
}
|
||||
|
||||
pub fn matches(&self, name: &str) -> bool {
|
||||
self.matching_filter(name).is_included()
|
||||
}
|
||||
|
||||
pub fn matching_filter(&self, name: &str) -> MatchResult<'_> {
|
||||
if self.filters.is_empty() {
|
||||
return MatchResult::Included;
|
||||
}
|
||||
let mut matched = None;
|
||||
for filter in &self.filters {
|
||||
match filter.kind {
|
||||
FilterKind::Include => {
|
||||
if matched.is_none() && filter.matches(name) {
|
||||
matched = Some(filter);
|
||||
}
|
||||
}
|
||||
FilterKind::Exclude => {
|
||||
if filter.matches(name) {
|
||||
return MatchResult::Excluded;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if let Some(filter) = matched {
|
||||
MatchResult::Matches(filter)
|
||||
} else if self.has_exclude && !self.has_include {
|
||||
MatchResult::Included
|
||||
} else {
|
||||
MatchResult::Excluded
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub enum MatchResult<'a> {
|
||||
Matches(&'a Filter),
|
||||
Included,
|
||||
Excluded,
|
||||
}
|
||||
|
||||
impl MatchResult<'_> {
|
||||
pub fn version_spec(&self) -> Option<&VersionReq> {
|
||||
match self {
|
||||
MatchResult::Matches(filter) => filter.version_spec(),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
pub fn is_included(&self) -> bool {
|
||||
matches!(self, MatchResult::Included | MatchResult::Matches(_))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
fn matches_filters<'a, 'b>(
|
||||
filters: impl IntoIterator<Item = &'a str>,
|
||||
name: &str,
|
||||
) -> bool {
|
||||
let filters = super::FilterSet::from_filter_strings(filters).unwrap();
|
||||
filters.matches(name)
|
||||
}
|
||||
|
||||
fn version_spec(s: &str) -> deno_semver::VersionReq {
|
||||
deno_semver::VersionReq::parse_from_specifier(s).unwrap()
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn basic_glob() {
|
||||
assert!(matches_filters(["foo*"], "foo"));
|
||||
assert!(matches_filters(["foo*"], "foobar"));
|
||||
assert!(!matches_filters(["foo*"], "barfoo"));
|
||||
|
||||
assert!(matches_filters(["*foo"], "foo"));
|
||||
assert!(matches_filters(["*foo"], "barfoo"));
|
||||
assert!(!matches_filters(["*foo"], "foobar"));
|
||||
|
||||
assert!(matches_filters(["@scope/foo*"], "@scope/foobar"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn basic_glob_with_version() {
|
||||
assert!(matches_filters(["foo*@1"], "foo",));
|
||||
assert!(matches_filters(["foo*@1"], "foobar",));
|
||||
assert!(matches_filters(["foo*@1"], "foo-bar",));
|
||||
assert!(!matches_filters(["foo*@1"], "barfoo",));
|
||||
assert!(matches_filters(["@scope/*@1"], "@scope/foo"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn glob_exclude() {
|
||||
assert!(!matches_filters(["!foo*"], "foo"));
|
||||
assert!(!matches_filters(["!foo*"], "foobar"));
|
||||
assert!(matches_filters(["!foo*"], "barfoo"));
|
||||
|
||||
assert!(!matches_filters(["!*foo"], "foo"));
|
||||
assert!(!matches_filters(["!*foo"], "barfoo"));
|
||||
assert!(matches_filters(["!*foo"], "foobar"));
|
||||
|
||||
assert!(!matches_filters(["!@scope/foo*"], "@scope/foobar"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn multiple_globs() {
|
||||
assert!(matches_filters(["foo*", "bar*"], "foo"));
|
||||
assert!(matches_filters(["foo*", "bar*"], "bar"));
|
||||
assert!(!matches_filters(["foo*", "bar*"], "baz"));
|
||||
|
||||
assert!(matches_filters(["foo*", "!bar*"], "foo"));
|
||||
assert!(!matches_filters(["foo*", "!bar*"], "bar"));
|
||||
assert!(matches_filters(["foo*", "!bar*"], "foobar"));
|
||||
assert!(!matches_filters(["foo*", "!*bar"], "foobar"));
|
||||
assert!(!matches_filters(["foo*", "!*bar"], "baz"));
|
||||
|
||||
let filters =
|
||||
super::FilterSet::from_filter_strings(["foo*@1", "bar*@2"]).unwrap();
|
||||
|
||||
assert_eq!(
|
||||
filters.matching_filter("foo").version_spec().cloned(),
|
||||
Some(version_spec("1"))
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
filters.matching_filter("bar").version_spec().cloned(),
|
||||
Some(version_spec("2"))
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
|
@ -124,7 +124,8 @@ async fn run_with_watch(
|
|||
!watch_flags.no_clear_screen,
|
||||
),
|
||||
WatcherRestartMode::Automatic,
|
||||
move |flags, watcher_communicator, _changed_paths| {
|
||||
move |flags, watcher_communicator, changed_paths| {
|
||||
watcher_communicator.show_path_changed(changed_paths.clone());
|
||||
Ok(async move {
|
||||
let factory = CliFactory::from_flags_for_watcher(
|
||||
flags,
|
||||
|
|
|
@ -151,7 +151,8 @@ async fn serve_with_watch(
|
|||
!watch_flags.no_clear_screen,
|
||||
),
|
||||
WatcherRestartMode::Automatic,
|
||||
move |flags, watcher_communicator, _changed_paths| {
|
||||
move |flags, watcher_communicator, changed_paths| {
|
||||
watcher_communicator.show_path_changed(changed_paths.clone());
|
||||
Ok(async move {
|
||||
let factory = CliFactory::from_flags_for_watcher(
|
||||
flags,
|
||||
|
|
|
@ -1,23 +1,33 @@
|
|||
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
|
||||
|
||||
use std::borrow::Cow;
|
||||
use std::collections::HashMap;
|
||||
use std::collections::HashSet;
|
||||
use std::num::NonZeroUsize;
|
||||
use std::path::Path;
|
||||
use std::path::PathBuf;
|
||||
use std::rc::Rc;
|
||||
use std::sync::Arc;
|
||||
|
||||
use deno_config::deno_json::Task;
|
||||
use deno_config::workspace::FolderConfigs;
|
||||
use deno_config::workspace::TaskDefinition;
|
||||
use deno_config::workspace::TaskOrScript;
|
||||
use deno_config::workspace::WorkspaceDirectory;
|
||||
use deno_config::workspace::WorkspaceMemberTasksConfig;
|
||||
use deno_config::workspace::WorkspaceTasksConfig;
|
||||
use deno_core::anyhow::anyhow;
|
||||
use deno_core::anyhow::bail;
|
||||
use deno_core::anyhow::Context;
|
||||
use deno_core::error::AnyError;
|
||||
use deno_core::futures::future::LocalBoxFuture;
|
||||
use deno_core::futures::stream::futures_unordered;
|
||||
use deno_core::futures::FutureExt;
|
||||
use deno_core::futures::StreamExt;
|
||||
use deno_core::url::Url;
|
||||
use deno_path_util::normalize_path;
|
||||
use deno_runtime::deno_node::NodeResolver;
|
||||
use deno_task_shell::ShellCommand;
|
||||
use indexmap::IndexMap;
|
||||
use regex::Regex;
|
||||
|
||||
use crate::args::CliOptions;
|
||||
use crate::args::Flags;
|
||||
|
@ -28,6 +38,12 @@ use crate::npm::CliNpmResolver;
|
|||
use crate::task_runner;
|
||||
use crate::util::fs::canonicalize_path;
|
||||
|
||||
#[derive(Debug)]
|
||||
struct PackageTaskInfo {
|
||||
matched_tasks: Vec<String>,
|
||||
tasks_config: WorkspaceTasksConfig,
|
||||
}
|
||||
|
||||
pub async fn execute_script(
|
||||
flags: Arc<Flags>,
|
||||
task_flags: TaskFlags,
|
||||
|
@ -35,7 +51,7 @@ pub async fn execute_script(
|
|||
let factory = CliFactory::from_flags(flags);
|
||||
let cli_options = factory.cli_options()?;
|
||||
let start_dir = &cli_options.start_dir;
|
||||
if !start_dir.has_deno_or_pkg_json() {
|
||||
if !start_dir.has_deno_or_pkg_json() && !task_flags.eval {
|
||||
bail!("deno task couldn't find deno.json(c). See https://docs.deno.com/go/config")
|
||||
}
|
||||
let force_use_pkg_json =
|
||||
|
@ -48,155 +64,563 @@ pub async fn execute_script(
|
|||
v == "1"
|
||||
})
|
||||
.unwrap_or(false);
|
||||
let tasks_config = start_dir.to_tasks_config()?;
|
||||
let tasks_config = if force_use_pkg_json {
|
||||
tasks_config.with_only_pkg_json()
|
||||
} else {
|
||||
tasks_config
|
||||
};
|
||||
|
||||
let task_name = match &task_flags.task {
|
||||
Some(task) => task,
|
||||
None => {
|
||||
fn arg_to_regex(input: &str) -> Result<regex::Regex, regex::Error> {
|
||||
let mut regex_str = regex::escape(input);
|
||||
regex_str = regex_str.replace("\\*", ".*");
|
||||
|
||||
Regex::new(®ex_str)
|
||||
}
|
||||
|
||||
let packages_task_configs: Vec<PackageTaskInfo> = if let Some(filter) =
|
||||
&task_flags.filter
|
||||
{
|
||||
let task_name = task_flags.task.as_ref().unwrap();
|
||||
|
||||
// Filter based on package name
|
||||
let package_regex = arg_to_regex(filter)?;
|
||||
let task_regex = arg_to_regex(task_name)?;
|
||||
|
||||
let mut packages_task_info: Vec<PackageTaskInfo> = vec![];
|
||||
|
||||
fn matches_package(
|
||||
config: &FolderConfigs,
|
||||
force_use_pkg_json: bool,
|
||||
regex: &Regex,
|
||||
) -> bool {
|
||||
if !force_use_pkg_json {
|
||||
if let Some(deno_json) = &config.deno_json {
|
||||
if let Some(name) = &deno_json.json.name {
|
||||
if regex.is_match(name) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(package_json) = &config.pkg_json {
|
||||
if let Some(name) = &package_json.name {
|
||||
if regex.is_match(name) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
false
|
||||
}
|
||||
|
||||
let workspace = cli_options.workspace();
|
||||
for folder in workspace.config_folders() {
|
||||
if !matches_package(folder.1, force_use_pkg_json, &package_regex) {
|
||||
continue;
|
||||
}
|
||||
|
||||
let member_dir = workspace.resolve_member_dir(folder.0);
|
||||
let mut tasks_config = member_dir.to_tasks_config()?;
|
||||
if force_use_pkg_json {
|
||||
tasks_config = tasks_config.with_only_pkg_json();
|
||||
}
|
||||
|
||||
// Any of the matched tasks could be a child task of another matched
|
||||
// one. Therefore we need to filter these out to ensure that every
|
||||
// task is only run once.
|
||||
let mut matched: HashSet<String> = HashSet::new();
|
||||
let mut visited: HashSet<String> = HashSet::new();
|
||||
|
||||
fn visit_task(
|
||||
tasks_config: &WorkspaceTasksConfig,
|
||||
visited: &mut HashSet<String>,
|
||||
name: &str,
|
||||
) {
|
||||
if visited.contains(name) {
|
||||
return;
|
||||
}
|
||||
|
||||
visited.insert(name.to_string());
|
||||
|
||||
if let Some((_, TaskOrScript::Task(_, task))) = &tasks_config.task(name)
|
||||
{
|
||||
for dep in &task.dependencies {
|
||||
visit_task(tasks_config, visited, dep);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Match tasks in deno.json
|
||||
for name in tasks_config.task_names() {
|
||||
if task_regex.is_match(name) && !visited.contains(name) {
|
||||
matched.insert(name.to_string());
|
||||
visit_task(&tasks_config, &mut visited, name);
|
||||
}
|
||||
}
|
||||
|
||||
packages_task_info.push(PackageTaskInfo {
|
||||
matched_tasks: matched
|
||||
.iter()
|
||||
.map(|s| s.to_string())
|
||||
.collect::<Vec<_>>(),
|
||||
tasks_config,
|
||||
});
|
||||
}
|
||||
|
||||
// Logging every task definition would be too spammy. Pnpm only
|
||||
// logs a simple message too.
|
||||
if packages_task_info
|
||||
.iter()
|
||||
.all(|config| config.matched_tasks.is_empty())
|
||||
{
|
||||
log::warn!(
|
||||
"{}",
|
||||
colors::red(format!(
|
||||
"No matching task or script '{}' found in selected packages.",
|
||||
task_name
|
||||
))
|
||||
);
|
||||
return Ok(0);
|
||||
}
|
||||
|
||||
// FIXME: Sort packages topologically
|
||||
//
|
||||
|
||||
packages_task_info
|
||||
} else {
|
||||
let mut tasks_config = start_dir.to_tasks_config()?;
|
||||
|
||||
if force_use_pkg_json {
|
||||
tasks_config = tasks_config.with_only_pkg_json()
|
||||
}
|
||||
|
||||
let Some(task_name) = &task_flags.task else {
|
||||
print_available_tasks(
|
||||
&mut std::io::stdout(),
|
||||
&cli_options.start_dir,
|
||||
&tasks_config,
|
||||
)?;
|
||||
return Ok(0);
|
||||
}
|
||||
};
|
||||
|
||||
vec![PackageTaskInfo {
|
||||
tasks_config,
|
||||
matched_tasks: vec![task_name.to_string()],
|
||||
}]
|
||||
};
|
||||
|
||||
let npm_resolver = factory.npm_resolver().await?;
|
||||
let node_resolver = factory.node_resolver().await?;
|
||||
let env_vars = task_runner::real_env_vars();
|
||||
|
||||
match tasks_config.task(task_name) {
|
||||
Some((dir_url, task_or_script)) => match task_or_script {
|
||||
TaskOrScript::Task(_tasks, script) => {
|
||||
let cwd = match task_flags.cwd {
|
||||
Some(path) => canonicalize_path(&PathBuf::from(path))
|
||||
.context("failed canonicalizing --cwd")?,
|
||||
None => normalize_path(dir_url.to_file_path().unwrap()),
|
||||
};
|
||||
let no_of_concurrent_tasks = if let Ok(value) = std::env::var("DENO_JOBS") {
|
||||
value.parse::<NonZeroUsize>().ok()
|
||||
} else {
|
||||
std::thread::available_parallelism().ok()
|
||||
}
|
||||
.unwrap_or_else(|| NonZeroUsize::new(2).unwrap());
|
||||
|
||||
let custom_commands = task_runner::resolve_custom_commands(
|
||||
npm_resolver.as_ref(),
|
||||
node_resolver,
|
||||
)?;
|
||||
run_task(RunTaskOptions {
|
||||
task_name,
|
||||
script,
|
||||
cwd: &cwd,
|
||||
env_vars,
|
||||
custom_commands,
|
||||
npm_resolver: npm_resolver.as_ref(),
|
||||
cli_options,
|
||||
})
|
||||
.await
|
||||
}
|
||||
TaskOrScript::Script(scripts, _script) => {
|
||||
// ensure the npm packages are installed if using a managed resolver
|
||||
if let Some(npm_resolver) = npm_resolver.as_managed() {
|
||||
npm_resolver.ensure_top_level_package_json_install().await?;
|
||||
}
|
||||
let task_runner = TaskRunner {
|
||||
task_flags: &task_flags,
|
||||
npm_resolver: npm_resolver.as_ref(),
|
||||
node_resolver: node_resolver.as_ref(),
|
||||
env_vars,
|
||||
cli_options,
|
||||
concurrency: no_of_concurrent_tasks.into(),
|
||||
};
|
||||
|
||||
let cwd = match task_flags.cwd {
|
||||
Some(path) => canonicalize_path(&PathBuf::from(path))?,
|
||||
None => normalize_path(dir_url.to_file_path().unwrap()),
|
||||
};
|
||||
if task_flags.eval {
|
||||
return task_runner
|
||||
.run_deno_task(
|
||||
&Url::from_directory_path(cli_options.initial_cwd()).unwrap(),
|
||||
"",
|
||||
&TaskDefinition {
|
||||
command: task_flags.task.as_ref().unwrap().to_string(),
|
||||
dependencies: vec![],
|
||||
description: None,
|
||||
},
|
||||
)
|
||||
.await;
|
||||
}
|
||||
|
||||
// At this point we already checked if the task name exists in package.json.
|
||||
// We can therefore check for "pre" and "post" scripts too, since we're only
|
||||
// dealing with package.json here and not deno.json
|
||||
let task_names = vec![
|
||||
format!("pre{}", task_name),
|
||||
task_name.clone(),
|
||||
format!("post{}", task_name),
|
||||
];
|
||||
let custom_commands = task_runner::resolve_custom_commands(
|
||||
npm_resolver.as_ref(),
|
||||
node_resolver,
|
||||
)?;
|
||||
for task_name in &task_names {
|
||||
if let Some(script) = scripts.get(task_name) {
|
||||
let exit_code = run_task(RunTaskOptions {
|
||||
task_name,
|
||||
script,
|
||||
cwd: &cwd,
|
||||
env_vars: env_vars.clone(),
|
||||
custom_commands: custom_commands.clone(),
|
||||
npm_resolver: npm_resolver.as_ref(),
|
||||
cli_options,
|
||||
})
|
||||
.await?;
|
||||
if exit_code > 0 {
|
||||
return Ok(exit_code);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(0)
|
||||
}
|
||||
},
|
||||
None => {
|
||||
if task_flags.is_run {
|
||||
return Err(anyhow!("Task not found: {}", task_name));
|
||||
}
|
||||
log::error!("Task not found: {}", task_name);
|
||||
if log::log_enabled!(log::Level::Error) {
|
||||
print_available_tasks(
|
||||
&mut std::io::stderr(),
|
||||
&cli_options.start_dir,
|
||||
&tasks_config,
|
||||
)?;
|
||||
}
|
||||
Ok(1)
|
||||
for task_config in &packages_task_configs {
|
||||
let exit_code = task_runner.run_tasks(task_config).await?;
|
||||
if exit_code > 0 {
|
||||
return Ok(exit_code);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(0)
|
||||
}
|
||||
|
||||
struct RunTaskOptions<'a> {
|
||||
struct RunSingleOptions<'a> {
|
||||
task_name: &'a str,
|
||||
script: &'a str,
|
||||
cwd: &'a Path,
|
||||
env_vars: HashMap<String, String>,
|
||||
custom_commands: HashMap<String, Rc<dyn ShellCommand>>,
|
||||
npm_resolver: &'a dyn CliNpmResolver,
|
||||
cli_options: &'a CliOptions,
|
||||
}
|
||||
|
||||
async fn run_task(opts: RunTaskOptions<'_>) -> Result<i32, AnyError> {
|
||||
let RunTaskOptions {
|
||||
task_name,
|
||||
script,
|
||||
cwd,
|
||||
env_vars,
|
||||
custom_commands,
|
||||
npm_resolver,
|
||||
cli_options,
|
||||
} = opts;
|
||||
struct TaskRunner<'a> {
|
||||
task_flags: &'a TaskFlags,
|
||||
npm_resolver: &'a dyn CliNpmResolver,
|
||||
node_resolver: &'a NodeResolver,
|
||||
env_vars: HashMap<String, String>,
|
||||
cli_options: &'a CliOptions,
|
||||
concurrency: usize,
|
||||
}
|
||||
|
||||
output_task(
|
||||
opts.task_name,
|
||||
&task_runner::get_script_with_args(script, cli_options.argv()),
|
||||
);
|
||||
impl<'a> TaskRunner<'a> {
|
||||
pub async fn run_tasks(
|
||||
&self,
|
||||
pkg_tasks_config: &PackageTaskInfo,
|
||||
) -> Result<i32, deno_core::anyhow::Error> {
|
||||
match sort_tasks_topo(pkg_tasks_config) {
|
||||
Ok(sorted) => self.run_tasks_in_parallel(sorted).await,
|
||||
Err(err) => match err {
|
||||
TaskError::NotFound(name) => {
|
||||
if self.task_flags.is_run {
|
||||
return Err(anyhow!("Task not found: {}", name));
|
||||
}
|
||||
|
||||
Ok(
|
||||
task_runner::run_task(task_runner::RunTaskOptions {
|
||||
log::error!("Task not found: {}", name);
|
||||
if log::log_enabled!(log::Level::Error) {
|
||||
self.print_available_tasks(&pkg_tasks_config.tasks_config)?;
|
||||
}
|
||||
Ok(1)
|
||||
}
|
||||
TaskError::TaskDepCycle { path } => {
|
||||
log::error!("Task cycle detected: {}", path.join(" -> "));
|
||||
Ok(1)
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
pub fn print_available_tasks(
|
||||
&self,
|
||||
tasks_config: &WorkspaceTasksConfig,
|
||||
) -> Result<(), std::io::Error> {
|
||||
print_available_tasks(
|
||||
&mut std::io::stderr(),
|
||||
&self.cli_options.start_dir,
|
||||
tasks_config,
|
||||
)
|
||||
}
|
||||
|
||||
async fn run_tasks_in_parallel(
|
||||
&self,
|
||||
tasks: Vec<ResolvedTask<'a>>,
|
||||
) -> Result<i32, deno_core::anyhow::Error> {
|
||||
struct PendingTasksContext<'a> {
|
||||
completed: HashSet<usize>,
|
||||
running: HashSet<usize>,
|
||||
tasks: &'a [ResolvedTask<'a>],
|
||||
}
|
||||
|
||||
impl<'a> PendingTasksContext<'a> {
|
||||
fn has_remaining_tasks(&self) -> bool {
|
||||
self.completed.len() < self.tasks.len()
|
||||
}
|
||||
|
||||
fn mark_complete(&mut self, task: &ResolvedTask) {
|
||||
self.running.remove(&task.id);
|
||||
self.completed.insert(task.id);
|
||||
}
|
||||
|
||||
fn get_next_task<'b>(
|
||||
&mut self,
|
||||
runner: &'b TaskRunner<'b>,
|
||||
) -> Option<
|
||||
LocalBoxFuture<'b, Result<(i32, &'a ResolvedTask<'a>), AnyError>>,
|
||||
>
|
||||
where
|
||||
'a: 'b,
|
||||
{
|
||||
for task in self.tasks.iter() {
|
||||
if self.completed.contains(&task.id)
|
||||
|| self.running.contains(&task.id)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
let should_run = task
|
||||
.dependencies
|
||||
.iter()
|
||||
.all(|dep_id| self.completed.contains(dep_id));
|
||||
if !should_run {
|
||||
continue;
|
||||
}
|
||||
|
||||
self.running.insert(task.id);
|
||||
return Some(
|
||||
async move {
|
||||
match task.task_or_script {
|
||||
TaskOrScript::Task(_, def) => {
|
||||
runner.run_deno_task(task.folder_url, task.name, def).await
|
||||
}
|
||||
TaskOrScript::Script(scripts, _) => {
|
||||
runner
|
||||
.run_npm_script(task.folder_url, task.name, scripts)
|
||||
.await
|
||||
}
|
||||
}
|
||||
.map(|exit_code| (exit_code, task))
|
||||
}
|
||||
.boxed_local(),
|
||||
);
|
||||
}
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
let mut context = PendingTasksContext {
|
||||
completed: HashSet::with_capacity(tasks.len()),
|
||||
running: HashSet::with_capacity(self.concurrency),
|
||||
tasks: &tasks,
|
||||
};
|
||||
|
||||
let mut queue = futures_unordered::FuturesUnordered::new();
|
||||
|
||||
while context.has_remaining_tasks() {
|
||||
while queue.len() < self.concurrency {
|
||||
if let Some(task) = context.get_next_task(self) {
|
||||
queue.push(task);
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// If queue is empty at this point, then there are no more tasks in the queue.
|
||||
let Some(result) = queue.next().await else {
|
||||
debug_assert_eq!(context.tasks.len(), 0);
|
||||
break;
|
||||
};
|
||||
|
||||
let (exit_code, name) = result?;
|
||||
if exit_code > 0 {
|
||||
return Ok(exit_code);
|
||||
}
|
||||
|
||||
context.mark_complete(name);
|
||||
}
|
||||
|
||||
Ok(0)
|
||||
}
|
||||
|
||||
pub async fn run_deno_task(
|
||||
&self,
|
||||
dir_url: &Url,
|
||||
task_name: &str,
|
||||
definition: &TaskDefinition,
|
||||
) -> Result<i32, deno_core::anyhow::Error> {
|
||||
let cwd = match &self.task_flags.cwd {
|
||||
Some(path) => canonicalize_path(&PathBuf::from(path))
|
||||
.context("failed canonicalizing --cwd")?,
|
||||
None => normalize_path(dir_url.to_file_path().unwrap()),
|
||||
};
|
||||
|
||||
let custom_commands = task_runner::resolve_custom_commands(
|
||||
self.npm_resolver,
|
||||
self.node_resolver,
|
||||
)?;
|
||||
self
|
||||
.run_single(RunSingleOptions {
|
||||
task_name,
|
||||
script: &definition.command,
|
||||
cwd: &cwd,
|
||||
custom_commands,
|
||||
})
|
||||
.await
|
||||
}
|
||||
|
||||
pub async fn run_npm_script(
|
||||
&self,
|
||||
dir_url: &Url,
|
||||
task_name: &str,
|
||||
scripts: &IndexMap<String, String>,
|
||||
) -> Result<i32, deno_core::anyhow::Error> {
|
||||
// ensure the npm packages are installed if using a managed resolver
|
||||
if let Some(npm_resolver) = self.npm_resolver.as_managed() {
|
||||
npm_resolver.ensure_top_level_package_json_install().await?;
|
||||
}
|
||||
|
||||
let cwd = match &self.task_flags.cwd {
|
||||
Some(path) => canonicalize_path(&PathBuf::from(path))?,
|
||||
None => normalize_path(dir_url.to_file_path().unwrap()),
|
||||
};
|
||||
|
||||
// At this point we already checked if the task name exists in package.json.
|
||||
// We can therefore check for "pre" and "post" scripts too, since we're only
|
||||
// dealing with package.json here and not deno.json
|
||||
let task_names = vec![
|
||||
format!("pre{}", task_name),
|
||||
task_name.to_string(),
|
||||
format!("post{}", task_name),
|
||||
];
|
||||
let custom_commands = task_runner::resolve_custom_commands(
|
||||
self.npm_resolver,
|
||||
self.node_resolver,
|
||||
)?;
|
||||
for task_name in &task_names {
|
||||
if let Some(script) = scripts.get(task_name) {
|
||||
let exit_code = self
|
||||
.run_single(RunSingleOptions {
|
||||
task_name,
|
||||
script,
|
||||
cwd: &cwd,
|
||||
custom_commands: custom_commands.clone(),
|
||||
})
|
||||
.await?;
|
||||
if exit_code > 0 {
|
||||
return Ok(exit_code);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(0)
|
||||
}
|
||||
|
||||
async fn run_single(
|
||||
&self,
|
||||
opts: RunSingleOptions<'_>,
|
||||
) -> Result<i32, AnyError> {
|
||||
let RunSingleOptions {
|
||||
task_name,
|
||||
script,
|
||||
cwd,
|
||||
env_vars,
|
||||
custom_commands,
|
||||
init_cwd: opts.cli_options.initial_cwd(),
|
||||
argv: cli_options.argv(),
|
||||
root_node_modules_dir: npm_resolver.root_node_modules_path(),
|
||||
stdio: None,
|
||||
})
|
||||
.await?
|
||||
.exit_code,
|
||||
)
|
||||
} = opts;
|
||||
|
||||
output_task(
|
||||
opts.task_name,
|
||||
&task_runner::get_script_with_args(script, self.cli_options.argv()),
|
||||
);
|
||||
|
||||
Ok(
|
||||
task_runner::run_task(task_runner::RunTaskOptions {
|
||||
task_name,
|
||||
script,
|
||||
cwd,
|
||||
env_vars: self.env_vars.clone(),
|
||||
custom_commands,
|
||||
init_cwd: self.cli_options.initial_cwd(),
|
||||
argv: self.cli_options.argv(),
|
||||
root_node_modules_dir: self.npm_resolver.root_node_modules_path(),
|
||||
stdio: None,
|
||||
})
|
||||
.await?
|
||||
.exit_code,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
enum TaskError {
|
||||
NotFound(String),
|
||||
TaskDepCycle { path: Vec<String> },
|
||||
}
|
||||
|
||||
struct ResolvedTask<'a> {
|
||||
id: usize,
|
||||
name: &'a str,
|
||||
folder_url: &'a Url,
|
||||
task_or_script: TaskOrScript<'a>,
|
||||
dependencies: Vec<usize>,
|
||||
}
|
||||
|
||||
fn sort_tasks_topo<'a>(
|
||||
pkg_task_config: &'a PackageTaskInfo,
|
||||
) -> Result<Vec<ResolvedTask<'a>>, TaskError> {
|
||||
trait TasksConfig {
|
||||
fn task(
|
||||
&self,
|
||||
name: &str,
|
||||
) -> Option<(&Url, TaskOrScript, &dyn TasksConfig)>;
|
||||
}
|
||||
|
||||
impl TasksConfig for WorkspaceTasksConfig {
|
||||
fn task(
|
||||
&self,
|
||||
name: &str,
|
||||
) -> Option<(&Url, TaskOrScript, &dyn TasksConfig)> {
|
||||
if let Some(member) = &self.member {
|
||||
if let Some((dir_url, task_or_script)) = member.task(name) {
|
||||
return Some((dir_url, task_or_script, self as &dyn TasksConfig));
|
||||
}
|
||||
}
|
||||
if let Some(root) = &self.root {
|
||||
if let Some((dir_url, task_or_script)) = root.task(name) {
|
||||
// switch to only using the root tasks for the dependencies
|
||||
return Some((dir_url, task_or_script, root as &dyn TasksConfig));
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
impl TasksConfig for WorkspaceMemberTasksConfig {
|
||||
fn task(
|
||||
&self,
|
||||
name: &str,
|
||||
) -> Option<(&Url, TaskOrScript, &dyn TasksConfig)> {
|
||||
self.task(name).map(|(dir_url, task_or_script)| {
|
||||
(dir_url, task_or_script, self as &dyn TasksConfig)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
fn sort_visit<'a>(
|
||||
name: &'a str,
|
||||
sorted: &mut Vec<ResolvedTask<'a>>,
|
||||
mut path: Vec<(&'a Url, &'a str)>,
|
||||
tasks_config: &'a dyn TasksConfig,
|
||||
) -> Result<usize, TaskError> {
|
||||
let Some((folder_url, task_or_script, tasks_config)) =
|
||||
tasks_config.task(name)
|
||||
else {
|
||||
return Err(TaskError::NotFound(name.to_string()));
|
||||
};
|
||||
|
||||
if let Some(existing_task) = sorted
|
||||
.iter()
|
||||
.find(|task| task.name == name && task.folder_url == folder_url)
|
||||
{
|
||||
// already exists
|
||||
return Ok(existing_task.id);
|
||||
}
|
||||
|
||||
if path.contains(&(folder_url, name)) {
|
||||
path.push((folder_url, name));
|
||||
return Err(TaskError::TaskDepCycle {
|
||||
path: path.iter().map(|(_, s)| s.to_string()).collect(),
|
||||
});
|
||||
}
|
||||
|
||||
let mut dependencies: Vec<usize> = Vec::new();
|
||||
if let TaskOrScript::Task(_, task) = task_or_script {
|
||||
dependencies.reserve(task.dependencies.len());
|
||||
for dep in &task.dependencies {
|
||||
let mut path = path.clone();
|
||||
path.push((folder_url, name));
|
||||
dependencies.push(sort_visit(dep, sorted, path, tasks_config)?);
|
||||
}
|
||||
}
|
||||
|
||||
let id = sorted.len();
|
||||
sorted.push(ResolvedTask {
|
||||
id,
|
||||
name,
|
||||
folder_url,
|
||||
task_or_script,
|
||||
dependencies,
|
||||
});
|
||||
|
||||
Ok(id)
|
||||
}
|
||||
|
||||
let mut sorted: Vec<ResolvedTask<'a>> = vec![];
|
||||
|
||||
for name in &pkg_task_config.matched_tasks {
|
||||
sort_visit(name, &mut sorted, Vec::new(), &pkg_task_config.tasks_config)?;
|
||||
}
|
||||
|
||||
Ok(sorted)
|
||||
}
|
||||
|
||||
fn output_task(task_name: &str, script: &str) {
|
||||
|
@ -222,79 +646,96 @@ fn print_available_tasks(
|
|||
" {}",
|
||||
colors::red("No tasks found in configuration file")
|
||||
)?;
|
||||
} else {
|
||||
let mut seen_task_names =
|
||||
HashSet::with_capacity(tasks_config.tasks_count());
|
||||
for maybe_config in [&tasks_config.member, &tasks_config.root] {
|
||||
let Some(config) = maybe_config else {
|
||||
continue;
|
||||
};
|
||||
for (is_root, is_deno, (key, task)) in config
|
||||
.deno_json
|
||||
.as_ref()
|
||||
.map(|config| {
|
||||
let is_root = !is_cwd_root_dir
|
||||
&& config.folder_url
|
||||
== *workspace_dir.workspace.root_dir().as_ref();
|
||||
config
|
||||
.tasks
|
||||
.iter()
|
||||
.map(move |(k, t)| (is_root, true, (k, Cow::Borrowed(t))))
|
||||
})
|
||||
.into_iter()
|
||||
.flatten()
|
||||
.chain(
|
||||
config
|
||||
.package_json
|
||||
.as_ref()
|
||||
.map(|config| {
|
||||
let is_root = !is_cwd_root_dir
|
||||
&& config.folder_url
|
||||
== *workspace_dir.workspace.root_dir().as_ref();
|
||||
config.tasks.iter().map(move |(k, v)| {
|
||||
(is_root, false, (k, Cow::Owned(Task::Definition(v.clone()))))
|
||||
})
|
||||
})
|
||||
.into_iter()
|
||||
.flatten(),
|
||||
)
|
||||
{
|
||||
if !seen_task_names.insert(key) {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
struct AvailableTaskDescription {
|
||||
is_root: bool,
|
||||
is_deno: bool,
|
||||
name: String,
|
||||
task: TaskDefinition,
|
||||
}
|
||||
let mut seen_task_names = HashSet::with_capacity(tasks_config.tasks_count());
|
||||
let mut task_descriptions = Vec::with_capacity(tasks_config.tasks_count());
|
||||
|
||||
for maybe_config in [&tasks_config.member, &tasks_config.root] {
|
||||
let Some(config) = maybe_config else {
|
||||
continue;
|
||||
};
|
||||
|
||||
if let Some(config) = config.deno_json.as_ref() {
|
||||
let is_root = !is_cwd_root_dir
|
||||
&& config.folder_url == *workspace_dir.workspace.root_dir().as_ref();
|
||||
|
||||
for (name, definition) in &config.tasks {
|
||||
if !seen_task_names.insert(name) {
|
||||
continue; // already seen
|
||||
}
|
||||
writeln!(
|
||||
writer,
|
||||
"- {}{}",
|
||||
colors::cyan(key),
|
||||
if is_root {
|
||||
if is_deno {
|
||||
format!(" {}", colors::italic_gray("(workspace)"))
|
||||
} else {
|
||||
format!(" {}", colors::italic_gray("(workspace package.json)"))
|
||||
}
|
||||
} else if is_deno {
|
||||
"".to_string()
|
||||
} else {
|
||||
format!(" {}", colors::italic_gray("(package.json)"))
|
||||
}
|
||||
)?;
|
||||
let definition = match task.as_ref() {
|
||||
Task::Definition(definition) => definition,
|
||||
Task::Commented { definition, .. } => definition,
|
||||
};
|
||||
if let Task::Commented { comments, .. } = task.as_ref() {
|
||||
let slash_slash = colors::italic_gray("//");
|
||||
for comment in comments {
|
||||
writeln!(
|
||||
writer,
|
||||
" {slash_slash} {}",
|
||||
colors::italic_gray(comment)
|
||||
)?;
|
||||
}
|
||||
}
|
||||
writeln!(writer, " {definition}")?;
|
||||
task_descriptions.push(AvailableTaskDescription {
|
||||
is_root,
|
||||
is_deno: true,
|
||||
name: name.to_string(),
|
||||
task: definition.clone(),
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(config) = config.package_json.as_ref() {
|
||||
let is_root = !is_cwd_root_dir
|
||||
&& config.folder_url == *workspace_dir.workspace.root_dir().as_ref();
|
||||
for (name, script) in &config.tasks {
|
||||
if !seen_task_names.insert(name) {
|
||||
continue; // already seen
|
||||
}
|
||||
|
||||
task_descriptions.push(AvailableTaskDescription {
|
||||
is_root,
|
||||
is_deno: false,
|
||||
name: name.to_string(),
|
||||
task: deno_config::deno_json::TaskDefinition {
|
||||
command: script.to_string(),
|
||||
dependencies: vec![],
|
||||
description: None,
|
||||
},
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for desc in task_descriptions {
|
||||
writeln!(
|
||||
writer,
|
||||
"- {}{}",
|
||||
colors::cyan(desc.name),
|
||||
if desc.is_root {
|
||||
if desc.is_deno {
|
||||
format!(" {}", colors::italic_gray("(workspace)"))
|
||||
} else {
|
||||
format!(" {}", colors::italic_gray("(workspace package.json)"))
|
||||
}
|
||||
} else if desc.is_deno {
|
||||
"".to_string()
|
||||
} else {
|
||||
format!(" {}", colors::italic_gray("(package.json)"))
|
||||
}
|
||||
)?;
|
||||
if let Some(description) = &desc.task.description {
|
||||
let slash_slash = colors::italic_gray("//");
|
||||
writeln!(
|
||||
writer,
|
||||
" {slash_slash} {}",
|
||||
colors::italic_gray(description)
|
||||
)?;
|
||||
}
|
||||
writeln!(writer, " {}", desc.task.command)?;
|
||||
if !desc.task.dependencies.is_empty() {
|
||||
writeln!(
|
||||
writer,
|
||||
" {} {}",
|
||||
colors::gray("depends on:"),
|
||||
colors::cyan(desc.task.dependencies.join(", "))
|
||||
)?;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
|
|
|
@ -1661,6 +1661,7 @@ pub async fn run_tests_with_watch(
|
|||
),
|
||||
move |flags, watcher_communicator, changed_paths| {
|
||||
let test_flags = test_flags.clone();
|
||||
watcher_communicator.show_path_changed(changed_paths.clone());
|
||||
Ok(async move {
|
||||
let factory = CliFactory::from_flags_for_watcher(
|
||||
flags,
|
||||
|
|
|
@ -450,6 +450,12 @@ delete Object.prototype.__proto__;
|
|||
// We specify the resolution mode to be CommonJS for some npm files and this
|
||||
// diagnostic gets generated even though we're using custom module resolution.
|
||||
1452,
|
||||
// Module '...' cannot be imported using this construct. The specifier only resolves to an
|
||||
// ES module, which cannot be imported with 'require'.
|
||||
1471,
|
||||
// TS1479: The current file is a CommonJS module whose imports will produce 'require' calls;
|
||||
// however, the referenced file is an ECMAScript module and cannot be imported with 'require'.
|
||||
1479,
|
||||
// TS2306: File '.../index.d.ts' is not a module.
|
||||
// We get this for `x-typescript-types` declaration files which don't export
|
||||
// anything. We prefer to treat these as modules with no exports.
|
||||
|
|
6
cli/tsc/dts/lib.deno.ns.d.ts
vendored
6
cli/tsc/dts/lib.deno.ns.d.ts
vendored
|
@ -4535,7 +4535,7 @@ declare namespace Deno {
|
|||
/** The object that is returned from a {@linkcode Deno.upgradeWebSocket}
|
||||
* request.
|
||||
*
|
||||
* @category Web Sockets */
|
||||
* @category WebSockets */
|
||||
export interface WebSocketUpgrade {
|
||||
/** The response object that represents the HTTP response to the client,
|
||||
* which should be used to the {@linkcode RequestEvent} `.respondWith()` for
|
||||
|
@ -4549,7 +4549,7 @@ declare namespace Deno {
|
|||
/** Options which can be set when performing a
|
||||
* {@linkcode Deno.upgradeWebSocket} upgrade of a {@linkcode Request}
|
||||
*
|
||||
* @category Web Sockets */
|
||||
* @category WebSockets */
|
||||
export interface UpgradeWebSocketOptions {
|
||||
/** Sets the `.protocol` property on the client side web socket to the
|
||||
* value provided here, which should be one of the strings specified in the
|
||||
|
@ -4597,7 +4597,7 @@ declare namespace Deno {
|
|||
* This operation does not yet consume the request or open the websocket. This
|
||||
* only happens once the returned response has been passed to `respondWith()`.
|
||||
*
|
||||
* @category Web Sockets
|
||||
* @category WebSockets
|
||||
*/
|
||||
export function upgradeWebSocket(
|
||||
request: Request,
|
||||
|
|
58
cli/tsc/dts/lib.deno.shared_globals.d.ts
vendored
58
cli/tsc/dts/lib.deno.shared_globals.d.ts
vendored
|
@ -15,14 +15,14 @@
|
|||
/// <reference lib="deno.crypto" />
|
||||
/// <reference lib="deno.ns" />
|
||||
|
||||
/** @category WASM */
|
||||
/** @category Wasm */
|
||||
declare namespace WebAssembly {
|
||||
/**
|
||||
* The `WebAssembly.CompileError` object indicates an error during WebAssembly decoding or validation.
|
||||
*
|
||||
* [MDN](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/WebAssembly/CompileError)
|
||||
*
|
||||
* @category WASM
|
||||
* @category Wasm
|
||||
*/
|
||||
export class CompileError extends Error {
|
||||
/** Creates a new `WebAssembly.CompileError` object. */
|
||||
|
@ -36,7 +36,7 @@ declare namespace WebAssembly {
|
|||
*
|
||||
* [MDN](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/WebAssembly/Global)
|
||||
*
|
||||
* @category WASM
|
||||
* @category Wasm
|
||||
*/
|
||||
export class Global {
|
||||
/** Creates a new `Global` object. */
|
||||
|
@ -59,7 +59,7 @@ declare namespace WebAssembly {
|
|||
*
|
||||
* [MDN](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/WebAssembly/Instance)
|
||||
*
|
||||
* @category WASM
|
||||
* @category Wasm
|
||||
*/
|
||||
export class Instance {
|
||||
/** Creates a new Instance object. */
|
||||
|
@ -79,7 +79,7 @@ declare namespace WebAssembly {
|
|||
*
|
||||
* [MDN](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/WebAssembly/LinkError)
|
||||
*
|
||||
* @category WASM
|
||||
* @category Wasm
|
||||
*/
|
||||
export class LinkError extends Error {
|
||||
/** Creates a new WebAssembly.LinkError object. */
|
||||
|
@ -95,7 +95,7 @@ declare namespace WebAssembly {
|
|||
*
|
||||
* [MDN](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/WebAssembly/Memory)
|
||||
*
|
||||
* @category WASM
|
||||
* @category Wasm
|
||||
*/
|
||||
export class Memory {
|
||||
/** Creates a new `Memory` object. */
|
||||
|
@ -117,7 +117,7 @@ declare namespace WebAssembly {
|
|||
*
|
||||
* [MDN](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/WebAssembly/Module)
|
||||
*
|
||||
* @category WASM
|
||||
* @category Wasm
|
||||
*/
|
||||
export class Module {
|
||||
/** Creates a new `Module` object. */
|
||||
|
@ -145,7 +145,7 @@ declare namespace WebAssembly {
|
|||
*
|
||||
* [MDN](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/WebAssembly/RuntimeError)
|
||||
*
|
||||
* @category WASM
|
||||
* @category Wasm
|
||||
*/
|
||||
export class RuntimeError extends Error {
|
||||
/** Creates a new `WebAssembly.RuntimeError` object. */
|
||||
|
@ -160,7 +160,7 @@ declare namespace WebAssembly {
|
|||
*
|
||||
* [MDN](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/WebAssembly/Table)
|
||||
*
|
||||
* @category WASM
|
||||
* @category Wasm
|
||||
*/
|
||||
export class Table {
|
||||
/** Creates a new `Table` object. */
|
||||
|
@ -182,7 +182,7 @@ declare namespace WebAssembly {
|
|||
/** The `GlobalDescriptor` describes the options you can pass to
|
||||
* `new WebAssembly.Global()`.
|
||||
*
|
||||
* @category WASM
|
||||
* @category Wasm
|
||||
*/
|
||||
export interface GlobalDescriptor {
|
||||
mutable?: boolean;
|
||||
|
@ -192,7 +192,7 @@ declare namespace WebAssembly {
|
|||
/** The `MemoryDescriptor` describes the options you can pass to
|
||||
* `new WebAssembly.Memory()`.
|
||||
*
|
||||
* @category WASM
|
||||
* @category Wasm
|
||||
*/
|
||||
export interface MemoryDescriptor {
|
||||
initial: number;
|
||||
|
@ -203,7 +203,7 @@ declare namespace WebAssembly {
|
|||
/** A `ModuleExportDescriptor` is the description of a declared export in a
|
||||
* `WebAssembly.Module`.
|
||||
*
|
||||
* @category WASM
|
||||
* @category Wasm
|
||||
*/
|
||||
export interface ModuleExportDescriptor {
|
||||
kind: ImportExportKind;
|
||||
|
@ -213,7 +213,7 @@ declare namespace WebAssembly {
|
|||
/** A `ModuleImportDescriptor` is the description of a declared import in a
|
||||
* `WebAssembly.Module`.
|
||||
*
|
||||
* @category WASM
|
||||
* @category Wasm
|
||||
*/
|
||||
export interface ModuleImportDescriptor {
|
||||
kind: ImportExportKind;
|
||||
|
@ -224,7 +224,7 @@ declare namespace WebAssembly {
|
|||
/** The `TableDescriptor` describes the options you can pass to
|
||||
* `new WebAssembly.Table()`.
|
||||
*
|
||||
* @category WASM
|
||||
* @category Wasm
|
||||
*/
|
||||
export interface TableDescriptor {
|
||||
element: TableKind;
|
||||
|
@ -234,7 +234,7 @@ declare namespace WebAssembly {
|
|||
|
||||
/** The value returned from `WebAssembly.instantiate`.
|
||||
*
|
||||
* @category WASM
|
||||
* @category Wasm
|
||||
*/
|
||||
export interface WebAssemblyInstantiatedSource {
|
||||
/* A `WebAssembly.Instance` object that contains all the exported WebAssembly functions. */
|
||||
|
@ -247,21 +247,21 @@ declare namespace WebAssembly {
|
|||
module: Module;
|
||||
}
|
||||
|
||||
/** @category WASM */
|
||||
/** @category Wasm */
|
||||
export type ImportExportKind = "function" | "global" | "memory" | "table";
|
||||
/** @category WASM */
|
||||
/** @category Wasm */
|
||||
export type TableKind = "anyfunc";
|
||||
/** @category WASM */
|
||||
/** @category Wasm */
|
||||
export type ValueType = "f32" | "f64" | "i32" | "i64";
|
||||
/** @category WASM */
|
||||
/** @category Wasm */
|
||||
export type ExportValue = Function | Global | Memory | Table;
|
||||
/** @category WASM */
|
||||
/** @category Wasm */
|
||||
export type Exports = Record<string, ExportValue>;
|
||||
/** @category WASM */
|
||||
/** @category Wasm */
|
||||
export type ImportValue = ExportValue | number;
|
||||
/** @category WASM */
|
||||
/** @category Wasm */
|
||||
export type ModuleImports = Record<string, ImportValue>;
|
||||
/** @category WASM */
|
||||
/** @category Wasm */
|
||||
export type Imports = Record<string, ModuleImports>;
|
||||
|
||||
/**
|
||||
|
@ -272,7 +272,7 @@ declare namespace WebAssembly {
|
|||
*
|
||||
* [MDN](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/WebAssembly/compile)
|
||||
*
|
||||
* @category WASM
|
||||
* @category Wasm
|
||||
*/
|
||||
export function compile(bytes: BufferSource): Promise<Module>;
|
||||
|
||||
|
@ -284,7 +284,7 @@ declare namespace WebAssembly {
|
|||
*
|
||||
* [MDN](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/WebAssembly/compileStreaming)
|
||||
*
|
||||
* @category WASM
|
||||
* @category Wasm
|
||||
*/
|
||||
export function compileStreaming(
|
||||
source: Response | Promise<Response>,
|
||||
|
@ -301,7 +301,7 @@ declare namespace WebAssembly {
|
|||
*
|
||||
* [MDN](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/WebAssembly/instantiate)
|
||||
*
|
||||
* @category WASM
|
||||
* @category Wasm
|
||||
*/
|
||||
export function instantiate(
|
||||
bytes: BufferSource,
|
||||
|
@ -318,7 +318,7 @@ declare namespace WebAssembly {
|
|||
*
|
||||
* [MDN](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/WebAssembly/instantiate)
|
||||
*
|
||||
* @category WASM
|
||||
* @category Wasm
|
||||
*/
|
||||
export function instantiate(
|
||||
moduleObject: Module,
|
||||
|
@ -332,7 +332,7 @@ declare namespace WebAssembly {
|
|||
*
|
||||
* [MDN](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/WebAssembly/instantiateStreaming)
|
||||
*
|
||||
* @category WASM
|
||||
* @category Wasm
|
||||
*/
|
||||
export function instantiateStreaming(
|
||||
response: Response | PromiseLike<Response>,
|
||||
|
@ -346,7 +346,7 @@ declare namespace WebAssembly {
|
|||
*
|
||||
* [MDN](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/WebAssembly/validate)
|
||||
*
|
||||
* @category WASM
|
||||
* @category Wasm
|
||||
*/
|
||||
export function validate(bytes: BufferSource): boolean;
|
||||
}
|
||||
|
|
149
cli/tsc/dts/lib.deno.unstable.d.ts
vendored
149
cli/tsc/dts/lib.deno.unstable.d.ts
vendored
|
@ -1180,6 +1180,32 @@ declare namespace Deno {
|
|||
...values: unknown[]
|
||||
): Displayable;
|
||||
|
||||
/**
|
||||
* Display a JPG or PNG image.
|
||||
*
|
||||
* ```
|
||||
* Deno.jupyter.image("./cat.jpg");
|
||||
* Deno.jupyter.image("./dog.png");
|
||||
* ```
|
||||
*
|
||||
* @category Jupyter
|
||||
* @experimental
|
||||
*/
|
||||
export function image(path: string): Displayable;
|
||||
|
||||
/**
|
||||
* Display a JPG or PNG image.
|
||||
*
|
||||
* ```
|
||||
* const img = Deno.readFileSync("./cat.jpg");
|
||||
* Deno.jupyter.image(img);
|
||||
* ```
|
||||
*
|
||||
* @category Jupyter
|
||||
* @experimental
|
||||
*/
|
||||
export function image(data: Uint8Array): Displayable;
|
||||
|
||||
/**
|
||||
* Format an object for displaying in Deno
|
||||
*
|
||||
|
@ -1226,80 +1252,53 @@ declare namespace Deno {
|
|||
}
|
||||
|
||||
/**
|
||||
* **UNSTABLE**: New API, yet to be vetted.
|
||||
*
|
||||
* APIs for working with the OpenTelemetry observability framework. Deno can
|
||||
* export traces, metrics, and logs to OpenTelemetry compatible backends via
|
||||
* the OTLP protocol.
|
||||
*
|
||||
* Deno automatically instruments the runtime with OpenTelemetry traces and
|
||||
* metrics. This data is exported via OTLP to OpenTelemetry compatible
|
||||
* backends. User logs from the `console` API are exported as OpenTelemetry
|
||||
* logs via OTLP.
|
||||
*
|
||||
* User code can also create custom traces, metrics, and logs using the
|
||||
* OpenTelemetry API. This is done using the official OpenTelemetry package
|
||||
* for JavaScript:
|
||||
* [`npm:@opentelemetry/api`](https://opentelemetry.io/docs/languages/js/).
|
||||
* Deno integrates with this package to provide trace context propagation
|
||||
* between native Deno APIs (like `Deno.serve` or `fetch`) and custom user
|
||||
* code. Deno also provides APIs that allow exporting custom telemetry data
|
||||
* via the same OTLP channel used by the Deno runtime. This is done using the
|
||||
* [`jsr:@deno/otel`](https://jsr.io/@deno/otel) package.
|
||||
*
|
||||
* @example Using OpenTelemetry API to create custom traces
|
||||
* ```ts,ignore
|
||||
* import { trace } from "npm:@opentelemetry/api@1";
|
||||
* import "jsr:@deno/otel@0.0.2/register";
|
||||
*
|
||||
* const tracer = trace.getTracer("example-tracer");
|
||||
*
|
||||
* async function doWork() {
|
||||
* return tracer.startActiveSpan("doWork", async (span) => {
|
||||
* span.setAttribute("key", "value");
|
||||
* await new Promise((resolve) => setTimeout(resolve, 1000));
|
||||
* span.end();
|
||||
* });
|
||||
* }
|
||||
*
|
||||
* Deno.serve(async (req) => {
|
||||
* await doWork();
|
||||
* const resp = await fetch("https://example.com");
|
||||
* return resp;
|
||||
* });
|
||||
* ```
|
||||
*
|
||||
* @category Telemetry
|
||||
* @experimental
|
||||
*/
|
||||
export namespace tracing {
|
||||
/**
|
||||
* Whether tracing is enabled.
|
||||
* @category Telemetry
|
||||
* @experimental
|
||||
*/
|
||||
export const enabled: boolean;
|
||||
|
||||
/**
|
||||
* Allowed attribute type.
|
||||
* @category Telemetry
|
||||
* @experimental
|
||||
*/
|
||||
export type AttributeValue = string | number | boolean | bigint;
|
||||
|
||||
/**
|
||||
* A tracing span.
|
||||
* @category Telemetry
|
||||
* @experimental
|
||||
*/
|
||||
export class Span implements Disposable {
|
||||
readonly traceId: string;
|
||||
readonly spanId: string;
|
||||
readonly parentSpanId: string;
|
||||
readonly kind: string;
|
||||
readonly name: string;
|
||||
readonly startTime: number;
|
||||
readonly endTime: number;
|
||||
readonly status: null | { code: 1 } | { code: 2; message: string };
|
||||
readonly attributes: Record<string, AttributeValue>;
|
||||
readonly traceFlags: number;
|
||||
|
||||
/**
|
||||
* Construct a new Span and enter it as the "current" span.
|
||||
*/
|
||||
constructor(
|
||||
name: string,
|
||||
kind?: "internal" | "server" | "client" | "producer" | "consumer",
|
||||
);
|
||||
|
||||
/**
|
||||
* Set an attribute on this span.
|
||||
*/
|
||||
setAttribute(
|
||||
name: string,
|
||||
value: AttributeValue,
|
||||
): void;
|
||||
|
||||
/**
|
||||
* Enter this span as the "current" span.
|
||||
*/
|
||||
enter(): void;
|
||||
|
||||
/**
|
||||
* Exit this span as the "current" span and restore the previous one.
|
||||
*/
|
||||
exit(): void;
|
||||
|
||||
/**
|
||||
* End this span, and exit it as the "current" span.
|
||||
*/
|
||||
end(): void;
|
||||
|
||||
[Symbol.dispose](): void;
|
||||
|
||||
/**
|
||||
* Get the "current" span, if one exists.
|
||||
*/
|
||||
static current(): Span | undefined | null;
|
||||
}
|
||||
|
||||
export namespace telemetry {
|
||||
/**
|
||||
* A SpanExporter compatible with OpenTelemetry.js
|
||||
* https://open-telemetry.github.io/opentelemetry-js/interfaces/_opentelemetry_sdk_trace_base.SpanExporter.html
|
||||
|
@ -1319,14 +1318,6 @@ declare namespace Deno {
|
|||
export {}; // only export exports
|
||||
}
|
||||
|
||||
/**
|
||||
* @category Telemetry
|
||||
* @experimental
|
||||
*/
|
||||
export namespace metrics {
|
||||
export {}; // only export exports
|
||||
}
|
||||
|
||||
export {}; // only export exports
|
||||
}
|
||||
|
||||
|
|
|
@ -650,6 +650,10 @@ fn op_load_inner(
|
|||
media_type = MediaType::Json;
|
||||
Some(Cow::Borrowed(&*module.source))
|
||||
}
|
||||
Module::Wasm(module) => {
|
||||
media_type = MediaType::Dts;
|
||||
Some(Cow::Borrowed(&*module.source_dts))
|
||||
}
|
||||
Module::Npm(_) | Module::Node(_) => None,
|
||||
Module::External(module) => {
|
||||
// means it's Deno code importing an npm module
|
||||
|
@ -889,6 +893,9 @@ fn resolve_graph_specifier_types(
|
|||
Some(Module::Json(module)) => {
|
||||
Ok(Some((module.specifier.clone(), module.media_type)))
|
||||
}
|
||||
Some(Module::Wasm(module)) => {
|
||||
Ok(Some((module.specifier.clone(), MediaType::Dmts)))
|
||||
}
|
||||
Some(Module::Npm(module)) => {
|
||||
if let Some(npm) = &state.maybe_npm.as_ref() {
|
||||
let package_folder = npm
|
||||
|
@ -1196,7 +1203,7 @@ mod tests {
|
|||
.context("Unable to get CWD")
|
||||
.unwrap(),
|
||||
);
|
||||
let mut op_state = OpState::new(None);
|
||||
let mut op_state = OpState::new(None, None);
|
||||
op_state.put(state);
|
||||
op_state
|
||||
}
|
||||
|
|
|
@ -127,19 +127,12 @@ impl PrintConfig {
|
|||
}
|
||||
}
|
||||
|
||||
fn create_print_after_restart_fn(
|
||||
banner: &'static str,
|
||||
clear_screen: bool,
|
||||
) -> impl Fn() {
|
||||
fn create_print_after_restart_fn(clear_screen: bool) -> impl Fn() {
|
||||
move || {
|
||||
#[allow(clippy::print_stderr)]
|
||||
if clear_screen && std::io::stderr().is_terminal() {
|
||||
eprint!("{}", CLEAR_SCREEN);
|
||||
}
|
||||
info!(
|
||||
"{} File change detected! Restarting!",
|
||||
colors::intense_blue(banner),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -187,7 +180,17 @@ impl WatcherCommunicator {
|
|||
}
|
||||
|
||||
pub fn print(&self, msg: String) {
|
||||
log::info!("{} {}", self.banner, msg);
|
||||
log::info!("{} {}", self.banner, colors::gray(msg));
|
||||
}
|
||||
|
||||
pub fn show_path_changed(&self, changed_paths: Option<Vec<PathBuf>>) {
|
||||
if let Some(paths) = changed_paths {
|
||||
if !paths.is_empty() {
|
||||
self.print(format!("Restarting! File change detected: {:?}", paths[0]))
|
||||
} else {
|
||||
self.print("Restarting! File change detected.".to_string())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -263,7 +266,7 @@ where
|
|||
clear_screen,
|
||||
} = print_config;
|
||||
|
||||
let print_after_restart = create_print_after_restart_fn(banner, clear_screen);
|
||||
let print_after_restart = create_print_after_restart_fn(clear_screen);
|
||||
let watcher_communicator = Arc::new(WatcherCommunicator {
|
||||
paths_to_watch_tx: paths_to_watch_tx.clone(),
|
||||
changed_paths_rx: changed_paths_rx.resubscribe(),
|
||||
|
|
|
@ -83,6 +83,15 @@ pub trait HmrRunner: Send + Sync {
|
|||
async fn run(&mut self) -> Result<(), AnyError>;
|
||||
}
|
||||
|
||||
pub trait CliCodeCache: code_cache::CodeCache {
|
||||
/// Gets if the code cache is still enabled.
|
||||
fn enabled(&self) -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
fn as_code_cache(self: Arc<Self>) -> Arc<dyn code_cache::CodeCache>;
|
||||
}
|
||||
|
||||
#[async_trait::async_trait(?Send)]
|
||||
pub trait CoverageCollector: Send + Sync {
|
||||
async fn start_collecting(&mut self) -> Result<(), AnyError>;
|
||||
|
@ -127,7 +136,7 @@ pub struct CliMainWorkerOptions {
|
|||
struct SharedWorkerState {
|
||||
blob_store: Arc<BlobStore>,
|
||||
broadcast_channel: InMemoryBroadcastChannel,
|
||||
code_cache: Option<Arc<dyn code_cache::CodeCache>>,
|
||||
code_cache: Option<Arc<dyn CliCodeCache>>,
|
||||
compiled_wasm_module_store: CompiledWasmModuleStore,
|
||||
feature_checker: Arc<FeatureChecker>,
|
||||
fs: Arc<dyn deno_fs::FileSystem>,
|
||||
|
@ -384,6 +393,13 @@ impl CliMainWorker {
|
|||
}
|
||||
}
|
||||
|
||||
// TODO(bartlomieju): this should be moved to some other place, added to avoid string
|
||||
// duplication between worker setups and `deno info` output.
|
||||
pub fn get_cache_storage_dir() -> PathBuf {
|
||||
// Note: we currently use temp_dir() to avoid managing storage size.
|
||||
std::env::temp_dir().join("deno_cache")
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct CliMainWorkerFactory {
|
||||
shared: Arc<SharedWorkerState>,
|
||||
|
@ -393,7 +409,7 @@ impl CliMainWorkerFactory {
|
|||
#[allow(clippy::too_many_arguments)]
|
||||
pub fn new(
|
||||
blob_store: Arc<BlobStore>,
|
||||
code_cache: Option<Arc<dyn code_cache::CodeCache>>,
|
||||
code_cache: Option<Arc<dyn CliCodeCache>>,
|
||||
feature_checker: Arc<FeatureChecker>,
|
||||
fs: Arc<dyn deno_fs::FileSystem>,
|
||||
maybe_file_watcher_communicator: Option<Arc<WatcherCommunicator>>,
|
||||
|
@ -520,10 +536,7 @@ impl CliMainWorkerFactory {
|
|||
});
|
||||
let cache_storage_dir = maybe_storage_key.map(|key| {
|
||||
// TODO(@satyarohith): storage quota management
|
||||
// Note: we currently use temp_dir() to avoid managing storage size.
|
||||
std::env::temp_dir()
|
||||
.join("deno_cache")
|
||||
.join(checksum::gen(&[key.as_bytes()]))
|
||||
get_cache_storage_dir().join(checksum::gen(&[key.as_bytes()]))
|
||||
});
|
||||
|
||||
// TODO(bartlomieju): this is cruft, update FeatureChecker to spit out
|
||||
|
@ -554,7 +567,7 @@ impl CliMainWorkerFactory {
|
|||
),
|
||||
feature_checker,
|
||||
permissions,
|
||||
v8_code_cache: shared.code_cache.clone(),
|
||||
v8_code_cache: shared.code_cache.clone().map(|c| c.as_code_cache()),
|
||||
};
|
||||
|
||||
let options = WorkerOptions {
|
||||
|
@ -604,6 +617,8 @@ impl CliMainWorkerFactory {
|
|||
origin_storage_dir,
|
||||
stdio,
|
||||
skip_op_registration: shared.options.skip_op_registration,
|
||||
enable_stack_trace_arg_in_ops: crate::args::has_trace_permissions_enabled(
|
||||
),
|
||||
};
|
||||
|
||||
let mut worker = MainWorker::bootstrap_from_options(
|
||||
|
@ -720,10 +735,7 @@ fn create_web_worker_callback(
|
|||
.resolve_storage_key(&args.main_module);
|
||||
let cache_storage_dir = maybe_storage_key.map(|key| {
|
||||
// TODO(@satyarohith): storage quota management
|
||||
// Note: we currently use temp_dir() to avoid managing storage size.
|
||||
std::env::temp_dir()
|
||||
.join("deno_cache")
|
||||
.join(checksum::gen(&[key.as_bytes()]))
|
||||
get_cache_storage_dir().join(checksum::gen(&[key.as_bytes()]))
|
||||
});
|
||||
|
||||
// TODO(bartlomieju): this is cruft, update FeatureChecker to spit out
|
||||
|
@ -803,6 +815,8 @@ fn create_web_worker_callback(
|
|||
strace_ops: shared.options.strace_ops.clone(),
|
||||
close_on_idle: args.close_on_idle,
|
||||
maybe_worker_metadata: args.maybe_worker_metadata,
|
||||
enable_stack_trace_arg_in_ops: crate::args::has_trace_permissions_enabled(
|
||||
),
|
||||
};
|
||||
|
||||
WebWorker::bootstrap_from_options(services, options)
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
|
||||
[package]
|
||||
name = "deno_broadcast_channel"
|
||||
version = "0.171.0"
|
||||
version = "0.173.0"
|
||||
authors.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
|
|
2
ext/cache/Cargo.toml
vendored
2
ext/cache/Cargo.toml
vendored
|
@ -2,7 +2,7 @@
|
|||
|
||||
[package]
|
||||
name = "deno_cache"
|
||||
version = "0.109.0"
|
||||
version = "0.111.0"
|
||||
authors.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
|
|
9
ext/cache/sqlite.rs
vendored
9
ext/cache/sqlite.rs
vendored
|
@ -8,6 +8,7 @@ use std::time::SystemTime;
|
|||
use std::time::UNIX_EPOCH;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use deno_core::anyhow::Context;
|
||||
use deno_core::error::AnyError;
|
||||
use deno_core::futures::future::poll_fn;
|
||||
use deno_core::parking_lot::Mutex;
|
||||
|
@ -45,7 +46,13 @@ impl SqliteBackedCache {
|
|||
pub fn new(cache_storage_dir: PathBuf) -> Result<Self, CacheError> {
|
||||
{
|
||||
std::fs::create_dir_all(&cache_storage_dir)
|
||||
.expect("failed to create cache dir");
|
||||
.with_context(|| {
|
||||
format!(
|
||||
"Failed to create cache storage directory {}",
|
||||
cache_storage_dir.display()
|
||||
)
|
||||
})
|
||||
.map_err(CacheError::Other)?;
|
||||
let path = cache_storage_dir.join("cache_metadata.db");
|
||||
let connection = rusqlite::Connection::open(&path).unwrap_or_else(|_| {
|
||||
panic!("failed to open cache db at {}", path.display())
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
|
||||
[package]
|
||||
name = "deno_canvas"
|
||||
version = "0.46.0"
|
||||
version = "0.48.0"
|
||||
authors.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
|
||||
[package]
|
||||
name = "deno_console"
|
||||
version = "0.177.0"
|
||||
version = "0.179.0"
|
||||
authors.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
|
|
3
ext/console/internal.d.ts
vendored
3
ext/console/internal.d.ts
vendored
|
@ -9,4 +9,7 @@ declare module "ext:deno_console/01_console.js" {
|
|||
keys: (keyof TObject)[];
|
||||
evaluate: boolean;
|
||||
}): Record<string, unknown>;
|
||||
|
||||
class Console {
|
||||
}
|
||||
}
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
|
||||
[package]
|
||||
name = "deno_cron"
|
||||
version = "0.57.0"
|
||||
version = "0.59.0"
|
||||
authors.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
|
||||
[package]
|
||||
name = "deno_crypto"
|
||||
version = "0.191.0"
|
||||
version = "0.193.0"
|
||||
authors.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
|
||||
[package]
|
||||
name = "deno_fetch"
|
||||
version = "0.201.0"
|
||||
version = "0.203.0"
|
||||
authors.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
|
|
|
@ -67,6 +67,7 @@ use http_body_util::BodyExt;
|
|||
use hyper::body::Frame;
|
||||
use hyper_util::client::legacy::connect::HttpConnector;
|
||||
use hyper_util::client::legacy::connect::HttpInfo;
|
||||
use hyper_util::client::legacy::Builder as HyperClientBuilder;
|
||||
use hyper_util::rt::TokioExecutor;
|
||||
use hyper_util::rt::TokioTimer;
|
||||
use serde::Deserialize;
|
||||
|
@ -85,6 +86,16 @@ pub struct Options {
|
|||
pub user_agent: String,
|
||||
pub root_cert_store_provider: Option<Arc<dyn RootCertStoreProvider>>,
|
||||
pub proxy: Option<Proxy>,
|
||||
/// A callback to customize HTTP client configuration.
|
||||
///
|
||||
/// The settings applied with this hook may be overridden by the options
|
||||
/// provided through `Deno.createHttpClient()` API. For instance, if the hook
|
||||
/// calls [`hyper_util::client::legacy::Builder::pool_max_idle_per_host`] with
|
||||
/// a value of 99, and a user calls `Deno.createHttpClient({ poolMaxIdlePerHost: 42 })`,
|
||||
/// the value that will take effect is 42.
|
||||
///
|
||||
/// For more info on what can be configured, see [`hyper_util::client::legacy::Builder`].
|
||||
pub client_builder_hook: Option<fn(HyperClientBuilder) -> HyperClientBuilder>,
|
||||
#[allow(clippy::type_complexity)]
|
||||
pub request_builder_hook: Option<
|
||||
fn(&mut http::Request<ReqBody>) -> Result<(), deno_core::error::AnyError>,
|
||||
|
@ -112,6 +123,7 @@ impl Default for Options {
|
|||
user_agent: "".to_string(),
|
||||
root_cert_store_provider: None,
|
||||
proxy: None,
|
||||
client_builder_hook: None,
|
||||
request_builder_hook: None,
|
||||
unsafely_ignore_certificate_errors: None,
|
||||
client_cert_chain_and_key: TlsKeys::Null,
|
||||
|
@ -271,6 +283,7 @@ pub fn create_client_from_options(
|
|||
pool_idle_timeout: None,
|
||||
http1: true,
|
||||
http2: true,
|
||||
client_builder_hook: options.client_builder_hook,
|
||||
},
|
||||
)
|
||||
}
|
||||
|
@ -384,7 +397,7 @@ impl FetchPermissions for deno_permissions::PermissionsContainer {
|
|||
}
|
||||
}
|
||||
|
||||
#[op2]
|
||||
#[op2(stack_trace)]
|
||||
#[serde]
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub fn op_fetch<FP>(
|
||||
|
@ -853,7 +866,7 @@ fn default_true() -> bool {
|
|||
true
|
||||
}
|
||||
|
||||
#[op2]
|
||||
#[op2(stack_trace)]
|
||||
#[smi]
|
||||
pub fn op_fetch_custom_client<FP>(
|
||||
state: &mut OpState,
|
||||
|
@ -908,6 +921,7 @@ where
|
|||
),
|
||||
http1: args.http1,
|
||||
http2: args.http2,
|
||||
client_builder_hook: options.client_builder_hook,
|
||||
},
|
||||
)?;
|
||||
|
||||
|
@ -929,6 +943,7 @@ pub struct CreateHttpClientOptions {
|
|||
pub pool_idle_timeout: Option<Option<u64>>,
|
||||
pub http1: bool,
|
||||
pub http2: bool,
|
||||
pub client_builder_hook: Option<fn(HyperClientBuilder) -> HyperClientBuilder>,
|
||||
}
|
||||
|
||||
impl Default for CreateHttpClientOptions {
|
||||
|
@ -944,6 +959,7 @@ impl Default for CreateHttpClientOptions {
|
|||
pool_idle_timeout: None,
|
||||
http1: true,
|
||||
http2: true,
|
||||
client_builder_hook: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -999,11 +1015,14 @@ pub fn create_http_client(
|
|||
HttpClientCreateError::InvalidUserAgent(user_agent.to_string())
|
||||
})?;
|
||||
|
||||
let mut builder =
|
||||
hyper_util::client::legacy::Builder::new(TokioExecutor::new());
|
||||
let mut builder = HyperClientBuilder::new(TokioExecutor::new());
|
||||
builder.timer(TokioTimer::new());
|
||||
builder.pool_timer(TokioTimer::new());
|
||||
|
||||
if let Some(client_builder_hook) = options.client_builder_hook {
|
||||
builder = client_builder_hook(builder);
|
||||
}
|
||||
|
||||
let mut proxies = proxy::from_env();
|
||||
if let Some(proxy) = options.proxy {
|
||||
let mut intercept = proxy::Intercept::all(&proxy.url)
|
||||
|
|
|
@ -126,6 +126,7 @@ async fn rust_test_client_with_resolver(
|
|||
dns_resolver: resolver,
|
||||
http1: true,
|
||||
http2: true,
|
||||
client_builder_hook: None,
|
||||
},
|
||||
)
|
||||
.unwrap();
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
|
||||
[package]
|
||||
name = "deno_ffi"
|
||||
version = "0.164.0"
|
||||
version = "0.166.0"
|
||||
authors.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
|
|
|
@ -287,7 +287,7 @@ fn ffi_call(
|
|||
}
|
||||
}
|
||||
|
||||
#[op2(async)]
|
||||
#[op2(async, stack_trace)]
|
||||
#[serde]
|
||||
pub fn op_ffi_call_ptr_nonblocking<FP>(
|
||||
scope: &mut v8::HandleScope,
|
||||
|
@ -385,7 +385,7 @@ pub fn op_ffi_call_nonblocking(
|
|||
})
|
||||
}
|
||||
|
||||
#[op2(reentrant)]
|
||||
#[op2(reentrant, stack_trace)]
|
||||
#[serde]
|
||||
pub fn op_ffi_call_ptr<FP>(
|
||||
scope: &mut v8::HandleScope,
|
||||
|
|
|
@ -561,7 +561,7 @@ pub struct RegisterCallbackArgs {
|
|||
result: NativeType,
|
||||
}
|
||||
|
||||
#[op2]
|
||||
#[op2(stack_trace)]
|
||||
pub fn op_ffi_unsafe_callback_create<FP, 'scope>(
|
||||
state: &mut OpState,
|
||||
scope: &mut v8::HandleScope<'scope>,
|
||||
|
|
|
@ -15,6 +15,7 @@ use dlopen2::raw::Library;
|
|||
use serde::Deserialize;
|
||||
use serde_value::ValueDeserializer;
|
||||
use std::borrow::Cow;
|
||||
use std::cell::RefCell;
|
||||
use std::collections::HashMap;
|
||||
use std::ffi::c_void;
|
||||
use std::rc::Rc;
|
||||
|
@ -123,17 +124,20 @@ pub struct FfiLoadArgs {
|
|||
symbols: HashMap<String, ForeignSymbol>,
|
||||
}
|
||||
|
||||
#[op2]
|
||||
#[op2(stack_trace)]
|
||||
pub fn op_ffi_load<'scope, FP>(
|
||||
scope: &mut v8::HandleScope<'scope>,
|
||||
state: &mut OpState,
|
||||
state: Rc<RefCell<OpState>>,
|
||||
#[serde] args: FfiLoadArgs,
|
||||
) -> Result<v8::Local<'scope, v8::Value>, DlfcnError>
|
||||
where
|
||||
FP: FfiPermissions + 'static,
|
||||
{
|
||||
let permissions = state.borrow_mut::<FP>();
|
||||
let path = permissions.check_partial_with_path(&args.path)?;
|
||||
let path = {
|
||||
let mut state = state.borrow_mut();
|
||||
let permissions = state.borrow_mut::<FP>();
|
||||
permissions.check_partial_with_path(&args.path)?
|
||||
};
|
||||
|
||||
let lib = Library::open(&path).map_err(|e| {
|
||||
dlopen2::Error::OpeningLibraryError(std::io::Error::new(
|
||||
|
@ -215,6 +219,7 @@ where
|
|||
}
|
||||
}
|
||||
|
||||
let mut state = state.borrow_mut();
|
||||
let out = v8::Array::new(scope, 2);
|
||||
let rid = state.resource_table.add(resource);
|
||||
let rid_v8 = v8::Integer::new_from_unsigned(scope, rid);
|
||||
|
|
|
@ -49,7 +49,7 @@ pub enum ReprError {
|
|||
Permission(#[from] deno_permissions::PermissionCheckError),
|
||||
}
|
||||
|
||||
#[op2(fast)]
|
||||
#[op2(fast, stack_trace)]
|
||||
pub fn op_ffi_ptr_create<FP>(
|
||||
state: &mut OpState,
|
||||
#[bigint] ptr_number: usize,
|
||||
|
@ -63,7 +63,7 @@ where
|
|||
Ok(ptr_number as *mut c_void)
|
||||
}
|
||||
|
||||
#[op2(fast)]
|
||||
#[op2(fast, stack_trace)]
|
||||
pub fn op_ffi_ptr_equals<FP>(
|
||||
state: &mut OpState,
|
||||
a: *const c_void,
|
||||
|
@ -78,7 +78,7 @@ where
|
|||
Ok(a == b)
|
||||
}
|
||||
|
||||
#[op2]
|
||||
#[op2(stack_trace)]
|
||||
pub fn op_ffi_ptr_of<FP>(
|
||||
state: &mut OpState,
|
||||
#[anybuffer] buf: *const u8,
|
||||
|
@ -92,7 +92,7 @@ where
|
|||
Ok(buf as *mut c_void)
|
||||
}
|
||||
|
||||
#[op2(fast)]
|
||||
#[op2(fast, stack_trace)]
|
||||
pub fn op_ffi_ptr_of_exact<FP>(
|
||||
state: &mut OpState,
|
||||
buf: v8::Local<v8::ArrayBufferView>,
|
||||
|
@ -112,7 +112,7 @@ where
|
|||
Ok(buf.as_ptr() as _)
|
||||
}
|
||||
|
||||
#[op2(fast)]
|
||||
#[op2(fast, stack_trace)]
|
||||
pub fn op_ffi_ptr_offset<FP>(
|
||||
state: &mut OpState,
|
||||
ptr: *mut c_void,
|
||||
|
@ -142,7 +142,7 @@ unsafe extern "C" fn noop_deleter_callback(
|
|||
) {
|
||||
}
|
||||
|
||||
#[op2(fast)]
|
||||
#[op2(fast, stack_trace)]
|
||||
#[bigint]
|
||||
pub fn op_ffi_ptr_value<FP>(
|
||||
state: &mut OpState,
|
||||
|
@ -157,7 +157,7 @@ where
|
|||
Ok(ptr as usize)
|
||||
}
|
||||
|
||||
#[op2]
|
||||
#[op2(stack_trace)]
|
||||
pub fn op_ffi_get_buf<FP, 'scope>(
|
||||
scope: &mut v8::HandleScope<'scope>,
|
||||
state: &mut OpState,
|
||||
|
@ -189,7 +189,7 @@ where
|
|||
Ok(array_buffer)
|
||||
}
|
||||
|
||||
#[op2]
|
||||
#[op2(stack_trace)]
|
||||
pub fn op_ffi_buf_copy_into<FP>(
|
||||
state: &mut OpState,
|
||||
src: *mut c_void,
|
||||
|
@ -219,7 +219,7 @@ where
|
|||
}
|
||||
}
|
||||
|
||||
#[op2]
|
||||
#[op2(stack_trace)]
|
||||
pub fn op_ffi_cstr_read<FP, 'scope>(
|
||||
scope: &mut v8::HandleScope<'scope>,
|
||||
state: &mut OpState,
|
||||
|
@ -244,7 +244,7 @@ where
|
|||
Ok(value)
|
||||
}
|
||||
|
||||
#[op2(fast)]
|
||||
#[op2(fast, stack_trace)]
|
||||
pub fn op_ffi_read_bool<FP>(
|
||||
state: &mut OpState,
|
||||
ptr: *mut c_void,
|
||||
|
@ -264,7 +264,7 @@ where
|
|||
Ok(unsafe { ptr::read_unaligned::<bool>(ptr.offset(offset) as *const bool) })
|
||||
}
|
||||
|
||||
#[op2(fast)]
|
||||
#[op2(fast, stack_trace)]
|
||||
pub fn op_ffi_read_u8<FP>(
|
||||
state: &mut OpState,
|
||||
ptr: *mut c_void,
|
||||
|
@ -286,7 +286,7 @@ where
|
|||
})
|
||||
}
|
||||
|
||||
#[op2(fast)]
|
||||
#[op2(fast, stack_trace)]
|
||||
pub fn op_ffi_read_i8<FP>(
|
||||
state: &mut OpState,
|
||||
ptr: *mut c_void,
|
||||
|
@ -308,7 +308,7 @@ where
|
|||
})
|
||||
}
|
||||
|
||||
#[op2(fast)]
|
||||
#[op2(fast, stack_trace)]
|
||||
pub fn op_ffi_read_u16<FP>(
|
||||
state: &mut OpState,
|
||||
ptr: *mut c_void,
|
||||
|
@ -330,7 +330,7 @@ where
|
|||
})
|
||||
}
|
||||
|
||||
#[op2(fast)]
|
||||
#[op2(fast, stack_trace)]
|
||||
pub fn op_ffi_read_i16<FP>(
|
||||
state: &mut OpState,
|
||||
ptr: *mut c_void,
|
||||
|
@ -352,7 +352,7 @@ where
|
|||
})
|
||||
}
|
||||
|
||||
#[op2(fast)]
|
||||
#[op2(fast, stack_trace)]
|
||||
pub fn op_ffi_read_u32<FP>(
|
||||
state: &mut OpState,
|
||||
ptr: *mut c_void,
|
||||
|
@ -372,7 +372,7 @@ where
|
|||
Ok(unsafe { ptr::read_unaligned::<u32>(ptr.offset(offset) as *const u32) })
|
||||
}
|
||||
|
||||
#[op2(fast)]
|
||||
#[op2(fast, stack_trace)]
|
||||
pub fn op_ffi_read_i32<FP>(
|
||||
state: &mut OpState,
|
||||
ptr: *mut c_void,
|
||||
|
@ -392,7 +392,7 @@ where
|
|||
Ok(unsafe { ptr::read_unaligned::<i32>(ptr.offset(offset) as *const i32) })
|
||||
}
|
||||
|
||||
#[op2(fast)]
|
||||
#[op2(fast, stack_trace)]
|
||||
#[bigint]
|
||||
pub fn op_ffi_read_u64<FP>(
|
||||
state: &mut OpState,
|
||||
|
@ -418,7 +418,7 @@ where
|
|||
Ok(value)
|
||||
}
|
||||
|
||||
#[op2(fast)]
|
||||
#[op2(fast, stack_trace)]
|
||||
#[bigint]
|
||||
pub fn op_ffi_read_i64<FP>(
|
||||
state: &mut OpState,
|
||||
|
@ -444,7 +444,7 @@ where
|
|||
Ok(value)
|
||||
}
|
||||
|
||||
#[op2(fast)]
|
||||
#[op2(fast, stack_trace)]
|
||||
pub fn op_ffi_read_f32<FP>(
|
||||
state: &mut OpState,
|
||||
ptr: *mut c_void,
|
||||
|
@ -464,7 +464,7 @@ where
|
|||
Ok(unsafe { ptr::read_unaligned::<f32>(ptr.offset(offset) as *const f32) })
|
||||
}
|
||||
|
||||
#[op2(fast)]
|
||||
#[op2(fast, stack_trace)]
|
||||
pub fn op_ffi_read_f64<FP>(
|
||||
state: &mut OpState,
|
||||
ptr: *mut c_void,
|
||||
|
@ -484,7 +484,7 @@ where
|
|||
Ok(unsafe { ptr::read_unaligned::<f64>(ptr.offset(offset) as *const f64) })
|
||||
}
|
||||
|
||||
#[op2(fast)]
|
||||
#[op2(fast, stack_trace)]
|
||||
pub fn op_ffi_read_ptr<FP>(
|
||||
state: &mut OpState,
|
||||
ptr: *mut c_void,
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
|
||||
[package]
|
||||
name = "deno_fs"
|
||||
version = "0.87.0"
|
||||
version = "0.89.0"
|
||||
authors.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
|
@ -19,6 +19,7 @@ sync_fs = []
|
|||
[dependencies]
|
||||
async-trait.workspace = true
|
||||
base32.workspace = true
|
||||
boxed_error.workspace = true
|
||||
deno_core.workspace = true
|
||||
deno_io.workspace = true
|
||||
deno_path_util.workspace = true
|
||||
|
|
|
@ -15,6 +15,7 @@ pub use crate::interface::FsDirEntry;
|
|||
pub use crate::interface::FsFileType;
|
||||
pub use crate::interface::OpenOptions;
|
||||
pub use crate::ops::FsOpsError;
|
||||
pub use crate::ops::FsOpsErrorKind;
|
||||
pub use crate::ops::OperationError;
|
||||
pub use crate::std_fs::RealFs;
|
||||
pub use crate::sync::MaybeSend;
|
||||
|
|
153
ext/fs/ops.rs
153
ext/fs/ops.rs
|
@ -16,6 +16,7 @@ use crate::interface::FsDirEntry;
|
|||
use crate::interface::FsFileType;
|
||||
use crate::FsPermissions;
|
||||
use crate::OpenOptions;
|
||||
use boxed_error::Boxed;
|
||||
use deno_core::op2;
|
||||
use deno_core::CancelFuture;
|
||||
use deno_core::CancelHandle;
|
||||
|
@ -32,8 +33,11 @@ use rand::thread_rng;
|
|||
use rand::Rng;
|
||||
use serde::Serialize;
|
||||
|
||||
#[derive(Debug, Boxed)]
|
||||
pub struct FsOpsError(pub Box<FsOpsErrorKind>);
|
||||
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
pub enum FsOpsError {
|
||||
pub enum FsOpsErrorKind {
|
||||
#[error("{0}")]
|
||||
Io(#[source] std::io::Error),
|
||||
#[error("{0}")]
|
||||
|
@ -73,15 +77,16 @@ pub enum FsOpsError {
|
|||
impl From<FsError> for FsOpsError {
|
||||
fn from(err: FsError) -> Self {
|
||||
match err {
|
||||
FsError::Io(err) => FsOpsError::Io(err),
|
||||
FsError::Io(err) => FsOpsErrorKind::Io(err),
|
||||
FsError::FileBusy => {
|
||||
FsOpsError::Other(deno_core::error::resource_unavailable())
|
||||
FsOpsErrorKind::Other(deno_core::error::resource_unavailable())
|
||||
}
|
||||
FsError::NotSupported => {
|
||||
FsOpsError::Other(deno_core::error::not_supported())
|
||||
FsOpsErrorKind::Other(deno_core::error::not_supported())
|
||||
}
|
||||
FsError::NotCapable(err) => FsOpsError::NotCapable(err),
|
||||
FsError::NotCapable(err) => FsOpsErrorKind::NotCapable(err),
|
||||
}
|
||||
.into_box()
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -127,11 +132,12 @@ fn map_permission_error(
|
|||
(path.as_str(), "")
|
||||
};
|
||||
|
||||
FsOpsError::NotCapableAccess {
|
||||
FsOpsErrorKind::NotCapableAccess {
|
||||
standalone: deno_permissions::is_standalone(),
|
||||
err,
|
||||
path: format!("{path}{truncated}"),
|
||||
}
|
||||
.into_box()
|
||||
}
|
||||
err => Err::<(), _>(err)
|
||||
.context_path(operation, path)
|
||||
|
@ -140,7 +146,7 @@ fn map_permission_error(
|
|||
}
|
||||
}
|
||||
|
||||
#[op2]
|
||||
#[op2(stack_trace)]
|
||||
#[string]
|
||||
pub fn op_fs_cwd<P>(state: &mut OpState) -> Result<String, FsOpsError>
|
||||
where
|
||||
|
@ -155,7 +161,7 @@ where
|
|||
Ok(path_str)
|
||||
}
|
||||
|
||||
#[op2(fast)]
|
||||
#[op2(fast, stack_trace)]
|
||||
pub fn op_fs_chdir<P>(
|
||||
state: &mut OpState,
|
||||
#[string] directory: &str,
|
||||
|
@ -182,7 +188,7 @@ where
|
|||
state.borrow::<FileSystemRc>().umask(mask).context("umask")
|
||||
}
|
||||
|
||||
#[op2]
|
||||
#[op2(stack_trace)]
|
||||
#[smi]
|
||||
pub fn op_fs_open_sync<P>(
|
||||
state: &mut OpState,
|
||||
|
@ -209,7 +215,7 @@ where
|
|||
Ok(rid)
|
||||
}
|
||||
|
||||
#[op2(async)]
|
||||
#[op2(async, stack_trace)]
|
||||
#[smi]
|
||||
pub async fn op_fs_open_async<P>(
|
||||
state: Rc<RefCell<OpState>>,
|
||||
|
@ -237,7 +243,7 @@ where
|
|||
Ok(rid)
|
||||
}
|
||||
|
||||
#[op2]
|
||||
#[op2(stack_trace)]
|
||||
pub fn op_fs_mkdir_sync<P>(
|
||||
state: &mut OpState,
|
||||
#[string] path: String,
|
||||
|
@ -260,7 +266,7 @@ where
|
|||
Ok(())
|
||||
}
|
||||
|
||||
#[op2(async)]
|
||||
#[op2(async, stack_trace)]
|
||||
pub async fn op_fs_mkdir_async<P>(
|
||||
state: Rc<RefCell<OpState>>,
|
||||
#[string] path: String,
|
||||
|
@ -285,7 +291,7 @@ where
|
|||
Ok(())
|
||||
}
|
||||
|
||||
#[op2(fast)]
|
||||
#[op2(fast, stack_trace)]
|
||||
pub fn op_fs_chmod_sync<P>(
|
||||
state: &mut OpState,
|
||||
#[string] path: String,
|
||||
|
@ -302,7 +308,7 @@ where
|
|||
Ok(())
|
||||
}
|
||||
|
||||
#[op2(async)]
|
||||
#[op2(async, stack_trace)]
|
||||
pub async fn op_fs_chmod_async<P>(
|
||||
state: Rc<RefCell<OpState>>,
|
||||
#[string] path: String,
|
||||
|
@ -322,7 +328,7 @@ where
|
|||
Ok(())
|
||||
}
|
||||
|
||||
#[op2]
|
||||
#[op2(stack_trace)]
|
||||
pub fn op_fs_chown_sync<P>(
|
||||
state: &mut OpState,
|
||||
#[string] path: String,
|
||||
|
@ -341,7 +347,7 @@ where
|
|||
Ok(())
|
||||
}
|
||||
|
||||
#[op2(async)]
|
||||
#[op2(async, stack_trace)]
|
||||
pub async fn op_fs_chown_async<P>(
|
||||
state: Rc<RefCell<OpState>>,
|
||||
#[string] path: String,
|
||||
|
@ -362,7 +368,7 @@ where
|
|||
Ok(())
|
||||
}
|
||||
|
||||
#[op2(fast)]
|
||||
#[op2(fast, stack_trace)]
|
||||
pub fn op_fs_remove_sync<P>(
|
||||
state: &mut OpState,
|
||||
#[string] path: &str,
|
||||
|
@ -382,7 +388,7 @@ where
|
|||
Ok(())
|
||||
}
|
||||
|
||||
#[op2(async)]
|
||||
#[op2(async, stack_trace)]
|
||||
pub async fn op_fs_remove_async<P>(
|
||||
state: Rc<RefCell<OpState>>,
|
||||
#[string] path: String,
|
||||
|
@ -413,7 +419,7 @@ where
|
|||
Ok(())
|
||||
}
|
||||
|
||||
#[op2(fast)]
|
||||
#[op2(fast, stack_trace)]
|
||||
pub fn op_fs_copy_file_sync<P>(
|
||||
state: &mut OpState,
|
||||
#[string] from: &str,
|
||||
|
@ -433,7 +439,7 @@ where
|
|||
Ok(())
|
||||
}
|
||||
|
||||
#[op2(async)]
|
||||
#[op2(async, stack_trace)]
|
||||
pub async fn op_fs_copy_file_async<P>(
|
||||
state: Rc<RefCell<OpState>>,
|
||||
#[string] from: String,
|
||||
|
@ -457,7 +463,7 @@ where
|
|||
Ok(())
|
||||
}
|
||||
|
||||
#[op2(fast)]
|
||||
#[op2(fast, stack_trace)]
|
||||
pub fn op_fs_stat_sync<P>(
|
||||
state: &mut OpState,
|
||||
#[string] path: String,
|
||||
|
@ -476,7 +482,7 @@ where
|
|||
Ok(())
|
||||
}
|
||||
|
||||
#[op2(async)]
|
||||
#[op2(async, stack_trace)]
|
||||
#[serde]
|
||||
pub async fn op_fs_stat_async<P>(
|
||||
state: Rc<RefCell<OpState>>,
|
||||
|
@ -498,7 +504,7 @@ where
|
|||
Ok(SerializableStat::from(stat))
|
||||
}
|
||||
|
||||
#[op2(fast)]
|
||||
#[op2(fast, stack_trace)]
|
||||
pub fn op_fs_lstat_sync<P>(
|
||||
state: &mut OpState,
|
||||
#[string] path: String,
|
||||
|
@ -517,7 +523,7 @@ where
|
|||
Ok(())
|
||||
}
|
||||
|
||||
#[op2(async)]
|
||||
#[op2(async, stack_trace)]
|
||||
#[serde]
|
||||
pub async fn op_fs_lstat_async<P>(
|
||||
state: Rc<RefCell<OpState>>,
|
||||
|
@ -539,7 +545,7 @@ where
|
|||
Ok(SerializableStat::from(stat))
|
||||
}
|
||||
|
||||
#[op2]
|
||||
#[op2(stack_trace)]
|
||||
#[string]
|
||||
pub fn op_fs_realpath_sync<P>(
|
||||
state: &mut OpState,
|
||||
|
@ -562,7 +568,7 @@ where
|
|||
Ok(path_string)
|
||||
}
|
||||
|
||||
#[op2(async)]
|
||||
#[op2(async, stack_trace)]
|
||||
#[string]
|
||||
pub async fn op_fs_realpath_async<P>(
|
||||
state: Rc<RefCell<OpState>>,
|
||||
|
@ -590,7 +596,7 @@ where
|
|||
Ok(path_string)
|
||||
}
|
||||
|
||||
#[op2]
|
||||
#[op2(stack_trace)]
|
||||
#[serde]
|
||||
pub fn op_fs_read_dir_sync<P>(
|
||||
state: &mut OpState,
|
||||
|
@ -609,7 +615,7 @@ where
|
|||
Ok(entries)
|
||||
}
|
||||
|
||||
#[op2(async)]
|
||||
#[op2(async, stack_trace)]
|
||||
#[serde]
|
||||
pub async fn op_fs_read_dir_async<P>(
|
||||
state: Rc<RefCell<OpState>>,
|
||||
|
@ -634,7 +640,7 @@ where
|
|||
Ok(entries)
|
||||
}
|
||||
|
||||
#[op2(fast)]
|
||||
#[op2(fast, stack_trace)]
|
||||
pub fn op_fs_rename_sync<P>(
|
||||
state: &mut OpState,
|
||||
#[string] oldpath: String,
|
||||
|
@ -655,7 +661,7 @@ where
|
|||
Ok(())
|
||||
}
|
||||
|
||||
#[op2(async)]
|
||||
#[op2(async, stack_trace)]
|
||||
pub async fn op_fs_rename_async<P>(
|
||||
state: Rc<RefCell<OpState>>,
|
||||
#[string] oldpath: String,
|
||||
|
@ -680,7 +686,7 @@ where
|
|||
Ok(())
|
||||
}
|
||||
|
||||
#[op2(fast)]
|
||||
#[op2(fast, stack_trace)]
|
||||
pub fn op_fs_link_sync<P>(
|
||||
state: &mut OpState,
|
||||
#[string] oldpath: &str,
|
||||
|
@ -702,7 +708,7 @@ where
|
|||
Ok(())
|
||||
}
|
||||
|
||||
#[op2(async)]
|
||||
#[op2(async, stack_trace)]
|
||||
pub async fn op_fs_link_async<P>(
|
||||
state: Rc<RefCell<OpState>>,
|
||||
#[string] oldpath: String,
|
||||
|
@ -728,7 +734,7 @@ where
|
|||
Ok(())
|
||||
}
|
||||
|
||||
#[op2]
|
||||
#[op2(stack_trace)]
|
||||
pub fn op_fs_symlink_sync<P>(
|
||||
state: &mut OpState,
|
||||
#[string] oldpath: &str,
|
||||
|
@ -752,7 +758,7 @@ where
|
|||
Ok(())
|
||||
}
|
||||
|
||||
#[op2(async)]
|
||||
#[op2(async, stack_trace)]
|
||||
pub async fn op_fs_symlink_async<P>(
|
||||
state: Rc<RefCell<OpState>>,
|
||||
#[string] oldpath: String,
|
||||
|
@ -780,7 +786,7 @@ where
|
|||
Ok(())
|
||||
}
|
||||
|
||||
#[op2]
|
||||
#[op2(stack_trace)]
|
||||
#[string]
|
||||
pub fn op_fs_read_link_sync<P>(
|
||||
state: &mut OpState,
|
||||
|
@ -800,7 +806,7 @@ where
|
|||
Ok(target_string)
|
||||
}
|
||||
|
||||
#[op2(async)]
|
||||
#[op2(async, stack_trace)]
|
||||
#[string]
|
||||
pub async fn op_fs_read_link_async<P>(
|
||||
state: Rc<RefCell<OpState>>,
|
||||
|
@ -825,7 +831,7 @@ where
|
|||
Ok(target_string)
|
||||
}
|
||||
|
||||
#[op2(fast)]
|
||||
#[op2(fast, stack_trace)]
|
||||
pub fn op_fs_truncate_sync<P>(
|
||||
state: &mut OpState,
|
||||
#[string] path: &str,
|
||||
|
@ -845,7 +851,7 @@ where
|
|||
Ok(())
|
||||
}
|
||||
|
||||
#[op2(async)]
|
||||
#[op2(async, stack_trace)]
|
||||
pub async fn op_fs_truncate_async<P>(
|
||||
state: Rc<RefCell<OpState>>,
|
||||
#[string] path: String,
|
||||
|
@ -869,7 +875,7 @@ where
|
|||
Ok(())
|
||||
}
|
||||
|
||||
#[op2(fast)]
|
||||
#[op2(fast, stack_trace)]
|
||||
pub fn op_fs_utime_sync<P>(
|
||||
state: &mut OpState,
|
||||
#[string] path: &str,
|
||||
|
@ -890,7 +896,7 @@ where
|
|||
Ok(())
|
||||
}
|
||||
|
||||
#[op2(async)]
|
||||
#[op2(async, stack_trace)]
|
||||
pub async fn op_fs_utime_async<P>(
|
||||
state: Rc<RefCell<OpState>>,
|
||||
#[string] path: String,
|
||||
|
@ -921,7 +927,7 @@ where
|
|||
Ok(())
|
||||
}
|
||||
|
||||
#[op2]
|
||||
#[op2(stack_trace)]
|
||||
#[string]
|
||||
pub fn op_fs_make_temp_dir_sync<P>(
|
||||
state: &mut OpState,
|
||||
|
@ -963,7 +969,7 @@ where
|
|||
.context("tmpdir")
|
||||
}
|
||||
|
||||
#[op2(async)]
|
||||
#[op2(async, stack_trace)]
|
||||
#[string]
|
||||
pub async fn op_fs_make_temp_dir_async<P>(
|
||||
state: Rc<RefCell<OpState>>,
|
||||
|
@ -1009,7 +1015,7 @@ where
|
|||
.context("tmpdir")
|
||||
}
|
||||
|
||||
#[op2]
|
||||
#[op2(stack_trace)]
|
||||
#[string]
|
||||
pub fn op_fs_make_temp_file_sync<P>(
|
||||
state: &mut OpState,
|
||||
|
@ -1057,7 +1063,7 @@ where
|
|||
.context("tmpfile")
|
||||
}
|
||||
|
||||
#[op2(async)]
|
||||
#[op2(async, stack_trace)]
|
||||
#[string]
|
||||
pub async fn op_fs_make_temp_file_async<P>(
|
||||
state: Rc<RefCell<OpState>>,
|
||||
|
@ -1176,7 +1182,9 @@ fn validate_temporary_filename_component(
|
|||
) -> Result<(), FsOpsError> {
|
||||
// Ban ASCII and Unicode control characters: these will often fail
|
||||
if let Some(c) = component.matches(|c: char| c.is_control()).next() {
|
||||
return Err(FsOpsError::InvalidControlCharacter(c.to_string()));
|
||||
return Err(
|
||||
FsOpsErrorKind::InvalidControlCharacter(c.to_string()).into_box(),
|
||||
);
|
||||
}
|
||||
// Windows has the most restrictive filenames. As temp files aren't normal files, we just
|
||||
// use this set of banned characters for all platforms because wildcard-like files can also
|
||||
|
@ -1192,13 +1200,13 @@ fn validate_temporary_filename_component(
|
|||
.matches(|c: char| "<>:\"/\\|?*".contains(c))
|
||||
.next()
|
||||
{
|
||||
return Err(FsOpsError::InvalidCharacter(c.to_string()));
|
||||
return Err(FsOpsErrorKind::InvalidCharacter(c.to_string()).into_box());
|
||||
}
|
||||
|
||||
// This check is only for Windows
|
||||
#[cfg(windows)]
|
||||
if suffix && component.ends_with(|c: char| ". ".contains(c)) {
|
||||
return Err(FsOpsError::InvalidTrailingCharacter);
|
||||
return Err(FsOpsErrorKind::InvalidTrailingCharacter.into_box());
|
||||
}
|
||||
|
||||
Ok(())
|
||||
|
@ -1227,7 +1235,7 @@ fn tmp_name(
|
|||
Ok(path)
|
||||
}
|
||||
|
||||
#[op2]
|
||||
#[op2(stack_trace)]
|
||||
pub fn op_fs_write_file_sync<P>(
|
||||
state: &mut OpState,
|
||||
#[string] path: String,
|
||||
|
@ -1253,7 +1261,7 @@ where
|
|||
Ok(())
|
||||
}
|
||||
|
||||
#[op2(async)]
|
||||
#[op2(async, stack_trace)]
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub async fn op_fs_write_file_async<P>(
|
||||
state: Rc<RefCell<OpState>>,
|
||||
|
@ -1307,7 +1315,7 @@ where
|
|||
Ok(())
|
||||
}
|
||||
|
||||
#[op2]
|
||||
#[op2(stack_trace)]
|
||||
#[serde]
|
||||
pub fn op_fs_read_file_sync<P>(
|
||||
state: &mut OpState,
|
||||
|
@ -1328,7 +1336,7 @@ where
|
|||
Ok(buf.into())
|
||||
}
|
||||
|
||||
#[op2(async)]
|
||||
#[op2(async, stack_trace)]
|
||||
#[serde]
|
||||
pub async fn op_fs_read_file_async<P>(
|
||||
state: Rc<RefCell<OpState>>,
|
||||
|
@ -1370,7 +1378,7 @@ where
|
|||
Ok(buf.into())
|
||||
}
|
||||
|
||||
#[op2]
|
||||
#[op2(stack_trace)]
|
||||
#[string]
|
||||
pub fn op_fs_read_file_text_sync<P>(
|
||||
state: &mut OpState,
|
||||
|
@ -1391,7 +1399,7 @@ where
|
|||
Ok(str)
|
||||
}
|
||||
|
||||
#[op2(async)]
|
||||
#[op2(async, stack_trace)]
|
||||
#[string]
|
||||
pub async fn op_fs_read_file_text_async<P>(
|
||||
state: Rc<RefCell<OpState>>,
|
||||
|
@ -1440,7 +1448,7 @@ fn to_seek_from(offset: i64, whence: i32) -> Result<SeekFrom, FsOpsError> {
|
|||
1 => SeekFrom::Current(offset),
|
||||
2 => SeekFrom::End(offset),
|
||||
_ => {
|
||||
return Err(FsOpsError::InvalidSeekMode(whence));
|
||||
return Err(FsOpsErrorKind::InvalidSeekMode(whence).into_box());
|
||||
}
|
||||
};
|
||||
Ok(seek_from)
|
||||
|
@ -1456,7 +1464,7 @@ pub fn op_fs_seek_sync(
|
|||
) -> Result<u64, FsOpsError> {
|
||||
let pos = to_seek_from(offset, whence)?;
|
||||
let file =
|
||||
FileResource::get_file(state, rid).map_err(FsOpsError::Resource)?;
|
||||
FileResource::get_file(state, rid).map_err(FsOpsErrorKind::Resource)?;
|
||||
let cursor = file.seek_sync(pos)?;
|
||||
Ok(cursor)
|
||||
}
|
||||
|
@ -1471,7 +1479,7 @@ pub async fn op_fs_seek_async(
|
|||
) -> Result<u64, FsOpsError> {
|
||||
let pos = to_seek_from(offset, whence)?;
|
||||
let file = FileResource::get_file(&state.borrow(), rid)
|
||||
.map_err(FsOpsError::Resource)?;
|
||||
.map_err(FsOpsErrorKind::Resource)?;
|
||||
let cursor = file.seek_async(pos).await?;
|
||||
Ok(cursor)
|
||||
}
|
||||
|
@ -1482,7 +1490,7 @@ pub fn op_fs_file_sync_data_sync(
|
|||
#[smi] rid: ResourceId,
|
||||
) -> Result<(), FsOpsError> {
|
||||
let file =
|
||||
FileResource::get_file(state, rid).map_err(FsOpsError::Resource)?;
|
||||
FileResource::get_file(state, rid).map_err(FsOpsErrorKind::Resource)?;
|
||||
file.datasync_sync()?;
|
||||
Ok(())
|
||||
}
|
||||
|
@ -1493,7 +1501,7 @@ pub async fn op_fs_file_sync_data_async(
|
|||
#[smi] rid: ResourceId,
|
||||
) -> Result<(), FsOpsError> {
|
||||
let file = FileResource::get_file(&state.borrow(), rid)
|
||||
.map_err(FsOpsError::Resource)?;
|
||||
.map_err(FsOpsErrorKind::Resource)?;
|
||||
file.datasync_async().await?;
|
||||
Ok(())
|
||||
}
|
||||
|
@ -1504,7 +1512,7 @@ pub fn op_fs_file_sync_sync(
|
|||
#[smi] rid: ResourceId,
|
||||
) -> Result<(), FsOpsError> {
|
||||
let file =
|
||||
FileResource::get_file(state, rid).map_err(FsOpsError::Resource)?;
|
||||
FileResource::get_file(state, rid).map_err(FsOpsErrorKind::Resource)?;
|
||||
file.sync_sync()?;
|
||||
Ok(())
|
||||
}
|
||||
|
@ -1515,7 +1523,7 @@ pub async fn op_fs_file_sync_async(
|
|||
#[smi] rid: ResourceId,
|
||||
) -> Result<(), FsOpsError> {
|
||||
let file = FileResource::get_file(&state.borrow(), rid)
|
||||
.map_err(FsOpsError::Resource)?;
|
||||
.map_err(FsOpsErrorKind::Resource)?;
|
||||
file.sync_async().await?;
|
||||
Ok(())
|
||||
}
|
||||
|
@ -1527,7 +1535,7 @@ pub fn op_fs_file_stat_sync(
|
|||
#[buffer] stat_out_buf: &mut [u32],
|
||||
) -> Result<(), FsOpsError> {
|
||||
let file =
|
||||
FileResource::get_file(state, rid).map_err(FsOpsError::Resource)?;
|
||||
FileResource::get_file(state, rid).map_err(FsOpsErrorKind::Resource)?;
|
||||
let stat = file.stat_sync()?;
|
||||
let serializable_stat = SerializableStat::from(stat);
|
||||
serializable_stat.write(stat_out_buf);
|
||||
|
@ -1541,7 +1549,7 @@ pub async fn op_fs_file_stat_async(
|
|||
#[smi] rid: ResourceId,
|
||||
) -> Result<SerializableStat, FsOpsError> {
|
||||
let file = FileResource::get_file(&state.borrow(), rid)
|
||||
.map_err(FsOpsError::Resource)?;
|
||||
.map_err(FsOpsErrorKind::Resource)?;
|
||||
let stat = file.stat_async().await?;
|
||||
Ok(stat.into())
|
||||
}
|
||||
|
@ -1553,7 +1561,7 @@ pub fn op_fs_flock_sync(
|
|||
exclusive: bool,
|
||||
) -> Result<(), FsOpsError> {
|
||||
let file =
|
||||
FileResource::get_file(state, rid).map_err(FsOpsError::Resource)?;
|
||||
FileResource::get_file(state, rid).map_err(FsOpsErrorKind::Resource)?;
|
||||
file.lock_sync(exclusive)?;
|
||||
Ok(())
|
||||
}
|
||||
|
@ -1565,7 +1573,7 @@ pub async fn op_fs_flock_async(
|
|||
exclusive: bool,
|
||||
) -> Result<(), FsOpsError> {
|
||||
let file = FileResource::get_file(&state.borrow(), rid)
|
||||
.map_err(FsOpsError::Resource)?;
|
||||
.map_err(FsOpsErrorKind::Resource)?;
|
||||
file.lock_async(exclusive).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
@ -1576,7 +1584,7 @@ pub fn op_fs_funlock_sync(
|
|||
#[smi] rid: ResourceId,
|
||||
) -> Result<(), FsOpsError> {
|
||||
let file =
|
||||
FileResource::get_file(state, rid).map_err(FsOpsError::Resource)?;
|
||||
FileResource::get_file(state, rid).map_err(FsOpsErrorKind::Resource)?;
|
||||
file.unlock_sync()?;
|
||||
Ok(())
|
||||
}
|
||||
|
@ -1587,7 +1595,7 @@ pub async fn op_fs_funlock_async(
|
|||
#[smi] rid: ResourceId,
|
||||
) -> Result<(), FsOpsError> {
|
||||
let file = FileResource::get_file(&state.borrow(), rid)
|
||||
.map_err(FsOpsError::Resource)?;
|
||||
.map_err(FsOpsErrorKind::Resource)?;
|
||||
file.unlock_async().await?;
|
||||
Ok(())
|
||||
}
|
||||
|
@ -1599,7 +1607,7 @@ pub fn op_fs_ftruncate_sync(
|
|||
#[number] len: u64,
|
||||
) -> Result<(), FsOpsError> {
|
||||
let file =
|
||||
FileResource::get_file(state, rid).map_err(FsOpsError::Resource)?;
|
||||
FileResource::get_file(state, rid).map_err(FsOpsErrorKind::Resource)?;
|
||||
file.truncate_sync(len)?;
|
||||
Ok(())
|
||||
}
|
||||
|
@ -1611,7 +1619,7 @@ pub async fn op_fs_file_truncate_async(
|
|||
#[number] len: u64,
|
||||
) -> Result<(), FsOpsError> {
|
||||
let file = FileResource::get_file(&state.borrow(), rid)
|
||||
.map_err(FsOpsError::Resource)?;
|
||||
.map_err(FsOpsErrorKind::Resource)?;
|
||||
file.truncate_async(len).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
@ -1626,7 +1634,7 @@ pub fn op_fs_futime_sync(
|
|||
#[smi] mtime_nanos: u32,
|
||||
) -> Result<(), FsOpsError> {
|
||||
let file =
|
||||
FileResource::get_file(state, rid).map_err(FsOpsError::Resource)?;
|
||||
FileResource::get_file(state, rid).map_err(FsOpsErrorKind::Resource)?;
|
||||
file.utime_sync(atime_secs, atime_nanos, mtime_secs, mtime_nanos)?;
|
||||
Ok(())
|
||||
}
|
||||
|
@ -1641,7 +1649,7 @@ pub async fn op_fs_futime_async(
|
|||
#[smi] mtime_nanos: u32,
|
||||
) -> Result<(), FsOpsError> {
|
||||
let file = FileResource::get_file(&state.borrow(), rid)
|
||||
.map_err(FsOpsError::Resource)?;
|
||||
.map_err(FsOpsErrorKind::Resource)?;
|
||||
file
|
||||
.utime_async(atime_secs, atime_nanos, mtime_secs, mtime_nanos)
|
||||
.await?;
|
||||
|
@ -1717,7 +1725,7 @@ impl<T> MapErrContext for Result<T, FsError> {
|
|||
where
|
||||
F: FnOnce(FsError) -> OperationError,
|
||||
{
|
||||
self.map_err(|err| FsOpsError::OperationError(f(err)))
|
||||
self.map_err(|err| FsOpsErrorKind::OperationError(f(err)).into_box())
|
||||
}
|
||||
|
||||
fn context(self, operation: &'static str) -> Self::R {
|
||||
|
@ -1754,7 +1762,8 @@ impl<T> MapErrContext for Result<T, FsError> {
|
|||
}
|
||||
|
||||
fn path_into_string(s: std::ffi::OsString) -> Result<String, FsOpsError> {
|
||||
s.into_string().map_err(FsOpsError::InvalidUtf8)
|
||||
s.into_string()
|
||||
.map_err(|e| FsOpsErrorKind::InvalidUtf8(e).into_box())
|
||||
}
|
||||
|
||||
macro_rules! create_struct_writer {
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
|
||||
[package]
|
||||
name = "deno_http"
|
||||
version = "0.175.0"
|
||||
version = "0.177.0"
|
||||
authors.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
|
|
|
@ -18,6 +18,7 @@ use crate::service::HttpServerState;
|
|||
use crate::service::SignallingRc;
|
||||
use crate::websocket_upgrade::WebSocketUpgrade;
|
||||
use crate::LocalExecutor;
|
||||
use crate::Options;
|
||||
use cache_control::CacheControl;
|
||||
use deno_core::external;
|
||||
use deno_core::futures::future::poll_fn;
|
||||
|
@ -821,10 +822,16 @@ fn serve_http11_unconditional(
|
|||
io: impl HttpServeStream,
|
||||
svc: impl HttpService<Incoming, ResBody = HttpRecordResponse> + 'static,
|
||||
cancel: Rc<CancelHandle>,
|
||||
http1_builder_hook: Option<fn(http1::Builder) -> http1::Builder>,
|
||||
) -> impl Future<Output = Result<(), hyper::Error>> + 'static {
|
||||
let conn = http1::Builder::new()
|
||||
.keep_alive(true)
|
||||
.writev(*USE_WRITEV)
|
||||
let mut builder = http1::Builder::new();
|
||||
builder.keep_alive(true).writev(*USE_WRITEV);
|
||||
|
||||
if let Some(http1_builder_hook) = http1_builder_hook {
|
||||
builder = http1_builder_hook(builder);
|
||||
}
|
||||
|
||||
let conn = builder
|
||||
.serve_connection(TokioIo::new(io), svc)
|
||||
.with_upgrades();
|
||||
|
||||
|
@ -843,9 +850,17 @@ fn serve_http2_unconditional(
|
|||
io: impl HttpServeStream,
|
||||
svc: impl HttpService<Incoming, ResBody = HttpRecordResponse> + 'static,
|
||||
cancel: Rc<CancelHandle>,
|
||||
http2_builder_hook: Option<
|
||||
fn(http2::Builder<LocalExecutor>) -> http2::Builder<LocalExecutor>,
|
||||
>,
|
||||
) -> impl Future<Output = Result<(), hyper::Error>> + 'static {
|
||||
let conn =
|
||||
http2::Builder::new(LocalExecutor).serve_connection(TokioIo::new(io), svc);
|
||||
let mut builder = http2::Builder::new(LocalExecutor);
|
||||
|
||||
if let Some(http2_builder_hook) = http2_builder_hook {
|
||||
builder = http2_builder_hook(builder);
|
||||
}
|
||||
|
||||
let conn = builder.serve_connection(TokioIo::new(io), svc);
|
||||
async {
|
||||
match conn.or_abort(cancel).await {
|
||||
Err(mut conn) => {
|
||||
|
@ -861,15 +876,16 @@ async fn serve_http2_autodetect(
|
|||
io: impl HttpServeStream,
|
||||
svc: impl HttpService<Incoming, ResBody = HttpRecordResponse> + 'static,
|
||||
cancel: Rc<CancelHandle>,
|
||||
options: Options,
|
||||
) -> Result<(), HttpNextError> {
|
||||
let prefix = NetworkStreamPrefixCheck::new(io, HTTP2_PREFIX);
|
||||
let (matches, io) = prefix.match_prefix().await?;
|
||||
if matches {
|
||||
serve_http2_unconditional(io, svc, cancel)
|
||||
serve_http2_unconditional(io, svc, cancel, options.http2_builder_hook)
|
||||
.await
|
||||
.map_err(HttpNextError::Hyper)
|
||||
} else {
|
||||
serve_http11_unconditional(io, svc, cancel)
|
||||
serve_http11_unconditional(io, svc, cancel, options.http1_builder_hook)
|
||||
.await
|
||||
.map_err(HttpNextError::Hyper)
|
||||
}
|
||||
|
@ -880,6 +896,7 @@ fn serve_https(
|
|||
request_info: HttpConnectionProperties,
|
||||
lifetime: HttpLifetime,
|
||||
tx: tokio::sync::mpsc::Sender<Rc<HttpRecord>>,
|
||||
options: Options,
|
||||
) -> JoinHandle<Result<(), HttpNextError>> {
|
||||
let HttpLifetime {
|
||||
server_state,
|
||||
|
@ -891,21 +908,31 @@ fn serve_https(
|
|||
handle_request(req, request_info.clone(), server_state.clone(), tx.clone())
|
||||
});
|
||||
spawn(
|
||||
async {
|
||||
async move {
|
||||
let handshake = io.handshake().await?;
|
||||
// If the client specifically negotiates a protocol, we will use it. If not, we'll auto-detect
|
||||
// based on the prefix bytes
|
||||
let handshake = handshake.alpn;
|
||||
if Some(TLS_ALPN_HTTP_2) == handshake.as_deref() {
|
||||
serve_http2_unconditional(io, svc, listen_cancel_handle)
|
||||
.await
|
||||
.map_err(HttpNextError::Hyper)
|
||||
serve_http2_unconditional(
|
||||
io,
|
||||
svc,
|
||||
listen_cancel_handle,
|
||||
options.http2_builder_hook,
|
||||
)
|
||||
.await
|
||||
.map_err(HttpNextError::Hyper)
|
||||
} else if Some(TLS_ALPN_HTTP_11) == handshake.as_deref() {
|
||||
serve_http11_unconditional(io, svc, listen_cancel_handle)
|
||||
.await
|
||||
.map_err(HttpNextError::Hyper)
|
||||
serve_http11_unconditional(
|
||||
io,
|
||||
svc,
|
||||
listen_cancel_handle,
|
||||
options.http1_builder_hook,
|
||||
)
|
||||
.await
|
||||
.map_err(HttpNextError::Hyper)
|
||||
} else {
|
||||
serve_http2_autodetect(io, svc, listen_cancel_handle).await
|
||||
serve_http2_autodetect(io, svc, listen_cancel_handle, options).await
|
||||
}
|
||||
}
|
||||
.try_or_cancel(connection_cancel_handle),
|
||||
|
@ -917,6 +944,7 @@ fn serve_http(
|
|||
request_info: HttpConnectionProperties,
|
||||
lifetime: HttpLifetime,
|
||||
tx: tokio::sync::mpsc::Sender<Rc<HttpRecord>>,
|
||||
options: Options,
|
||||
) -> JoinHandle<Result<(), HttpNextError>> {
|
||||
let HttpLifetime {
|
||||
server_state,
|
||||
|
@ -928,7 +956,7 @@ fn serve_http(
|
|||
handle_request(req, request_info.clone(), server_state.clone(), tx.clone())
|
||||
});
|
||||
spawn(
|
||||
serve_http2_autodetect(io, svc, listen_cancel_handle)
|
||||
serve_http2_autodetect(io, svc, listen_cancel_handle, options)
|
||||
.try_or_cancel(connection_cancel_handle),
|
||||
)
|
||||
}
|
||||
|
@ -938,6 +966,7 @@ fn serve_http_on<HTTP>(
|
|||
listen_properties: &HttpListenProperties,
|
||||
lifetime: HttpLifetime,
|
||||
tx: tokio::sync::mpsc::Sender<Rc<HttpRecord>>,
|
||||
options: Options,
|
||||
) -> JoinHandle<Result<(), HttpNextError>>
|
||||
where
|
||||
HTTP: HttpPropertyExtractor,
|
||||
|
@ -949,14 +978,14 @@ where
|
|||
|
||||
match network_stream {
|
||||
NetworkStream::Tcp(conn) => {
|
||||
serve_http(conn, connection_properties, lifetime, tx)
|
||||
serve_http(conn, connection_properties, lifetime, tx, options)
|
||||
}
|
||||
NetworkStream::Tls(conn) => {
|
||||
serve_https(conn, connection_properties, lifetime, tx)
|
||||
serve_https(conn, connection_properties, lifetime, tx, options)
|
||||
}
|
||||
#[cfg(unix)]
|
||||
NetworkStream::Unix(conn) => {
|
||||
serve_http(conn, connection_properties, lifetime, tx)
|
||||
serve_http(conn, connection_properties, lifetime, tx, options)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1045,6 +1074,11 @@ where
|
|||
|
||||
let lifetime = resource.lifetime();
|
||||
|
||||
let options = {
|
||||
let state = state.borrow();
|
||||
*state.borrow::<Options>()
|
||||
};
|
||||
|
||||
let listen_properties_clone: HttpListenProperties = listen_properties.clone();
|
||||
let handle = spawn(async move {
|
||||
loop {
|
||||
|
@ -1057,6 +1091,7 @@ where
|
|||
&listen_properties_clone,
|
||||
lifetime.clone(),
|
||||
tx.clone(),
|
||||
options,
|
||||
);
|
||||
}
|
||||
#[allow(unreachable_code)]
|
||||
|
@ -1093,11 +1128,17 @@ where
|
|||
let (tx, rx) = tokio::sync::mpsc::channel(10);
|
||||
let resource: Rc<HttpJoinHandle> = Rc::new(HttpJoinHandle::new(rx));
|
||||
|
||||
let options = {
|
||||
let state = state.borrow();
|
||||
*state.borrow::<Options>()
|
||||
};
|
||||
|
||||
let handle = serve_http_on::<HTTP>(
|
||||
connection,
|
||||
&listen_properties,
|
||||
resource.lifetime(),
|
||||
tx,
|
||||
options,
|
||||
);
|
||||
|
||||
// Set the handle after we start the future
|
||||
|
|
|
@ -39,6 +39,8 @@ use deno_net::raw::NetworkStream;
|
|||
use deno_websocket::ws_create_server_stream;
|
||||
use flate2::write::GzEncoder;
|
||||
use flate2::Compression;
|
||||
use hyper::server::conn::http1;
|
||||
use hyper::server::conn::http2;
|
||||
use hyper_util::rt::TokioIo;
|
||||
use hyper_v014::body::Bytes;
|
||||
use hyper_v014::body::HttpBody;
|
||||
|
@ -96,6 +98,25 @@ pub use request_properties::HttpRequestProperties;
|
|||
pub use service::UpgradeUnavailableError;
|
||||
pub use websocket_upgrade::WebSocketUpgradeError;
|
||||
|
||||
#[derive(Debug, Default, Clone, Copy)]
|
||||
pub struct Options {
|
||||
/// By passing a hook function, the caller can customize various configuration
|
||||
/// options for the HTTP/2 server.
|
||||
/// See [`http2::Builder`] for what parameters can be customized.
|
||||
///
|
||||
/// If `None`, the default configuration provided by hyper will be used. Note
|
||||
/// that the default configuration is subject to change in future versions.
|
||||
pub http2_builder_hook:
|
||||
Option<fn(http2::Builder<LocalExecutor>) -> http2::Builder<LocalExecutor>>,
|
||||
/// By passing a hook function, the caller can customize various configuration
|
||||
/// options for the HTTP/1 server.
|
||||
/// See [`http1::Builder`] for what parameters can be customized.
|
||||
///
|
||||
/// If `None`, the default configuration provided by hyper will be used. Note
|
||||
/// that the default configuration is subject to change in future versions.
|
||||
pub http1_builder_hook: Option<fn(http1::Builder) -> http1::Builder>,
|
||||
}
|
||||
|
||||
deno_core::extension!(
|
||||
deno_http,
|
||||
deps = [deno_web, deno_net, deno_fetch, deno_websocket],
|
||||
|
@ -135,6 +156,12 @@ deno_core::extension!(
|
|||
http_next::op_http_cancel,
|
||||
],
|
||||
esm = ["00_serve.ts", "01_http.js", "02_websocket.ts"],
|
||||
options = {
|
||||
options: Options,
|
||||
},
|
||||
state = |state, options| {
|
||||
state.put::<Options>(options.options);
|
||||
}
|
||||
);
|
||||
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
|
@ -1117,7 +1144,7 @@ async fn op_http_upgrade_websocket(
|
|||
|
||||
// Needed so hyper can use non Send futures
|
||||
#[derive(Clone)]
|
||||
struct LocalExecutor;
|
||||
pub struct LocalExecutor;
|
||||
|
||||
impl<Fut> hyper_v014::rt::Executor<Fut> for LocalExecutor
|
||||
where
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
|
||||
[package]
|
||||
name = "deno_io"
|
||||
version = "0.87.0"
|
||||
version = "0.89.0"
|
||||
authors.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
|
||||
[package]
|
||||
name = "deno_kv"
|
||||
version = "0.85.0"
|
||||
version = "0.87.0"
|
||||
authors.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
|
@ -17,6 +17,7 @@ path = "lib.rs"
|
|||
anyhow.workspace = true
|
||||
async-trait.workspace = true
|
||||
base64.workspace = true
|
||||
boxed_error.workspace = true
|
||||
bytes.workspace = true
|
||||
chrono = { workspace = true, features = ["now"] }
|
||||
deno_core.workspace = true
|
||||
|
|
102
ext/kv/lib.rs
102
ext/kv/lib.rs
|
@ -14,6 +14,7 @@ use std::time::Duration;
|
|||
|
||||
use base64::prelude::BASE64_URL_SAFE;
|
||||
use base64::Engine;
|
||||
use boxed_error::Boxed;
|
||||
use chrono::DateTime;
|
||||
use chrono::Utc;
|
||||
use deno_core::error::get_custom_error_class;
|
||||
|
@ -114,8 +115,11 @@ impl Resource for DatabaseWatcherResource {
|
|||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Boxed)]
|
||||
pub struct KvError(pub Box<KvErrorKind>);
|
||||
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
pub enum KvError {
|
||||
pub enum KvErrorKind {
|
||||
#[error(transparent)]
|
||||
DatabaseHandler(deno_core::error::AnyError),
|
||||
#[error(transparent)]
|
||||
|
@ -174,7 +178,7 @@ pub enum KvError {
|
|||
InvalidRange,
|
||||
}
|
||||
|
||||
#[op2(async)]
|
||||
#[op2(async, stack_trace)]
|
||||
#[smi]
|
||||
async fn op_kv_database_open<DBH>(
|
||||
state: Rc<RefCell<OpState>>,
|
||||
|
@ -193,7 +197,7 @@ where
|
|||
let db = handler
|
||||
.open(state.clone(), path)
|
||||
.await
|
||||
.map_err(KvError::DatabaseHandler)?;
|
||||
.map_err(KvErrorKind::DatabaseHandler)?;
|
||||
let rid = state.borrow_mut().resource_table.add(DatabaseResource {
|
||||
db,
|
||||
cancel_handle: CancelHandle::new_rc(),
|
||||
|
@ -329,7 +333,7 @@ where
|
|||
let resource = state
|
||||
.resource_table
|
||||
.get::<DatabaseResource<DBH::DB>>(rid)
|
||||
.map_err(KvError::Resource)?;
|
||||
.map_err(KvErrorKind::Resource)?;
|
||||
resource.db.clone()
|
||||
};
|
||||
|
||||
|
@ -339,7 +343,7 @@ where
|
|||
};
|
||||
|
||||
if ranges.len() > config.max_read_ranges {
|
||||
return Err(KvError::TooManyRanges(config.max_read_ranges));
|
||||
return Err(KvErrorKind::TooManyRanges(config.max_read_ranges).into_box());
|
||||
}
|
||||
|
||||
let mut total_entries = 0usize;
|
||||
|
@ -358,14 +362,16 @@ where
|
|||
Ok(ReadRange {
|
||||
start,
|
||||
end,
|
||||
limit: NonZeroU32::new(limit).ok_or(KvError::InvalidLimit)?,
|
||||
limit: NonZeroU32::new(limit).ok_or(KvErrorKind::InvalidLimit)?,
|
||||
reverse,
|
||||
})
|
||||
})
|
||||
.collect::<Result<Vec<_>, KvError>>()?;
|
||||
|
||||
if total_entries > config.max_read_entries {
|
||||
return Err(KvError::TooManyEntries(config.max_read_entries));
|
||||
return Err(
|
||||
KvErrorKind::TooManyEntries(config.max_read_entries).into_box(),
|
||||
);
|
||||
}
|
||||
|
||||
let opts = SnapshotReadOptions {
|
||||
|
@ -374,7 +380,7 @@ where
|
|||
let output_ranges = db
|
||||
.snapshot_read(read_ranges, opts)
|
||||
.await
|
||||
.map_err(KvError::Kv)?;
|
||||
.map_err(KvErrorKind::Kv)?;
|
||||
let output_ranges = output_ranges
|
||||
.into_iter()
|
||||
.map(|x| {
|
||||
|
@ -415,7 +421,7 @@ where
|
|||
if get_custom_error_class(&err) == Some("BadResource") {
|
||||
return Ok(None);
|
||||
} else {
|
||||
return Err(KvError::Resource(err));
|
||||
return Err(KvErrorKind::Resource(err).into_box());
|
||||
}
|
||||
}
|
||||
};
|
||||
|
@ -423,11 +429,11 @@ where
|
|||
};
|
||||
|
||||
let Some(mut handle) =
|
||||
db.dequeue_next_message().await.map_err(KvError::Kv)?
|
||||
db.dequeue_next_message().await.map_err(KvErrorKind::Kv)?
|
||||
else {
|
||||
return Ok(None);
|
||||
};
|
||||
let payload = handle.take_payload().await.map_err(KvError::Kv)?.into();
|
||||
let payload = handle.take_payload().await.map_err(KvErrorKind::Kv)?.into();
|
||||
let handle_rid = {
|
||||
let mut state = state.borrow_mut();
|
||||
state.resource_table.add(QueueMessageResource { handle })
|
||||
|
@ -448,11 +454,11 @@ where
|
|||
let resource = state
|
||||
.resource_table
|
||||
.get::<DatabaseResource<DBH::DB>>(rid)
|
||||
.map_err(KvError::Resource)?;
|
||||
.map_err(KvErrorKind::Resource)?;
|
||||
let config = state.borrow::<Rc<KvConfig>>().clone();
|
||||
|
||||
if keys.len() > config.max_watched_keys {
|
||||
return Err(KvError::TooManyKeys(config.max_watched_keys));
|
||||
return Err(KvErrorKind::TooManyKeys(config.max_watched_keys).into_box());
|
||||
}
|
||||
|
||||
let keys: Vec<Vec<u8>> = keys
|
||||
|
@ -493,7 +499,7 @@ async fn op_kv_watch_next(
|
|||
let resource = state
|
||||
.resource_table
|
||||
.get::<DatabaseWatcherResource>(rid)
|
||||
.map_err(KvError::Resource)?;
|
||||
.map_err(KvErrorKind::Resource)?;
|
||||
resource.clone()
|
||||
};
|
||||
|
||||
|
@ -519,7 +525,7 @@ async fn op_kv_watch_next(
|
|||
return Ok(None);
|
||||
};
|
||||
|
||||
let entries = res.map_err(KvError::Kv)?;
|
||||
let entries = res.map_err(KvErrorKind::Kv)?;
|
||||
let entries = entries
|
||||
.into_iter()
|
||||
.map(|entry| {
|
||||
|
@ -549,9 +555,9 @@ where
|
|||
let handle = state
|
||||
.resource_table
|
||||
.take::<QueueMessageResource<<<DBH>::DB as Database>::QMH>>(handle_rid)
|
||||
.map_err(|_| KvError::QueueMessageNotFound)?;
|
||||
.map_err(|_| KvErrorKind::QueueMessageNotFound)?;
|
||||
Rc::try_unwrap(handle)
|
||||
.map_err(|_| KvError::QueueMessageNotFound)?
|
||||
.map_err(|_| KvErrorKind::QueueMessageNotFound)?
|
||||
.handle
|
||||
};
|
||||
// if we fail to finish the message, there is not much we can do and the
|
||||
|
@ -692,7 +698,7 @@ impl RawSelector {
|
|||
}),
|
||||
(Some(prefix), Some(start), None) => {
|
||||
if !start.starts_with(&prefix) || start.len() == prefix.len() {
|
||||
return Err(KvError::StartKeyNotInKeyspace);
|
||||
return Err(KvErrorKind::StartKeyNotInKeyspace.into_box());
|
||||
}
|
||||
Ok(Self::Prefixed {
|
||||
prefix,
|
||||
|
@ -702,7 +708,7 @@ impl RawSelector {
|
|||
}
|
||||
(Some(prefix), None, Some(end)) => {
|
||||
if !end.starts_with(&prefix) || end.len() == prefix.len() {
|
||||
return Err(KvError::EndKeyNotInKeyspace);
|
||||
return Err(KvErrorKind::EndKeyNotInKeyspace.into_box());
|
||||
}
|
||||
Ok(Self::Prefixed {
|
||||
prefix,
|
||||
|
@ -712,7 +718,7 @@ impl RawSelector {
|
|||
}
|
||||
(None, Some(start), Some(end)) => {
|
||||
if start > end {
|
||||
return Err(KvError::StartKeyGreaterThanEndKey);
|
||||
return Err(KvErrorKind::StartKeyGreaterThanEndKey.into_box());
|
||||
}
|
||||
Ok(Self::Range { start, end })
|
||||
}
|
||||
|
@ -720,7 +726,7 @@ impl RawSelector {
|
|||
let end = start.iter().copied().chain(Some(0)).collect();
|
||||
Ok(Self::Range { start, end })
|
||||
}
|
||||
_ => Err(KvError::InvalidRange),
|
||||
_ => Err(KvErrorKind::InvalidRange.into_box()),
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -782,7 +788,7 @@ fn encode_cursor(
|
|||
) -> Result<String, KvError> {
|
||||
let common_prefix = selector.common_prefix();
|
||||
if !boundary_key.starts_with(common_prefix) {
|
||||
return Err(KvError::InvalidBoundaryKey);
|
||||
return Err(KvErrorKind::InvalidBoundaryKey.into_box());
|
||||
}
|
||||
Ok(BASE64_URL_SAFE.encode(&boundary_key[common_prefix.len()..]))
|
||||
}
|
||||
|
@ -799,7 +805,7 @@ fn decode_selector_and_cursor(
|
|||
let common_prefix = selector.common_prefix();
|
||||
let cursor = BASE64_URL_SAFE
|
||||
.decode(cursor)
|
||||
.map_err(|_| KvError::InvalidCursor)?;
|
||||
.map_err(|_| KvErrorKind::InvalidCursor)?;
|
||||
|
||||
let first_key: Vec<u8>;
|
||||
let last_key: Vec<u8>;
|
||||
|
@ -824,13 +830,13 @@ fn decode_selector_and_cursor(
|
|||
// Defend against out-of-bounds reading
|
||||
if let Some(start) = selector.start() {
|
||||
if &first_key[..] < start {
|
||||
return Err(KvError::CursorOutOfBounds);
|
||||
return Err(KvErrorKind::CursorOutOfBounds.into_box());
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(end) = selector.end() {
|
||||
if &last_key[..] > end {
|
||||
return Err(KvError::CursorOutOfBounds);
|
||||
return Err(KvErrorKind::CursorOutOfBounds.into_box());
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -855,7 +861,7 @@ where
|
|||
let resource = state
|
||||
.resource_table
|
||||
.get::<DatabaseResource<DBH::DB>>(rid)
|
||||
.map_err(KvError::Resource)?;
|
||||
.map_err(KvErrorKind::Resource)?;
|
||||
resource.db.clone()
|
||||
};
|
||||
|
||||
|
@ -865,28 +871,28 @@ where
|
|||
};
|
||||
|
||||
if checks.len() > config.max_checks {
|
||||
return Err(KvError::TooManyChecks(config.max_checks));
|
||||
return Err(KvErrorKind::TooManyChecks(config.max_checks).into_box());
|
||||
}
|
||||
|
||||
if mutations.len() + enqueues.len() > config.max_mutations {
|
||||
return Err(KvError::TooManyMutations(config.max_mutations));
|
||||
return Err(KvErrorKind::TooManyMutations(config.max_mutations).into_box());
|
||||
}
|
||||
|
||||
let checks = checks
|
||||
.into_iter()
|
||||
.map(check_from_v8)
|
||||
.collect::<Result<Vec<Check>, KvCheckError>>()
|
||||
.map_err(KvError::InvalidCheck)?;
|
||||
.map_err(KvErrorKind::InvalidCheck)?;
|
||||
let mutations = mutations
|
||||
.into_iter()
|
||||
.map(|mutation| mutation_from_v8((mutation, current_timestamp)))
|
||||
.collect::<Result<Vec<Mutation>, KvMutationError>>()
|
||||
.map_err(KvError::InvalidMutation)?;
|
||||
.map_err(KvErrorKind::InvalidMutation)?;
|
||||
let enqueues = enqueues
|
||||
.into_iter()
|
||||
.map(|e| enqueue_from_v8(e, current_timestamp))
|
||||
.collect::<Result<Vec<Enqueue>, std::io::Error>>()
|
||||
.map_err(KvError::InvalidEnqueue)?;
|
||||
.map_err(KvErrorKind::InvalidEnqueue)?;
|
||||
|
||||
let mut total_payload_size = 0usize;
|
||||
let mut total_key_size = 0usize;
|
||||
|
@ -897,7 +903,7 @@ where
|
|||
.chain(mutations.iter().map(|m| &m.key))
|
||||
{
|
||||
if key.is_empty() {
|
||||
return Err(KvError::EmptyKey);
|
||||
return Err(KvErrorKind::EmptyKey.into_box());
|
||||
}
|
||||
|
||||
total_payload_size += check_write_key_size(key, &config)?;
|
||||
|
@ -921,13 +927,16 @@ where
|
|||
}
|
||||
|
||||
if total_payload_size > config.max_total_mutation_size_bytes {
|
||||
return Err(KvError::TotalMutationTooLarge(
|
||||
config.max_total_mutation_size_bytes,
|
||||
));
|
||||
return Err(
|
||||
KvErrorKind::TotalMutationTooLarge(config.max_total_mutation_size_bytes)
|
||||
.into_box(),
|
||||
);
|
||||
}
|
||||
|
||||
if total_key_size > config.max_total_key_size_bytes {
|
||||
return Err(KvError::TotalKeyTooLarge(config.max_total_key_size_bytes));
|
||||
return Err(
|
||||
KvErrorKind::TotalKeyTooLarge(config.max_total_key_size_bytes).into_box(),
|
||||
);
|
||||
}
|
||||
|
||||
let atomic_write = AtomicWrite {
|
||||
|
@ -936,7 +945,10 @@ where
|
|||
enqueues,
|
||||
};
|
||||
|
||||
let result = db.atomic_write(atomic_write).await.map_err(KvError::Kv)?;
|
||||
let result = db
|
||||
.atomic_write(atomic_write)
|
||||
.await
|
||||
.map_err(KvErrorKind::Kv)?;
|
||||
|
||||
Ok(result.map(|res| faster_hex::hex_string(&res.versionstamp)))
|
||||
}
|
||||
|
@ -958,7 +970,9 @@ fn op_kv_encode_cursor(
|
|||
|
||||
fn check_read_key_size(key: &[u8], config: &KvConfig) -> Result<(), KvError> {
|
||||
if key.len() > config.max_read_key_size_bytes {
|
||||
Err(KvError::KeyTooLargeToRead(config.max_read_key_size_bytes))
|
||||
Err(
|
||||
KvErrorKind::KeyTooLargeToRead(config.max_read_key_size_bytes).into_box(),
|
||||
)
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
|
@ -969,7 +983,10 @@ fn check_write_key_size(
|
|||
config: &KvConfig,
|
||||
) -> Result<usize, KvError> {
|
||||
if key.len() > config.max_write_key_size_bytes {
|
||||
Err(KvError::KeyTooLargeToWrite(config.max_write_key_size_bytes))
|
||||
Err(
|
||||
KvErrorKind::KeyTooLargeToWrite(config.max_write_key_size_bytes)
|
||||
.into_box(),
|
||||
)
|
||||
} else {
|
||||
Ok(key.len())
|
||||
}
|
||||
|
@ -986,7 +1003,7 @@ fn check_value_size(
|
|||
};
|
||||
|
||||
if payload.len() > config.max_value_size_bytes {
|
||||
Err(KvError::ValueTooLarge(config.max_value_size_bytes))
|
||||
Err(KvErrorKind::ValueTooLarge(config.max_value_size_bytes).into_box())
|
||||
} else {
|
||||
Ok(payload.len())
|
||||
}
|
||||
|
@ -997,7 +1014,10 @@ fn check_enqueue_payload_size(
|
|||
config: &KvConfig,
|
||||
) -> Result<usize, KvError> {
|
||||
if payload.len() > config.max_value_size_bytes {
|
||||
Err(KvError::EnqueuePayloadTooLarge(config.max_value_size_bytes))
|
||||
Err(
|
||||
KvErrorKind::EnqueuePayloadTooLarge(config.max_value_size_bytes)
|
||||
.into_box(),
|
||||
)
|
||||
} else {
|
||||
Ok(payload.len())
|
||||
}
|
||||
|
|
|
@ -210,6 +210,7 @@ impl<P: RemoteDbHandlerPermissions + 'static> DatabaseHandler
|
|||
pool_idle_timeout: None,
|
||||
http1: false,
|
||||
http2: true,
|
||||
client_builder_hook: None,
|
||||
},
|
||||
)?;
|
||||
let fetch_client = FetchClient(client);
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
|
||||
[package]
|
||||
name = "deno_napi"
|
||||
version = "0.108.0"
|
||||
version = "0.110.0"
|
||||
authors.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
|
|
|
@ -530,7 +530,7 @@ static NAPI_LOADED_MODULES: std::sync::LazyLock<
|
|||
RwLock<HashMap<PathBuf, NapiModuleHandle>>,
|
||||
> = std::sync::LazyLock::new(|| RwLock::new(HashMap::new()));
|
||||
|
||||
#[op2(reentrant)]
|
||||
#[op2(reentrant, stack_trace)]
|
||||
fn op_napi_open<NP, 'scope>(
|
||||
scope: &mut v8::HandleScope<'scope>,
|
||||
isolate: *mut v8::Isolate,
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
|
||||
[package]
|
||||
name = "napi_sym"
|
||||
version = "0.107.0"
|
||||
version = "0.109.0"
|
||||
authors.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
|
||||
[package]
|
||||
name = "deno_net"
|
||||
version = "0.169.0"
|
||||
version = "0.171.0"
|
||||
authors.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
|
|
|
@ -7,7 +7,7 @@ pub mod ops_tls;
|
|||
pub mod ops_unix;
|
||||
pub mod raw;
|
||||
pub mod resolve_addr;
|
||||
mod tcp;
|
||||
pub mod tcp;
|
||||
|
||||
use deno_core::error::AnyError;
|
||||
use deno_core::OpState;
|
||||
|
|
|
@ -182,7 +182,7 @@ pub async fn op_net_recv_udp(
|
|||
Ok((nread, IpAddr::from(remote_addr)))
|
||||
}
|
||||
|
||||
#[op2(async)]
|
||||
#[op2(async, stack_trace)]
|
||||
#[number]
|
||||
pub async fn op_net_send_udp<NP>(
|
||||
state: Rc<RefCell<OpState>>,
|
||||
|
@ -343,7 +343,7 @@ pub async fn op_net_set_multi_ttl_udp(
|
|||
Ok(())
|
||||
}
|
||||
|
||||
#[op2(async)]
|
||||
#[op2(async, stack_trace)]
|
||||
#[serde]
|
||||
pub async fn op_net_connect_tcp<NP>(
|
||||
state: Rc<RefCell<OpState>>,
|
||||
|
@ -401,7 +401,7 @@ impl Resource for UdpSocketResource {
|
|||
}
|
||||
}
|
||||
|
||||
#[op2]
|
||||
#[op2(stack_trace)]
|
||||
#[serde]
|
||||
pub fn op_net_listen_tcp<NP>(
|
||||
state: &mut OpState,
|
||||
|
@ -501,7 +501,7 @@ where
|
|||
Ok((rid, IpAddr::from(local_addr)))
|
||||
}
|
||||
|
||||
#[op2]
|
||||
#[op2(stack_trace)]
|
||||
#[serde]
|
||||
pub fn op_net_listen_udp<NP>(
|
||||
state: &mut OpState,
|
||||
|
@ -516,7 +516,7 @@ where
|
|||
net_listen_udp::<NP>(state, addr, reuse_address, loopback)
|
||||
}
|
||||
|
||||
#[op2]
|
||||
#[op2(stack_trace)]
|
||||
#[serde]
|
||||
pub fn op_node_unstable_net_listen_udp<NP>(
|
||||
state: &mut OpState,
|
||||
|
@ -601,7 +601,7 @@ pub struct NameServer {
|
|||
port: u16,
|
||||
}
|
||||
|
||||
#[op2(async)]
|
||||
#[op2(async, stack_trace)]
|
||||
#[serde]
|
||||
pub async fn op_dns_resolve<NP>(
|
||||
state: Rc<RefCell<OpState>>,
|
||||
|
|
|
@ -251,7 +251,7 @@ pub fn op_tls_cert_resolver_resolve_error(
|
|||
lookup.resolve(sni, Err(error))
|
||||
}
|
||||
|
||||
#[op2]
|
||||
#[op2(stack_trace)]
|
||||
#[serde]
|
||||
pub fn op_tls_start<NP>(
|
||||
state: Rc<RefCell<OpState>>,
|
||||
|
@ -340,7 +340,7 @@ where
|
|||
Ok((rid, IpAddr::from(local_addr), IpAddr::from(remote_addr)))
|
||||
}
|
||||
|
||||
#[op2(async)]
|
||||
#[op2(async, stack_trace)]
|
||||
#[serde]
|
||||
pub async fn op_net_connect_tls<NP>(
|
||||
state: Rc<RefCell<OpState>>,
|
||||
|
@ -445,7 +445,7 @@ pub struct ListenTlsArgs {
|
|||
load_balanced: bool,
|
||||
}
|
||||
|
||||
#[op2]
|
||||
#[op2(stack_trace)]
|
||||
#[serde]
|
||||
pub fn op_net_listen_tls<NP>(
|
||||
state: &mut OpState,
|
||||
|
|
|
@ -85,7 +85,7 @@ pub async fn op_net_accept_unix(
|
|||
Ok((rid, local_addr_path, remote_addr_path))
|
||||
}
|
||||
|
||||
#[op2(async)]
|
||||
#[op2(async, stack_trace)]
|
||||
#[serde]
|
||||
pub async fn op_net_connect_unix<NP>(
|
||||
state: Rc<RefCell<OpState>>,
|
||||
|
@ -118,7 +118,7 @@ where
|
|||
Ok((rid, local_addr_path, remote_addr_path))
|
||||
}
|
||||
|
||||
#[op2(async)]
|
||||
#[op2(async, stack_trace)]
|
||||
#[serde]
|
||||
pub async fn op_net_recv_unixpacket(
|
||||
state: Rc<RefCell<OpState>>,
|
||||
|
@ -140,7 +140,7 @@ pub async fn op_net_recv_unixpacket(
|
|||
Ok((nread, path))
|
||||
}
|
||||
|
||||
#[op2(async)]
|
||||
#[op2(async, stack_trace)]
|
||||
#[number]
|
||||
pub async fn op_net_send_unixpacket<NP>(
|
||||
state: Rc<RefCell<OpState>>,
|
||||
|
@ -171,7 +171,7 @@ where
|
|||
Ok(nwritten)
|
||||
}
|
||||
|
||||
#[op2]
|
||||
#[op2(stack_trace)]
|
||||
#[serde]
|
||||
pub fn op_net_listen_unix<NP>(
|
||||
state: &mut OpState,
|
||||
|
@ -222,7 +222,7 @@ where
|
|||
Ok((rid, pathname))
|
||||
}
|
||||
|
||||
#[op2]
|
||||
#[op2(stack_trace)]
|
||||
#[serde]
|
||||
pub fn op_net_listen_unixpacket<NP>(
|
||||
state: &mut OpState,
|
||||
|
@ -235,7 +235,7 @@ where
|
|||
net_listen_unixpacket::<NP>(state, path)
|
||||
}
|
||||
|
||||
#[op2]
|
||||
#[op2(stack_trace)]
|
||||
#[serde]
|
||||
pub fn op_node_unstable_net_listen_unixpacket<NP>(
|
||||
state: &mut OpState,
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
|
||||
[package]
|
||||
name = "deno_node"
|
||||
version = "0.114.0"
|
||||
version = "0.116.0"
|
||||
authors.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
|
@ -22,6 +22,7 @@ aes.workspace = true
|
|||
async-trait.workspace = true
|
||||
base64.workspace = true
|
||||
blake2 = "0.10.6"
|
||||
boxed_error.workspace = true
|
||||
brotli.workspace = true
|
||||
bytes.workspace = true
|
||||
cbc.workspace = true
|
||||
|
@ -53,7 +54,7 @@ http.workspace = true
|
|||
http-body-util.workspace = true
|
||||
hyper.workspace = true
|
||||
hyper-util.workspace = true
|
||||
idna = "0.3.0"
|
||||
idna = "1.0.3"
|
||||
indexmap.workspace = true
|
||||
ipnetwork = "0.20.0"
|
||||
k256 = "0.13.1"
|
||||
|
@ -94,6 +95,7 @@ spki.workspace = true
|
|||
stable_deref_trait = "1.2.0"
|
||||
thiserror.workspace = true
|
||||
tokio.workspace = true
|
||||
tokio-eld = "0.2"
|
||||
url.workspace = true
|
||||
webpki-root-certs.workspace = true
|
||||
winapi.workspace = true
|
||||
|
|
|
@ -427,6 +427,9 @@ deno_core::extension!(deno_node,
|
|||
ops::inspector::op_inspector_emit_protocol_event,
|
||||
ops::inspector::op_inspector_enabled,
|
||||
],
|
||||
objects = [
|
||||
ops::perf_hooks::EldHistogram
|
||||
],
|
||||
esm_entry_point = "ext:deno_node/02_init.js",
|
||||
esm = [
|
||||
dir "polyfills",
|
||||
|
|
|
@ -26,7 +26,7 @@ pub enum FsError {
|
|||
Fs(#[from] deno_io::fs::FsError),
|
||||
}
|
||||
|
||||
#[op2(fast)]
|
||||
#[op2(fast, stack_trace)]
|
||||
pub fn op_node_fs_exists_sync<P>(
|
||||
state: &mut OpState,
|
||||
#[string] path: String,
|
||||
|
@ -41,7 +41,7 @@ where
|
|||
Ok(fs.exists_sync(&path))
|
||||
}
|
||||
|
||||
#[op2(async)]
|
||||
#[op2(async, stack_trace)]
|
||||
pub async fn op_node_fs_exists<P>(
|
||||
state: Rc<RefCell<OpState>>,
|
||||
#[string] path: String,
|
||||
|
@ -60,7 +60,7 @@ where
|
|||
Ok(fs.exists_async(path).await?)
|
||||
}
|
||||
|
||||
#[op2(fast)]
|
||||
#[op2(fast, stack_trace)]
|
||||
pub fn op_node_cp_sync<P>(
|
||||
state: &mut OpState,
|
||||
#[string] path: &str,
|
||||
|
@ -81,7 +81,7 @@ where
|
|||
Ok(())
|
||||
}
|
||||
|
||||
#[op2(async)]
|
||||
#[op2(async, stack_trace)]
|
||||
pub async fn op_node_cp<P>(
|
||||
state: Rc<RefCell<OpState>>,
|
||||
#[string] path: String,
|
||||
|
@ -117,7 +117,7 @@ pub struct StatFs {
|
|||
pub ffree: u64,
|
||||
}
|
||||
|
||||
#[op2]
|
||||
#[op2(stack_trace)]
|
||||
#[serde]
|
||||
pub fn op_node_statfs<P>(
|
||||
state: Rc<RefCell<OpState>>,
|
||||
|
@ -258,7 +258,7 @@ where
|
|||
}
|
||||
}
|
||||
|
||||
#[op2(fast)]
|
||||
#[op2(fast, stack_trace)]
|
||||
pub fn op_node_lutimes_sync<P>(
|
||||
state: &mut OpState,
|
||||
#[string] path: &str,
|
||||
|
@ -279,7 +279,7 @@ where
|
|||
Ok(())
|
||||
}
|
||||
|
||||
#[op2(async)]
|
||||
#[op2(async, stack_trace)]
|
||||
pub async fn op_node_lutimes<P>(
|
||||
state: Rc<RefCell<OpState>>,
|
||||
#[string] path: String,
|
||||
|
@ -305,7 +305,7 @@ where
|
|||
Ok(())
|
||||
}
|
||||
|
||||
#[op2]
|
||||
#[op2(stack_trace)]
|
||||
pub fn op_node_lchown_sync<P>(
|
||||
state: &mut OpState,
|
||||
#[string] path: String,
|
||||
|
@ -323,7 +323,7 @@ where
|
|||
Ok(())
|
||||
}
|
||||
|
||||
#[op2(async)]
|
||||
#[op2(async, stack_trace)]
|
||||
pub async fn op_node_lchown<P>(
|
||||
state: Rc<RefCell<OpState>>,
|
||||
#[string] path: String,
|
||||
|
|
|
@ -49,7 +49,7 @@ use std::cmp::min;
|
|||
use tokio::io::AsyncReadExt;
|
||||
use tokio::io::AsyncWriteExt;
|
||||
|
||||
#[op2]
|
||||
#[op2(stack_trace)]
|
||||
#[serde]
|
||||
pub fn op_node_http_request<P>(
|
||||
state: &mut OpState,
|
||||
|
|
|
@ -20,7 +20,7 @@ pub fn op_inspector_enabled() -> bool {
|
|||
false
|
||||
}
|
||||
|
||||
#[op2]
|
||||
#[op2(stack_trace)]
|
||||
pub fn op_inspector_open<P>(
|
||||
_state: &mut OpState,
|
||||
_port: Option<u16>,
|
||||
|
@ -85,7 +85,7 @@ struct JSInspectorSession {
|
|||
|
||||
impl GarbageCollected for JSInspectorSession {}
|
||||
|
||||
#[op2]
|
||||
#[op2(stack_trace)]
|
||||
#[cppgc]
|
||||
pub fn op_inspector_connect<'s, P>(
|
||||
isolate: *mut v8::Isolate,
|
||||
|
|
|
@ -10,6 +10,7 @@ pub mod idna;
|
|||
pub mod inspector;
|
||||
pub mod ipc;
|
||||
pub mod os;
|
||||
pub mod perf_hooks;
|
||||
pub mod process;
|
||||
pub mod require;
|
||||
pub mod tls;
|
||||
|
|
|
@ -21,7 +21,7 @@ pub enum OsError {
|
|||
FailedToGetUserInfo(#[source] std::io::Error),
|
||||
}
|
||||
|
||||
#[op2(fast)]
|
||||
#[op2(fast, stack_trace)]
|
||||
pub fn op_node_os_get_priority<P>(
|
||||
state: &mut OpState,
|
||||
pid: u32,
|
||||
|
@ -37,7 +37,7 @@ where
|
|||
priority::get_priority(pid).map_err(OsError::Priority)
|
||||
}
|
||||
|
||||
#[op2(fast)]
|
||||
#[op2(fast, stack_trace)]
|
||||
pub fn op_node_os_set_priority<P>(
|
||||
state: &mut OpState,
|
||||
pid: u32,
|
||||
|
@ -193,7 +193,7 @@ fn get_user_info(_uid: u32) -> Result<UserInfo, OsError> {
|
|||
})
|
||||
}
|
||||
|
||||
#[op2]
|
||||
#[op2(stack_trace)]
|
||||
#[serde]
|
||||
pub fn op_node_os_user_info<P>(
|
||||
state: &mut OpState,
|
||||
|
@ -212,7 +212,7 @@ where
|
|||
get_user_info(uid)
|
||||
}
|
||||
|
||||
#[op2(fast)]
|
||||
#[op2(fast, stack_trace)]
|
||||
pub fn op_geteuid<P>(
|
||||
state: &mut OpState,
|
||||
) -> Result<u32, deno_core::error::AnyError>
|
||||
|
@ -233,7 +233,7 @@ where
|
|||
Ok(euid)
|
||||
}
|
||||
|
||||
#[op2(fast)]
|
||||
#[op2(fast, stack_trace)]
|
||||
pub fn op_getegid<P>(
|
||||
state: &mut OpState,
|
||||
) -> Result<u32, deno_core::error::AnyError>
|
||||
|
@ -254,7 +254,7 @@ where
|
|||
Ok(egid)
|
||||
}
|
||||
|
||||
#[op2]
|
||||
#[op2(stack_trace)]
|
||||
#[serde]
|
||||
pub fn op_cpus<P>(state: &mut OpState) -> Result<Vec<cpus::CpuInfo>, OsError>
|
||||
where
|
||||
|
@ -268,7 +268,7 @@ where
|
|||
cpus::cpu_info().ok_or(OsError::FailedToGetCpuInfo)
|
||||
}
|
||||
|
||||
#[op2]
|
||||
#[op2(stack_trace)]
|
||||
#[string]
|
||||
pub fn op_homedir<P>(
|
||||
state: &mut OpState,
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Add table
Reference in a new issue