mirror of https://github.com/denoland/deno.git
synced 2025-01-21 04:52:26 -05:00

commit 6499ad6ae7: Merge remote-tracking branch 'upstream/main' into check-workspace-member-compiler-options

66 changed files with 2194 additions and 902 deletions
Cargo.lock (generated): 47 changes

@@ -1220,6 +1220,7 @@ dependencies = [
  "deno_lint",
  "deno_lockfile",
  "deno_npm",
+ "deno_npm_cache",
  "deno_package_json",
  "deno_path_util",
  "deno_resolver",

@@ -1802,9 +1803,9 @@ dependencies = [

 [[package]]
 name = "deno_lint"
-version = "0.68.1"
+version = "0.68.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ce2a040657166e39c7d59ad34230f0cc829f8ea8b7b2377038cc012ec1a1ef16"
+checksum = "ce713d564f76efd90535061113210bdc6b942ed6327b33eb1d5f76a5daf8e7a5"
 dependencies = [
  "anyhow",
  "deno_ast",

@@ -1980,9 +1981,9 @@ dependencies = [

 [[package]]
 name = "deno_npm"
-version = "0.25.5"
+version = "0.26.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "89ded7af9db5d9f2986a739d1b5fbe1c57f498e4f996ae4114728e7c6dad213f"
+checksum = "f2f125a5dba7839c46394a0a9c835da9fe60f5f412587ab4956a76492a1cc6a8"
 dependencies = [
  "anyhow",
  "async-trait",

@@ -1997,6 +1998,35 @@ dependencies = [
  "url",
 ]

+[[package]]
+name = "deno_npm_cache"
+version = "0.0.1"
+dependencies = [
+ "anyhow",
+ "async-trait",
+ "base64 0.21.7",
+ "boxed_error",
+ "deno_cache_dir",
+ "deno_core",
+ "deno_npm",
+ "deno_semver",
+ "deno_unsync",
+ "faster-hex",
+ "flate2",
+ "futures",
+ "http 1.1.0",
+ "log",
+ "parking_lot",
+ "percent-encoding",
+ "rand",
+ "ring",
+ "serde_json",
+ "tar",
+ "tempfile",
+ "thiserror 1.0.64",
+ "url",
+]
+
 [[package]]
 name = "deno_ops"
 version = "0.199.0"

@@ -2259,10 +2289,11 @@ dependencies = [

 [[package]]
 name = "deno_unsync"
-version = "0.4.1"
+version = "0.4.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2f36b4ef61a04ce201b925a5dffa90f88437d37fee4836c758470dd15ba7f05e"
+checksum = "d774fd83f26b24f0805a6ab8b26834a0d06ceac0db517b769b1e4633c96a2057"
 dependencies = [
+ "futures",
  "parking_lot",
  "tokio",
 ]

@@ -4681,9 +4712,9 @@ dependencies = [

 [[package]]
 name = "markup_fmt"
-version = "0.16.0"
+version = "0.18.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f303c36143671ac6c54112eb5aa95649b169dae783fdb6ead2c0e88b408c425c"
+checksum = "fa7605bb4ad755a9ab5c96f2ce3bfd4eb8acd559b842c041fc8a5f84d63aed3a"
 dependencies = [
  "aho-corasick",
  "css_dataset",

@@ -30,6 +30,7 @@ members = [
   "ext/webstorage",
   "resolvers/deno",
   "resolvers/node",
+  "resolvers/npm_cache",
   "runtime",
   "runtime/permissions",
   "tests",

@@ -54,7 +55,7 @@ deno_bench_util = { version = "0.174.0", path = "./bench_util" }
 deno_config = { git = "https://github.com/denoland/deno_config.git", branch = "compiler-options-from-workspace-member", features = ["workspace", "sync"] }
 deno_lockfile = "=0.23.2"
 deno_media_type = { version = "0.2.0", features = ["module_specifier"] }
-deno_npm = "=0.25.5"
+deno_npm = "=0.26.0"
 deno_path_util = "=0.2.1"
 deno_permissions = { version = "0.40.0", path = "./runtime/permissions" }
 deno_runtime = { version = "0.189.0", path = "./runtime" }

@@ -94,6 +95,7 @@ deno_websocket = { version = "0.185.0", path = "./ext/websocket" }
 deno_webstorage = { version = "0.175.0", path = "./ext/webstorage" }

 # resolvers
+deno_npm_cache = { version = "0.0.1", path = "./resolvers/npm_cache" }
 deno_resolver = { version = "0.12.0", path = "./resolvers/deno" }
 node_resolver = { version = "0.19.0", path = "./resolvers/node" }

@@ -118,6 +120,7 @@ data-encoding = "2.3.3"
 data-url = "=0.3.0"
 deno_cache_dir = "=0.14.0"
 deno_package_json = { version = "0.2.1", default-features = false }
+deno_unsync = "0.4.2"
 dlopen2 = "0.6.1"
 ecb = "=0.1.2"
 elliptic-curve = { version = "0.13.4", features = ["alloc", "arithmetic", "ecdh", "std", "pem", "jwk"] }

@@ -74,9 +74,10 @@ deno_config.workspace = true
 deno_core = { workspace = true, features = ["include_js_files_for_snapshotting"] }
 deno_doc = { version = "=0.161.2", features = ["rust", "comrak"] }
 deno_graph = { version = "=0.86.3" }
-deno_lint = { version = "=0.68.1", features = ["docs"] }
+deno_lint = { version = "=0.68.2", features = ["docs"] }
 deno_lockfile.workspace = true
 deno_npm.workspace = true
+deno_npm_cache.workspace = true
 deno_package_json.workspace = true
 deno_path_util.workspace = true
 deno_resolver.workspace = true

@@ -130,7 +131,7 @@ libz-sys.workspace = true
 log = { workspace = true, features = ["serde"] }
 lsp-types.workspace = true
 malva = "=0.11.0"
-markup_fmt = "=0.16.0"
+markup_fmt = "=0.18.0"
 memmem.workspace = true
 monch.workspace = true
 notify.workspace = true

@@ -27,6 +27,7 @@ use deno_npm::npm_rc::NpmRc;
 use deno_npm::npm_rc::ResolvedNpmRc;
 use deno_npm::resolution::ValidSerializedNpmResolutionSnapshot;
 use deno_npm::NpmSystemInfo;
+use deno_npm_cache::NpmCacheSetting;
 use deno_path_util::normalize_path;
 use deno_semver::npm::NpmPackageReqReference;
 use deno_telemetry::OtelConfig;

@@ -239,20 +240,25 @@ pub enum CacheSetting {
 }

 impl CacheSetting {
-  pub fn should_use_for_npm_package(&self, package_name: &str) -> bool {
+  pub fn as_npm_cache_setting(&self) -> NpmCacheSetting {
     match self {
-      CacheSetting::ReloadAll => false,
-      CacheSetting::ReloadSome(list) => {
-        if list.iter().any(|i| i == "npm:") {
-          return false;
+      CacheSetting::Only => NpmCacheSetting::Only,
+      CacheSetting::ReloadAll => NpmCacheSetting::ReloadAll,
+      CacheSetting::ReloadSome(values) => {
+        if values.iter().any(|v| v == "npm:") {
+          NpmCacheSetting::ReloadAll
+        } else {
+          NpmCacheSetting::ReloadSome {
+            npm_package_names: values
+              .iter()
+              .filter_map(|v| v.strip_prefix("npm:"))
+              .map(|n| n.to_string())
+              .collect(),
+          }
         }
-        let specifier = format!("npm:{package_name}");
-        if list.contains(&specifier) {
-          return false;
-        }
-        true
       }
-      _ => true,
+      CacheSetting::RespectHeaders => unreachable!(), // not supported
+      CacheSetting::Use => NpmCacheSetting::Use,
     }
   }
 }
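
The rewritten conversion above maps every CLI cache mode onto the new NpmCacheSetting from deno_npm_cache instead of answering a per-package boolean. A standalone Rust sketch of the same ReloadSome filtering, with both enums reduced to simplified stand-ins (the RespectHeaders arm from the diff is omitted here so the sketch stays total):

// Simplified stand-ins for the CLI and deno_npm_cache enums.
#[derive(Debug)]
enum NpmCacheSetting {
    Only,
    ReloadAll,
    ReloadSome { npm_package_names: Vec<String> },
    Use,
}

enum CacheSetting {
    Only,
    ReloadAll,
    ReloadSome(Vec<String>),
    Use,
}

impl CacheSetting {
    fn as_npm_cache_setting(&self) -> NpmCacheSetting {
        match self {
            CacheSetting::Only => NpmCacheSetting::Only,
            CacheSetting::ReloadAll => NpmCacheSetting::ReloadAll,
            CacheSetting::ReloadSome(values) => {
                // a bare "npm:" entry means "reload every npm package"
                if values.iter().any(|v| v == "npm:") {
                    NpmCacheSetting::ReloadAll
                } else {
                    // keep only "npm:<name>" entries, stripped of the prefix
                    NpmCacheSetting::ReloadSome {
                        npm_package_names: values
                            .iter()
                            .filter_map(|v| v.strip_prefix("npm:"))
                            .map(|n| n.to_string())
                            .collect(),
                    }
                }
            }
            CacheSetting::Use => NpmCacheSetting::Use,
        }
    }
}

fn main() {
    // e.g. --reload=npm:chalk,jsr:foo keeps only the npm entry
    let s = CacheSetting::ReloadSome(vec!["npm:chalk".into(), "jsr:foo".into()]);
    println!("{:?}", s.as_npm_cache_setting());
    // prints: ReloadSome { npm_package_names: ["chalk"] }
}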

@@ -1262,7 +1262,7 @@ impl DenoDiagnostic {
       Self::NoAttributeType => (lsp::DiagnosticSeverity::ERROR, "The module is a JSON module and not being imported with an import attribute. Consider adding `with { type: \"json\" }` to the import statement.".to_string(), None),
       Self::NoCache(specifier) => (lsp::DiagnosticSeverity::ERROR, format!("Uncached or missing remote URL: {specifier}"), Some(json!({ "specifier": specifier }))),
       Self::NotInstalledJsr(pkg_req, specifier) => (lsp::DiagnosticSeverity::ERROR, format!("JSR package \"{pkg_req}\" is not installed or doesn't exist."), Some(json!({ "specifier": specifier }))),
-      Self::NotInstalledNpm(pkg_req, specifier) => (lsp::DiagnosticSeverity::ERROR, format!("NPM package \"{pkg_req}\" is not installed or doesn't exist."), Some(json!({ "specifier": specifier }))),
+      Self::NotInstalledNpm(pkg_req, specifier) => (lsp::DiagnosticSeverity::ERROR, format!("npm package \"{pkg_req}\" is not installed or doesn't exist."), Some(json!({ "specifier": specifier }))),
       Self::NoLocal(specifier) => {
         let maybe_sloppy_resolution = CliSloppyImportsResolver::new(
           SloppyImportsCachedFs::new(Arc::new(deno_fs::RealFs))

@@ -5,8 +5,6 @@ use std::path::Path;
 use std::path::PathBuf;
 use std::sync::Arc;

-use cache::RegistryInfoDownloader;
-use cache::TarballCache;
 use deno_ast::ModuleSpecifier;
 use deno_cache_dir::npm::NpmCacheDir;
 use deno_core::anyhow::Context;

@@ -42,23 +40,22 @@ use crate::args::NpmProcessState;
 use crate::args::NpmProcessStateKind;
 use crate::args::PackageJsonDepValueParseWithLocationError;
-use crate::cache::FastInsecureHasher;
 use crate::http_util::HttpClientProvider;
 use crate::util::fs::canonicalize_path_maybe_not_exists_with_fs;
 use crate::util::progress_bar::ProgressBar;
 use crate::util::sync::AtomicFlag;

-use self::cache::NpmCache;
-use self::registry::CliNpmRegistryApi;
 use self::resolution::NpmResolution;
 use self::resolvers::create_npm_fs_resolver;
 use self::resolvers::NpmPackageFsResolver;

+use super::CliNpmCache;
+use super::CliNpmCacheEnv;
+use super::CliNpmRegistryInfoProvider;
 use super::CliNpmResolver;
+use super::CliNpmTarballCache;
 use super::InnerCliNpmResolverRef;
 use super::ResolvePkgFolderFromDenoReqError;

-pub mod cache;
-mod registry;
 mod resolution;
 mod resolvers;

@@ -85,8 +82,9 @@ pub struct CliManagedNpmResolverCreateOptions {
 pub async fn create_managed_npm_resolver_for_lsp(
   options: CliManagedNpmResolverCreateOptions,
 ) -> Arc<dyn CliNpmResolver> {
-  let npm_cache = create_cache(&options);
-  let npm_api = create_api(&options, npm_cache.clone());
+  let cache_env = create_cache_env(&options);
+  let npm_cache = create_cache(cache_env.clone(), &options);
+  let npm_api = create_api(npm_cache.clone(), cache_env.clone(), &options);
   // spawn due to the lsp's `Send` requirement
   deno_core::unsync::spawn(async move {
     let snapshot = match resolve_snapshot(&npm_api, options.snapshot).await {

@@ -97,8 +95,8 @@ pub async fn create_managed_npm_resolver_for_lsp(
       }
     };
     create_inner(
+      cache_env,
       options.fs,
-      options.http_client_provider,
       options.maybe_lockfile,
       npm_api,
       npm_cache,

@@ -118,14 +116,15 @@ pub async fn create_managed_npm_resolver_for_lsp(
 pub async fn create_managed_npm_resolver(
   options: CliManagedNpmResolverCreateOptions,
 ) -> Result<Arc<dyn CliNpmResolver>, AnyError> {
-  let npm_cache = create_cache(&options);
-  let npm_api = create_api(&options, npm_cache.clone());
-  let snapshot = resolve_snapshot(&npm_api, options.snapshot).await?;
+  let npm_cache_env = create_cache_env(&options);
+  let npm_cache = create_cache(npm_cache_env.clone(), &options);
+  let api = create_api(npm_cache.clone(), npm_cache_env.clone(), &options);
+  let snapshot = resolve_snapshot(&api, options.snapshot).await?;
   Ok(create_inner(
+    npm_cache_env,
     options.fs,
-    options.http_client_provider,
     options.maybe_lockfile,
-    npm_api,
+    api,
     npm_cache,
     options.npmrc,
     options.npm_install_deps_provider,

@@ -139,11 +138,11 @@ pub async fn create_managed_npm_resolver(

 #[allow(clippy::too_many_arguments)]
 fn create_inner(
+  env: Arc<CliNpmCacheEnv>,
   fs: Arc<dyn deno_runtime::deno_fs::FileSystem>,
-  http_client_provider: Arc<HttpClientProvider>,
   maybe_lockfile: Option<Arc<CliLockfile>>,
-  npm_api: Arc<CliNpmRegistryApi>,
-  npm_cache: Arc<NpmCache>,
+  registry_info_provider: Arc<CliNpmRegistryInfoProvider>,
+  npm_cache: Arc<CliNpmCache>,
   npm_rc: Arc<ResolvedNpmRc>,
   npm_install_deps_provider: Arc<NpmInstallDepsProvider>,
   text_only_progress_bar: crate::util::progress_bar::ProgressBar,

@@ -153,16 +152,14 @@ fn create_inner(
   lifecycle_scripts: LifecycleScriptsConfig,
 ) -> Arc<dyn CliNpmResolver> {
   let resolution = Arc::new(NpmResolution::from_serialized(
-    npm_api.clone(),
+    registry_info_provider.clone(),
     snapshot,
     maybe_lockfile.clone(),
   ));
-  let tarball_cache = Arc::new(TarballCache::new(
+  let tarball_cache = Arc::new(CliNpmTarballCache::new(
     npm_cache.clone(),
-    fs.clone(),
-    http_client_provider.clone(),
+    env,
     npm_rc.clone(),
-    text_only_progress_bar.clone(),
   ));
   let fs_resolver = create_npm_fs_resolver(
     fs.clone(),

@@ -179,7 +176,7 @@ fn create_inner(
     fs,
     fs_resolver,
     maybe_lockfile,
-    npm_api,
+    registry_info_provider,
     npm_cache,
     npm_install_deps_provider,
     resolution,

@@ -190,41 +187,55 @@ fn create_inner(
   ))
 }

-fn create_cache(options: &CliManagedNpmResolverCreateOptions) -> Arc<NpmCache> {
-  Arc::new(NpmCache::new(
+fn create_cache_env(
+  options: &CliManagedNpmResolverCreateOptions,
+) -> Arc<CliNpmCacheEnv> {
+  Arc::new(CliNpmCacheEnv::new(
+    options.fs.clone(),
+    options.http_client_provider.clone(),
+    options.text_only_progress_bar.clone(),
+  ))
+}
+
+fn create_cache(
+  env: Arc<CliNpmCacheEnv>,
+  options: &CliManagedNpmResolverCreateOptions,
+) -> Arc<CliNpmCache> {
+  Arc::new(CliNpmCache::new(
     options.npm_cache_dir.clone(),
-    options.cache_setting.clone(),
+    options.cache_setting.as_npm_cache_setting(),
+    env,
     options.npmrc.clone(),
   ))
 }

 fn create_api(
+  cache: Arc<CliNpmCache>,
+  env: Arc<CliNpmCacheEnv>,
   options: &CliManagedNpmResolverCreateOptions,
-  npm_cache: Arc<NpmCache>,
-) -> Arc<CliNpmRegistryApi> {
-  Arc::new(CliNpmRegistryApi::new(
-    npm_cache.clone(),
-    Arc::new(RegistryInfoDownloader::new(
-      npm_cache,
-      options.http_client_provider.clone(),
-      options.npmrc.clone(),
-      options.text_only_progress_bar.clone(),
-    )),
+) -> Arc<CliNpmRegistryInfoProvider> {
+  Arc::new(CliNpmRegistryInfoProvider::new(
+    cache,
+    env,
+    options.npmrc.clone(),
   ))
 }

 async fn resolve_snapshot(
-  api: &CliNpmRegistryApi,
+  registry_info_provider: &Arc<CliNpmRegistryInfoProvider>,
   snapshot: CliNpmResolverManagedSnapshotOption,
 ) -> Result<Option<ValidSerializedNpmResolutionSnapshot>, AnyError> {
   match snapshot {
     CliNpmResolverManagedSnapshotOption::ResolveFromLockfile(lockfile) => {
       if !lockfile.overwrite() {
-        let snapshot = snapshot_from_lockfile(lockfile.clone(), api)
-          .await
-          .with_context(|| {
-            format!("failed reading lockfile '{}'", lockfile.filename.display())
-          })?;
+        let snapshot = snapshot_from_lockfile(
+          lockfile.clone(),
+          &registry_info_provider.as_npm_registry_api(),
+        )
+        .await
+        .with_context(|| {
+          format!("failed reading lockfile '{}'", lockfile.filename.display())
+        })?;
         Ok(Some(snapshot))
       } else {
         Ok(None)

@@ -291,11 +302,11 @@ pub struct ManagedCliNpmResolver {
   fs: Arc<dyn FileSystem>,
   fs_resolver: Arc<dyn NpmPackageFsResolver>,
   maybe_lockfile: Option<Arc<CliLockfile>>,
-  npm_api: Arc<CliNpmRegistryApi>,
-  npm_cache: Arc<NpmCache>,
+  registry_info_provider: Arc<CliNpmRegistryInfoProvider>,
+  npm_cache: Arc<CliNpmCache>,
   npm_install_deps_provider: Arc<NpmInstallDepsProvider>,
   resolution: Arc<NpmResolution>,
-  tarball_cache: Arc<TarballCache>,
+  tarball_cache: Arc<CliNpmTarballCache>,
   text_only_progress_bar: ProgressBar,
   npm_system_info: NpmSystemInfo,
   top_level_install_flag: AtomicFlag,

@@ -316,11 +327,11 @@ impl ManagedCliNpmResolver {
     fs: Arc<dyn FileSystem>,
     fs_resolver: Arc<dyn NpmPackageFsResolver>,
     maybe_lockfile: Option<Arc<CliLockfile>>,
-    npm_api: Arc<CliNpmRegistryApi>,
-    npm_cache: Arc<NpmCache>,
+    registry_info_provider: Arc<CliNpmRegistryInfoProvider>,
+    npm_cache: Arc<CliNpmCache>,
     npm_install_deps_provider: Arc<NpmInstallDepsProvider>,
     resolution: Arc<NpmResolution>,
-    tarball_cache: Arc<TarballCache>,
+    tarball_cache: Arc<CliNpmTarballCache>,
     text_only_progress_bar: ProgressBar,
     npm_system_info: NpmSystemInfo,
     lifecycle_scripts: LifecycleScriptsConfig,

@@ -329,7 +340,7 @@ impl ManagedCliNpmResolver {
       fs,
       fs_resolver,
       maybe_lockfile,
-      npm_api,
+      registry_info_provider,
       npm_cache,
       npm_install_deps_provider,
       text_only_progress_bar,

@@ -575,7 +586,7 @@ impl ManagedCliNpmResolver {
   ) -> Result<Arc<NpmPackageInfo>, AnyError> {
     // this will internally cache the package information
     self
-      .npm_api
+      .registry_info_provider
      .package_info(package_name)
      .await
      .map_err(|err| err.into())

@@ -671,7 +682,7 @@ impl CliNpmResolver for ManagedCliNpmResolver {
   fn clone_snapshotted(&self) -> Arc<dyn CliNpmResolver> {
     // create a new snapshotted npm resolution and resolver
     let npm_resolution = Arc::new(NpmResolution::new(
-      self.npm_api.clone(),
+      self.registry_info_provider.clone(),
       self.resolution.snapshot(),
       self.maybe_lockfile.clone(),
     ));

@@ -690,7 +701,7 @@ impl CliNpmResolver for ManagedCliNpmResolver {
         self.lifecycle_scripts.clone(),
       ),
       self.maybe_lockfile.clone(),
-      self.npm_api.clone(),
+      self.registry_info_provider.clone(),
       self.npm_cache.clone(),
       self.npm_install_deps_provider.clone(),
       npm_resolution,
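
Every constructor change in this file follows one pattern: build a single cache environment first, then thread that Arc into the package cache, the registry info provider, and the tarball cache. A rough model of the wiring with hypothetical placeholder types (the real ones live in cli/npm/mod.rs and deno_npm_cache):

use std::sync::Arc;

// Placeholder types standing in for CliNpmCacheEnv and friends.
#[allow(dead_code)]
struct CacheEnv;
#[allow(dead_code)]
struct PackageCache { env: Arc<CacheEnv> }
#[allow(dead_code)]
struct RegistryInfoProvider { cache: Arc<PackageCache>, env: Arc<CacheEnv> }
#[allow(dead_code)]
struct TarballCache { cache: Arc<PackageCache>, env: Arc<CacheEnv> }

fn main() {
    // one environment, shared by every cache layer
    let env = Arc::new(CacheEnv);
    let cache = Arc::new(PackageCache { env: env.clone() });
    let api = RegistryInfoProvider { cache: cache.clone(), env: env.clone() };
    let tarballs = TarballCache { cache, env: env.clone() };
    // three consumers share the one environment (plus our local handle)
    println!("env handles: {}", Arc::strong_count(&env)); // prints 4
    let _ = (api, tarballs);
}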

@@ -1,200 +0,0 @@
-// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
-
-use std::collections::HashMap;
-use std::collections::HashSet;
-use std::sync::Arc;
-
-use async_trait::async_trait;
-use deno_core::anyhow::anyhow;
-use deno_core::error::AnyError;
-use deno_core::futures::future::BoxFuture;
-use deno_core::futures::future::Shared;
-use deno_core::futures::FutureExt;
-use deno_core::parking_lot::Mutex;
-use deno_npm::registry::NpmPackageInfo;
-use deno_npm::registry::NpmRegistryApi;
-use deno_npm::registry::NpmRegistryPackageInfoLoadError;
-
-use crate::args::CacheSetting;
-use crate::util::sync::AtomicFlag;
-
-use super::cache::NpmCache;
-use super::cache::RegistryInfoDownloader;
-
-#[derive(Debug)]
-pub struct CliNpmRegistryApi(Option<Arc<CliNpmRegistryApiInner>>);
-
-impl CliNpmRegistryApi {
-  pub fn new(
-    cache: Arc<NpmCache>,
-    registry_info_downloader: Arc<RegistryInfoDownloader>,
-  ) -> Self {
-    Self(Some(Arc::new(CliNpmRegistryApiInner {
-      cache,
-      force_reload_flag: Default::default(),
-      mem_cache: Default::default(),
-      previously_reloaded_packages: Default::default(),
-      registry_info_downloader,
-    })))
-  }
-
-  /// Clears the internal memory cache.
-  pub fn clear_memory_cache(&self) {
-    self.inner().clear_memory_cache();
-  }
-
-  fn inner(&self) -> &Arc<CliNpmRegistryApiInner> {
-    // this panicking indicates a bug in the code where this
-    // wasn't initialized
-    self.0.as_ref().unwrap()
-  }
-}
-
-#[async_trait(?Send)]
-impl NpmRegistryApi for CliNpmRegistryApi {
-  async fn package_info(
-    &self,
-    name: &str,
-  ) -> Result<Arc<NpmPackageInfo>, NpmRegistryPackageInfoLoadError> {
-    match self.inner().maybe_package_info(name).await {
-      Ok(Some(info)) => Ok(info),
-      Ok(None) => Err(NpmRegistryPackageInfoLoadError::PackageNotExists {
-        package_name: name.to_string(),
-      }),
-      Err(err) => {
-        Err(NpmRegistryPackageInfoLoadError::LoadError(Arc::new(err)))
-      }
-    }
-  }
-
-  fn mark_force_reload(&self) -> bool {
-    self.inner().mark_force_reload()
-  }
-}
-
-type CacheItemPendingResult =
-  Result<Option<Arc<NpmPackageInfo>>, Arc<AnyError>>;
-
-#[derive(Debug)]
-enum CacheItem {
-  Pending(Shared<BoxFuture<'static, CacheItemPendingResult>>),
-  Resolved(Option<Arc<NpmPackageInfo>>),
-}
-
-#[derive(Debug)]
-struct CliNpmRegistryApiInner {
-  cache: Arc<NpmCache>,
-  force_reload_flag: AtomicFlag,
-  mem_cache: Mutex<HashMap<String, CacheItem>>,
-  previously_reloaded_packages: Mutex<HashSet<String>>,
-  registry_info_downloader: Arc<RegistryInfoDownloader>,
-}
-
-impl CliNpmRegistryApiInner {
-  pub async fn maybe_package_info(
-    self: &Arc<Self>,
-    name: &str,
-  ) -> Result<Option<Arc<NpmPackageInfo>>, AnyError> {
-    let (created, future) = {
-      let mut mem_cache = self.mem_cache.lock();
-      match mem_cache.get(name) {
-        Some(CacheItem::Resolved(maybe_info)) => {
-          return Ok(maybe_info.clone());
-        }
-        Some(CacheItem::Pending(future)) => (false, future.clone()),
-        None => {
-          let future = {
-            let api = self.clone();
-            let name = name.to_string();
-            async move {
-              if (api.cache.cache_setting().should_use_for_npm_package(&name) && !api.force_reload_flag.is_raised())
-                // if this has been previously reloaded, then try loading from the
-                // file system cache
-                || !api.previously_reloaded_packages.lock().insert(name.to_string())
-              {
-                // attempt to load from the file cache
-                if let Some(info) = api.load_file_cached_package_info(&name).await {
-                  let result = Some(Arc::new(info));
-                  return Ok(result);
-                }
-              }
-              api.registry_info_downloader
-                .load_package_info(&name)
-                .await
-                .map_err(Arc::new)
-            }
-            .boxed()
-            .shared()
-          };
-          mem_cache
-            .insert(name.to_string(), CacheItem::Pending(future.clone()));
-          (true, future)
-        }
-      }
-    };
-
-    if created {
-      match future.await {
-        Ok(maybe_info) => {
-          // replace the cache item to say it's resolved now
-          self
-            .mem_cache
-            .lock()
-            .insert(name.to_string(), CacheItem::Resolved(maybe_info.clone()));
-          Ok(maybe_info)
-        }
-        Err(err) => {
-          // purge the item from the cache so it loads next time
-          self.mem_cache.lock().remove(name);
-          Err(anyhow!("{:#}", err))
-        }
-      }
-    } else {
-      Ok(future.await.map_err(|err| anyhow!("{:#}", err))?)
-    }
-  }
-
-  fn mark_force_reload(&self) -> bool {
-    // never force reload the registry information if reloading
-    // is disabled or if we're already reloading
-    if matches!(
-      self.cache.cache_setting(),
-      CacheSetting::Only | CacheSetting::ReloadAll
-    ) {
-      return false;
-    }
-    if self.force_reload_flag.raise() {
-      self.clear_memory_cache();
-      true
-    } else {
-      false
-    }
-  }
-
-  async fn load_file_cached_package_info(
-    &self,
-    name: &str,
-  ) -> Option<NpmPackageInfo> {
-    let result = deno_core::unsync::spawn_blocking({
-      let cache = self.cache.clone();
-      let name = name.to_string();
-      move || cache.load_package_info(&name)
-    })
-    .await
-    .unwrap();
-    match result {
-      Ok(value) => value,
-      Err(err) => {
-        if cfg!(debug_assertions) {
-          panic!("error loading cached npm package info for {name}: {err:#}");
-        } else {
-          None
-        }
-      }
-    }
-  }
-
-  fn clear_memory_cache(&self) {
-    self.mem_cache.lock().clear();
-  }
-}
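
The deleted CliNpmRegistryApiInner above coalesced concurrent requests: the first caller for a package name stores a Shared future, later callers await that same in-flight future, and the result is memoized as Resolved. A condensed sketch of that pattern using the futures and tokio crates, with the registry download replaced by a stub:

use std::collections::HashMap;
use std::sync::{Arc, Mutex};

use futures::future::{BoxFuture, Shared};
use futures::FutureExt;

#[derive(Clone)]
enum CacheItem {
    Pending(Shared<BoxFuture<'static, Arc<String>>>),
    Resolved(Arc<String>),
}

#[derive(Default)]
struct CoalescingCache {
    items: Mutex<HashMap<String, CacheItem>>,
}

impl CoalescingCache {
    // Returns the cached value, or joins/creates the single in-flight fetch.
    async fn get(&self, name: &str) -> Arc<String> {
        let future = {
            let mut items = self.items.lock().unwrap();
            match items.get(name) {
                Some(CacheItem::Resolved(v)) => return v.clone(),
                Some(CacheItem::Pending(f)) => f.clone(),
                None => {
                    let owned = name.to_string();
                    let future = async move {
                        // stand-in for the real registry download
                        Arc::new(format!("package info for {owned}"))
                    }
                    .boxed()
                    .shared();
                    items.insert(name.to_string(), CacheItem::Pending(future.clone()));
                    future
                }
            }
        }; // lock released before awaiting
        let value = future.await;
        // memoize so later calls skip the future entirely
        self.items
            .lock()
            .unwrap()
            .insert(name.to_string(), CacheItem::Resolved(value.clone()));
        value
    }
}

#[tokio::main]
async fn main() {
    let cache = Arc::new(CoalescingCache::default());
    let (a, b) = tokio::join!(cache.get("chalk"), cache.get("chalk"));
    assert!(Arc::ptr_eq(&a, &b)); // both callers shared one fetch
}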

@@ -8,11 +8,10 @@ use deno_core::error::AnyError;
 use deno_lockfile::NpmPackageDependencyLockfileInfo;
 use deno_lockfile::NpmPackageLockfileInfo;
 use deno_npm::registry::NpmRegistryApi;
+use deno_npm::resolution::AddPkgReqsOptions;
 use deno_npm::resolution::NpmPackagesPartitioned;
 use deno_npm::resolution::NpmResolutionError;
 use deno_npm::resolution::NpmResolutionSnapshot;
-use deno_npm::resolution::NpmResolutionSnapshotPendingResolver;
-use deno_npm::resolution::NpmResolutionSnapshotPendingResolverOptions;
 use deno_npm::resolution::PackageCacheFolderIdNotFoundError;
 use deno_npm::resolution::PackageNotFoundFromReferrerError;
 use deno_npm::resolution::PackageNvNotFoundError;

@@ -28,10 +27,9 @@ use deno_semver::package::PackageReq;
 use deno_semver::VersionReq;

 use crate::args::CliLockfile;
+use crate::npm::CliNpmRegistryInfoProvider;
 use crate::util::sync::SyncReadAsyncWriteLock;

-use super::CliNpmRegistryApi;
-
 pub struct AddPkgReqsResult {
   /// Results from adding the individual packages.
   ///

@@ -48,7 +46,7 @@ pub struct AddPkgReqsResult {
 ///
 /// This does not interact with the file system.
 pub struct NpmResolution {
-  api: Arc<CliNpmRegistryApi>,
+  registry_info_provider: Arc<CliNpmRegistryInfoProvider>,
   snapshot: SyncReadAsyncWriteLock<NpmResolutionSnapshot>,
   maybe_lockfile: Option<Arc<CliLockfile>>,
 }

@@ -64,22 +62,22 @@ impl std::fmt::Debug for NpmResolution {

 impl NpmResolution {
   pub fn from_serialized(
-    api: Arc<CliNpmRegistryApi>,
+    registry_info_provider: Arc<CliNpmRegistryInfoProvider>,
     initial_snapshot: Option<ValidSerializedNpmResolutionSnapshot>,
     maybe_lockfile: Option<Arc<CliLockfile>>,
   ) -> Self {
     let snapshot =
       NpmResolutionSnapshot::new(initial_snapshot.unwrap_or_default());
-    Self::new(api, snapshot, maybe_lockfile)
+    Self::new(registry_info_provider, snapshot, maybe_lockfile)
   }

   pub fn new(
-    api: Arc<CliNpmRegistryApi>,
+    registry_info_provider: Arc<CliNpmRegistryInfoProvider>,
     initial_snapshot: NpmResolutionSnapshot,
     maybe_lockfile: Option<Arc<CliLockfile>>,
   ) -> Self {
     Self {
-      api,
+      registry_info_provider,
       snapshot: SyncReadAsyncWriteLock::new(initial_snapshot),
       maybe_lockfile,
     }

@@ -92,7 +90,7 @@ impl NpmResolution {
     // only allow one thread in here at a time
     let snapshot_lock = self.snapshot.acquire().await;
     let result = add_package_reqs_to_snapshot(
-      &self.api,
+      &self.registry_info_provider,
       package_reqs,
       self.maybe_lockfile.clone(),
       || snapshot_lock.read().clone(),

@@ -120,7 +118,7 @@ impl NpmResolution {

     let reqs_set = package_reqs.iter().collect::<HashSet<_>>();
     let snapshot = add_package_reqs_to_snapshot(
-      &self.api,
+      &self.registry_info_provider,
       package_reqs,
       self.maybe_lockfile.clone(),
       || {

@@ -260,7 +258,7 @@ impl NpmResolution {
 }

 async fn add_package_reqs_to_snapshot(
-  api: &CliNpmRegistryApi,
+  registry_info_provider: &Arc<CliNpmRegistryInfoProvider>,
   package_reqs: &[PackageReq],
   maybe_lockfile: Option<Arc<CliLockfile>>,
   get_new_snapshot: impl Fn() -> NpmResolutionSnapshot,

@@ -283,23 +281,28 @@ async fn add_package_reqs_to_snapshot(
     /* this string is used in tests */
     "Running npm resolution."
   );
-  let pending_resolver = get_npm_pending_resolver(api);
-  let result = pending_resolver.add_pkg_reqs(snapshot, package_reqs).await;
-  api.clear_memory_cache();
+  let npm_registry_api = registry_info_provider.as_npm_registry_api();
+  let result = snapshot
+    .add_pkg_reqs(&npm_registry_api, get_add_pkg_reqs_options(package_reqs))
+    .await;
   let result = match &result.dep_graph_result {
-    Err(NpmResolutionError::Resolution(err)) if api.mark_force_reload() => {
+    Err(NpmResolutionError::Resolution(err))
+      if npm_registry_api.mark_force_reload() =>
+    {
       log::debug!("{err:#}");
       log::debug!("npm resolution failed. Trying again...");

-      // try again
+      // try again with forced reloading
       let snapshot = get_new_snapshot();
-      let result = pending_resolver.add_pkg_reqs(snapshot, package_reqs).await;
-      api.clear_memory_cache();
-      result
+      snapshot
+        .add_pkg_reqs(&npm_registry_api, get_add_pkg_reqs_options(package_reqs))
+        .await
     }
     _ => result,
   };

+  registry_info_provider.clear_memory_cache();
+
   if let Ok(snapshot) = &result.dep_graph_result {
     if let Some(lockfile) = maybe_lockfile {
       populate_lockfile_from_snapshot(&lockfile, snapshot);

@@ -309,19 +312,15 @@ async fn add_package_reqs_to_snapshot(
   result
 }

-fn get_npm_pending_resolver(
-  api: &CliNpmRegistryApi,
-) -> NpmResolutionSnapshotPendingResolver<CliNpmRegistryApi> {
-  NpmResolutionSnapshotPendingResolver::new(
-    NpmResolutionSnapshotPendingResolverOptions {
-      api,
-      // WARNING: When bumping this version, check if anything needs to be
-      // updated in the `setNodeOnlyGlobalNames` call in 99_main_compiler.js
-      types_node_version_req: Some(
-        VersionReq::parse_from_npm("22.0.0 - 22.5.4").unwrap(),
-      ),
-    },
-  )
+fn get_add_pkg_reqs_options(package_reqs: &[PackageReq]) -> AddPkgReqsOptions {
+  AddPkgReqsOptions {
+    package_reqs,
+    // WARNING: When bumping this version, check if anything needs to be
+    // updated in the `setNodeOnlyGlobalNames` call in 99_main_compiler.js
+    types_node_version_req: Some(
+      VersionReq::parse_from_npm("22.0.0 - 22.5.4").unwrap(),
+    ),
+  }
 }

 fn populate_lockfile_from_snapshot(
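
The reworked add_package_reqs_to_snapshot keeps the old control flow: run resolution once, and if it fails with a resolution error while mark_force_reload() agrees to arm a reload, rebuild the snapshot and try exactly once more. Reduced to its shape (the resolve and mark_force_reload closures here are stand-ins, not the real API):

// Generic "try once, retry once with fresh data" shape used by the diff.
fn resolve_with_retry<T, E>(
    mut resolve: impl FnMut() -> Result<T, E>,
    mut mark_force_reload: impl FnMut() -> bool,
) -> Result<T, E> {
    match resolve() {
        Ok(v) => Ok(v),
        Err(e) => {
            // only retry when a forced reload was actually armed,
            // otherwise surface the original error
            if mark_force_reload() {
                resolve()
            } else {
                Err(e)
            }
        }
    }
}

fn main() {
    let mut calls = 0;
    let result: Result<u32, &str> = resolve_with_retry(
        || {
            calls += 1;
            if calls == 1 { Err("stale registry info") } else { Ok(42) }
        },
        || true, // pretend reloading is allowed
    );
    assert_eq!(result, Ok(42));
}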

@@ -24,7 +24,7 @@ use deno_runtime::deno_fs::FileSystem;
 use deno_runtime::deno_node::NodePermissions;
 use node_resolver::errors::PackageFolderResolveError;

-use crate::npm::managed::cache::TarballCache;
+use crate::npm::CliNpmTarballCache;

 /// Part of the resolution that interacts with the file system.
 #[async_trait(?Send)]

@@ -140,7 +140,7 @@ impl RegistryReadPermissionChecker {
 /// Caches all the packages in parallel.
 pub async fn cache_packages(
   packages: &[NpmResolutionPackage],
-  tarball_cache: &Arc<TarballCache>,
+  tarball_cache: &Arc<CliNpmTarballCache>,
 ) -> Result<(), AnyError> {
   let mut futures_unordered = futures::stream::FuturesUnordered::new();
   for package in packages {

@@ -8,6 +8,8 @@ use std::path::PathBuf;
 use std::sync::Arc;

 use crate::colors;
+use crate::npm::CliNpmCache;
+use crate::npm::CliNpmTarballCache;
 use async_trait::async_trait;
 use deno_ast::ModuleSpecifier;
 use deno_core::error::AnyError;

@@ -24,8 +26,6 @@ use node_resolver::errors::ReferrerNotFoundError;
 use crate::args::LifecycleScriptsConfig;
 use crate::cache::FastInsecureHasher;

-use super::super::cache::NpmCache;
-use super::super::cache::TarballCache;
 use super::super::resolution::NpmResolution;
 use super::common::cache_packages;
 use super::common::lifecycle_scripts::LifecycleScriptsStrategy;

@@ -35,8 +35,8 @@ use super::common::RegistryReadPermissionChecker;
 /// Resolves packages from the global npm cache.
 #[derive(Debug)]
 pub struct GlobalNpmPackageResolver {
-  cache: Arc<NpmCache>,
-  tarball_cache: Arc<TarballCache>,
+  cache: Arc<CliNpmCache>,
+  tarball_cache: Arc<CliNpmTarballCache>,
   resolution: Arc<NpmResolution>,
   system_info: NpmSystemInfo,
   registry_read_permission_checker: RegistryReadPermissionChecker,

@@ -45,9 +45,9 @@ pub struct GlobalNpmPackageResolver {

 impl GlobalNpmPackageResolver {
   pub fn new(
-    cache: Arc<NpmCache>,
+    cache: Arc<CliNpmCache>,
     fs: Arc<dyn FileSystem>,
-    tarball_cache: Arc<TarballCache>,
+    tarball_cache: Arc<CliNpmTarballCache>,
     resolution: Arc<NpmResolution>,
     system_info: NpmSystemInfo,
     lifecycle_scripts: LifecycleScriptsConfig,

@@ -17,6 +17,8 @@ use std::sync::Arc;

 use crate::args::LifecycleScriptsConfig;
 use crate::colors;
+use crate::npm::CliNpmCache;
+use crate::npm::CliNpmTarballCache;
 use async_trait::async_trait;
 use deno_ast::ModuleSpecifier;
 use deno_cache_dir::npm::mixed_case_package_name_decode;

@@ -52,8 +54,6 @@ use crate::util::fs::LaxSingleProcessFsFlag;
 use crate::util::progress_bar::ProgressBar;
 use crate::util::progress_bar::ProgressMessagePrompt;

-use super::super::cache::NpmCache;
-use super::super::cache::TarballCache;
 use super::super::resolution::NpmResolution;
 use super::common::bin_entries;
 use super::common::NpmPackageFsResolver;

@@ -63,12 +63,12 @@ use super::common::RegistryReadPermissionChecker;
 /// and resolves packages from it.
 #[derive(Debug)]
 pub struct LocalNpmPackageResolver {
-  cache: Arc<NpmCache>,
+  cache: Arc<CliNpmCache>,
   fs: Arc<dyn deno_fs::FileSystem>,
   npm_install_deps_provider: Arc<NpmInstallDepsProvider>,
   progress_bar: ProgressBar,
   resolution: Arc<NpmResolution>,
-  tarball_cache: Arc<TarballCache>,
+  tarball_cache: Arc<CliNpmTarballCache>,
   root_node_modules_path: PathBuf,
   root_node_modules_url: Url,
   system_info: NpmSystemInfo,

@@ -79,12 +79,12 @@ pub struct LocalNpmPackageResolver {
 impl LocalNpmPackageResolver {
   #[allow(clippy::too_many_arguments)]
   pub fn new(
-    cache: Arc<NpmCache>,
+    cache: Arc<CliNpmCache>,
     fs: Arc<dyn deno_fs::FileSystem>,
     npm_install_deps_provider: Arc<NpmInstallDepsProvider>,
     progress_bar: ProgressBar,
     resolution: Arc<NpmResolution>,
-    tarball_cache: Arc<TarballCache>,
+    tarball_cache: Arc<CliNpmTarballCache>,
     node_modules_folder: PathBuf,
     system_info: NpmSystemInfo,
     lifecycle_scripts: LifecycleScriptsConfig,

@@ -284,10 +284,10 @@ fn local_node_modules_package_contents_path(
 #[allow(clippy::too_many_arguments)]
 async fn sync_resolution_with_fs(
   snapshot: &NpmResolutionSnapshot,
-  cache: &Arc<NpmCache>,
+  cache: &Arc<CliNpmCache>,
   npm_install_deps_provider: &NpmInstallDepsProvider,
   progress_bar: &ProgressBar,
-  tarball_cache: &Arc<TarballCache>,
+  tarball_cache: &Arc<CliNpmTarballCache>,
   root_node_modules_dir_path: &Path,
   system_info: &NpmSystemInfo,
   lifecycle_scripts: &LifecycleScriptsConfig,

@@ -12,6 +12,8 @@ use deno_runtime::deno_fs::FileSystem;

 use crate::args::LifecycleScriptsConfig;
 use crate::args::NpmInstallDepsProvider;
+use crate::npm::CliNpmCache;
+use crate::npm::CliNpmTarballCache;
 use crate::util::progress_bar::ProgressBar;

 pub use self::common::NpmPackageFsResolver;

@@ -19,18 +21,16 @@ pub use self::common::NpmPackageFsResolver;
 use self::global::GlobalNpmPackageResolver;
 use self::local::LocalNpmPackageResolver;

-use super::cache::NpmCache;
-use super::cache::TarballCache;
 use super::resolution::NpmResolution;

 #[allow(clippy::too_many_arguments)]
 pub fn create_npm_fs_resolver(
   fs: Arc<dyn FileSystem>,
-  npm_cache: Arc<NpmCache>,
+  npm_cache: Arc<CliNpmCache>,
   npm_install_deps_provider: &Arc<NpmInstallDepsProvider>,
   progress_bar: &ProgressBar,
   resolution: Arc<NpmResolution>,
-  tarball_cache: Arc<TarballCache>,
+  tarball_cache: Arc<CliNpmTarballCache>,
   maybe_node_modules_path: Option<PathBuf>,
   system_info: NpmSystemInfo,
   lifecycle_scripts: LifecycleScriptsConfig,
cli/npm/mod.rs: 111 changes

@@ -1,33 +1,39 @@
 // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.

 mod byonm;
-mod common;
 mod managed;

 use std::borrow::Cow;
+use std::path::Path;
 use std::sync::Arc;

-use common::maybe_auth_header_for_npm_registry;
 use dashmap::DashMap;
 use deno_core::error::AnyError;
 use deno_core::serde_json;
 use deno_core::url::Url;
 use deno_npm::npm_rc::ResolvedNpmRc;
 use deno_npm::registry::NpmPackageInfo;
 use deno_resolver::npm::ByonmInNpmPackageChecker;
 use deno_resolver::npm::ByonmNpmResolver;
 use deno_resolver::npm::CliNpmReqResolver;
 use deno_resolver::npm::ResolvePkgFolderFromDenoReqError;
+use deno_runtime::deno_fs::FileSystem;
 use deno_runtime::deno_node::NodePermissions;
 use deno_runtime::ops::process::NpmProcessStateProvider;
 use deno_semver::package::PackageNv;
 use deno_semver::package::PackageReq;
-use managed::cache::registry_info::get_package_url;
+use http::HeaderName;
+use http::HeaderValue;
 use managed::create_managed_in_npm_pkg_checker;
 use node_resolver::InNpmPackageChecker;
 use node_resolver::NpmPackageFolderResolver;

 use crate::file_fetcher::FileFetcher;
+use crate::http_util::HttpClientProvider;
+use crate::util::fs::atomic_write_file_with_retries_and_fs;
+use crate::util::fs::hard_link_dir_recursive;
+use crate::util::fs::AtomicWriteFileFsAdapter;
+use crate::util::progress_bar::ProgressBar;

 pub use self::byonm::CliByonmNpmResolver;
 pub use self::byonm::CliByonmNpmResolverCreateOptions;

@@ -36,6 +42,99 @@ pub use self::managed::CliManagedNpmResolverCreateOptions;
 pub use self::managed::CliNpmResolverManagedSnapshotOption;
 pub use self::managed::ManagedCliNpmResolver;

+pub type CliNpmTarballCache = deno_npm_cache::TarballCache<CliNpmCacheEnv>;
+pub type CliNpmCache = deno_npm_cache::NpmCache<CliNpmCacheEnv>;
+pub type CliNpmRegistryInfoProvider =
+  deno_npm_cache::RegistryInfoProvider<CliNpmCacheEnv>;
+
+#[derive(Debug)]
+pub struct CliNpmCacheEnv {
+  fs: Arc<dyn FileSystem>,
+  http_client_provider: Arc<HttpClientProvider>,
+  progress_bar: ProgressBar,
+}
+
+impl CliNpmCacheEnv {
+  pub fn new(
+    fs: Arc<dyn FileSystem>,
+    http_client_provider: Arc<HttpClientProvider>,
+    progress_bar: ProgressBar,
+  ) -> Self {
+    Self {
+      fs,
+      http_client_provider,
+      progress_bar,
+    }
+  }
+}
+
+#[async_trait::async_trait(?Send)]
+impl deno_npm_cache::NpmCacheEnv for CliNpmCacheEnv {
+  fn exists(&self, path: &Path) -> bool {
+    self.fs.exists_sync(path)
+  }
+
+  fn hard_link_dir_recursive(
+    &self,
+    from: &Path,
+    to: &Path,
+  ) -> Result<(), AnyError> {
+    // todo(dsherret): use self.fs here instead
+    hard_link_dir_recursive(from, to)
+  }
+
+  fn atomic_write_file_with_retries(
+    &self,
+    file_path: &Path,
+    data: &[u8],
+  ) -> std::io::Result<()> {
+    atomic_write_file_with_retries_and_fs(
+      &AtomicWriteFileFsAdapter {
+        fs: self.fs.as_ref(),
+        write_mode: crate::cache::CACHE_PERM,
+      },
+      file_path,
+      data,
+    )
+  }
+
+  async fn download_with_retries_on_any_tokio_runtime(
+    &self,
+    url: Url,
+    maybe_auth_header: Option<(HeaderName, HeaderValue)>,
+  ) -> Result<Option<Vec<u8>>, deno_npm_cache::DownloadError> {
+    let guard = self.progress_bar.update(url.as_str());
+    let client = self.http_client_provider.get_or_create().map_err(|err| {
+      deno_npm_cache::DownloadError {
+        status_code: None,
+        error: err,
+      }
+    })?;
+    client
+      .download_with_progress_and_retries(url, maybe_auth_header, &guard)
+      .await
+      .map_err(|err| {
+        use crate::http_util::DownloadError::*;
+        let status_code = match &err {
+          Fetch { .. }
+          | UrlParse { .. }
+          | HttpParse { .. }
+          | Json { .. }
+          | ToStr { .. }
+          | NoRedirectHeader { .. }
+          | TooManyRedirects => None,
+          BadResponse(bad_response_error) => {
+            Some(bad_response_error.status_code)
+          }
+        };
+        deno_npm_cache::DownloadError {
+          status_code,
+          error: err.into(),
+        }
+      })
+  }
+}
+
 pub enum CliNpmResolverCreateOptions {
   Managed(CliManagedNpmResolverCreateOptions),
   Byonm(CliByonmNpmResolverCreateOptions),

@@ -179,13 +278,15 @@ impl NpmFetchResolver {
     if let Some(info) = self.info_by_name.get(name) {
       return info.value().clone();
     }
+    // todo(#27198): use RegistryInfoProvider instead
     let fetch_package_info = || async {
-      let info_url = get_package_url(&self.npmrc, name);
+      let info_url = deno_npm_cache::get_package_url(&self.npmrc, name);
       let file_fetcher = self.file_fetcher.clone();
       let registry_config = self.npmrc.get_registry_config(name);
       // TODO(bartlomieju): this should error out, not use `.ok()`.
       let maybe_auth_header =
-        maybe_auth_header_for_npm_registry(registry_config).ok()?;
+        deno_npm_cache::maybe_auth_header_for_npm_registry(registry_config)
+          .ok()?;
       // spawn due to the lsp's `Send` requirement
       let file = deno_core::unsync::spawn(async move {
         file_fetcher
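
CliNpmCacheEnv is what keeps the extracted deno_npm_cache crate free of CLI types: the crate is generic over an environment trait, and the CLI pins the generics with type aliases such as `type CliNpmCache = deno_npm_cache::NpmCache<CliNpmCacheEnv>;`. A stripped-down model of that design (names here are illustrative, not the crate's exact API):

use std::path::Path;
use std::sync::Arc;

// What the extracted crate owns: a trait describing the few
// platform operations it needs...
trait NpmCacheEnv {
    fn exists(&self, path: &Path) -> bool;
}

// ...and cache types generic over that environment.
struct NpmCache<TEnv: NpmCacheEnv> {
    env: Arc<TEnv>,
}

impl<TEnv: NpmCacheEnv> NpmCache<TEnv> {
    fn new(env: Arc<TEnv>) -> Self {
        Self { env }
    }

    fn is_cached(&self, path: &Path) -> bool {
        self.env.exists(path)
    }
}

// What the CLI side owns: a concrete environment...
struct CliEnv;

impl NpmCacheEnv for CliEnv {
    fn exists(&self, path: &Path) -> bool {
        path.exists()
    }
}

// ...and an alias that pins the generic to it.
type CliNpmCache = NpmCache<CliEnv>;

fn main() {
    let cache = CliNpmCache::new(Arc::new(CliEnv));
    println!("{}", cache.is_cached(Path::new("/tmp")));
}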

@@ -440,8 +440,10 @@ pub fn format_html(
       )
     }
     _ => {
-      let mut typescript_config =
-        get_resolved_typescript_config(fmt_options);
+      let mut typescript_config_builder =
+        get_typescript_config_builder(fmt_options);
+      typescript_config_builder.file_indent_level(hints.indent_level);
+      let mut typescript_config = typescript_config_builder.build();
       typescript_config.line_width = hints.print_width as u32;
       dprint_plugin_typescript::format_text(
         &path,

@@ -919,9 +921,9 @@ fn files_str(len: usize) -> &'static str {
   }
 }

-fn get_resolved_typescript_config(
+fn get_typescript_config_builder(
   options: &FmtOptionsConfig,
-) -> dprint_plugin_typescript::configuration::Configuration {
+) -> dprint_plugin_typescript::configuration::ConfigurationBuilder {
   let mut builder =
     dprint_plugin_typescript::configuration::ConfigurationBuilder::new();
   builder.deno();

@@ -953,7 +955,13 @@
     });
   }

-  builder.build()
+  builder
 }

+fn get_resolved_typescript_config(
+  options: &FmtOptionsConfig,
+) -> dprint_plugin_typescript::configuration::Configuration {
+  get_typescript_config_builder(options).build()
+}
+
 fn get_resolved_markdown_config(

@@ -1075,6 +1083,7 @@ fn get_resolved_markup_fmt_config(
   };

   let language_options = LanguageOptions {
+    script_formatter: Some(markup_fmt::config::ScriptFormatter::Dprint),
     quotes: Quotes::Double,
     format_comments: false,
     script_indent: true,
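
Splitting get_resolved_typescript_config into a builder-returning helper lets the HTML path adjust one knob (file_indent_level) before building, while every other caller keeps the old one-shot function. The shape of that refactor, with a hypothetical stand-in for dprint's configuration builder:

// Hypothetical stand-in for dprint's ConfigurationBuilder.
#[derive(Default)]
struct ConfigBuilder {
    indent_level: u32,
}

#[derive(Debug)]
struct Config {
    indent_level: u32,
}

impl ConfigBuilder {
    fn file_indent_level(&mut self, level: u32) -> &mut Self {
        self.indent_level = level;
        self
    }

    fn build(&self) -> Config {
        Config { indent_level: self.indent_level }
    }
}

// Shared helper returns the builder so call sites can keep customizing...
fn get_config_builder() -> ConfigBuilder {
    ConfigBuilder::default()
}

// ...while the old function becomes a thin wrapper for existing callers.
fn get_resolved_config() -> Config {
    get_config_builder().build()
}

fn main() {
    // the embedded-in-HTML path adjusts indentation before building
    let mut builder = get_config_builder();
    builder.file_indent_level(2);
    println!("{:?}", builder.build());
    println!("{:?}", get_resolved_config());
}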

@@ -241,12 +241,15 @@ pub async fn execute_script(
         description: None,
       },
       kill_signal,
+      cli_options.argv(),
     )
     .await;
   }

   for task_config in &packages_task_configs {
-    let exit_code = task_runner.run_tasks(task_config, &kill_signal).await?;
+    let exit_code = task_runner
+      .run_tasks(task_config, &kill_signal, cli_options.argv())
+      .await?;
     if exit_code > 0 {
       return Ok(exit_code);
     }

@@ -263,6 +266,7 @@ struct RunSingleOptions<'a> {
   cwd: &'a Path,
   custom_commands: HashMap<String, Rc<dyn ShellCommand>>,
   kill_signal: KillSignal,
+  argv: &'a [String],
 }

 struct TaskRunner<'a> {

@@ -279,9 +283,10 @@ impl<'a> TaskRunner<'a> {
     &self,
     pkg_tasks_config: &PackageTaskInfo,
     kill_signal: &KillSignal,
+    argv: &[String],
   ) -> Result<i32, deno_core::anyhow::Error> {
     match sort_tasks_topo(pkg_tasks_config) {
-      Ok(sorted) => self.run_tasks_in_parallel(sorted, kill_signal).await,
+      Ok(sorted) => self.run_tasks_in_parallel(sorted, kill_signal, argv).await,
       Err(err) => match err {
         TaskError::NotFound(name) => {
           if self.task_flags.is_run {

@@ -317,6 +322,7 @@ impl<'a> TaskRunner<'a> {
     &self,
     tasks: Vec<ResolvedTask<'a>>,
     kill_signal: &KillSignal,
+    args: &[String],
   ) -> Result<i32, deno_core::anyhow::Error> {
     struct PendingTasksContext<'a> {
       completed: HashSet<usize>,

@@ -338,13 +344,21 @@ impl<'a> TaskRunner<'a> {
         &mut self,
         runner: &'b TaskRunner<'b>,
         kill_signal: &KillSignal,
+        argv: &'a [String],
       ) -> Option<
         LocalBoxFuture<'b, Result<(i32, &'a ResolvedTask<'a>), AnyError>>,
       >
       where
         'a: 'b,
       {
-        for task in self.tasks.iter() {
+        let mut tasks_iter = self.tasks.iter().peekable();
+        while let Some(task) = tasks_iter.next() {
+          let args = if tasks_iter.peek().is_none() {
+            argv
+          } else {
+            &[]
+          };
+
           if self.completed.contains(&task.id)
             || self.running.contains(&task.id)
           {

@@ -366,7 +380,13 @@ impl<'a> TaskRunner<'a> {
             match task.task_or_script {
               TaskOrScript::Task(_, def) => {
                 runner
-                  .run_deno_task(task.folder_url, task.name, def, kill_signal)
+                  .run_deno_task(
+                    task.folder_url,
+                    task.name,
+                    def,
+                    kill_signal,
+                    args,
+                  )
                   .await
               }
               TaskOrScript::Script(scripts, _) => {

@@ -376,6 +396,7 @@ impl<'a> TaskRunner<'a> {
                     task.name,
                     scripts,
                     kill_signal,
+                    args,
                   )
                   .await
               }

@@ -399,7 +420,7 @@ impl<'a> TaskRunner<'a> {

     while context.has_remaining_tasks() {
       while queue.len() < self.concurrency {
-        if let Some(task) = context.get_next_task(self, kill_signal) {
+        if let Some(task) = context.get_next_task(self, kill_signal, args) {
          queue.push(task);
         } else {
          break;

@@ -429,6 +450,7 @@ impl<'a> TaskRunner<'a> {
     task_name: &str,
     definition: &TaskDefinition,
     kill_signal: KillSignal,
+    argv: &'a [String],
   ) -> Result<i32, deno_core::anyhow::Error> {
     let cwd = match &self.task_flags.cwd {
       Some(path) => canonicalize_path(&PathBuf::from(path))

@@ -447,6 +469,7 @@ impl<'a> TaskRunner<'a> {
       cwd: &cwd,
       custom_commands,
       kill_signal,
+      argv,
     })
     .await
   }

@@ -457,6 +480,7 @@ impl<'a> TaskRunner<'a> {
     task_name: &str,
     scripts: &IndexMap<String, String>,
     kill_signal: KillSignal,
+    argv: &[String],
   ) -> Result<i32, deno_core::anyhow::Error> {
     // ensure the npm packages are installed if using a managed resolver
     if let Some(npm_resolver) = self.npm_resolver.as_managed() {

@@ -489,6 +513,7 @@ impl<'a> TaskRunner<'a> {
         cwd: &cwd,
         custom_commands: custom_commands.clone(),
         kill_signal: kill_signal.clone(),
+        argv,
       })
       .await?;
       if exit_code > 0 {

@@ -510,11 +535,12 @@ impl<'a> TaskRunner<'a> {
       cwd,
       custom_commands,
       kill_signal,
+      argv,
     } = opts;

     output_task(
       opts.task_name,
-      &task_runner::get_script_with_args(script, self.cli_options.argv()),
+      &task_runner::get_script_with_args(script, argv),
     );

     Ok(

@@ -525,7 +551,7 @@ impl<'a> TaskRunner<'a> {
         env_vars: self.env_vars.clone(),
         custom_commands,
         init_cwd: self.cli_options.initial_cwd(),
-        argv: self.cli_options.argv(),
+        argv,
         root_node_modules_dir: self.npm_resolver.root_node_modules_path(),
         stdio: None,
         kill_signal,
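
The task-runner changes forward the CLI's trailing argv only to the last task of a topologically sorted chain; every earlier dependency task runs with empty args. The peekable trick used above to detect the last element, in isolation:

fn main() {
    let tasks = ["build", "lint", "serve"];
    let argv = ["--port".to_string(), "8080".to_string()];

    let mut tasks_iter = tasks.iter().peekable();
    while let Some(task) = tasks_iter.next() {
        // only the last task (nothing left to peek) receives the extra args
        let args: &[String] = if tasks_iter.peek().is_none() {
            &argv
        } else {
            &[]
        };
        println!("{task} {args:?}");
    }
    // prints: build [] / lint [] / serve ["--port", "8080"]
}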

@@ -51,19 +51,6 @@ pub fn get_extension(file_path: &Path) -> Option<String> {
     .map(|e| e.to_lowercase());
 }

-pub fn get_atomic_dir_path(file_path: &Path) -> PathBuf {
-  let rand = gen_rand_path_component();
-  let new_file_name = format!(
-    ".{}_{}",
-    file_path
-      .file_name()
-      .map(|f| f.to_string_lossy())
-      .unwrap_or(Cow::Borrowed("")),
-    rand
-  );
-  file_path.with_file_name(new_file_name)
-}
-
 pub fn get_atomic_file_path(file_path: &Path) -> PathBuf {
   let rand = gen_rand_path_component();
   let extension = format!("{rand}.tmp");

@@ -3,11 +3,9 @@
 mod async_flag;
 mod sync_read_async_write_lock;
 mod task_queue;
-mod value_creator;

 pub use async_flag::AsyncFlag;
 pub use deno_core::unsync::sync::AtomicFlag;
 pub use sync_read_async_write_lock::SyncReadAsyncWriteLock;
 pub use task_queue::TaskQueue;
 pub use task_queue::TaskQueuePermit;
-pub use value_creator::MultiRuntimeAsyncValueCreator;

@@ -1,213 +0,0 @@
-// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
-
-use std::sync::Arc;
-
-use deno_core::futures::future::BoxFuture;
-use deno_core::futures::future::LocalBoxFuture;
-use deno_core::futures::future::Shared;
-use deno_core::futures::FutureExt;
-use deno_core::parking_lot::Mutex;
-use tokio::task::JoinError;
-
-type JoinResult<TResult> = Result<TResult, Arc<JoinError>>;
-type CreateFutureFn<TResult> =
-  Box<dyn Fn() -> LocalBoxFuture<'static, TResult> + Send + Sync>;
-
-#[derive(Debug)]
-struct State<TResult> {
-  retry_index: usize,
-  future: Option<Shared<BoxFuture<'static, JoinResult<TResult>>>>,
-}
-
-/// Attempts to create a shared value asynchronously on one tokio runtime while
-/// many runtimes are requesting the value.
-///
-/// This is only useful when the value needs to get created once across
-/// many runtimes.
-///
-/// This handles the case where the tokio runtime creating the value goes down
-/// while another one is waiting on the value.
-pub struct MultiRuntimeAsyncValueCreator<TResult: Send + Clone + 'static> {
-  create_future: CreateFutureFn<TResult>,
-  state: Mutex<State<TResult>>,
-}
-
-impl<TResult: Send + Clone + 'static> std::fmt::Debug
-  for MultiRuntimeAsyncValueCreator<TResult>
-{
-  fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-    f.debug_struct("MultiRuntimeAsyncValueCreator").finish()
-  }
-}
-
-impl<TResult: Send + Clone + 'static> MultiRuntimeAsyncValueCreator<TResult> {
-  pub fn new(create_future: CreateFutureFn<TResult>) -> Self {
-    Self {
-      state: Mutex::new(State {
-        retry_index: 0,
-        future: None,
-      }),
-      create_future,
-    }
-  }
-
-  pub async fn get(&self) -> TResult {
-    let (mut future, mut retry_index) = {
-      let mut state = self.state.lock();
-      let future = match &state.future {
-        Some(future) => future.clone(),
-        None => {
-          let future = self.create_shared_future();
-          state.future = Some(future.clone());
-          future
-        }
-      };
-      (future, state.retry_index)
-    };
-
-    loop {
-      let result = future.await;
-
-      match result {
-        Ok(result) => return result,
-        Err(join_error) => {
-          if join_error.is_cancelled() {
-            let mut state = self.state.lock();
-
-            if state.retry_index == retry_index {
-              // we were the first one to retry, so create a new future
-              // that we'll run from the current runtime
-              state.retry_index += 1;
-              state.future = Some(self.create_shared_future());
-            }
-
-            retry_index = state.retry_index;
-            future = state.future.as_ref().unwrap().clone();
-
-            // just in case we're stuck in a loop
-            if retry_index > 1000 {
-              panic!("Something went wrong.") // should never happen
-            }
-          } else {
-            panic!("{}", join_error);
-          }
-        }
-      }
-    }
-  }
-
-  fn create_shared_future(
-    &self,
-  ) -> Shared<BoxFuture<'static, JoinResult<TResult>>> {
-    let future = (self.create_future)();
-    deno_core::unsync::spawn(future)
-      .map(|result| result.map_err(Arc::new))
-      .boxed()
-      .shared()
-  }
-}
-
-#[cfg(test)]
-mod test {
-  use deno_core::unsync::spawn;
-
-  use super::*;
-
-  #[tokio::test]
-  async fn single_runtime() {
-    let value_creator = MultiRuntimeAsyncValueCreator::new(Box::new(|| {
-      async { 1 }.boxed_local()
-    }));
-    let value = value_creator.get().await;
-    assert_eq!(value, 1);
-  }
-
-  #[test]
-  fn multi_runtimes() {
-    let value_creator =
-      Arc::new(MultiRuntimeAsyncValueCreator::new(Box::new(|| {
-        async {
-          tokio::task::yield_now().await;
-          1
-        }
-        .boxed_local()
-      })));
-    let handles = (0..3)
-      .map(|_| {
-        let value_creator = value_creator.clone();
-        std::thread::spawn(|| {
-          create_runtime().block_on(async move { value_creator.get().await })
-        })
-      })
-      .collect::<Vec<_>>();
-    for handle in handles {
-      assert_eq!(handle.join().unwrap(), 1);
-    }
-  }
-
-  #[test]
-  fn multi_runtimes_first_never_finishes() {
-    let is_first_run = Arc::new(Mutex::new(true));
-    let (tx, rx) = std::sync::mpsc::channel::<()>();
-    let value_creator = Arc::new(MultiRuntimeAsyncValueCreator::new({
-      let is_first_run = is_first_run.clone();
-      Box::new(move || {
-        let is_first_run = is_first_run.clone();
-        let tx = tx.clone();
-        async move {
-          let is_first_run = {
-            let mut is_first_run = is_first_run.lock();
-            let initial_value = *is_first_run;
-            *is_first_run = false;
-            tx.send(()).unwrap();
-            initial_value
-          };
-          if is_first_run {
-            tokio::time::sleep(std::time::Duration::from_millis(30_000)).await;
-            panic!("TIMED OUT"); // should not happen
-          } else {
-            tokio::task::yield_now().await;
-          }
-          1
-        }
-        .boxed_local()
-      })
-    }));
-    std::thread::spawn({
-      let value_creator = value_creator.clone();
-      let is_first_run = is_first_run.clone();
-      move || {
-        create_runtime().block_on(async {
-          let value_creator = value_creator.clone();
-          // spawn a task that will never complete
-          spawn(async move { value_creator.get().await });
-          // wait for the task to set is_first_run to false
-          while *is_first_run.lock() {
-            tokio::time::sleep(std::time::Duration::from_millis(20)).await;
-          }
-          // now exit the runtime while the value_creator is still pending
-        })
-      }
-    });
-    let handle = {
-      let value_creator = value_creator.clone();
-      std::thread::spawn(|| {
-        create_runtime().block_on(async move {
-          let value_creator = value_creator.clone();
-          rx.recv().unwrap();
-          // even though the other runtime shutdown, this get() should
-          // recover and still get the value
-          value_creator.get().await
-        })
-      })
-    };
-    assert_eq!(handle.join().unwrap(), 1);
-  }
-
-  fn create_runtime() -> tokio::runtime::Runtime {
-    tokio::runtime::Builder::new_current_thread()
-      .enable_all()
-      .build()
-      .unwrap()
-  }
-}
@@ -4,12 +4,13 @@
// deno-lint-ignore-file prefer-primordials

import { TextDecoder, TextEncoder } from "ext:deno_web/08_text_encoding.js";
import { asyncIterableToCallback } from "ext:deno_node/_fs/_fs_watch.ts";
import Dirent from "ext:deno_node/_fs/_fs_dirent.ts";
import { denoErrorToNodeError } from "ext:deno_node/internal/errors.ts";
import { getValidatedPath } from "ext:deno_node/internal/fs/utils.mjs";
import { Buffer } from "node:buffer";
import { promisify } from "ext:deno_node/internal/util.mjs";
import { op_fs_read_dir_async, op_fs_read_dir_sync } from "ext:core/ops";
import { join, relative } from "node:path";

function toDirent(val: Deno.DirEntry & { parentPath: string }): Dirent {
  return new Dirent(val);

@@ -18,6 +19,7 @@ function toDirent(val: Deno.DirEntry & { parentPath: string }): Dirent {
type readDirOptions = {
  encoding?: string;
  withFileTypes?: boolean;
  recursive?: boolean;
};

type readDirCallback = (err: Error | null, files: string[]) => void;

@@ -30,12 +32,12 @@ type readDirBoth = (

export function readdir(
  path: string | Buffer | URL,
  options: { withFileTypes?: false; encoding?: string },
  options: readDirOptions,
  callback: readDirCallback,
): void;
export function readdir(
  path: string | Buffer | URL,
  options: { withFileTypes: true; encoding?: string },
  options: readDirOptions,
  callback: readDirCallbackDirent,
): void;
export function readdir(path: string | URL, callback: readDirCallback): void;

@@ -51,8 +53,7 @@ export function readdir(
  const options = typeof optionsOrCallback === "object"
    ? optionsOrCallback
    : null;
  const result: Array<string | Dirent> = [];
  path = getValidatedPath(path);
  path = getValidatedPath(path).toString();

  if (!callback) throw new Error("No callback function supplied");

@@ -66,24 +67,44 @@ export function readdir(
    }
  }

  try {
    path = path.toString();
    asyncIterableToCallback(Deno.readDir(path), (val, done) => {
      if (typeof path !== "string") return;
      if (done) {
        callback(null, result);
  const result: Array<string | Dirent> = [];
  const dirs = [path];
  let current: string | undefined;
  (async () => {
    while ((current = dirs.shift()) !== undefined) {
      try {
        const entries = await op_fs_read_dir_async(current);

        for (let i = 0; i < entries.length; i++) {
          const entry = entries[i];
          if (options?.recursive && entry.isDirectory) {
            dirs.push(join(current, entry.name));
          }

          if (options?.withFileTypes) {
            entry.parentPath = current;
            result.push(toDirent(entry));
          } else {
            let name = decode(entry.name, options?.encoding);
            if (options?.recursive) {
              name = relative(path, join(current, name));
            }
            result.push(name);
          }
        }
      } catch (err) {
        callback(
          denoErrorToNodeError(err as Error, {
            syscall: "readdir",
            path: current,
          }),
        );
        return;
      }
      if (options?.withFileTypes) {
        val.parentPath = path;
        result.push(toDirent(val));
      } else result.push(decode(val.name));
    }, (e) => {
      callback(denoErrorToNodeError(e as Error, { syscall: "readdir" }));
    });
  } catch (e) {
    callback(denoErrorToNodeError(e as Error, { syscall: "readdir" }));
  }
    }

    callback(null, result);
  })();
}

function decode(str: string, encoding?: string): string {

@@ -118,8 +139,7 @@ export function readdirSync(
  path: string | Buffer | URL,
  options?: readDirOptions,
): Array<string | Dirent> {
  const result = [];
  path = getValidatedPath(path);
  path = getValidatedPath(path).toString();

  if (options?.encoding) {
    try {

@@ -131,16 +151,37 @@ export function readdirSync(
    }
  }

  try {
    path = path.toString();
    for (const file of Deno.readDirSync(path)) {
      if (options?.withFileTypes) {
        file.parentPath = path;
        result.push(toDirent(file));
      } else result.push(decode(file.name));
  const result: Array<string | Dirent> = [];
  const dirs = [path];
  let current: string | undefined;
  while ((current = dirs.shift()) !== undefined) {
    try {
      const entries = op_fs_read_dir_sync(current);

      for (let i = 0; i < entries.length; i++) {
        const entry = entries[i];
        if (options?.recursive && entry.isDirectory) {
          dirs.push(join(current, entry.name));
        }

        if (options?.withFileTypes) {
          entry.parentPath = current;
          result.push(toDirent(entry));
        } else {
          let name = decode(entry.name, options?.encoding);
          if (options?.recursive) {
            name = relative(path, join(current, name));
          }
          result.push(name);
        }
      }
    } catch (e) {
      throw denoErrorToNodeError(e as Error, {
        syscall: "readdir",
        path: current,
      });
    }
  } catch (e) {
    throw denoErrorToNodeError(e as Error, { syscall: "readdir" });
  }

  return result;
}
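The rewrite above replaces the `asyncIterableToCallback` wrapper with an explicit work queue (`dirs`), which is what makes the Node-style `recursive` option possible: subdirectories are pushed onto the queue, and recursive results are reported relative to the starting path. A usage sketch against the Node-compat API (the `src` directory and its contents are hypothetical):

```ts
import { readdir, readdirSync } from "node:fs";

// Callback form: names are relative to "src" when recursive is set.
readdir("src", { recursive: true }, (err, files) => {
  if (err) throw err;
  console.log(files); // e.g. ["mod.ts", "util/helpers.ts"]
});

// Sync form with Dirents: parentPath tells you which directory an entry
// came from during the walk.
const entries = readdirSync("src", { withFileTypes: true, recursive: true });
for (const entry of entries) {
  console.log(entry.parentPath, entry.name, entry.isDirectory());
}
```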
@@ -16,6 +16,7 @@ use once_cell::sync::OnceCell;
use opentelemetry::logs::AnyValue;
use opentelemetry::logs::LogRecord as LogRecordTrait;
use opentelemetry::logs::Severity;
use opentelemetry::otel_error;
use opentelemetry::trace::SpanContext;
use opentelemetry::trace::SpanId;
use opentelemetry::trace::SpanKind;

@@ -27,15 +28,21 @@ use opentelemetry::KeyValue;
use opentelemetry::StringValue;
use opentelemetry::Value;
use opentelemetry_otlp::HttpExporterBuilder;
use opentelemetry_otlp::MetricExporter;
use opentelemetry_otlp::Protocol;
use opentelemetry_otlp::WithExportConfig;
use opentelemetry_otlp::WithHttpConfig;
use opentelemetry_sdk::export::trace::SpanData;
use opentelemetry_sdk::logs::BatchLogProcessor;
use opentelemetry_sdk::logs::LogProcessor as LogProcessorTrait;
use opentelemetry_sdk::logs::LogProcessor;
use opentelemetry_sdk::logs::LogRecord;
use opentelemetry_sdk::metrics::data::Metric;
use opentelemetry_sdk::metrics::data::ResourceMetrics;
use opentelemetry_sdk::metrics::data::ScopeMetrics;
use opentelemetry_sdk::metrics::exporter::PushMetricExporter;
use opentelemetry_sdk::metrics::Temporality;
use opentelemetry_sdk::trace::BatchSpanProcessor;
use opentelemetry_sdk::trace::SpanProcessor as SpanProcessorTrait;
use opentelemetry_sdk::trace::SpanProcessor;
use opentelemetry_sdk::Resource;
use opentelemetry_semantic_conventions::resource::PROCESS_RUNTIME_NAME;
use opentelemetry_semantic_conventions::resource::PROCESS_RUNTIME_VERSION;

@@ -54,9 +61,6 @@ use std::thread;
use std::time::Duration;
use std::time::SystemTime;

type SpanProcessor = BatchSpanProcessor<OtelSharedRuntime>;
type LogProcessor = BatchLogProcessor<OtelSharedRuntime>;

deno_core::extension!(
  deno_telemetry,
  ops = [

@@ -71,6 +75,23 @@ deno_core::extension!(
    op_otel_span_attribute3,
    op_otel_span_set_dropped,
    op_otel_span_flush,
    op_otel_metrics_resource_attribute,
    op_otel_metrics_resource_attribute2,
    op_otel_metrics_resource_attribute3,
    op_otel_metrics_scope,
    op_otel_metrics_sum,
    op_otel_metrics_gauge,
    op_otel_metrics_sum_or_gauge_data_point,
    op_otel_metrics_histogram,
    op_otel_metrics_histogram_data_point,
    op_otel_metrics_histogram_data_point_entry_final,
    op_otel_metrics_histogram_data_point_entry1,
    op_otel_metrics_histogram_data_point_entry2,
    op_otel_metrics_histogram_data_point_entry3,
    op_otel_metrics_data_point_attribute,
    op_otel_metrics_data_point_attribute2,
    op_otel_metrics_data_point_attribute3,
    op_otel_metrics_submit,
  ],
  esm = ["telemetry.ts", "util.ts"],
);

@@ -322,8 +343,69 @@ mod hyper_client {
  }
}

static OTEL_PROCESSORS: OnceCell<(SpanProcessor, LogProcessor)> =
  OnceCell::new();
enum MetricProcessorMessage {
  ResourceMetrics(ResourceMetrics),
  Flush(tokio::sync::oneshot::Sender<()>),
}

struct MetricProcessor {
  tx: tokio::sync::mpsc::Sender<MetricProcessorMessage>,
}

impl MetricProcessor {
  fn new(exporter: MetricExporter) -> Self {
    let (tx, mut rx) = tokio::sync::mpsc::channel(2048);
    let future = async move {
      while let Some(message) = rx.recv().await {
        match message {
          MetricProcessorMessage::ResourceMetrics(mut rm) => {
            if let Err(err) = exporter.export(&mut rm).await {
              otel_error!(
                name: "MetricProcessor.Export.Error",
                error = format!("{}", err)
              );
            }
          }
          MetricProcessorMessage::Flush(tx) => {
            if let Err(()) = tx.send(()) {
              otel_error!(
                name: "MetricProcessor.Flush.SendResultError",
                error = "()",
              );
            }
          }
        }
      }
    };

    (*OTEL_SHARED_RUNTIME_SPAWN_TASK_TX)
      .unbounded_send(Box::pin(future))
      .expect("failed to send task to shared OpenTelemetry runtime");

    Self { tx }
  }

  fn submit(&self, rm: ResourceMetrics) {
    let _ = self
      .tx
      .try_send(MetricProcessorMessage::ResourceMetrics(rm));
  }

  fn force_flush(&self) -> Result<(), anyhow::Error> {
    let (tx, rx) = tokio::sync::oneshot::channel();
    self.tx.try_send(MetricProcessorMessage::Flush(tx))?;
    deno_core::futures::executor::block_on(rx)?;
    Ok(())
  }
}

struct Processors {
  spans: BatchSpanProcessor<OtelSharedRuntime>,
  logs: BatchLogProcessor<OtelSharedRuntime>,
  metrics: MetricProcessor,
}

static OTEL_PROCESSORS: OnceCell<Processors> = OnceCell::new();

static BUILT_IN_INSTRUMENTATION_SCOPE: OnceCell<
  opentelemetry::InstrumentationScope,

@@ -404,6 +486,12 @@ pub fn init(config: OtelConfig) -> anyhow::Result<()> {
    BatchSpanProcessor::builder(span_exporter, OtelSharedRuntime).build();
  span_processor.set_resource(&resource);

  let metric_exporter = HttpExporterBuilder::default()
    .with_http_client(client.clone())
    .with_protocol(protocol)
    .build_metrics_exporter(Temporality::Cumulative)?;
  let metric_processor = MetricProcessor::new(metric_exporter);

  let log_exporter = HttpExporterBuilder::default()
    .with_http_client(client)
    .with_protocol(protocol)

@@ -413,7 +501,11 @@ pub fn init(config: OtelConfig) -> anyhow::Result<()> {
  log_processor.set_resource(&resource);

  OTEL_PROCESSORS
    .set((span_processor, log_processor))
    .set(Processors {
      spans: span_processor,
      logs: log_processor,
      metrics: metric_processor,
    })
    .map_err(|_| anyhow!("failed to init otel"))?;

  let builtin_instrumentation_scope =

@@ -431,16 +523,22 @@ pub fn init(config: OtelConfig) -> anyhow::Result<()> {
/// `process::exit()`, to ensure that all OpenTelemetry logs are properly
/// flushed before the process terminates.
pub fn flush() {
  if let Some((span_processor, log_processor)) = OTEL_PROCESSORS.get() {
    let _ = span_processor.force_flush();
    let _ = log_processor.force_flush();
  if let Some(Processors {
    spans,
    logs,
    metrics,
  }) = OTEL_PROCESSORS.get()
  {
    let _ = spans.force_flush();
    let _ = logs.force_flush();
    let _ = metrics.force_flush();
  }
}

pub fn handle_log(record: &log::Record) {
  use log::Level;

  let Some((_, log_processor)) = OTEL_PROCESSORS.get() else {
  let Some(Processors { logs, .. }) = OTEL_PROCESSORS.get() else {
    return;
  };

@@ -490,7 +588,7 @@ pub fn handle_log(record: &log::Record) {

  let _ = record.key_values().visit(&mut Visitor(&mut log_record));

  log_processor.emit(
  logs.emit(
    &mut log_record,
    BUILT_IN_INSTRUMENTATION_SCOPE.get().unwrap(),
  );

@@ -648,7 +746,7 @@ fn op_otel_log(
  span_id: v8::Local<'_, v8::Value>,
  #[smi] trace_flags: u8,
) {
  let Some((_, log_processor)) = OTEL_PROCESSORS.get() else {
  let Some(Processors { logs, .. }) = OTEL_PROCESSORS.get() else {
    return;
  };

@@ -678,12 +776,25 @@ fn op_otel_log(
    );
  }

  log_processor.emit(
  logs.emit(
    &mut log_record,
    BUILT_IN_INSTRUMENTATION_SCOPE.get().unwrap(),
  );
}

fn owned_string<'s>(
  scope: &mut v8::HandleScope<'s>,
  string: v8::Local<'s, v8::String>,
) -> String {
  let x = v8::ValueView::new(scope, string);
  match x.data() {
    v8::ValueViewData::OneByte(bytes) => {
      String::from_utf8_lossy(bytes).into_owned()
    }
    v8::ValueViewData::TwoByte(bytes) => String::from_utf16_lossy(bytes),
  }
}

struct TemporarySpan(SpanData);

#[allow(clippy::too_many_arguments)]

@@ -700,10 +811,10 @@ fn op_otel_span_start<'s>(
  end_time: f64,
) -> Result<(), anyhow::Error> {
  if let Some(temporary_span) = state.try_take::<TemporarySpan>() {
    let Some((span_processor, _)) = OTEL_PROCESSORS.get() else {
    let Some(Processors { spans, .. }) = OTEL_PROCESSORS.get() else {
      return Ok(());
    };
    span_processor.on_end(temporary_span.0);
    spans.on_end(temporary_span.0);
  };

  let Some(InstrumentationScope(instrumentation_scope)) =

@@ -724,15 +835,7 @@ fn op_otel_span_start<'s>(

  let parent_span_id = parse_span_id(scope, parent_span_id);

  let name = {
    let x = v8::ValueView::new(scope, name.try_cast()?);
    match x.data() {
      v8::ValueViewData::OneByte(bytes) => {
        String::from_utf8_lossy(bytes).into_owned()
      }
      v8::ValueViewData::TwoByte(bytes) => String::from_utf16_lossy(bytes),
    }
  };
  let name = owned_string(scope, name.try_cast()?);

  let temporary_span = TemporarySpan(SpanData {
    span_context: SpanContext::new(

@@ -866,9 +969,598 @@ fn op_otel_span_flush(state: &mut OpState) {
    return;
  };

  let Some((span_processor, _)) = OTEL_PROCESSORS.get() else {
  let Some(Processors { spans, .. }) = OTEL_PROCESSORS.get() else {
    return;
  };

  span_processor.on_end(temporary_span.0);
  spans.on_end(temporary_span.0);
}

// Holds data being built from JS before
// it is submitted to the rust processor.
struct TemporaryMetricsExport {
  resource_attributes: Vec<KeyValue>,
  scope_metrics: Vec<ScopeMetrics>,
  metric: Option<TemporaryMetric>,
}

struct TemporaryMetric {
  name: String,
  description: String,
  unit: String,
  data: TemporaryMetricData,
}

enum TemporaryMetricData {
  Sum(opentelemetry_sdk::metrics::data::Sum<f64>),
  Gauge(opentelemetry_sdk::metrics::data::Gauge<f64>),
  Histogram(opentelemetry_sdk::metrics::data::Histogram<f64>),
}

impl From<TemporaryMetric> for Metric {
  fn from(value: TemporaryMetric) -> Self {
    Metric {
      name: Cow::Owned(value.name),
      description: Cow::Owned(value.description),
      unit: Cow::Owned(value.unit),
      data: match value.data {
        TemporaryMetricData::Sum(sum) => Box::new(sum),
        TemporaryMetricData::Gauge(gauge) => Box::new(gauge),
        TemporaryMetricData::Histogram(histogram) => Box::new(histogram),
      },
    }
  }
}

#[op2(fast)]
fn op_otel_metrics_resource_attribute<'s>(
  scope: &mut v8::HandleScope<'s>,
  state: &mut OpState,
  #[smi] capacity: u32,
  key: v8::Local<'s, v8::Value>,
  value: v8::Local<'s, v8::Value>,
) {
  let metrics_export = if let Some(metrics_export) =
    state.try_borrow_mut::<TemporaryMetricsExport>()
  {
    metrics_export.resource_attributes.reserve_exact(
      (capacity as usize) - metrics_export.resource_attributes.capacity(),
    );
    metrics_export
  } else {
    state.put(TemporaryMetricsExport {
      resource_attributes: Vec::with_capacity(capacity as usize),
      scope_metrics: vec![],
      metric: None,
    });
    state.borrow_mut()
  };
  attr!(scope, metrics_export.resource_attributes, key, value);
}

#[op2(fast)]
fn op_otel_metrics_resource_attribute2<'s>(
  scope: &mut v8::HandleScope<'s>,
  state: &mut OpState,
  #[smi] capacity: u32,
  key1: v8::Local<'s, v8::Value>,
  value1: v8::Local<'s, v8::Value>,
  key2: v8::Local<'s, v8::Value>,
  value2: v8::Local<'s, v8::Value>,
) {
  let metrics_export = if let Some(metrics_export) =
    state.try_borrow_mut::<TemporaryMetricsExport>()
  {
    metrics_export.resource_attributes.reserve_exact(
      (capacity as usize) - metrics_export.resource_attributes.capacity(),
    );
    metrics_export
  } else {
    state.put(TemporaryMetricsExport {
      resource_attributes: Vec::with_capacity(capacity as usize),
      scope_metrics: vec![],
      metric: None,
    });
    state.borrow_mut()
  };
  attr!(scope, metrics_export.resource_attributes, key1, value1);
  attr!(scope, metrics_export.resource_attributes, key2, value2);
}

#[allow(clippy::too_many_arguments)]
#[op2(fast)]
fn op_otel_metrics_resource_attribute3<'s>(
  scope: &mut v8::HandleScope<'s>,
  state: &mut OpState,
  #[smi] capacity: u32,
  key1: v8::Local<'s, v8::Value>,
  value1: v8::Local<'s, v8::Value>,
  key2: v8::Local<'s, v8::Value>,
  value2: v8::Local<'s, v8::Value>,
  key3: v8::Local<'s, v8::Value>,
  value3: v8::Local<'s, v8::Value>,
) {
  let metrics_export = if let Some(metrics_export) =
    state.try_borrow_mut::<TemporaryMetricsExport>()
  {
    metrics_export.resource_attributes.reserve_exact(
      (capacity as usize) - metrics_export.resource_attributes.capacity(),
    );
    metrics_export
  } else {
    state.put(TemporaryMetricsExport {
      resource_attributes: Vec::with_capacity(capacity as usize),
      scope_metrics: vec![],
      metric: None,
    });
    state.borrow_mut()
  };
  attr!(scope, metrics_export.resource_attributes, key1, value1);
  attr!(scope, metrics_export.resource_attributes, key2, value2);
  attr!(scope, metrics_export.resource_attributes, key3, value3);
}

#[op2(fast)]
fn op_otel_metrics_scope<'s>(
  scope: &mut v8::HandleScope<'s>,
  state: &mut OpState,
  name: v8::Local<'s, v8::Value>,
  schema_url: v8::Local<'s, v8::Value>,
  version: v8::Local<'s, v8::Value>,
) {
  let name = owned_string(scope, name.cast());

  let scope_builder = opentelemetry::InstrumentationScope::builder(name);
  let scope_builder = if schema_url.is_null_or_undefined() {
    scope_builder
  } else {
    scope_builder.with_schema_url(owned_string(scope, schema_url.cast()))
  };
  let scope_builder = if version.is_null_or_undefined() {
    scope_builder
  } else {
    scope_builder.with_version(owned_string(scope, version.cast()))
  };
  let scope = scope_builder.build();
  let scope_metric = ScopeMetrics {
    scope,
    metrics: vec![],
  };

  match state.try_borrow_mut::<TemporaryMetricsExport>() {
    Some(temp) => {
      if let Some(current_metric) = temp.metric.take() {
        let metric = Metric::from(current_metric);
        temp.scope_metrics.last_mut().unwrap().metrics.push(metric);
      }
      temp.scope_metrics.push(scope_metric);
    }
    None => {
      state.put(TemporaryMetricsExport {
        resource_attributes: vec![],
        scope_metrics: vec![scope_metric],
        metric: None,
      });
    }
  }
}

#[op2(fast)]
fn op_otel_metrics_sum<'s>(
  scope: &mut v8::HandleScope<'s>,
  state: &mut OpState,
  name: v8::Local<'s, v8::Value>,
  description: v8::Local<'s, v8::Value>,
  unit: v8::Local<'s, v8::Value>,
  #[smi] temporality: u8,
  is_monotonic: bool,
) {
  let Some(temp) = state.try_borrow_mut::<TemporaryMetricsExport>() else {
    return;
  };

  if let Some(current_metric) = temp.metric.take() {
    let metric = Metric::from(current_metric);
    temp.scope_metrics.last_mut().unwrap().metrics.push(metric);
  }

  let name = owned_string(scope, name.cast());
  let description = owned_string(scope, description.cast());
  let unit = owned_string(scope, unit.cast());
  let temporality = match temporality {
    0 => Temporality::Delta,
    1 => Temporality::Cumulative,
    _ => return,
  };
  let sum = opentelemetry_sdk::metrics::data::Sum {
    data_points: vec![],
    temporality,
    is_monotonic,
  };

  temp.metric = Some(TemporaryMetric {
    name,
    description,
    unit,
    data: TemporaryMetricData::Sum(sum),
  });
}

#[op2(fast)]
fn op_otel_metrics_gauge<'s>(
  scope: &mut v8::HandleScope<'s>,
  state: &mut OpState,
  name: v8::Local<'s, v8::Value>,
  description: v8::Local<'s, v8::Value>,
  unit: v8::Local<'s, v8::Value>,
) {
  let Some(temp) = state.try_borrow_mut::<TemporaryMetricsExport>() else {
    return;
  };

  if let Some(current_metric) = temp.metric.take() {
    let metric = Metric::from(current_metric);
    temp.scope_metrics.last_mut().unwrap().metrics.push(metric);
  }

  let name = owned_string(scope, name.cast());
  let description = owned_string(scope, description.cast());
  let unit = owned_string(scope, unit.cast());

  let gauge = opentelemetry_sdk::metrics::data::Gauge {
    data_points: vec![],
  };

  temp.metric = Some(TemporaryMetric {
    name,
    description,
    unit,
    data: TemporaryMetricData::Gauge(gauge),
  });
}

#[op2(fast)]
fn op_otel_metrics_sum_or_gauge_data_point(
  state: &mut OpState,
  value: f64,
  start_time: f64,
  time: f64,
) {
  let Some(temp) = state.try_borrow_mut::<TemporaryMetricsExport>() else {
    return;
  };

  let start_time = SystemTime::UNIX_EPOCH
    .checked_add(std::time::Duration::from_secs_f64(start_time))
    .unwrap();
  let time = SystemTime::UNIX_EPOCH
    .checked_add(std::time::Duration::from_secs_f64(time))
    .unwrap();

  let data_point = opentelemetry_sdk::metrics::data::DataPoint {
    value,
    start_time: Some(start_time),
    time: Some(time),
    attributes: vec![],
    exemplars: vec![],
  };

  match &mut temp.metric {
    Some(TemporaryMetric {
      data: TemporaryMetricData::Sum(sum),
      ..
    }) => sum.data_points.push(data_point),
    Some(TemporaryMetric {
      data: TemporaryMetricData::Gauge(gauge),
      ..
    }) => gauge.data_points.push(data_point),
    _ => {}
  }
}

#[op2(fast)]
fn op_otel_metrics_histogram<'s>(
  scope: &mut v8::HandleScope<'s>,
  state: &mut OpState,
  name: v8::Local<'s, v8::Value>,
  description: v8::Local<'s, v8::Value>,
  unit: v8::Local<'s, v8::Value>,
  #[smi] temporality: u8,
) {
  let Some(temp) = state.try_borrow_mut::<TemporaryMetricsExport>() else {
    return;
  };

  if let Some(current_metric) = temp.metric.take() {
    let metric = Metric::from(current_metric);
    temp.scope_metrics.last_mut().unwrap().metrics.push(metric);
  }

  let name = owned_string(scope, name.cast());
  let description = owned_string(scope, description.cast());
  let unit = owned_string(scope, unit.cast());

  let temporality = match temporality {
    0 => Temporality::Delta,
    1 => Temporality::Cumulative,
    _ => return,
  };
  let histogram = opentelemetry_sdk::metrics::data::Histogram {
    data_points: vec![],
    temporality,
  };

  temp.metric = Some(TemporaryMetric {
    name,
    description,
    unit,
    data: TemporaryMetricData::Histogram(histogram),
  });
}

#[allow(clippy::too_many_arguments)]
#[op2(fast)]
fn op_otel_metrics_histogram_data_point(
  state: &mut OpState,
  #[number] count: u64,
  min: f64,
  max: f64,
  sum: f64,
  start_time: f64,
  time: f64,
  #[smi] buckets: u32,
) {
  let Some(temp) = state.try_borrow_mut::<TemporaryMetricsExport>() else {
    return;
  };

  let min = if min.is_nan() { None } else { Some(min) };
  let max = if max.is_nan() { None } else { Some(max) };

  let start_time = SystemTime::UNIX_EPOCH
    .checked_add(std::time::Duration::from_secs_f64(start_time))
    .unwrap();
  let time = SystemTime::UNIX_EPOCH
    .checked_add(std::time::Duration::from_secs_f64(time))
    .unwrap();

  let data_point = opentelemetry_sdk::metrics::data::HistogramDataPoint {
    bounds: Vec::with_capacity(buckets as usize),
    bucket_counts: Vec::with_capacity((buckets as usize) + 1),
    count,
    sum,
    min,
    max,
    start_time,
    time,
    attributes: vec![],
    exemplars: vec![],
  };

  if let Some(TemporaryMetric {
    data: TemporaryMetricData::Histogram(histogram),
    ..
  }) = &mut temp.metric
  {
    histogram.data_points.push(data_point);
  }
}

#[op2(fast)]
fn op_otel_metrics_histogram_data_point_entry_final(
  state: &mut OpState,
  #[number] count1: u64,
) {
  let Some(temp) = state.try_borrow_mut::<TemporaryMetricsExport>() else {
    return;
  };

  if let Some(TemporaryMetric {
    data: TemporaryMetricData::Histogram(histogram),
    ..
  }) = &mut temp.metric
  {
    histogram
      .data_points
      .last_mut()
      .unwrap()
      .bucket_counts
      .push(count1)
  }
}

#[op2(fast)]
fn op_otel_metrics_histogram_data_point_entry1(
  state: &mut OpState,
  #[number] count1: u64,
  bound1: f64,
) {
  let Some(temp) = state.try_borrow_mut::<TemporaryMetricsExport>() else {
    return;
  };

  if let Some(TemporaryMetric {
    data: TemporaryMetricData::Histogram(histogram),
    ..
  }) = &mut temp.metric
  {
    let data_point = histogram.data_points.last_mut().unwrap();
    data_point.bucket_counts.push(count1);
    data_point.bounds.push(bound1);
  }
}

#[op2(fast)]
fn op_otel_metrics_histogram_data_point_entry2(
  state: &mut OpState,
  #[number] count1: u64,
  bound1: f64,
  #[number] count2: u64,
  bound2: f64,
) {
  let Some(temp) = state.try_borrow_mut::<TemporaryMetricsExport>() else {
    return;
  };

  if let Some(TemporaryMetric {
    data: TemporaryMetricData::Histogram(histogram),
    ..
  }) = &mut temp.metric
  {
    let data_point = histogram.data_points.last_mut().unwrap();
    data_point.bucket_counts.push(count1);
    data_point.bounds.push(bound1);
    data_point.bucket_counts.push(count2);
    data_point.bounds.push(bound2);
  }
}

#[op2(fast)]
fn op_otel_metrics_histogram_data_point_entry3(
  state: &mut OpState,
  #[number] count1: u64,
  bound1: f64,
  #[number] count2: u64,
  bound2: f64,
  #[number] count3: u64,
  bound3: f64,
) {
  let Some(temp) = state.try_borrow_mut::<TemporaryMetricsExport>() else {
    return;
  };

  if let Some(TemporaryMetric {
    data: TemporaryMetricData::Histogram(histogram),
    ..
  }) = &mut temp.metric
  {
    let data_point = histogram.data_points.last_mut().unwrap();
    data_point.bucket_counts.push(count1);
    data_point.bounds.push(bound1);
    data_point.bucket_counts.push(count2);
    data_point.bounds.push(bound2);
    data_point.bucket_counts.push(count3);
    data_point.bounds.push(bound3);
  }
}

#[op2(fast)]
fn op_otel_metrics_data_point_attribute<'s>(
  scope: &mut v8::HandleScope<'s>,
  state: &mut OpState,
  #[smi] capacity: u32,
  key: v8::Local<'s, v8::Value>,
  value: v8::Local<'s, v8::Value>,
) {
  if let Some(TemporaryMetricsExport {
    metric: Some(metric),
    ..
  }) = state.try_borrow_mut::<TemporaryMetricsExport>()
  {
    let attributes = match &mut metric.data {
      TemporaryMetricData::Sum(sum) => {
        &mut sum.data_points.last_mut().unwrap().attributes
      }
      TemporaryMetricData::Gauge(gauge) => {
        &mut gauge.data_points.last_mut().unwrap().attributes
      }
      TemporaryMetricData::Histogram(histogram) => {
        &mut histogram.data_points.last_mut().unwrap().attributes
      }
    };
    attributes.reserve_exact((capacity as usize) - attributes.capacity());
    attr!(scope, attributes, key, value);
  }
}

#[op2(fast)]
fn op_otel_metrics_data_point_attribute2<'s>(
  scope: &mut v8::HandleScope<'s>,
  state: &mut OpState,
  #[smi] capacity: u32,
  key1: v8::Local<'s, v8::Value>,
  value1: v8::Local<'s, v8::Value>,
  key2: v8::Local<'s, v8::Value>,
  value2: v8::Local<'s, v8::Value>,
) {
  if let Some(TemporaryMetricsExport {
    metric: Some(metric),
    ..
  }) = state.try_borrow_mut::<TemporaryMetricsExport>()
  {
    let attributes = match &mut metric.data {
      TemporaryMetricData::Sum(sum) => {
        &mut sum.data_points.last_mut().unwrap().attributes
      }
      TemporaryMetricData::Gauge(gauge) => {
        &mut gauge.data_points.last_mut().unwrap().attributes
      }
      TemporaryMetricData::Histogram(histogram) => {
        &mut histogram.data_points.last_mut().unwrap().attributes
      }
    };
    attributes.reserve_exact((capacity as usize) - attributes.capacity());
    attr!(scope, attributes, key1, value1);
    attr!(scope, attributes, key2, value2);
  }
}

#[allow(clippy::too_many_arguments)]
#[op2(fast)]
fn op_otel_metrics_data_point_attribute3<'s>(
  scope: &mut v8::HandleScope<'s>,
  state: &mut OpState,
  #[smi] capacity: u32,
  key1: v8::Local<'s, v8::Value>,
  value1: v8::Local<'s, v8::Value>,
  key2: v8::Local<'s, v8::Value>,
  value2: v8::Local<'s, v8::Value>,
  key3: v8::Local<'s, v8::Value>,
  value3: v8::Local<'s, v8::Value>,
) {
  if let Some(TemporaryMetricsExport {
    metric: Some(metric),
    ..
  }) = state.try_borrow_mut::<TemporaryMetricsExport>()
  {
    let attributes = match &mut metric.data {
      TemporaryMetricData::Sum(sum) => {
        &mut sum.data_points.last_mut().unwrap().attributes
      }
      TemporaryMetricData::Gauge(gauge) => {
        &mut gauge.data_points.last_mut().unwrap().attributes
      }
      TemporaryMetricData::Histogram(histogram) => {
        &mut histogram.data_points.last_mut().unwrap().attributes
      }
    };
    attributes.reserve_exact((capacity as usize) - attributes.capacity());
    attr!(scope, attributes, key1, value1);
    attr!(scope, attributes, key2, value2);
    attr!(scope, attributes, key3, value3);
  }
}

#[op2(fast)]
fn op_otel_metrics_submit(state: &mut OpState) {
  let Some(mut temp) = state.try_take::<TemporaryMetricsExport>() else {
    return;
  };

  let Some(Processors { metrics, .. }) = OTEL_PROCESSORS.get() else {
    return;
  };

  if let Some(current_metric) = temp.metric {
    let metric = Metric::from(current_metric);
    temp.scope_metrics.last_mut().unwrap().metrics.push(metric);
  }

  let resource = Resource::new(temp.resource_attributes);
  let scope_metrics = temp.scope_metrics;

  metrics.submit(ResourceMetrics {
    resource,
    scope_metrics,
  });
}
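The `MetricProcessor` above decouples op calls from the exporter with a bounded channel: `submit` is fire-and-forget (`try_send`, dropping data when the queue is full rather than blocking the isolate), while `force_flush` round-trips a oneshot acknowledgement through the same queue so it cannot overtake exports queued before it. The same shape in TypeScript terms (a sketch of the pattern, not a port of the Rust code):

```ts
// Sketch: a processor that serializes exports through a queue and lets
// forceFlush() wait until everything queued before it has been handled.
type Message<T> =
  | { kind: "export"; data: T }
  | { kind: "flush"; resolve: () => void };

class QueueProcessor<T> {
  #queue: Message<T>[] = [];
  #draining = false;

  constructor(private exporter: (data: T) => Promise<void>) {}

  submit(data: T) {
    // fire-and-forget, like MetricProcessor::submit / try_send
    this.#queue.push({ kind: "export", data });
    this.#drain();
  }

  forceFlush(): Promise<void> {
    // the flush marker travels through the same queue, so it resolves
    // only after every export submitted ahead of it
    return new Promise((resolve) => {
      this.#queue.push({ kind: "flush", resolve });
      this.#drain();
    });
  }

  async #drain() {
    if (this.#draining) return;
    this.#draining = true;
    let msg: Message<T> | undefined;
    while ((msg = this.#queue.shift()) !== undefined) {
      if (msg.kind === "export") {
        await this.exporter(msg.data).catch(() => {}); // log-and-continue
      } else {
        msg.resolve();
      }
    }
    this.#draining = false;
  }
}
```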
@@ -7,6 +7,23 @@ import {
  op_otel_instrumentation_scope_enter,
  op_otel_instrumentation_scope_enter_builtin,
  op_otel_log,
  op_otel_metrics_data_point_attribute,
  op_otel_metrics_data_point_attribute2,
  op_otel_metrics_data_point_attribute3,
  op_otel_metrics_gauge,
  op_otel_metrics_histogram,
  op_otel_metrics_histogram_data_point,
  op_otel_metrics_histogram_data_point_entry1,
  op_otel_metrics_histogram_data_point_entry2,
  op_otel_metrics_histogram_data_point_entry3,
  op_otel_metrics_histogram_data_point_entry_final,
  op_otel_metrics_resource_attribute,
  op_otel_metrics_resource_attribute2,
  op_otel_metrics_resource_attribute3,
  op_otel_metrics_scope,
  op_otel_metrics_submit,
  op_otel_metrics_sum,
  op_otel_metrics_sum_or_gauge_data_point,
  op_otel_span_attribute,
  op_otel_span_attribute2,
  op_otel_span_attribute3,

@@ -186,7 +203,7 @@ const instrumentationScopes = new SafeWeakMap<
>();
let activeInstrumentationLibrary: WeakRef<InstrumentationLibrary> | null = null;

function submit(
function submitSpan(
  spanId: string | Uint8Array,
  traceId: string | Uint8Array,
  traceFlags: number,

@@ -411,7 +428,7 @@ export class Span {

  endSpan = (span: Span) => {
    const endTime = now();
    submit(
    submitSpan(
      span.#spanId,
      span.#traceId,
      span.#traceFlags,

@@ -571,7 +588,7 @@ class SpanExporter {
    for (let i = 0; i < spans.length; i += 1) {
      const span = spans[i];
      const context = span.spanContext();
      submit(
      submitSpan(
        context.spanId,
        context.traceId,
        context.traceFlags,

@@ -671,6 +688,262 @@ class ContextManager {
  }
}

function attributeValue(value: IAnyValue) {
  return value.boolValue ?? value.stringValue ?? value.doubleValue ??
    value.intValue;
}

function submitMetrics(resource, scopeMetrics) {
  let i = 0;
  while (i < resource.attributes.length) {
    if (i + 2 < resource.attributes.length) {
      op_otel_metrics_resource_attribute3(
        resource.attributes.length,
        resource.attributes[i].key,
        attributeValue(resource.attributes[i].value),
        resource.attributes[i + 1].key,
        attributeValue(resource.attributes[i + 1].value),
        resource.attributes[i + 2].key,
        attributeValue(resource.attributes[i + 2].value),
      );
      i += 3;
    } else if (i + 1 < resource.attributes.length) {
      op_otel_metrics_resource_attribute2(
        resource.attributes.length,
        resource.attributes[i].key,
        attributeValue(resource.attributes[i].value),
        resource.attributes[i + 1].key,
        attributeValue(resource.attributes[i + 1].value),
      );
      i += 2;
    } else {
      op_otel_metrics_resource_attribute(
        resource.attributes.length,
        resource.attributes[i].key,
        attributeValue(resource.attributes[i].value),
      );
      i += 1;
    }
  }

  for (let smi = 0; smi < scopeMetrics.length; smi += 1) {
    const { scope, metrics } = scopeMetrics[smi];

    op_otel_metrics_scope(scope.name, scope.schemaUrl, scope.version);

    for (let mi = 0; mi < metrics.length; mi += 1) {
      const metric = metrics[mi];
      switch (metric.dataPointType) {
        case 3:
          op_otel_metrics_sum(
            metric.descriptor.name,
            // deno-lint-ignore prefer-primordials
            metric.descriptor.description,
            metric.descriptor.unit,
            metric.aggregationTemporality,
            metric.isMonotonic,
          );
          for (let di = 0; di < metric.dataPoints.length; di += 1) {
            const dataPoint = metric.dataPoints[di];
            op_otel_metrics_sum_or_gauge_data_point(
              dataPoint.value,
              hrToSecs(dataPoint.startTime),
              hrToSecs(dataPoint.endTime),
            );
            const attributes = ObjectEntries(dataPoint.attributes);
            let i = 0;
            while (i < attributes.length) {
              if (i + 2 < attributes.length) {
                op_otel_metrics_data_point_attribute3(
                  attributes.length,
                  attributes[i][0],
                  attributes[i][1],
                  attributes[i + 1][0],
                  attributes[i + 1][1],
                  attributes[i + 2][0],
                  attributes[i + 2][1],
                );
                i += 3;
              } else if (i + 1 < attributes.length) {
                op_otel_metrics_data_point_attribute2(
                  attributes.length,
                  attributes[i][0],
                  attributes[i][1],
                  attributes[i + 1][0],
                  attributes[i + 1][1],
                );
                i += 2;
              } else {
                op_otel_metrics_data_point_attribute(
                  attributes.length,
                  attributes[i][0],
                  attributes[i][1],
                );
                i += 1;
              }
            }
          }
          break;
        case 2:
          op_otel_metrics_gauge(
            metric.descriptor.name,
            // deno-lint-ignore prefer-primordials
            metric.descriptor.description,
            metric.descriptor.unit,
          );
          for (let di = 0; di < metric.dataPoints.length; di += 1) {
            const dataPoint = metric.dataPoints[di];
            op_otel_metrics_sum_or_gauge_data_point(
              dataPoint.value,
              hrToSecs(dataPoint.startTime),
              hrToSecs(dataPoint.endTime),
            );
            const attributes = ObjectEntries(dataPoint.attributes);
            let i = 0;
            while (i < attributes.length) {
              if (i + 2 < attributes.length) {
                op_otel_metrics_data_point_attribute3(
                  attributes.length,
                  attributes[i][0],
                  attributes[i][1],
                  attributes[i + 1][0],
                  attributes[i + 1][1],
                  attributes[i + 2][0],
                  attributes[i + 2][1],
                );
                i += 3;
              } else if (i + 1 < attributes.length) {
                op_otel_metrics_data_point_attribute2(
                  attributes.length,
                  attributes[i][0],
                  attributes[i][1],
                  attributes[i + 1][0],
                  attributes[i + 1][1],
                );
                i += 2;
              } else {
                op_otel_metrics_data_point_attribute(
                  attributes.length,
                  attributes[i][0],
                  attributes[i][1],
                );
                i += 1;
              }
            }
          }
          break;
        case 0:
          op_otel_metrics_histogram(
            metric.descriptor.name,
            // deno-lint-ignore prefer-primordials
            metric.descriptor.description,
            metric.descriptor.unit,
            metric.aggregationTemporality,
          );
          for (let di = 0; di < metric.dataPoints.length; di += 1) {
            const dataPoint = metric.dataPoints[di];
            const { boundaries, counts } = dataPoint.value.buckets;
            op_otel_metrics_histogram_data_point(
              dataPoint.value.count,
              dataPoint.value.min ?? NaN,
              dataPoint.value.max ?? NaN,
              dataPoint.value.sum,
              hrToSecs(dataPoint.startTime),
              hrToSecs(dataPoint.endTime),
              boundaries.length,
            );
            let j = 0;
            while (j < boundaries.length) {
              if (j + 3 < boundaries.length) {
                op_otel_metrics_histogram_data_point_entry3(
                  counts[j],
                  boundaries[j],
                  counts[j + 1],
                  boundaries[j + 1],
                  counts[j + 2],
                  boundaries[j + 2],
                );
                j += 3;
              } else if (j + 2 < boundaries.length) {
                op_otel_metrics_histogram_data_point_entry2(
                  counts[j],
                  boundaries[j],
                  counts[j + 1],
                  boundaries[j + 1],
                );
                j += 2;
              } else {
                op_otel_metrics_histogram_data_point_entry1(
                  counts[j],
                  boundaries[j],
                );
                j += 1;
              }
            }
            op_otel_metrics_histogram_data_point_entry_final(counts[j]);
            const attributes = ObjectEntries(dataPoint.attributes);
            let i = 0;
            while (i < attributes.length) {
              if (i + 2 < attributes.length) {
                op_otel_metrics_data_point_attribute3(
                  attributes.length,
                  attributes[i][0],
                  attributes[i][1],
                  attributes[i + 1][0],
                  attributes[i + 1][1],
                  attributes[i + 2][0],
                  attributes[i + 2][1],
                );
                i += 3;
              } else if (i + 1 < attributes.length) {
                op_otel_metrics_data_point_attribute2(
                  attributes.length,
                  attributes[i][0],
                  attributes[i][1],
                  attributes[i + 1][0],
                  attributes[i + 1][1],
                );
                i += 2;
              } else {
                op_otel_metrics_data_point_attribute(
                  attributes.length,
                  attributes[i][0],
                  attributes[i][1],
                );
                i += 1;
              }
            }
          }
          break;
        default:
          continue;
      }
    }
  }

  op_otel_metrics_submit();
}

class MetricExporter {
  export(metrics, resultCallback: (result: ExportResult) => void) {
    try {
      submitMetrics(metrics.resource, metrics.scopeMetrics);
      resultCallback({ code: 0 });
    } catch (error) {
      resultCallback({
        code: 1,
        error: ObjectPrototypeIsPrototypeOf(error, Error)
          ? error as Error
          : new Error(String(error)),
      });
    }
  }

  async forceFlush() {}

  async shutdown() {}
}

const otelConsoleConfig = {
  ignore: 0,
  capture: 1,

@@ -708,4 +981,5 @@ export function bootstrap(
export const telemetry = {
  SpanExporter,
  ContextManager,
  MetricExporter,
};
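One readability cost of `submitMetrics` above is that the same three-way attribute batching loop appears verbatim for resource attributes, sum/gauge data points, and histogram data points. The arity-1/2/3 op variants exist so each fast op call can carry up to three key/value pairs, and the loop always picks the widest variant that still has enough pairs left. A hypothetical helper that names the pattern (a refactor sketch, not part of this commit):

```ts
// Sketch: flush [key, value] pairs through three op arities, widest first,
// passing the total length so the Rust side can reserve capacity up front.
type AttrOp = (...args: unknown[]) => void;

function submitAttributes(
  attributes: [string, unknown][],
  op1: AttrOp,
  op2: AttrOp,
  op3: AttrOp,
) {
  let i = 0;
  while (i < attributes.length) {
    if (i + 2 < attributes.length) {
      op3(
        attributes.length,
        attributes[i][0], attributes[i][1],
        attributes[i + 1][0], attributes[i + 1][1],
        attributes[i + 2][0], attributes[i + 2][1],
      );
      i += 3;
    } else if (i + 1 < attributes.length) {
      op2(
        attributes.length,
        attributes[i][0], attributes[i][1],
        attributes[i + 1][0], attributes[i + 1][1],
      );
      i += 2;
    } else {
      op1(attributes.length, attributes[i][0], attributes[i][1]);
      i += 1;
    }
  }
}

// Hypothetical call site, mirroring the data-point case in submitMetrics:
// submitAttributes(
//   ObjectEntries(dataPoint.attributes),
//   op_otel_metrics_data_point_attribute,
//   op_otel_metrics_data_point_attribute2,
//   op_otel_metrics_data_point_attribute3,
// );
```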
@@ -271,7 +271,7 @@ function addPaddingToBase64url(base64url) {
  if (base64url.length % 4 === 2) return base64url + "==";
  if (base64url.length % 4 === 3) return base64url + "=";
  if (base64url.length % 4 === 1) {
    throw new TypeError("Illegal base64url string!");
    throw new TypeError("Illegal base64url string");
  }
  return base64url;
}

@@ -382,7 +382,7 @@ function assert(cond, msg = "Assertion failed.") {
function serializeJSValueToJSONString(value) {
  const result = JSONStringify(value);
  if (result === undefined) {
    throw new TypeError("Value is not JSON serializable.");
    throw new TypeError("Value is not JSON serializable");
  }
  return result;
}

@@ -429,7 +429,7 @@ function pathFromURLWin32(url) {
 */
function pathFromURLPosix(url) {
  if (url.hostname !== "") {
    throw new TypeError(`Host must be empty.`);
    throw new TypeError("Host must be empty");
  }

  return decodeURIComponent(

@@ -444,7 +444,7 @@ function pathFromURLPosix(url) {
function pathFromURL(pathOrUrl) {
  if (ObjectPrototypeIsPrototypeOf(URLPrototype, pathOrUrl)) {
    if (pathOrUrl.protocol != "file:") {
      throw new TypeError("Must be a file URL.");
      throw new TypeError("Must be a file URL");
    }

    return core.build.os == "windows"
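For reference on the padding logic touched above: base64url encodes 3 input bytes as 4 characters, so a valid unpadded string always has length 0, 2, or 3 (mod 4); a length of 1 (mod 4) is unreachable, which is the error case. A worked example:

```ts
// base64url of "A"  (1 byte)  is "QQ"   (2 mod 4) -> pad to "QQ=="
// base64url of "AB" (2 bytes) is "QUI"  (3 mod 4) -> pad to "QUI="
// base64url of "ABC" (3 bytes) is "QUJD" (0 mod 4) -> no padding needed
function addPadding(b64url: string): string {
  switch (b64url.length % 4) {
    case 0: return b64url;
    case 2: return b64url + "==";
    case 3: return b64url + "=";
    default: throw new TypeError("Illegal base64url string");
  }
}

console.log(addPadding("QQ"));        // "QQ=="
console.log(atob(addPadding("QUI"))); // "AB"
```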
@@ -1031,11 +1031,11 @@ class EventTarget {
    }

    if (getDispatched(event)) {
      throw new DOMException("Invalid event state.", "InvalidStateError");
      throw new DOMException("Invalid event state", "InvalidStateError");
    }

    if (event.eventPhase !== Event.NONE) {
      throw new DOMException("Invalid event state.", "InvalidStateError");
      throw new DOMException("Invalid event state", "InvalidStateError");
    }

    return dispatch(self, event);
@@ -196,7 +196,7 @@ class AbortSignal extends EventTarget {

  constructor(key = null) {
    if (key !== illegalConstructorKey) {
      throw new TypeError("Illegal constructor.");
      throw new TypeError("Illegal constructor");
    }
    super();
  }
@@ -16,7 +16,7 @@ const illegalConstructorKey = Symbol("illegalConstructorKey");
class Window extends EventTarget {
  constructor(key = null) {
    if (key !== illegalConstructorKey) {
      throw new TypeError("Illegal constructor.");
      throw new TypeError("Illegal constructor");
    }
    super();
  }

@@ -29,7 +29,7 @@ class Window extends EventTarget {
class WorkerGlobalScope extends EventTarget {
  constructor(key = null) {
    if (key != illegalConstructorKey) {
      throw new TypeError("Illegal constructor.");
      throw new TypeError("Illegal constructor");
    }
    super();
  }

@@ -42,7 +42,7 @@ class WorkerGlobalScope extends EventTarget {
class DedicatedWorkerGlobalScope extends WorkerGlobalScope {
  constructor(key = null) {
    if (key != illegalConstructorKey) {
      throw new TypeError("Illegal constructor.");
      throw new TypeError("Illegal constructor");
    }
    super();
  }
@@ -50,7 +50,7 @@ function btoa(data) {
  } catch (e) {
    if (ObjectPrototypeIsPrototypeOf(TypeErrorPrototype, e)) {
      throw new DOMException(
        "The string to be encoded contains characters outside of the Latin1 range.",
        "Cannot encode string: string contains characters outside of the Latin1 range",
        "InvalidCharacterError",
      );
    }
@@ -523,10 +523,14 @@ function dequeueValue(container) {
function enqueueValueWithSize(container, value, size) {
  assert(container[_queue] && typeof container[_queueTotalSize] === "number");
  if (isNonNegativeNumber(size) === false) {
    throw new RangeError("chunk size isn't a positive number");
    throw new RangeError(
      "Cannot enqueue value with size: chunk size must be a positive number",
    );
  }
  if (size === Infinity) {
    throw new RangeError("chunk size is invalid");
    throw new RangeError(
      "Cannot enqueue value with size: chunk size is invalid",
    );
  }
  container[_queue].enqueue({ value, size });
  container[_queueTotalSize] += size;

@@ -1097,7 +1101,7 @@ async function readableStreamCollectIntoUint8Array(stream) {

    if (TypedArrayPrototypeGetSymbolToStringTag(chunk) !== "Uint8Array") {
      throw new TypeError(
        "Can't convert value to Uint8Array while consuming the stream",
        "Cannot convert value to Uint8Array while consuming the stream",
      );
    }

@@ -1347,7 +1351,7 @@ function readableByteStreamControllerEnqueue(controller, chunk) {

  if (isDetachedBuffer(buffer)) {
    throw new TypeError(
      "chunk's buffer is detached and so cannot be enqueued",
      "Chunk's buffer is detached and so cannot be enqueued",
    );
  }
  const transferredBuffer = ArrayBufferPrototypeTransferToFixedLength(buffer);

@@ -2095,14 +2099,14 @@ function readableByteStreamControllerRespond(controller, bytesWritten) {
  if (state === "closed") {
    if (bytesWritten !== 0) {
      throw new TypeError(
        "bytesWritten must be 0 when calling respond() on a closed stream",
        `"bytesWritten" must be 0 when calling respond() on a closed stream: received ${bytesWritten}`,
      );
    }
  } else {
    assert(state === "readable");
    if (bytesWritten === 0) {
      throw new TypeError(
        "bytesWritten must be greater than 0 when calling respond() on a readable stream",
        '"bytesWritten" must be greater than 0 when calling respond() on a readable stream',
      );
    }
    if (

@@ -2110,7 +2114,7 @@ function readableByteStreamControllerRespond(controller, bytesWritten) {
      // deno-lint-ignore prefer-primordials
      firstDescriptor.byteLength
    ) {
      throw new RangeError("bytesWritten out of range");
      throw new RangeError('"bytesWritten" out of range');
    }
  }
  firstDescriptor.buffer = ArrayBufferPrototypeTransferToFixedLength(

@@ -2305,7 +2309,7 @@ function readableByteStreamControllerRespondWithNewView(controller, view) {
  if (state === "closed") {
    if (byteLength !== 0) {
      throw new TypeError(
        "The view's length must be 0 when calling respondWithNewView() on a closed stream",
        `The view's length must be 0 when calling respondWithNewView() on a closed stream: received ${byteLength}`,
      );
    }
  } else {

@@ -3577,7 +3581,7 @@ function setUpReadableByteStreamControllerFromUnderlyingSource(
  }
  const autoAllocateChunkSize = underlyingSourceDict["autoAllocateChunkSize"];
  if (autoAllocateChunkSize === 0) {
    throw new TypeError("autoAllocateChunkSize must be greater than 0");
    throw new TypeError('"autoAllocateChunkSize" must be greater than 0');
  }
  setUpReadableByteStreamController(
    stream,

@@ -3706,7 +3710,7 @@ function setUpReadableStreamDefaultControllerFromUnderlyingSource(
 */
function setUpReadableStreamBYOBReader(reader, stream) {
  if (isReadableStreamLocked(stream)) {
    throw new TypeError("ReadableStream is locked.");
    throw new TypeError("ReadableStream is locked");
  }
  if (
    !(ObjectPrototypeIsPrototypeOf(

@@ -3727,7 +3731,7 @@ function setUpReadableStreamBYOBReader(reader, stream) {
 */
function setUpReadableStreamDefaultReader(reader, stream) {
  if (isReadableStreamLocked(stream)) {
    throw new TypeError("ReadableStream is locked.");
    throw new TypeError("ReadableStream is locked");
  }
  readableStreamReaderGenericInitialize(reader, stream);
  reader[_readRequests] = new Queue();

@@ -3961,7 +3965,7 @@ function setUpWritableStreamDefaultControllerFromUnderlyingSink(
 */
function setUpWritableStreamDefaultWriter(writer, stream) {
  if (isWritableStreamLocked(stream) === true) {
    throw new TypeError("The stream is already locked.");
    throw new TypeError("The stream is already locked");
  }
  writer[_stream] = stream;
  stream[_writer] = writer;

@@ -4019,7 +4023,7 @@ function transformStreamDefaultControllerEnqueue(controller, chunk) {
      /** @type {ReadableStreamDefaultController<O>} */ readableController,
    ) === false
  ) {
    throw new TypeError("Readable stream is unavailable.");
    throw new TypeError("Readable stream is unavailable");
  }
  try {
    readableStreamDefaultControllerEnqueue(

@@ -5143,7 +5147,7 @@ class ReadableStream {
    if (underlyingSourceDict.type === "bytes") {
      if (strategy.size !== undefined) {
        throw new RangeError(
          `${prefix}: When underlying source is "bytes", strategy.size must be undefined.`,
          `${prefix}: When underlying source is "bytes", strategy.size must be 'undefined'`,
        );
      }
      const highWaterMark = extractHighWaterMark(strategy, 0);

@@ -5273,10 +5277,10 @@ class ReadableStream {
    const { readable, writable } = transform;
    const { preventClose, preventAbort, preventCancel, signal } = options;
    if (isReadableStreamLocked(this)) {
      throw new TypeError("ReadableStream is already locked.");
      throw new TypeError("ReadableStream is already locked");
    }
    if (isWritableStreamLocked(writable)) {
      throw new TypeError("Target WritableStream is already locked.");
      throw new TypeError("Target WritableStream is already locked");
    }
    const promise = readableStreamPipeTo(
      this,

@@ -5814,7 +5818,7 @@ class ReadableByteStreamController {
    }
    if (this[_stream][_state] !== "readable") {
      throw new TypeError(
        "ReadableByteStreamController's stream is not in a readable state.",
        "ReadableByteStreamController's stream is not in a readable state",
      );
    }
    readableByteStreamControllerClose(this);

@@ -5846,7 +5850,7 @@ class ReadableByteStreamController {
    if (byteLength === 0) {
      throw webidl.makeException(
        TypeError,
        "length must be non-zero",
        "Length must be non-zero",
        prefix,
        arg1,
      );

@@ -5854,19 +5858,19 @@ class ReadableByteStreamController {
    if (getArrayBufferByteLength(buffer) === 0) {
      throw webidl.makeException(
        TypeError,
        "buffer length must be non-zero",
        "Buffer length must be non-zero",
        prefix,
        arg1,
      );
    }
    if (this[_closeRequested] === true) {
      throw new TypeError(
        "Cannot enqueue chunk after a close has been requested.",
        "Cannot enqueue chunk after a close has been requested",
      );
    }
    if (this[_stream][_state] !== "readable") {
      throw new TypeError(
        "Cannot enqueue chunk when underlying stream is not readable.",
        "Cannot enqueue chunk when underlying stream is not readable",
      );
    }
    return readableByteStreamControllerEnqueue(this, chunk);

@@ -6006,7 +6010,7 @@ class ReadableStreamDefaultController {
  close() {
    webidl.assertBranded(this, ReadableStreamDefaultControllerPrototype);
    if (readableStreamDefaultControllerCanCloseOrEnqueue(this) === false) {
      throw new TypeError("The stream controller cannot close or enqueue.");
      throw new TypeError("The stream controller cannot close or enqueue");
    }
    readableStreamDefaultControllerClose(this);
  }

@@ -6021,7 +6025,7 @@ class ReadableStreamDefaultController {
      chunk = webidl.converters.any(chunk);
    }
    if (readableStreamDefaultControllerCanCloseOrEnqueue(this) === false) {
      throw new TypeError("The stream controller cannot close or enqueue.");
      throw new TypeError("The stream controller cannot close or enqueue");
    }
    readableStreamDefaultControllerEnqueue(this, chunk);
  }

@@ -6146,12 +6150,12 @@ class TransformStream {
    );
    if (transformerDict.readableType !== undefined) {
      throw new RangeError(
        `${prefix}: readableType transformers not supported.`,
        `${prefix}: readableType transformers not supported`,
      );
    }
    if (transformerDict.writableType !== undefined) {
      throw new RangeError(
        `${prefix}: writableType transformers not supported.`,
        `${prefix}: writableType transformers not supported`,
      );
    }
    const readableHighWaterMark = extractHighWaterMark(readableStrategy, 0);

@@ -6356,7 +6360,7 @@ class WritableStream {
    );
    if (underlyingSinkDict.type != null) {
      throw new RangeError(
        `${prefix}: WritableStream does not support 'type' in the underlying sink.`,
        `${prefix}: WritableStream does not support 'type' in the underlying sink`,
      );
    }
    initializeWritableStream(this);

@@ -6483,7 +6487,7 @@ class WritableStreamDefaultWriter {
    webidl.assertBranded(this, WritableStreamDefaultWriterPrototype);
    if (this[_stream] === undefined) {
      throw new TypeError(
        "A writable stream is not associated with the writer.",
        "A writable stream is not associated with the writer",
      );
    }
    return writableStreamDefaultWriterGetDesiredSize(this);
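The `respond()` message changes above encode two invariants worth remembering: on a closed stream only `respond(0)` is legal, and on a readable stream `bytesWritten` must be positive and within the supplied view. A minimal byte source that satisfies them (a usage sketch against the standard Web Streams API):

```ts
const stream = new ReadableStream({
  type: "bytes",
  pull(controller) {
    const request = controller.byobRequest;
    if (request?.view) {
      // write into the reader-supplied buffer, then report how much
      const view = new Uint8Array(
        request.view.buffer,
        request.view.byteOffset,
        request.view.byteLength,
      );
      view[0] = 42;
      request.respond(1); // must be > 0 while the stream is readable
    } else {
      controller.enqueue(new Uint8Array([42]));
    }
  },
});

const reader = stream.getReader({ mode: "byob" });
const { value } = await reader.read(new Uint8Array(16));
console.log(value?.[0]); // 42
```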
@@ -65,7 +65,7 @@ class FileReader extends EventTarget {
    // 1. If fr's state is "loading", throw an InvalidStateError DOMException.
    if (this[state] === "loading") {
      throw new DOMException(
        "Invalid FileReader state.",
        "Invalid FileReader state",
        "InvalidStateError",
      );
    }
@ -28,7 +28,7 @@ const locationConstructorKey = Symbol("locationConstructorKey");
|
|||
class Location {
|
||||
constructor(href = null, key = null) {
|
||||
if (key != locationConstructorKey) {
|
||||
throw new TypeError("Illegal constructor.");
|
||||
throw new TypeError("Illegal constructor");
|
||||
}
|
||||
const url = new URL(href);
|
||||
url.username = "";
|
||||
|
@ -41,7 +41,7 @@ class Location {
|
|||
},
|
||||
set() {
|
||||
throw new DOMException(
|
||||
`Cannot set "location.hash".`,
|
||||
`Cannot set "location.hash"`,
|
||||
"NotSupportedError",
|
||||
);
|
||||
},
|
||||
|
@ -54,7 +54,7 @@ class Location {
|
|||
},
|
||||
set() {
|
||||
throw new DOMException(
|
||||
`Cannot set "location.host".`,
|
||||
`Cannot set "location.host"`,
|
||||
"NotSupportedError",
|
||||
);
|
||||
},
|
||||
|
@ -67,7 +67,7 @@ class Location {
|
|||
},
|
||||
set() {
|
||||
throw new DOMException(
|
||||
`Cannot set "location.hostname".`,
|
||||
`Cannot set "location.hostname"`,
|
||||
"NotSupportedError",
|
||||
);
|
||||
},
|
||||
|
@ -80,7 +80,7 @@ class Location {
|
|||
},
|
||||
set() {
|
||||
throw new DOMException(
|
||||
`Cannot set "location.href".`,
|
||||
`Cannot set "location.href"`,
|
||||
"NotSupportedError",
|
||||
);
|
||||
},
|
||||
|
@ -100,7 +100,7 @@ class Location {
|
|||
},
|
||||
set() {
|
||||
throw new DOMException(
|
||||
`Cannot set "location.pathname".`,
|
||||
`Cannot set "location.pathname"`,
|
||||
"NotSupportedError",
|
||||
);
|
||||
},
|
||||
|
@ -113,7 +113,7 @@ class Location {
|
|||
},
|
||||
set() {
|
||||
throw new DOMException(
|
||||
`Cannot set "location.port".`,
|
||||
`Cannot set "location.port"`,
|
||||
"NotSupportedError",
|
||||
);
|
||||
},
|
||||
|
@ -126,7 +126,7 @@ class Location {
|
|||
},
|
||||
set() {
|
||||
throw new DOMException(
|
||||
`Cannot set "location.protocol".`,
|
||||
`Cannot set "location.protocol"`,
|
||||
"NotSupportedError",
|
||||
);
|
||||
},
|
||||
|
@ -139,7 +139,7 @@ class Location {
|
|||
},
|
||||
set() {
|
||||
throw new DOMException(
|
||||
`Cannot set "location.search".`,
|
||||
`Cannot set "location.search"`,
|
||||
"NotSupportedError",
|
||||
);
|
||||
},
|
||||
|
@ -161,7 +161,7 @@ class Location {
|
|||
__proto__: null,
|
||||
value: function assign() {
|
||||
throw new DOMException(
|
||||
`Cannot call "location.assign()".`,
|
||||
`Cannot call "location.assign()"`,
|
||||
"NotSupportedError",
|
||||
);
|
||||
},
|
||||
|
@ -171,7 +171,7 @@ class Location {
|
|||
__proto__: null,
|
||||
value: function reload() {
|
||||
throw new DOMException(
|
||||
`Cannot call "location.reload()".`,
|
||||
`Cannot call "location.reload()"`,
|
||||
"NotSupportedError",
|
||||
);
|
||||
},
|
||||
|
@ -181,7 +181,7 @@ class Location {
|
|||
__proto__: null,
|
||||
value: function replace() {
|
||||
throw new DOMException(
|
||||
`Cannot call "location.replace()".`,
|
||||
`Cannot call "location.replace()"`,
|
||||
"NotSupportedError",
|
||||
);
|
||||
},
|
||||
|
@ -229,7 +229,7 @@ const workerLocationUrls = new SafeWeakMap();
|
|||
class WorkerLocation {
|
||||
constructor(href = null, key = null) {
|
||||
if (key != locationConstructorKey) {
|
||||
throw new TypeError("Illegal constructor.");
|
||||
throw new TypeError("Illegal constructor");
|
||||
}
|
||||
const url = new URL(href);
|
||||
url.username = "";
|
||||
|
@ -244,7 +244,7 @@ ObjectDefineProperties(WorkerLocation.prototype, {
|
|||
get() {
|
||||
const url = WeakMapPrototypeGet(workerLocationUrls, this);
|
||||
if (url == null) {
|
||||
throw new TypeError("Illegal invocation.");
|
||||
throw new TypeError("Illegal invocation");
|
||||
}
|
||||
return url.hash;
|
||||
},
|
||||
|
@ -256,7 +256,7 @@ ObjectDefineProperties(WorkerLocation.prototype, {
|
|||
get() {
|
||||
const url = WeakMapPrototypeGet(workerLocationUrls, this);
|
||||
if (url == null) {
|
||||
throw new TypeError("Illegal invocation.");
|
||||
throw new TypeError("Illegal invocation");
|
||||
}
|
||||
return url.host;
|
||||
},
|
||||
|
@ -268,7 +268,7 @@ ObjectDefineProperties(WorkerLocation.prototype, {
|
|||
get() {
|
||||
const url = WeakMapPrototypeGet(workerLocationUrls, this);
|
||||
if (url == null) {
|
||||
throw new TypeError("Illegal invocation.");
|
||||
throw new TypeError("Illegal invocation");
|
||||
}
|
||||
return url.hostname;
|
||||
},
|
||||
|
@ -280,7 +280,7 @@ ObjectDefineProperties(WorkerLocation.prototype, {
|
|||
get() {
|
||||
const url = WeakMapPrototypeGet(workerLocationUrls, this);
|
||||
if (url == null) {
|
||||
throw new TypeError("Illegal invocation.");
|
||||
throw new TypeError("Illegal invocation");
|
||||
}
|
||||
return url.href;
|
||||
},
|
||||
|
@ -292,7 +292,7 @@ ObjectDefineProperties(WorkerLocation.prototype, {
|
|||
get() {
|
||||
const url = WeakMapPrototypeGet(workerLocationUrls, this);
|
||||
if (url == null) {
|
||||
throw new TypeError("Illegal invocation.");
|
||||
throw new TypeError("Illegal invocation");
|
||||
}
|
||||
return url.origin;
|
||||
},
|
||||
|
@ -304,7 +304,7 @@ ObjectDefineProperties(WorkerLocation.prototype, {
|
|||
get() {
|
||||
const url = WeakMapPrototypeGet(workerLocationUrls, this);
|
||||
if (url == null) {
|
||||
throw new TypeError("Illegal invocation.");
|
||||
throw new TypeError("Illegal invocation");
|
||||
}
|
||||
return url.pathname;
|
||||
},
|
||||
|
@ -316,7 +316,7 @@ ObjectDefineProperties(WorkerLocation.prototype, {
|
|||
get() {
|
||||
const url = WeakMapPrototypeGet(workerLocationUrls, this);
|
||||
if (url == null) {
|
||||
throw new TypeError("Illegal invocation.");
|
||||
throw new TypeError("Illegal invocation");
|
||||
}
|
||||
return url.port;
|
||||
},
|
||||
|
@ -328,7 +328,7 @@ ObjectDefineProperties(WorkerLocation.prototype, {
|
|||
get() {
|
||||
const url = WeakMapPrototypeGet(workerLocationUrls, this);
|
||||
if (url == null) {
|
||||
throw new TypeError("Illegal invocation.");
|
||||
throw new TypeError("Illegal invocation");
|
||||
}
|
||||
return url.protocol;
|
||||
},
|
||||
|
@ -340,7 +340,7 @@ ObjectDefineProperties(WorkerLocation.prototype, {
|
|||
get() {
|
||||
const url = WeakMapPrototypeGet(workerLocationUrls, this);
|
||||
if (url == null) {
|
||||
throw new TypeError("Illegal invocation.");
|
||||
throw new TypeError("Illegal invocation");
|
||||
}
|
||||
return url.search;
|
||||
},
|
||||
|
@ -352,7 +352,7 @@ ObjectDefineProperties(WorkerLocation.prototype, {
|
|||
value: function toString() {
|
||||
const url = WeakMapPrototypeGet(workerLocationUrls, this);
|
||||
if (url == null) {
|
||||
throw new TypeError("Illegal invocation.");
|
||||
throw new TypeError("Illegal invocation");
|
||||
}
|
||||
return url.href;
|
||||
},
|
||||
|
@ -414,7 +414,7 @@ const locationDescriptor = {
|
|||
return location;
|
||||
},
|
||||
set() {
|
||||
throw new DOMException(`Cannot set "location".`, "NotSupportedError");
|
||||
throw new DOMException(`Cannot set "location"`, "NotSupportedError");
|
||||
},
|
||||
enumerable: true,
|
||||
};
|
||||
|
@ -422,7 +422,7 @@ const workerLocationDescriptor = {
|
|||
get() {
|
||||
if (workerLocation == null) {
|
||||
throw new Error(
|
||||
`Assertion: "globalThis.location" must be defined in a worker.`,
|
||||
`Assertion: "globalThis.location" must be defined in a worker`,
|
||||
);
|
||||
}
|
||||
return workerLocation;
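
The Location and WorkerLocation hunks apply the same message convention to the throwing accessors. For reference, a minimal standalone sketch of the descriptor pattern these files use (the `location` property name and values are illustrative):

```ts
const target: Record<string, unknown> = {};
Object.defineProperty(target, "location", {
  get() {
    return "https://example.com/";
  },
  set() {
    // Same convention as the diff: no trailing period in the message.
    throw new DOMException(`Cannot set "location"`, "NotSupportedError");
  },
  enumerable: true,
});

console.log(target.location); // getter works
try {
  target.location = "other"; // assignment hits the throwing setter
} catch (err) {
  console.log((err as DOMException).name, (err as Error).message);
}
```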

@@ -123,14 +123,14 @@ function convertMarkToTimestamp(mark) {
const entry = findMostRecent(mark, "mark");
if (!entry) {
throw new DOMException(
`Cannot find mark: "${mark}".`,
`Cannot find mark: "${mark}"`,
"SyntaxError",
);
}
return entry.startTime;
}
if (mark < 0) {
throw new TypeError("Mark cannot be negative.");
throw new TypeError(`Mark cannot be negative: received ${mark}`);
}
return mark;
}

@@ -261,7 +261,9 @@ class PerformanceMark extends PerformanceEntry {
super(name, "mark", startTime, 0, illegalConstructorKey);
this[webidl.brand] = webidl.brand;
if (startTime < 0) {
throw new TypeError("startTime cannot be negative");
throw new TypeError(
`Cannot construct PerformanceMark: startTime cannot be negative, received ${startTime}`,
);
}
this[_detail] = structuredClone(detail);
}

@@ -504,14 +506,14 @@ class Performance extends EventTarget {
ObjectKeys(startOrMeasureOptions).length > 0
) {
if (endMark) {
throw new TypeError("Options cannot be passed with endMark.");
throw new TypeError('Options cannot be passed with "endMark"');
}
if (
!ReflectHas(startOrMeasureOptions, "start") &&
!ReflectHas(startOrMeasureOptions, "end")
) {
throw new TypeError(
"A start or end mark must be supplied in options.",
'A "start" or "end" mark must be supplied in options',
);
}
if (

@@ -520,7 +522,7 @@ class Performance extends EventTarget {
ReflectHas(startOrMeasureOptions, "end")
) {
throw new TypeError(
"Cannot specify start, end, and duration together in options.",
'Cannot specify "start", "end", and "duration" together in options',
);
}
}
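
The measure() hunks above tighten the option-validation messages. A small sketch of the rules they enforce (the mark names are illustrative; the messages are those introduced by this diff):

```ts
performance.mark("begin");
performance.mark("finish");

// Fine: "start" and "end" supplied via the options bag.
performance.measure("ok", { start: "begin", end: "finish" });

try {
  // Throws: an options bag cannot be combined with a separate endMark argument.
  performance.measure("bad", { start: "begin" }, "finish");
} catch (err) {
  console.log((err as Error).message); // 'Options cannot be passed with "endMark"'
}
```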

@@ -84,35 +84,35 @@ class ImageData {

if (dataLength === 0) {
throw new DOMException(
"Failed to construct 'ImageData': The input data has zero elements.",
"Failed to construct 'ImageData': the input data has zero elements",
"InvalidStateError",
);
}

if (dataLength % 4 !== 0) {
throw new DOMException(
"Failed to construct 'ImageData': The input data length is not a multiple of 4.",
`Failed to construct 'ImageData': the input data length is not a multiple of 4, received ${dataLength}`,
"InvalidStateError",
);
}

if (sourceWidth < 1) {
throw new DOMException(
"Failed to construct 'ImageData': The source width is zero or not a number.",
"Failed to construct 'ImageData': the source width is zero or not a number",
"IndexSizeError",
);
}

if (webidl.type(sourceHeight) !== "Undefined" && sourceHeight < 1) {
throw new DOMException(
"Failed to construct 'ImageData': The source height is zero or not a number.",
"Failed to construct 'ImageData': the source height is zero or not a number",
"IndexSizeError",
);
}

if (dataLength / 4 % sourceWidth !== 0) {
throw new DOMException(
"Failed to construct 'ImageData': The input data length is not a multiple of (4 * width).",
"Failed to construct 'ImageData': the input data length is not a multiple of (4 * width)",
"IndexSizeError",
);
}

@@ -122,7 +122,7 @@ class ImageData {
(sourceWidth * sourceHeight * 4 !== dataLength)
) {
throw new DOMException(
"Failed to construct 'ImageData': The input data length is not equal to (4 * width * height).",
"Failed to construct 'ImageData': the input data length is not equal to (4 * width * height)",
"IndexSizeError",
);
}

@@ -159,14 +159,14 @@ class ImageData {

if (sourceWidth < 1) {
throw new DOMException(
"Failed to construct 'ImageData': The source width is zero or not a number.",
"Failed to construct 'ImageData': the source width is zero or not a number",
"IndexSizeError",
);
}

if (sourceHeight < 1) {
throw new DOMException(
"Failed to construct 'ImageData': The source height is zero or not a number.",
"Failed to construct 'ImageData': the source height is zero or not a number",
"IndexSizeError",
);
}
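
The ImageData hunks lower-case the detail part of each message and, for the multiple-of-4 check, append the received length. A sketch of the validation behavior, assuming a runtime where the ImageData global is available (Deno or a browser):

```ts
// 2x2 RGBA image: 16 bytes, consistent with width and height.
const ok = new ImageData(new Uint8ClampedArray(2 * 2 * 4), 2, 2);
console.log(ok.width, ok.height); // 2 2

try {
  // 7 bytes is not a multiple of 4 (RGBA), so construction fails.
  new ImageData(new Uint8ClampedArray(7), 1);
} catch (err) {
  console.log((err as DOMException).name); // "InvalidStateError"
}
```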

resolvers/npm_cache/Cargo.toml (new file, 42 lines)

@@ -0,0 +1,42 @@
# Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.

[package]
name = "deno_npm_cache"
version = "0.0.1"
authors.workspace = true
edition.workspace = true
license.workspace = true
readme = "README.md"
repository.workspace = true
description = "Helpers for downloading and caching npm dependencies for Deno"

[lib]
path = "lib.rs"

[dependencies]
# todo(dsherret): remove this dependency
anyhow.workspace = true
# todo(dsherret): remove this dependency
deno_core.workspace = true

async-trait.workspace = true
base64.workspace = true
boxed_error.workspace = true
deno_cache_dir.workspace = true
deno_npm.workspace = true
deno_semver.workspace = true
deno_unsync = { workspace = true, features = ["tokio"] }
faster-hex.workspace = true
flate2 = { workspace = true, features = ["zlib-ng-compat"] }
futures.workspace = true
http.workspace = true
log.workspace = true
parking_lot.workspace = true
percent-encoding.workspace = true
rand.workspace = true
ring.workspace = true
serde_json.workspace = true
tar.workspace = true
tempfile = "3.4.0"
thiserror.workspace = true
url.workspace = true

resolvers/npm_cache/README.md (new file, 6 lines)

@@ -0,0 +1,6 @@
# deno_npm_cache

[![crates](https://img.shields.io/crates/v/deno_npm_cache.svg)](https://crates.io/crates/deno_npm_cache)
[![docs](https://docs.rs/deno_npm_cache/badge.svg)](https://docs.rs/deno_npm_cache)

Helpers for downloading and caching npm dependencies for Deno.

@@ -1,63 +1,133 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.

use std::collections::HashSet;
use std::fs;
use std::io::ErrorKind;
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;

use deno_ast::ModuleSpecifier;
use anyhow::bail;
use anyhow::Context;
use anyhow::Error as AnyError;
use deno_cache_dir::npm::NpmCacheDir;
use deno_core::anyhow::bail;
use deno_core::anyhow::Context;
use deno_core::error::AnyError;
use deno_core::parking_lot::Mutex;
use deno_core::serde_json;
use deno_core::url::Url;
use deno_npm::npm_rc::ResolvedNpmRc;
use deno_npm::registry::NpmPackageInfo;
use deno_npm::NpmPackageCacheFolderId;
use deno_semver::package::PackageNv;
use deno_semver::Version;
use http::HeaderName;
use http::HeaderValue;
use http::StatusCode;
use parking_lot::Mutex;
use url::Url;

use crate::args::CacheSetting;
use crate::cache::CACHE_PERM;
use crate::util::fs::atomic_write_file_with_retries;
use crate::util::fs::hard_link_dir_recursive;

pub mod registry_info;
mod registry_info;
mod remote;
mod tarball;
mod tarball_extract;

pub use registry_info::RegistryInfoDownloader;
pub use registry_info::RegistryInfoProvider;
pub use tarball::TarballCache;

// todo(#27198): make both of these private and get the rest of the code
// using RegistryInfoProvider.
pub use registry_info::get_package_url;
pub use remote::maybe_auth_header_for_npm_registry;

#[derive(Debug)]
pub struct DownloadError {
pub status_code: Option<StatusCode>,
pub error: AnyError,
}

impl std::error::Error for DownloadError {
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
self.error.source()
}
}

impl std::fmt::Display for DownloadError {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
self.error.fmt(f)
}
}

#[async_trait::async_trait(?Send)]
pub trait NpmCacheEnv: Send + Sync + 'static {
fn exists(&self, path: &Path) -> bool;
fn hard_link_dir_recursive(
&self,
from: &Path,
to: &Path,
) -> Result<(), AnyError>;
fn atomic_write_file_with_retries(
&self,
file_path: &Path,
data: &[u8],
) -> std::io::Result<()>;
async fn download_with_retries_on_any_tokio_runtime(
&self,
url: Url,
maybe_auth_header: Option<(HeaderName, HeaderValue)>,
) -> Result<Option<Vec<u8>>, DownloadError>;
}
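
In TypeScript terms, the trait above is roughly the following interface; the camelCase names and the in-memory test double are illustrative renderings, not part of the crate:

```ts
interface NpmCacheEnv {
  exists(path: string): boolean;
  hardLinkDirRecursive(from: string, to: string): void;
  atomicWriteFileWithRetries(filePath: string, data: Uint8Array): void;
  downloadWithRetriesOnAnyTokioRuntime(
    url: URL,
    maybeAuthHeader?: [name: string, value: string],
  ): Promise<Uint8Array | undefined>;
}

// Because the cache only touches the fs and network through this seam,
// a test double can satisfy it entirely in memory.
const testEnv: NpmCacheEnv = {
  exists: () => false,
  hardLinkDirRecursive: () => {},
  atomicWriteFileWithRetries: () => {},
  downloadWithRetriesOnAnyTokioRuntime: () =>
    Promise.resolve(new TextEncoder().encode("{}")),
};
console.log(
  await testEnv.downloadWithRetriesOnAnyTokioRuntime(new URL("https://example.com/")),
);
```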

/// Indicates how cached source files should be handled.
#[derive(Debug, Clone, Eq, PartialEq)]
pub enum NpmCacheSetting {
/// Only the cached files should be used. Any files not in the cache will
/// error. This is the equivalent of `--cached-only` in the CLI.
Only,
/// No cached source files should be used, and all files should be reloaded.
/// This is the equivalent of `--reload` in the CLI.
ReloadAll,
/// Only some cached resources should be used. This is the equivalent of
/// `--reload=npm:chalk`
ReloadSome { npm_package_names: Vec<String> },
/// The cached source files should be used for local modules. This is the
/// default behavior of the CLI.
Use,
}

impl NpmCacheSetting {
pub fn should_use_for_npm_package(&self, package_name: &str) -> bool {
match self {
NpmCacheSetting::ReloadAll => false,
NpmCacheSetting::ReloadSome { npm_package_names } => {
!npm_package_names.iter().any(|n| n == package_name)
}
_ => true,
}
}
}
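
A TypeScript sketch of the same decision table, useful for seeing the semantics at a glance (the discriminated-union encoding is an assumption of the sketch, not the crate's API):

```ts
type NpmCacheSetting =
  | { kind: "only" }
  | { kind: "reload-all" }
  | { kind: "reload-some"; npmPackageNames: string[] }
  | { kind: "use" };

function shouldUseForNpmPackage(
  setting: NpmCacheSetting,
  packageName: string,
): boolean {
  switch (setting.kind) {
    case "reload-all":
      return false; // --reload: never trust the cache
    case "reload-some":
      // --reload=npm:chalk: skip the cache only for the named packages
      return !setting.npmPackageNames.includes(packageName);
    default:
      return true; // "only" and "use" both read from the cache
  }
}

console.log(shouldUseForNpmPackage({ kind: "reload-all" }, "chalk")); // false
console.log(
  shouldUseForNpmPackage(
    { kind: "reload-some", npmPackageNames: ["chalk"] },
    "ansi-styles",
  ),
); // true
```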

/// Stores a single copy of npm packages in a cache.
#[derive(Debug)]
pub struct NpmCache {
pub struct NpmCache<TEnv: NpmCacheEnv> {
env: Arc<TEnv>,
cache_dir: Arc<NpmCacheDir>,
cache_setting: CacheSetting,
cache_setting: NpmCacheSetting,
npmrc: Arc<ResolvedNpmRc>,
/// ensures a package is only downloaded once per run
previously_reloaded_packages: Mutex<HashSet<PackageNv>>,
}

impl NpmCache {
impl<TEnv: NpmCacheEnv> NpmCache<TEnv> {
pub fn new(
cache_dir: Arc<NpmCacheDir>,
cache_setting: CacheSetting,
cache_setting: NpmCacheSetting,
env: Arc<TEnv>,
npmrc: Arc<ResolvedNpmRc>,
) -> Self {
Self {
cache_dir,
cache_setting,
env,
previously_reloaded_packages: Default::default(),
npmrc,
}
}

pub fn cache_setting(&self) -> &CacheSetting {
pub fn cache_setting(&self) -> &NpmCacheSetting {
&self.cache_setting
}

@@ -118,7 +188,9 @@ impl NpmCache {
// it seems Windows does an "AccessDenied" error when moving a
// directory with hard links, so that's why this solution is done
with_folder_sync_lock(&folder_id.nv, &package_folder, || {
hard_link_dir_recursive(&original_package_folder, &package_folder)
self
.env
.hard_link_dir_recursive(&original_package_folder, &package_folder)
})?;
Ok(())
}

@@ -158,7 +230,7 @@ impl NpmCache {

pub fn resolve_package_folder_id_from_specifier(
&self,
specifier: &ModuleSpecifier,
specifier: &Url,
) -> Option<NpmPackageCacheFolderId> {
self
.cache_dir

@@ -180,7 +252,7 @@ impl NpmCache {
) -> Result<Option<NpmPackageInfo>, AnyError> {
let file_cache_path = self.get_registry_package_info_file_cache_path(name);

let file_text = match fs::read_to_string(file_cache_path) {
let file_text = match std::fs::read_to_string(file_cache_path) {
Ok(file_text) => file_text,
Err(err) if err.kind() == ErrorKind::NotFound => return Ok(None),
Err(err) => return Err(err.into()),

@@ -195,7 +267,9 @@ impl NpmCache {
) -> Result<(), AnyError> {
let file_cache_path = self.get_registry_package_info_file_cache_path(name);
let file_text = serde_json::to_string(&package_info)?;
atomic_write_file_with_retries(&file_cache_path, file_text, CACHE_PERM)?;
self
.env
.atomic_write_file_with_retries(&file_cache_path, file_text.as_bytes())?;
Ok(())
}

@@ -216,7 +290,7 @@ fn with_folder_sync_lock(
output_folder: &Path,
action: impl FnOnce() -> Result<(), AnyError>,
) -> Result<(), AnyError> {
fs::create_dir_all(output_folder).with_context(|| {
std::fs::create_dir_all(output_folder).with_context(|| {
format!("Error creating '{}'.", output_folder.display())
})?;

@@ -229,7 +303,7 @@ fn with_folder_sync_lock(
// then wait until the other process finishes with a timeout), but
// for now this is good enough.
let sync_lock_path = output_folder.join(NPM_PACKAGE_SYNC_LOCK_FILENAME);
match fs::OpenOptions::new()
match std::fs::OpenOptions::new()
.write(true)
.create(true)
.truncate(false)

@@ -257,7 +331,7 @@ fn with_folder_sync_lock(
match inner(output_folder, action) {
Ok(()) => Ok(()),
Err(err) => {
if let Err(remove_err) = fs::remove_dir_all(output_folder) {
if let Err(remove_err) = std::fs::remove_dir_all(output_folder) {
if remove_err.kind() != std::io::ErrorKind::NotFound {
bail!(
concat!(

@@ -1,30 +1,29 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.

use std::collections::HashMap;
use std::collections::HashSet;
use std::sync::Arc;

use deno_core::anyhow::anyhow;
use deno_core::anyhow::bail;
use deno_core::anyhow::Context;
use deno_core::error::custom_error;
use deno_core::error::AnyError;
use deno_core::futures::future::LocalBoxFuture;
use deno_core::futures::FutureExt;
use deno_core::parking_lot::Mutex;
use deno_core::serde_json;
use deno_core::url::Url;
use anyhow::anyhow;
use anyhow::bail;
use anyhow::Context;
use anyhow::Error as AnyError;
use async_trait::async_trait;
use deno_npm::npm_rc::ResolvedNpmRc;
use deno_npm::registry::NpmPackageInfo;
use deno_npm::registry::NpmRegistryApi;
use deno_npm::registry::NpmRegistryPackageInfoLoadError;
use deno_unsync::sync::AtomicFlag;
use deno_unsync::sync::MultiRuntimeAsyncValueCreator;
use futures::future::LocalBoxFuture;
use futures::FutureExt;
use parking_lot::Mutex;
use url::Url;

use crate::args::CacheSetting;
use crate::http_util::HttpClientProvider;
use crate::npm::common::maybe_auth_header_for_npm_registry;
use crate::util::progress_bar::ProgressBar;
use crate::util::sync::MultiRuntimeAsyncValueCreator;

use super::NpmCache;

// todo(dsherret): create seams and unit test this
use crate::remote::maybe_auth_header_for_npm_registry;
use crate::NpmCache;
use crate::NpmCacheEnv;
use crate::NpmCacheSetting;

type LoadResult = Result<FutureResult, Arc<AnyError>>;
type LoadFuture = LocalBoxFuture<'static, LoadResult>;

@@ -49,41 +48,128 @@ enum MemoryCacheItem {
MemoryCached(Result<Option<Arc<NpmPackageInfo>>, Arc<AnyError>>),
}

#[derive(Debug, Default)]
struct MemoryCache {
clear_id: usize,
items: HashMap<String, MemoryCacheItem>,
}

impl MemoryCache {
#[inline(always)]
pub fn clear(&mut self) {
self.clear_id += 1;
self.items.clear();
}

#[inline(always)]
pub fn get(&self, key: &str) -> Option<&MemoryCacheItem> {
self.items.get(key)
}

#[inline(always)]
pub fn insert(&mut self, key: String, value: MemoryCacheItem) {
self.items.insert(key, value);
}

#[inline(always)]
pub fn try_insert(
&mut self,
clear_id: usize,
key: &str,
value: MemoryCacheItem,
) -> bool {
if clear_id != self.clear_id {
return false;
}
// if the clear_id is the same then the item should exist
debug_assert!(self.items.contains_key(key));
if let Some(item) = self.items.get_mut(key) {
*item = value;
}
true
}
}
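
The clear_id field is a generation counter: try_insert refuses to write a result that was computed against an older generation, so a concurrent clear() cannot be undone by an in-flight load. A standalone sketch of the trick (names and the string value type are illustrative):

```ts
class GenerationalCache<V> {
  private clearId = 0;
  private items = new Map<string, V>();

  clear(): void {
    this.clearId++; // bump the generation; stale writers will notice
    this.items.clear();
  }

  set(key: string, value: V): number {
    this.items.set(key, value);
    return this.clearId; // callers remember the generation they wrote under
  }

  // Only applies the update if no clear() happened since `clearId` was read.
  tryInsert(clearId: number, key: string, value: V): boolean {
    if (clearId !== this.clearId) return false;
    this.items.set(key, value);
    return true;
  }
}

const cache = new GenerationalCache<string>();
const gen = cache.set("chalk", "pending");
cache.clear();
console.log(cache.tryInsert(gen, "chalk", "loaded")); // false: generation moved on
```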

// todo(#27198): refactor to store this only in the http cache

/// Downloads packuments from the npm registry.
///
/// This is shared amongst all the workers.
#[derive(Debug)]
pub struct RegistryInfoDownloader {
cache: Arc<NpmCache>,
http_client_provider: Arc<HttpClientProvider>,
pub struct RegistryInfoProvider<TEnv: NpmCacheEnv> {
// todo(#27198): remove this
cache: Arc<NpmCache<TEnv>>,
env: Arc<TEnv>,
npmrc: Arc<ResolvedNpmRc>,
progress_bar: ProgressBar,
memory_cache: Mutex<HashMap<String, MemoryCacheItem>>,
force_reload_flag: AtomicFlag,
memory_cache: Mutex<MemoryCache>,
previously_loaded_packages: Mutex<HashSet<String>>,
}

impl RegistryInfoDownloader {
impl<TEnv: NpmCacheEnv> RegistryInfoProvider<TEnv> {
pub fn new(
cache: Arc<NpmCache>,
http_client_provider: Arc<HttpClientProvider>,
cache: Arc<NpmCache<TEnv>>,
env: Arc<TEnv>,
npmrc: Arc<ResolvedNpmRc>,
progress_bar: ProgressBar,
) -> Self {
Self {
cache,
http_client_provider,
env,
npmrc,
progress_bar,
force_reload_flag: AtomicFlag::lowered(),
memory_cache: Default::default(),
previously_loaded_packages: Default::default(),
}
}

pub async fn load_package_info(
/// Clears the internal memory cache.
pub fn clear_memory_cache(&self) {
self.memory_cache.lock().clear();
}

fn mark_force_reload(&self) -> bool {
// never force reload the registry information if reloading
// is disabled or if we're already reloading
if matches!(
self.cache.cache_setting(),
NpmCacheSetting::Only | NpmCacheSetting::ReloadAll
) {
return false;
}
if self.force_reload_flag.raise() {
self.clear_memory_cache();
true
} else {
false
}
}

pub fn as_npm_registry_api(self: &Arc<Self>) -> NpmRegistryApiAdapter<TEnv> {
NpmRegistryApiAdapter(self.clone())
}

pub async fn package_info(
self: &Arc<Self>,
name: &str,
) -> Result<Arc<NpmPackageInfo>, NpmRegistryPackageInfoLoadError> {
match self.maybe_package_info(name).await {
Ok(Some(info)) => Ok(info),
Ok(None) => Err(NpmRegistryPackageInfoLoadError::PackageNotExists {
package_name: name.to_string(),
}),
Err(err) => {
Err(NpmRegistryPackageInfoLoadError::LoadError(Arc::new(err)))
}
}
}

pub async fn maybe_package_info(
self: &Arc<Self>,
name: &str,
) -> Result<Option<Arc<NpmPackageInfo>>, AnyError> {
self.load_package_info_inner(name).await.with_context(|| {
format!(
"Error getting response at {} for package \"{}\"",
"Failed loading {} for package \"{}\"",
get_package_url(&self.npmrc, name),
name
)

@@ -94,18 +180,9 @@ impl RegistryInfoDownloader {
self: &Arc<Self>,
name: &str,
) -> Result<Option<Arc<NpmPackageInfo>>, AnyError> {
if *self.cache.cache_setting() == CacheSetting::Only {
return Err(custom_error(
"NotCached",
format!(
"An npm specifier not found in cache: \"{name}\", --cached-only is specified."
)
));
}

let cache_item = {
let (cache_item, clear_id) = {
let mut mem_cache = self.memory_cache.lock();
if let Some(cache_item) = mem_cache.get(name) {
let cache_item = if let Some(cache_item) = mem_cache.get(name) {
cache_item.clone()
} else {
let value_creator = MultiRuntimeAsyncValueCreator::new({

@@ -116,7 +193,8 @@ impl RegistryInfoDownloader {
let cache_item = MemoryCacheItem::Pending(Arc::new(value_creator));
mem_cache.insert(name.to_string(), cache_item.clone());
cache_item
}
};
(cache_item, mem_cache.clear_id)
};

match cache_item {

@@ -135,25 +213,37 @@ impl RegistryInfoDownloader {
Ok(FutureResult::SavedFsCache(info)) => {
// return back the future and mark this package as having
// been saved in the cache for next time it's requested
*self.memory_cache.lock().get_mut(name).unwrap() =
MemoryCacheItem::FsCached;
self.memory_cache.lock().try_insert(
clear_id,
name,
MemoryCacheItem::FsCached,
);
Ok(Some(info))
}
Ok(FutureResult::ErroredFsCache(info)) => {
// since saving to the fs cache failed, keep the package information in memory
*self.memory_cache.lock().get_mut(name).unwrap() =
MemoryCacheItem::MemoryCached(Ok(Some(info.clone())));
self.memory_cache.lock().try_insert(
clear_id,
name,
MemoryCacheItem::MemoryCached(Ok(Some(info.clone()))),
);
Ok(Some(info))
}
Ok(FutureResult::PackageNotExists) => {
*self.memory_cache.lock().get_mut(name).unwrap() =
MemoryCacheItem::MemoryCached(Ok(None));
self.memory_cache.lock().try_insert(
clear_id,
name,
MemoryCacheItem::MemoryCached(Ok(None)),
);
Ok(None)
}
Err(err) => {
let return_err = anyhow!("{}", err);
*self.memory_cache.lock().get_mut(name).unwrap() =
MemoryCacheItem::MemoryCached(Err(err));
let return_err = anyhow!("{:#}", err);
self.memory_cache.lock().try_insert(
clear_id,
name,
MemoryCacheItem::MemoryCached(Err(err)),
);
Err(return_err)
}
}

@@ -167,7 +257,7 @@ impl RegistryInfoDownloader {
) -> Result<NpmPackageInfo, AnyError> {
// this scenario failing should be exceptionally rare so let's
// deal with improving it only when anyone runs into an issue
let maybe_package_info = deno_core::unsync::spawn_blocking({
let maybe_package_info = deno_unsync::spawn_blocking({
let cache = self.cache.clone();
let name = name.to_string();
move || cache.load_package_info(&name)

@@ -199,20 +289,41 @@ impl RegistryInfoDownloader {
return std::future::ready(Err(Arc::new(err))).boxed_local()
}
};
let guard = self.progress_bar.update(package_url.as_str());
let name = name.to_string();
async move {
let client = downloader.http_client_provider.get_or_create()?;
let maybe_bytes = client
.download_with_progress_and_retries(
if (downloader.cache.cache_setting().should_use_for_npm_package(&name) && !downloader.force_reload_flag.is_raised())
// if this has been previously reloaded, then try loading from the
// file system cache
|| downloader.previously_loaded_packages.lock().contains(&name)
{
// attempt to load from the file cache
if let Some(info) = downloader.cache.load_package_info(&name)? {
let result = Arc::new(info);
return Ok(FutureResult::SavedFsCache(result));
}
}

if *downloader.cache.cache_setting() == NpmCacheSetting::Only {
return Err(deno_core::error::custom_error(
"NotCached",
format!(
"npm package not found in cache: \"{name}\", --cached-only is specified."
)
));
}

downloader.previously_loaded_packages.lock().insert(name.to_string());

let maybe_bytes = downloader
.env
.download_with_retries_on_any_tokio_runtime(
package_url,
maybe_auth_header,
&guard,
)
.await?;
match maybe_bytes {
Some(bytes) => {
let future_result = deno_core::unsync::spawn_blocking(
let future_result = deno_unsync::spawn_blocking(
move || -> Result<FutureResult, AnyError> {
let package_info = serde_json::from_slice(&bytes)?;
match downloader.cache.save_package_info(&name, &package_info) {

@@ -241,6 +352,26 @@ impl RegistryInfoDownloader {
}
}

pub struct NpmRegistryApiAdapter<TEnv: NpmCacheEnv>(
Arc<RegistryInfoProvider<TEnv>>,
);

#[async_trait(?Send)]
impl<TEnv: NpmCacheEnv> NpmRegistryApi for NpmRegistryApiAdapter<TEnv> {
async fn package_info(
&self,
name: &str,
) -> Result<Arc<NpmPackageInfo>, NpmRegistryPackageInfoLoadError> {
self.0.package_info(name).await
}

fn mark_force_reload(&self) -> bool {
self.0.mark_force_reload()
}
}
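
The adapter wraps the richer provider behind the narrow NpmRegistryApi trait. Sketched in TypeScript (the interface shape mirrors the two methods above; the provider body is a stand-in, not the real fetch logic):

```ts
interface NpmRegistryApi {
  packageInfo(name: string): Promise<unknown>;
  markForceReload(): boolean;
}

class RegistryInfoProviderSketch {
  async packageInfo(name: string): Promise<unknown> {
    return { name, versions: {} }; // stand-in for a packument load
  }
  markForceReload(): boolean {
    return true;
  }
}

// Thin delegation: the adapter owns nothing and just forwards.
class NpmRegistryApiAdapter implements NpmRegistryApi {
  constructor(private provider: RegistryInfoProviderSketch) {}
  packageInfo(name: string) {
    return this.provider.packageInfo(name);
  }
  markForceReload() {
    return this.provider.markForceReload();
  }
}

const api = new NpmRegistryApiAdapter(new RegistryInfoProviderSketch());
console.log(await api.packageInfo("chalk"));
```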

// todo(#27198): make this private and only use RegistryInfoProvider in the rest of
// the code
pub fn get_package_url(npmrc: &ResolvedNpmRc, name: &str) -> Url {
let registry_url = npmrc.get_registry_url(name);
// The '/' character in scoped package names "@scope/name" must be

@@ -1,10 +1,10 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.

use anyhow::bail;
use anyhow::Context;
use anyhow::Error as AnyError;
use base64::prelude::BASE64_STANDARD;
use base64::Engine;
use deno_core::anyhow::bail;
use deno_core::anyhow::Context;
use deno_core::error::AnyError;
use deno_npm::npm_rc::RegistryConfig;
use http::header;

@@ -3,33 +3,26 @@
use std::collections::HashMap;
use std::sync::Arc;

use deno_core::anyhow::anyhow;
use deno_core::anyhow::bail;
use deno_core::anyhow::Context;
use deno_core::error::custom_error;
use deno_core::error::AnyError;
use deno_core::futures::future::LocalBoxFuture;
use deno_core::futures::FutureExt;
use deno_core::parking_lot::Mutex;
use deno_core::url::Url;
use anyhow::anyhow;
use anyhow::bail;
use anyhow::Context;
use anyhow::Error as AnyError;
use deno_npm::npm_rc::ResolvedNpmRc;
use deno_npm::registry::NpmPackageVersionDistInfo;
use deno_runtime::deno_fs::FileSystem;
use deno_semver::package::PackageNv;
use deno_unsync::sync::MultiRuntimeAsyncValueCreator;
use futures::future::LocalBoxFuture;
use futures::FutureExt;
use http::StatusCode;
use parking_lot::Mutex;
use url::Url;

use crate::args::CacheSetting;
use crate::http_util::DownloadError;
use crate::http_util::HttpClientProvider;
use crate::npm::common::maybe_auth_header_for_npm_registry;
use crate::util::progress_bar::ProgressBar;
use crate::util::sync::MultiRuntimeAsyncValueCreator;

use super::tarball_extract::verify_and_extract_tarball;
use super::tarball_extract::TarballExtractionMode;
use super::NpmCache;

// todo(dsherret): create seams and unit test this
use crate::remote::maybe_auth_header_for_npm_registry;
use crate::tarball_extract::verify_and_extract_tarball;
use crate::tarball_extract::TarballExtractionMode;
use crate::NpmCache;
use crate::NpmCacheEnv;
use crate::NpmCacheSetting;

type LoadResult = Result<(), Arc<AnyError>>;
type LoadFuture = LocalBoxFuture<'static, LoadResult>;

@@ -49,42 +42,36 @@ enum MemoryCacheItem {
///
/// This is shared amongst all the workers.
#[derive(Debug)]
pub struct TarballCache {
cache: Arc<NpmCache>,
fs: Arc<dyn FileSystem>,
http_client_provider: Arc<HttpClientProvider>,
pub struct TarballCache<TEnv: NpmCacheEnv> {
cache: Arc<NpmCache<TEnv>>,
env: Arc<TEnv>,
npmrc: Arc<ResolvedNpmRc>,
progress_bar: ProgressBar,
memory_cache: Mutex<HashMap<PackageNv, MemoryCacheItem>>,
}

impl TarballCache {
impl<TEnv: NpmCacheEnv> TarballCache<TEnv> {
pub fn new(
cache: Arc<NpmCache>,
fs: Arc<dyn FileSystem>,
http_client_provider: Arc<HttpClientProvider>,
cache: Arc<NpmCache<TEnv>>,
env: Arc<TEnv>,
npmrc: Arc<ResolvedNpmRc>,
progress_bar: ProgressBar,
) -> Self {
Self {
cache,
fs,
http_client_provider,
env,
npmrc,
progress_bar,
memory_cache: Default::default(),
}
}

pub async fn ensure_package(
self: &Arc<Self>,
package: &PackageNv,
package_nv: &PackageNv,
dist: &NpmPackageVersionDistInfo,
) -> Result<(), AnyError> {
self
.ensure_package_inner(package, dist)
.ensure_package_inner(package_nv, dist)
.await
.with_context(|| format!("Failed caching npm package '{}'.", package))
.with_context(|| format!("Failed caching npm package '{}'.", package_nv))
}

async fn ensure_package_inner(

@@ -113,7 +100,7 @@ impl TarballCache {

match cache_item {
MemoryCacheItem::Cached => Ok(()),
MemoryCacheItem::Errored(err) => Err(anyhow!("{}", err)),
MemoryCacheItem::Errored(err) => Err(anyhow!("{:#}", err)),
MemoryCacheItem::Pending(creator) => {
let result = creator.get().await;
match result {

@@ -123,7 +110,7 @@ impl TarballCache {
Ok(())
}
Err(err) => {
let result_err = anyhow!("{}", err);
let result_err = anyhow!("{:#}", err);
*self.memory_cache.lock().get_mut(package_nv).unwrap() =
MemoryCacheItem::Errored(err);
Err(result_err)

@@ -144,14 +131,14 @@ impl TarballCache {
let package_folder =
tarball_cache.cache.package_folder_for_nv_and_url(&package_nv, registry_url);
let should_use_cache = tarball_cache.cache.should_use_cache_for_package(&package_nv);
let package_folder_exists = tarball_cache.fs.exists_sync(&package_folder);
let package_folder_exists = tarball_cache.env.exists(&package_folder);
if should_use_cache && package_folder_exists {
return Ok(());
} else if tarball_cache.cache.cache_setting() == &CacheSetting::Only {
return Err(custom_error(
} else if tarball_cache.cache.cache_setting() == &NpmCacheSetting::Only {
return Err(deno_core::error::custom_error(
"NotCached",
format!(
"An npm specifier not found in cache: \"{}\", --cached-only is specified.",
"npm package not found in cache: \"{}\", --cached-only is specified.",
&package_nv.name
)
)

@@ -169,15 +156,13 @@ impl TarballCache {
tarball_cache.npmrc.tarball_config(&tarball_uri);
let maybe_auth_header = maybe_registry_config.and_then(|c| maybe_auth_header_for_npm_registry(c).ok()?);

let guard = tarball_cache.progress_bar.update(&dist.tarball);
let result = tarball_cache.http_client_provider
.get_or_create()?
.download_with_progress_and_retries(tarball_uri, maybe_auth_header, &guard)
let result = tarball_cache.env
.download_with_retries_on_any_tokio_runtime(tarball_uri, maybe_auth_header)
.await;
let maybe_bytes = match result {
Ok(maybe_bytes) => maybe_bytes,
Err(DownloadError::BadResponse(err)) => {
if err.status_code == StatusCode::UNAUTHORIZED
Err(err) => {
if err.status_code == Some(StatusCode::UNAUTHORIZED)
&& maybe_registry_config.is_none()
&& tarball_cache.npmrc.get_registry_config(&package_nv.name).auth_token.is_some()
{

@@ -194,7 +179,6 @@ impl TarballCache {
}
return Err(err.into())
},
Err(err) => return Err(err.into()),
};
match maybe_bytes {
Some(bytes) => {

@@ -213,7 +197,7 @@ impl TarballCache {
};
let dist = dist.clone();
let package_nv = package_nv.clone();
deno_core::unsync::spawn_blocking(move || {
deno_unsync::spawn_blocking(move || {
verify_and_extract_tarball(
&package_nv,
&bytes,

@@ -1,16 +1,17 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.

use std::borrow::Cow;
use std::collections::HashSet;
use std::fs;
use std::io::ErrorKind;
use std::path::Path;
use std::path::PathBuf;

use anyhow::bail;
use anyhow::Context;
use anyhow::Error as AnyError;
use base64::prelude::BASE64_STANDARD;
use base64::Engine;
use deno_core::anyhow::bail;
use deno_core::anyhow::Context;
use deno_core::error::AnyError;
use deno_npm::registry::NpmPackageVersionDistInfo;
use deno_npm::registry::NpmPackageVersionDistInfoIntegrity;
use deno_semver::package::PackageNv;

@@ -18,8 +19,6 @@ use flate2::read::GzDecoder;
use tar::Archive;
use tar::EntryType;

use crate::util::path::get_atomic_dir_path;

#[derive(Debug, Copy, Clone)]
pub enum TarballExtractionMode {
/// Overwrites the destination directory without deleting any files.

@@ -206,10 +205,30 @@ fn extract_tarball(data: &[u8], output_folder: &Path) -> Result<(), AnyError> {
Ok(())
}

fn get_atomic_dir_path(file_path: &Path) -> PathBuf {
let rand = gen_rand_path_component();
let new_file_name = format!(
".{}_{}",
file_path
.file_name()
.map(|f| f.to_string_lossy())
.unwrap_or(Cow::Borrowed("")),
rand
);
file_path.with_file_name(new_file_name)
}

fn gen_rand_path_component() -> String {
(0..4).fold(String::new(), |mut output, _| {
output.push_str(&format!("{:02x}", rand::random::<u8>()));
output
})
}
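
get_atomic_dir_path and gen_rand_path_component support an extract-then-rename scheme: work happens in a hidden, randomly suffixed sibling directory that is later renamed into place, so readers never observe a half-extracted package. A sketch of the same naming logic (the path-splitting helper is an assumption of the sketch):

```ts
function getAtomicDirPath(dirPath: string): string {
  // Four random bytes rendered as lowercase hex, like the Rust version.
  const rand = Array.from(
    crypto.getRandomValues(new Uint8Array(4)),
    (b) => b.toString(16).padStart(2, "0"),
  ).join("");
  const slash = Math.max(dirPath.lastIndexOf("/"), dirPath.lastIndexOf("\\"));
  const parent = dirPath.slice(0, slash + 1);
  const name = dirPath.slice(slash + 1);
  // Hidden sibling: same parent dir, "." prefix, random suffix.
  return `${parent}.${name}_${rand}`;
}

console.log(getAtomicDirPath("/cache/npm/chalk@4.1.2"));
// e.g. /cache/npm/.chalk@4.1.2_3fa1b2c9
```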

#[cfg(test)]
mod test {
use deno_semver::Version;
use test_util::TempDir;
use tempfile::TempDir;

use super::*;

@@ -303,21 +322,21 @@ mod test {

#[test]
fn rename_with_retries_succeeds_exists() {
let temp_dir = TempDir::new();
let temp_dir = TempDir::new().unwrap();
let folder_1 = temp_dir.path().join("folder_1");
let folder_2 = temp_dir.path().join("folder_2");

folder_1.create_dir_all();
folder_1.join("a.txt").write("test");
folder_2.create_dir_all();
std::fs::create_dir_all(&folder_1).unwrap();
std::fs::write(folder_1.join("a.txt"), "test").unwrap();
std::fs::create_dir_all(&folder_2).unwrap();
// this will not end up in the output as rename_with_retries assumes
// the folders ending up at the destination are the same
folder_2.join("b.txt").write("test2");
std::fs::write(folder_2.join("b.txt"), "test2").unwrap();

let dest_folder = temp_dir.path().join("dest_folder");

rename_with_retries(folder_1.as_path(), dest_folder.as_path()).unwrap();
rename_with_retries(folder_2.as_path(), dest_folder.as_path()).unwrap();
rename_with_retries(folder_1.as_path(), &dest_folder).unwrap();
rename_with_retries(folder_2.as_path(), &dest_folder).unwrap();
assert!(dest_folder.join("a.txt").exists());
assert!(!dest_folder.join("b.txt").exists());
}

resolvers/npm_cache/todo.md (new file, 9 lines)

@@ -0,0 +1,9 @@
This crate is a work in progress:

1. Remove `deno_core` dependency.
1. Remove `anyhow` dependency.
1. Add a clippy.toml file that bans accessing the file system directly and
instead does it through a trait.
1. Make this crate work in Wasm.
1. Refactor to store npm packument in a single place:
https://github.com/denoland/deno/issues/27198

@@ -6009,7 +6009,7 @@ fn lsp_code_actions_deno_cache_npm() {
"severity": 1,
"code": "not-installed-npm",
"source": "deno",
"message": "NPM package \"chalk\" is not installed or doesn't exist.",
"message": "npm package \"chalk\" is not installed or doesn't exist.",
"data": { "specifier": "npm:chalk" }
}],
"version": 1

@@ -6036,7 +6036,7 @@ fn lsp_code_actions_deno_cache_npm() {
"severity": 1,
"code": "not-installed-npm",
"source": "deno",
"message": "NPM package \"chalk\" is not installed or doesn't exist.",
"message": "npm package \"chalk\" is not installed or doesn't exist.",
"data": { "specifier": "npm:chalk" }
}],
"only": ["quickfix"]

@@ -6056,7 +6056,7 @@ fn lsp_code_actions_deno_cache_npm() {
"severity": 1,
"code": "not-installed-npm",
"source": "deno",
"message": "NPM package \"chalk\" is not installed or doesn't exist.",
"message": "npm package \"chalk\" is not installed or doesn't exist.",
"data": { "specifier": "npm:chalk" }
}],
"command": {

@@ -6111,7 +6111,7 @@ fn lsp_code_actions_deno_cache_all() {
"severity": 1,
"code": "not-installed-npm",
"source": "deno",
"message": "NPM package \"chalk\" is not installed or doesn't exist.",
"message": "npm package \"chalk\" is not installed or doesn't exist.",
"data": { "specifier": "npm:chalk" },
},
],

@@ -6199,7 +6199,7 @@ fn lsp_code_actions_deno_cache_all() {
"severity": 1,
"code": "not-installed-npm",
"source": "deno",
"message": "NPM package \"chalk\" is not installed or doesn't exist.",
"message": "npm package \"chalk\" is not installed or doesn't exist.",
"data": { "specifier": "npm:chalk" },
},
],

@@ -9860,7 +9860,7 @@ fn lsp_completions_node_builtin() {
"severity": 1,
"code": "not-installed-npm",
"source": "deno",
"message": "NPM package \"@types/node\" is not installed or doesn't exist."
"message": "npm package \"@types/node\" is not installed or doesn't exist."
}
])
);

@@ -102,7 +102,7 @@ fn cached_only_after_first_run() {
let stdout = String::from_utf8_lossy(&output.stdout);
assert_contains!(
stderr,
"An npm specifier not found in cache: \"ansi-styles\", --cached-only is specified."
"npm package not found in cache: \"ansi-styles\", --cached-only is specified."
);
assert!(stdout.is_empty());
assert!(!output.status.success());

@@ -19,7 +19,7 @@ const encoder = new TextEncoder();

const NODE_VERSION = version;

const NODE_IGNORED_TEST_DIRS = [
export const NODE_IGNORED_TEST_DIRS = [
"addons",
"async-hooks",
"cctest",

@@ -40,13 +40,13 @@ const NODE_IGNORED_TEST_DIRS = [
"wpt",
];

const VENDORED_NODE_TEST = new URL("./suite/test/", import.meta.url);
const NODE_COMPAT_TEST_DEST_URL = new URL(
export const VENDORED_NODE_TEST = new URL("./suite/test/", import.meta.url);
export const NODE_COMPAT_TEST_DEST_URL = new URL(
"../test/",
import.meta.url,
);

async function getNodeTests(): Promise<string[]> {
export async function getNodeTests(): Promise<string[]> {
const paths: string[] = [];
const rootPath = VENDORED_NODE_TEST.href.slice(7);
for await (

@@ -61,7 +61,7 @@ async function getNodeTests(): Promise<string[]> {
return paths.sort();
}

function getDenoTests() {
export function getDenoTests() {
return Object.entries(config.tests)
.filter(([testDir]) => !NODE_IGNORED_TEST_DIRS.includes(testDir))
.flatMap(([testDir, tests]) => tests.map((test) => testDir + "/" + test));

File diff suppressed because one or more lines are too long

@@ -15,6 +15,10 @@
{
"args": "run -A main.ts uncaught.ts",
"output": "uncaught.out"
},
{
"args": "run -A main.ts metric.ts",
"output": "metric.out"
}
]
}

@@ -188,5 +188,6 @@
"traceId": "00000000000000000000000000000003",
"spanId": "1000000000000002"
}
]
],
"metrics": []
}

@@ -15,5 +15,6 @@
"traceId": "",
"spanId": ""
}
]
],
"metrics": []
}

@@ -3,6 +3,7 @@
const data = {
spans: [],
logs: [],
metrics: [],
};

const server = Deno.serve(

@@ -45,6 +46,11 @@ const server = Deno.serve(
data.spans.push(...sSpans.spans);
});
});
body.resourceMetrics?.forEach((rMetrics) => {
rMetrics.scopeMetrics.forEach((sMetrics) => {
data.metrics.push(...sMetrics.metrics);
});
});
return Response.json({ partialSuccess: {} }, { status: 200 });
},
},

tests/specs/cli/otel_basic/metric.out (new file, 124 lines)

@@ -0,0 +1,124 @@
{
"spans": [],
"logs": [],
"metrics": [
{
"name": "counter",
"description": "Example of a Counter",
"unit": "",
"metadata": [],
"sum": {
"dataPoints": [
{
"attributes": [
{
"key": "attribute",
"value": {
"doubleValue": 1
}
}
],
"startTimeUnixNano": "[WILDCARD]",
"timeUnixNano": "[WILDCARD]",
"exemplars": [],
"flags": 0,
"asDouble": 1
}
],
"aggregationTemporality": 2,
"isMonotonic": true
}
},
{
"name": "up_down_counter",
"description": "Example of a UpDownCounter",
"unit": "",
"metadata": [],
"sum": {
"dataPoints": [
{
"attributes": [
{
"key": "attribute",
"value": {
"doubleValue": 1
}
}
],
"startTimeUnixNano": "[WILDCARD]",
"timeUnixNano": "[WILDCARD]",
"exemplars": [],
"flags": 0,
"asDouble": -1
}
],
"aggregationTemporality": 2,
"isMonotonic": false
}
},
{
"name": "histogram",
"description": "Example of a Histogram",
"unit": "",
"metadata": [],
"histogram": {
"dataPoints": [
{
"attributes": [
{
"key": "attribute",
"value": {
"doubleValue": 1
}
}
],
"startTimeUnixNano": "[WILDCARD]",
"timeUnixNano": "[WILDCARD]",
"count": 1,
"sum": 1,
"bucketCounts": [
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
],
"explicitBounds": [
0,
5,
10,
25,
50,
75,
100,
250,
500,
750,
1000,
2500,
5000,
7500,
10000
],
"exemplars": [],
"flags": 0,
"min": 1,
"max": 1
}
],
"aggregationTemporality": 2
}
}
]
}

tests/specs/cli/otel_basic/metric.ts (new file, 34 lines)

@@ -0,0 +1,34 @@
import {
MeterProvider,
PeriodicExportingMetricReader,
} from "npm:@opentelemetry/sdk-metrics@1.28.0";

const meterProvider = new MeterProvider();

meterProvider.addMetricReader(
new PeriodicExportingMetricReader({
exporter: new Deno.telemetry.MetricExporter(),
exportIntervalMillis: 100,
}),
);

const meter = meterProvider.getMeter("m");

const counter = meter.createCounter("counter", {
description: "Example of a Counter",
});

const upDownCounter = meter.createUpDownCounter("up_down_counter", {
description: "Example of a UpDownCounter",
});

const histogram = meter.createHistogram("histogram", {
description: "Example of a Histogram",
});

const attributes = { attribute: 1 };
counter.add(1, attributes);
upDownCounter.add(-1, attributes);
histogram.record(1, attributes);

await meterProvider.forceFlush();
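
As registered in the __test__.jsonc hunk above, this spec runs as `deno run -A main.ts metric.ts`: main.ts acts as the local OTLP collector, and metric.out is the expected, wildcard-matched output it produces.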

@@ -15,5 +15,6 @@
"traceId": "",
"spanId": ""
}
]
],
"metrics": []
}

@@ -33,5 +33,6 @@ throw new Error("uncaught");
"traceId": "",
"spanId": ""
}
]
],
"metrics": []
}

@@ -12,6 +12,10 @@
"broken": {
"args": "fmt broken.html",
"output": "broken.out"
},
"with_js": {
"args": "fmt --check with_js.html",
"output": "Checked 1 file\n"
}
}
}

tests/specs/fmt/html/with_js.html (new file, 9 lines)

@@ -0,0 +1,9 @@
<html>
<body>
<script>
/* some multi-line comment
with function below it */
someFunc();
</script>
</body>
</html>

@@ -1,2 +1,3 @@
error: Error getting response at http://localhost:4260/chalk for package "chalk": An npm specifier not found in cache: "chalk", --cached-only is specified.
error: Failed loading http://localhost:4260/chalk for package "chalk"
0: npm package not found in cache: "chalk", --cached-only is specified.
at file:///[WILDCARD]/main.ts:1:19

@@ -1,4 +1,5 @@
{
"tempDir": true,
"tests": {
"cjs_with_deps": {
"args": "run --allow-read --allow-env main.js",

@@ -3,7 +3,7 @@ type: JavaScript
dependencies: 14 unique
size: [WILDCARD]

file:///[WILDCARD]/cjs_with_deps/main.js ([WILDCARD])
file:///[WILDCARD]/main.js ([WILDCARD])
├─┬ npm:/chalk@4.1.2 ([WILDCARD])
│ ├─┬ npm:/ansi-styles@4.3.0 ([WILDCARD])
│ │ └─┬ npm:/color-convert@2.0.1 ([WILDCARD])

@@ -1,3 +1,5 @@
Download http://localhost:4261/@denotest%2fbasic
error: Error getting response at http://localhost:4261/@denotest%2fbasic for package "@denotest/basic": Bad response: 401
[WILDCARD]
error: Failed loading http://localhost:4261/@denotest%2fbasic for package "@denotest/basic"

Caused by:
Bad response: 401

@@ -1,3 +1,5 @@
Download http://localhost:4261/@denotest%2fbasic
error: Error getting response at http://localhost:4261/@denotest%2fbasic for package "@denotest/basic": Bad response: 401
[WILDCARD]
error: Failed loading http://localhost:4261/@denotest%2fbasic for package "@denotest/basic"

Caused by:
Bad response: 401

@@ -1,3 +1,4 @@
[UNORDERED_START]
error: Error getting response at http://localhost:4261/@denotest%2fbasic for package "@denotest/basic": Both the username and password must be provided for basic auth
[UNORDERED_END]
error: Failed loading http://localhost:4261/@denotest%2fbasic for package "@denotest/basic"

Caused by:
Both the username and password must be provided for basic auth

@@ -1,3 +1,4 @@
[UNORDERED_START]
error: Error getting response at http://localhost:4261/@denotest%2fbasic for package "@denotest/basic": Both the username and password must be provided for basic auth
[UNORDERED_END]
error: Failed loading http://localhost:4261/@denotest%2fbasic for package "@denotest/basic"

Caused by:
Both the username and password must be provided for basic auth

@@ -11,5 +11,5 @@ Location {
protocol: "https:",
search: "?baz"
}
NotSupportedError: Cannot set "location".
NotSupportedError: Cannot set "location.hostname".
NotSupportedError: Cannot set "location"
NotSupportedError: Cannot set "location.hostname"

@@ -56,6 +56,11 @@
"args": "task a",
"output": "./cycle_2.out",
"exitCode": 1
},
"arg_task_with_deps": {
"cwd": "arg_task_with_deps",
"args": "task a a",
"output": "./arg_task_with_deps.out"
}
}
}

tests/specs/task/dependencies/arg_task_with_deps.out (new file, 4 lines)

@@ -0,0 +1,4 @@
Task b echo 'b'
b
Task a echo "a"
a

@@ -0,0 +1,9 @@
{
"tasks": {
"a": {
"command": "echo",
"dependencies": ["b"]
},
"b": "echo 'b'"
}
}
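
With this config, `deno task a a` runs the `b` dependency first and then forwards the trailing `a` argument to the `a` task's `echo` command, which is exactly the ordering that arg_task_with_deps.out above records.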

tests/testdata/npm/cached_only/main.out (vendored, 2 lines)

@@ -1,2 +1,2 @@
error: Error getting response at http://localhost:4260/chalk for package "chalk": An npm specifier not found in cache: "chalk", --cached-only is specified.
error: Failed loading http://localhost:4260/chalk for package "chalk": npm package not found in cache: "chalk", --cached-only is specified.
at file:///[WILDCARD]/testdata/npm/cached_only/main.ts:1:19

@@ -53,6 +53,29 @@ Deno.test({
},
});

Deno.test("ASYNC: read dirs recursively", async () => {
const dir = Deno.makeTempDirSync();
Deno.writeTextFileSync(join(dir, "file1.txt"), "hi");
Deno.mkdirSync(join(dir, "sub"));
Deno.writeTextFileSync(join(dir, "sub", "file2.txt"), "hi");

try {
const files = await new Promise<string[]>((resolve, reject) => {
readdir(dir, { recursive: true }, (err, files) => {
if (err) reject(err);
resolve(files.map((f) => f.toString()));
});
});

assertEqualsArrayAnyOrder(
files,
["file1.txt", "sub", join("sub", "file2.txt")],
);
} finally {
Deno.removeSync(dir, { recursive: true });
}
});

Deno.test({
name: "SYNC: reading empty the directory",
fn() {

@@ -75,6 +98,26 @@ Deno.test({
},
});

Deno.test("SYNC: read dirs recursively", () => {
const dir = Deno.makeTempDirSync();
Deno.writeTextFileSync(join(dir, "file1.txt"), "hi");
Deno.mkdirSync(join(dir, "sub"));
Deno.writeTextFileSync(join(dir, "sub", "file2.txt"), "hi");

try {
const files = readdirSync(dir, { recursive: true }).map((f) =>
f.toString()
);

assertEqualsArrayAnyOrder(
files,
["file1.txt", "sub", join("sub", "file2.txt")],
);
} finally {
Deno.removeSync(dir, { recursive: true });
}
});
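
The two new tests exercise Node's recursive readdir under Deno's compat layer. A minimal standalone version of what they check, without the test harness:

```ts
import { mkdirSync, readdirSync, writeFileSync } from "node:fs";
import { join } from "node:path";

const dir = Deno.makeTempDirSync();
writeFileSync(join(dir, "file1.txt"), "hi");
mkdirSync(join(dir, "sub"));
writeFileSync(join(dir, "sub", "file2.txt"), "hi");

// With { recursive: true }, entries from subdirectories are included,
// reported relative to `dir` (e.g. "sub/file2.txt").
console.log(readdirSync(dir, { recursive: true }));

Deno.removeSync(dir, { recursive: true });
```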

Deno.test("[std/node/fs] readdir callback isn't called twice if error is thrown", async () => {
// The correct behaviour is not to catch any errors thrown,
// but that means there'll be an uncaught error and the test will fail.