mirror of https://github.com/denoland/deno.git synced 2025-01-21 13:00:36 -05:00

Merge remote-tracking branch 'upstream/main' into check-workspace-member-compiler-options

This commit is contained in:
Nayeem Rahman 2024-12-13 03:34:42 +00:00
commit 161468a0f5
85 changed files with 2530 additions and 1320 deletions


@@ -35,7 +35,7 @@ jobs:
       - name: Install deno
         uses: denoland/setup-deno@v2
         with:
-          deno-version: v1.x
+          deno-version: v2.x
       - name: Publish
         env:


@@ -42,7 +42,7 @@ const Runners = {
     os: "macos",
     arch: "aarch64",
     runner:
-      `\${{ github.repository == 'denoland/deno' && startsWith(github.ref, 'refs/tags/') && '${selfHostedMacosArmRunner}' || '${macosArmRunner}' }}`,
+      `\${{ github.repository == 'denoland/deno' && github.ref == 'refs/heads/main' && '${selfHostedMacosArmRunner}' || '${macosArmRunner}' }}`,
   },
   windowsX86: {
     os: "windows",
@@ -59,6 +59,15 @@ const Runners = {
 const prCacheKeyPrefix =
   `${cacheVersion}-cargo-target-\${{ matrix.os }}-\${{ matrix.arch }}-\${{ matrix.profile }}-\${{ matrix.job }}-`;
+const prCacheKey = `${prCacheKeyPrefix}\${{ github.sha }}`;
+const prCachePath = [
+  // this must match for save and restore (https://github.com/actions/cache/issues/1444)
+  "./target",
+  "!./target/*/gn_out",
+  "!./target/*/gn_root",
+  "!./target/*/*.zip",
+  "!./target/*/*.tar.gz",
+].join("\n");
 
 // Note that you may need to add more version to the `apt-get remove` line below if you change this
 const llvmVersion = 19;
@@ -196,7 +205,7 @@ const installNodeStep = {
 const installDenoStep = {
   name: "Install Deno",
   uses: "denoland/setup-deno@v2",
-  with: { "deno-version": "v1.x" },
+  with: { "deno-version": "v2.x" },
 };
 
 const authenticateWithGoogleCloud = {
@@ -612,7 +621,7 @@ const ci = {
           `${cacheVersion}-cargo-home-\${{ matrix.os }}-\${{ matrix.arch }}-\${{ hashFiles('Cargo.lock') }}`,
         // We will try to restore from the closest cargo-home we can find
         "restore-keys":
-          `${cacheVersion}-cargo-home-\${{ matrix.os }}-\${{ matrix.arch }}`,
+          `${cacheVersion}-cargo-home-\${{ matrix.os }}-\${{ matrix.arch }}-`,
       },
     },
     {
@@ -622,13 +631,7 @@ const ci = {
         if:
           "github.ref != 'refs/heads/main' && !startsWith(github.ref, 'refs/tags/')",
         with: {
-          path: [
-            "./target",
-            "!./target/*/gn_out",
-            "!./target/*/gn_root",
-            "!./target/*/*.zip",
-            "!./target/*/*.tar.gz",
-          ].join("\n"),
+          path: prCachePath,
           key: "never_saved",
           "restore-keys": prCacheKeyPrefix,
         },
@@ -1080,14 +1083,8 @@ const ci = {
         if:
           "(matrix.job == 'test' || matrix.job == 'lint') && github.ref == 'refs/heads/main'",
         with: {
-          path: [
-            "./target",
-            "!./target/*/gn_out",
-            "!./target/*/*.zip",
-            "!./target/*/*.sha256sum",
-            "!./target/*/*.tar.gz",
-          ].join("\n"),
-          key: prCacheKeyPrefix + "${{ github.sha }}",
+          path: prCachePath,
+          key: prCacheKey,
         },
       },
     ]),


@@ -68,12 +68,12 @@ jobs:
             skip: '${{ !contains(github.event.pull_request.labels.*.name, ''ci-full'') && (github.event_name == ''pull_request'') }}'
           - os: macos
             arch: aarch64
-            runner: '${{ github.repository == ''denoland/deno'' && startsWith(github.ref, ''refs/tags/'') && ''self-hosted'' || ''macos-14'' }}'
+            runner: '${{ github.repository == ''denoland/deno'' && github.ref == ''refs/heads/main'' && ''self-hosted'' || ''macos-14'' }}'
             job: test
             profile: debug
           - os: macos
             arch: aarch64
-            runner: '${{ (!contains(github.event.pull_request.labels.*.name, ''ci-full'') && (github.event_name == ''pull_request'')) && ''ubuntu-24.04'' || github.repository == ''denoland/deno'' && startsWith(github.ref, ''refs/tags/'') && ''self-hosted'' || ''macos-14'' }}'
+            runner: '${{ (!contains(github.event.pull_request.labels.*.name, ''ci-full'') && (github.event_name == ''pull_request'')) && ''ubuntu-24.04'' || github.repository == ''denoland/deno'' && github.ref == ''refs/heads/main'' && ''self-hosted'' || ''macos-14'' }}'
             job: test
             profile: release
             skip: '${{ !contains(github.event.pull_request.labels.*.name, ''ci-full'') && (github.event_name == ''pull_request'') }}'
@@ -180,7 +180,7 @@ jobs:
         name: Install Deno
         uses: denoland/setup-deno@v2
         with:
-          deno-version: v1.x
+          deno-version: v2.x
       - name: Install Python
         uses: actions/setup-python@v5
         with:
@@ -362,7 +362,7 @@ jobs:
             ~/.cargo/registry/index
             ~/.cargo/registry/cache
           key: '30-cargo-home-${{ matrix.os }}-${{ matrix.arch }}-${{ hashFiles(''Cargo.lock'') }}'
-          restore-keys: '30-cargo-home-${{ matrix.os }}-${{ matrix.arch }}'
+          restore-keys: '30-cargo-home-${{ matrix.os }}-${{ matrix.arch }}-'
         if: '!(matrix.skip)'
       - name: Restore cache build output (PR)
         uses: actions/cache/restore@v4
@@ -682,8 +682,8 @@ jobs:
           path: |-
             ./target
             !./target/*/gn_out
+            !./target/*/gn_root
             !./target/*/*.zip
-            !./target/*/*.sha256sum
             !./target/*/*.tar.gz
           key: '30-cargo-target-${{ matrix.os }}-${{ matrix.arch }}-${{ matrix.profile }}-${{ matrix.job }}-${{ github.sha }}'
   publish-canary:
publish-canary: publish-canary:

.github/workflows/npm_publish.yml (new file, 45 lines)

@@ -0,0 +1,45 @@
name: npm_publish

on:
  workflow_dispatch:
    inputs:
      version:
        description: 'Version'
        type: string
  release:
    types: [published]

permissions:
  id-token: write

jobs:
  build:
    name: npm publish
    runs-on: ubuntu-latest
    timeout-minutes: 30
    steps:
      - name: Configure git
        run: |
          git config --global core.symlinks true
          git config --global fetch.parallel 32
      - name: Clone repository
        uses: actions/checkout@v4
        with:
          submodules: recursive
      - name: Install Deno
        uses: denoland/setup-deno@v2
        with:
          deno-version: v2.x
      - name: Install Node
        uses: actions/setup-node@v4
        with:
          node-version: '22.x'
          registry-url: 'https://registry.npmjs.org'
      - name: Publish
        env:
          NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
        run: ./tools/release/npm/build.ts ${{ github.event.inputs.version }} --publish


@@ -42,7 +42,7 @@ jobs:
       - name: Install deno
        uses: denoland/setup-deno@v2
        with:
-          deno-version: v1.x
+          deno-version: v2.x
      - name: Install rust-codesign
        run: |-


@@ -36,7 +36,7 @@ jobs:
      - name: Install deno
        uses: denoland/setup-deno@v2
        with:
-          deno-version: v1.x
+          deno-version: v2.x
      - name: Create Gist URL
        env:


@@ -41,7 +41,7 @@ jobs:
      - name: Install deno
        uses: denoland/setup-deno@v2
        with:
-          deno-version: v1.x
+          deno-version: v2.x
      - name: Run version bump
        run: |


@@ -37,6 +37,7 @@ use deno_path_util::url_to_file_path;
 use deno_runtime::deno_permissions::PermissionsOptions;
 use deno_runtime::deno_permissions::SysDescriptor;
 use deno_telemetry::OtelConfig;
+use deno_telemetry::OtelConsoleConfig;
 use log::debug;
 use log::Level;
 use serde::Deserialize;
@@ -986,21 +987,41 @@ impl Flags {
     args
   }
 
-  pub fn otel_config(&self) -> Option<OtelConfig> {
-    if self
+  pub fn otel_config(&self) -> OtelConfig {
+    let has_unstable_flag = self
       .unstable_config
       .features
-      .contains(&String::from("otel"))
-    {
-      Some(OtelConfig {
-        runtime_name: Cow::Borrowed("deno"),
-        runtime_version: Cow::Borrowed(crate::version::DENO_VERSION_INFO.deno),
-        deterministic: std::env::var("DENO_UNSTABLE_OTEL_DETERMINISTIC")
-          .is_ok(),
-        ..Default::default()
-      })
-    } else {
-      None
+      .contains(&String::from("otel"));
+
+    let otel_var = |name| match std::env::var(name) {
+      Ok(s) if s.to_lowercase() == "true" => Some(true),
+      Ok(s) if s.to_lowercase() == "false" => Some(false),
+      _ => None,
+    };
+
+    let disabled =
+      !has_unstable_flag || otel_var("OTEL_SDK_DISABLED").unwrap_or(false);
+    let default = !disabled && otel_var("OTEL_DENO").unwrap_or(false);
+
+    OtelConfig {
+      tracing_enabled: !disabled
+        && otel_var("OTEL_DENO_TRACING").unwrap_or(default),
+      console: match std::env::var("OTEL_DENO_CONSOLE").as_deref() {
+        Ok(_) if disabled => OtelConsoleConfig::Ignore,
+        Ok("ignore") => OtelConsoleConfig::Ignore,
+        Ok("capture") => OtelConsoleConfig::Capture,
+        Ok("replace") => OtelConsoleConfig::Replace,
+        _ => {
+          if default {
+            OtelConsoleConfig::Capture
+          } else {
+            OtelConsoleConfig::Ignore
+          }
+        }
+      },
+      deterministic: std::env::var("DENO_UNSTABLE_OTEL_DETERMINISTIC")
+        .as_deref()
+        == Ok("1"),
     }
   }
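For reference, a minimal standalone sketch of the env-var precedence the reworked `otel_config()` encodes: the unstable `otel` feature gates everything, `OTEL_SDK_DISABLED=true` wins next, `OTEL_DENO=true` sets the defaults, and `OTEL_DENO_TRACING` / `OTEL_DENO_CONSOLE` override per facility. The `Console` enum and `resolve()` helper below are illustrative stand-ins, not the `deno_telemetry` API.

```rust
use std::collections::HashMap;

// Stand-in for deno_telemetry::OtelConsoleConfig (illustrative only).
#[derive(Debug, PartialEq)]
enum Console {
    Ignore,
    Capture,
    Replace,
}

// Mirrors the `otel_var` closure: only "true"/"false" (any case) count.
fn env_bool(vars: &HashMap<String, String>, name: &str) -> Option<bool> {
    match vars.get(name).map(|s| s.to_lowercase()) {
        Some(s) if s == "true" => Some(true),
        Some(s) if s == "false" => Some(false),
        _ => None,
    }
}

fn resolve(has_unstable_flag: bool, vars: &HashMap<String, String>) -> (bool, Console) {
    // The unstable flag gates everything; OTEL_SDK_DISABLED=true also disables.
    let disabled = !has_unstable_flag || env_bool(vars, "OTEL_SDK_DISABLED").unwrap_or(false);
    // OTEL_DENO=true enables tracing and console capture by default.
    let default = !disabled && env_bool(vars, "OTEL_DENO").unwrap_or(false);
    let tracing = !disabled && env_bool(vars, "OTEL_DENO_TRACING").unwrap_or(default);
    let console = match vars.get("OTEL_DENO_CONSOLE").map(String::as_str) {
        _ if disabled => Console::Ignore,
        Some("ignore") => Console::Ignore,
        Some("capture") => Console::Capture,
        Some("replace") => Console::Replace,
        _ if default => Console::Capture,
        _ => Console::Ignore,
    };
    (tracing, console)
}

fn main() {
    let mut vars = HashMap::new();
    vars.insert("OTEL_DENO".to_string(), "true".to_string());
    // With the unstable flag and OTEL_DENO=true: tracing on, console captured.
    assert_eq!(resolve(true, &vars), (true, Console::Capture));
    // Without the unstable flag, everything stays off regardless of env vars.
    assert_eq!(resolve(false, &vars), (false, Console::Ignore));
    println!("precedence checks passed");
}
```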


@@ -31,6 +31,7 @@ use deno_npm_cache::NpmCacheSetting;
 use deno_path_util::normalize_path;
 use deno_semver::npm::NpmPackageReqReference;
 use deno_telemetry::OtelConfig;
+use deno_telemetry::OtelRuntimeConfig;
 use import_map::resolve_import_map_value_from_specifier;
 
 pub use deno_config::deno_json::BenchConfig;
@@ -1162,7 +1163,7 @@ impl CliOptions {
     }
   }
 
-  pub fn otel_config(&self) -> Option<OtelConfig> {
+  pub fn otel_config(&self) -> OtelConfig {
     self.flags.otel_config()
   }
 
@@ -2089,6 +2090,13 @@ pub enum NpmCachingStrategy {
   Manual,
 }
 
+pub(crate) fn otel_runtime_config() -> OtelRuntimeConfig {
+  OtelRuntimeConfig {
+    runtime_name: Cow::Borrowed("deno"),
+    runtime_version: Cow::Borrowed(crate::version::DENO_VERSION_INFO.deno),
+  }
+}
+
 #[cfg(test)]
 mod test {
   use pretty_assertions::assert_eq;


@@ -437,20 +437,18 @@ fn resolve_flags_and_init(
       if err.kind() == clap::error::ErrorKind::DisplayVersion =>
     {
       // Ignore results to avoid BrokenPipe errors.
-      util::logger::init(None);
+      util::logger::init(None, None);
       let _ = err.print();
       deno_runtime::exit(0);
     }
     Err(err) => {
-      util::logger::init(None);
+      util::logger::init(None, None);
       exit_for_error(AnyError::from(err))
     }
   };
 
-  if let Some(otel_config) = flags.otel_config() {
-    deno_telemetry::init(otel_config)?;
-  }
-  util::logger::init(flags.log_level);
+  deno_telemetry::init(crate::args::otel_runtime_config())?;
+  util::logger::init(flags.log_level, Some(flags.otel_config()));
 
   // TODO(bartlomieju): remove in Deno v2.5 and hard error then.
   if flags.unstable_config.legacy_flag_enabled {


@@ -87,17 +87,18 @@ fn main() {
   let future = async move {
     match standalone {
       Ok(Some(data)) => {
-        if let Some(otel_config) = data.metadata.otel_config.clone() {
-          deno_telemetry::init(otel_config)?;
-        }
-        util::logger::init(data.metadata.log_level);
+        deno_telemetry::init(crate::args::otel_runtime_config())?;
+        util::logger::init(
+          data.metadata.log_level,
+          Some(data.metadata.otel_config.clone()),
+        );
         load_env_vars(&data.metadata.env_vars_from_env_file);
         let exit_code = standalone::run(data).await?;
         deno_runtime::exit(exit_code);
       }
       Ok(None) => Ok(()),
       Err(err) => {
-        util::logger::init(None);
+        util::logger::init(None, None);
         Err(err)
       }
     }


@@ -44,6 +44,9 @@ use deno_npm::resolution::SerializedNpmResolutionSnapshotPackage;
 use deno_npm::resolution::ValidSerializedNpmResolutionSnapshot;
 use deno_npm::NpmPackageId;
 use deno_npm::NpmSystemInfo;
+use deno_path_util::url_from_directory_path;
+use deno_path_util::url_from_file_path;
+use deno_path_util::url_to_file_path;
 use deno_runtime::deno_fs;
 use deno_runtime::deno_fs::FileSystem;
 use deno_runtime::deno_fs::RealFs;
@@ -76,6 +79,7 @@ use crate::resolver::CjsTracker;
 use crate::shared::ReleaseChannel;
 use crate::standalone::virtual_fs::VfsEntry;
 use crate::util::archive;
+use crate::util::fs::canonicalize_path;
 use crate::util::fs::canonicalize_path_maybe_not_exists;
 use crate::util::progress_bar::ProgressBar;
 use crate::util::progress_bar::ProgressBarStyle;
@@ -88,31 +92,28 @@ use super::serialization::DeserializedDataSection;
 use super::serialization::RemoteModulesStore;
 use super::serialization::RemoteModulesStoreBuilder;
 use super::virtual_fs::output_vfs;
+use super::virtual_fs::BuiltVfs;
 use super::virtual_fs::FileBackedVfs;
 use super::virtual_fs::VfsBuilder;
 use super::virtual_fs::VfsFileSubDataKind;
 use super::virtual_fs::VfsRoot;
 use super::virtual_fs::VirtualDirectory;
+use super::virtual_fs::WindowsSystemRootablePath;
+
+pub static DENO_COMPILE_GLOBAL_NODE_MODULES_DIR_NAME: &str =
+  ".deno_compile_node_modules";
 
 /// A URL that can be designated as the base for relative URLs.
 ///
 /// After creation, this URL may be used to get the key for a
 /// module in the binary.
 #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
-pub struct StandaloneRelativeFileBaseUrl<'a>(&'a Url);
-
-impl<'a> From<&'a Url> for StandaloneRelativeFileBaseUrl<'a> {
-  fn from(url: &'a Url) -> Self {
-    Self(url)
-  }
+pub enum StandaloneRelativeFileBaseUrl<'a> {
+  WindowsSystemRoot,
+  Path(&'a Url),
 }
 
 impl<'a> StandaloneRelativeFileBaseUrl<'a> {
-  pub fn new(url: &'a Url) -> Self {
-    debug_assert_eq!(url.scheme(), "file");
-    Self(url)
-  }
-
   /// Gets the module map key of the provided specifier.
   ///
   /// * Descendant file specifiers will be made relative to the base.
@@ -122,22 +123,29 @@ impl<'a> StandaloneRelativeFileBaseUrl<'a> {
     if target.scheme() != "file" {
       return Cow::Borrowed(target.as_str());
     }
+    let base = match self {
+      Self::Path(base) => base,
+      Self::WindowsSystemRoot => return Cow::Borrowed(target.path()),
+    };
 
-    match self.0.make_relative(target) {
+    match base.make_relative(target) {
       Some(relative) => {
-        if relative.starts_with("../") {
-          Cow::Borrowed(target.as_str())
-        } else {
-          Cow::Owned(relative)
-        }
+        // This is not a great scenario to have because it means that the
+        // specifier is outside the vfs and could cause the binary to act
+        // strangely. If you encounter this, the fix is to add more paths
+        // to the vfs builder by calling `add_possible_min_root_dir`.
+        debug_assert!(
+          !relative.starts_with("../"),
+          "{} -> {} ({})",
+          base.as_str(),
+          target.as_str(),
+          relative,
+        );
+        Cow::Owned(relative)
       }
       None => Cow::Borrowed(target.as_str()),
     }
   }
-
-  pub fn inner(&self) -> &Url {
-    self.0
-  }
 }
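The `specifier_key` logic above relies on `Url::make_relative` from the `url` crate. A small self-contained sketch of that relativization is below; it mirrors the old fall-back-to-absolute behavior (the new code instead `debug_assert!`s, because the VFS root is widened up front via `add_possible_min_root_dir`). The `module_key` helper is purely illustrative.

```rust
use url::Url; // assumes the `url` crate is available

fn module_key(base: &Url, target: &Url) -> String {
    // Non-file specifiers keep their absolute form.
    if target.scheme() != "file" {
        return target.as_str().to_string();
    }
    match base.make_relative(target) {
        // e.g. file:///app/ + file:///app/src/mod.ts -> "src/mod.ts"
        Some(rel) if !rel.starts_with("../") => rel,
        // outside the base (or not relativizable): keep the absolute URL
        _ => target.as_str().to_string(),
    }
}

fn main() {
    let base = Url::parse("file:///app/").unwrap();
    let inside = Url::parse("file:///app/src/mod.ts").unwrap();
    let outside = Url::parse("file:///etc/hosts").unwrap();
    assert_eq!(module_key(&base, &inside), "src/mod.ts");
    assert_eq!(module_key(&base, &outside), "file:///etc/hosts");
    println!("ok");
}
```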
#[derive(Deserialize, Serialize)] #[derive(Deserialize, Serialize)]
@ -192,7 +200,7 @@ pub struct Metadata {
pub entrypoint_key: String, pub entrypoint_key: String,
pub node_modules: Option<NodeModules>, pub node_modules: Option<NodeModules>,
pub unstable_config: UnstableConfig, pub unstable_config: UnstableConfig,
pub otel_config: Option<OtelConfig>, // None means disabled. pub otel_config: OtelConfig,
} }
fn write_binary_bytes( fn write_binary_bytes(
@@ -201,7 +209,7 @@ fn write_binary_bytes(
   metadata: &Metadata,
   npm_snapshot: Option<SerializedNpmResolutionSnapshot>,
   remote_modules: &RemoteModulesStoreBuilder,
-  vfs: VfsBuilder,
+  vfs: &BuiltVfs,
   compile_flags: &CompileFlags,
 ) -> Result<(), AnyError> {
   let data_section_bytes =
@@ -372,7 +380,6 @@ pub struct WriteBinOptions<'a> {
   pub writer: File,
   pub display_output_filename: &'a str,
   pub graph: &'a ModuleGraph,
-  pub root_dir_url: StandaloneRelativeFileBaseUrl<'a>,
   pub entrypoint: &'a ModuleSpecifier,
   pub include_files: &'a [ModuleSpecifier],
   pub compile_flags: &'a CompileFlags,
@@ -556,7 +563,6 @@ impl<'a> DenoCompileBinaryWriter<'a> {
       writer,
       display_output_filename,
       graph,
-      root_dir_url,
       entrypoint,
       include_files,
       compile_flags,
@ -568,74 +574,28 @@ impl<'a> DenoCompileBinaryWriter<'a> {
Some(CaData::Bytes(bytes)) => Some(bytes.clone()), Some(CaData::Bytes(bytes)) => Some(bytes.clone()),
None => None, None => None,
}; };
let root_path = root_dir_url.inner().to_file_path().unwrap(); let mut vfs = VfsBuilder::new();
let (maybe_npm_vfs, node_modules, npm_snapshot) = let npm_snapshot = match self.npm_resolver.as_inner() {
match self.npm_resolver.as_inner() { InnerCliNpmResolverRef::Managed(managed) => {
InnerCliNpmResolverRef::Managed(managed) => { let snapshot =
let snapshot = managed.serialized_valid_snapshot_for_system(&self.npm_system_info);
managed.serialized_valid_snapshot_for_system(&self.npm_system_info); if !snapshot.as_serialized().packages.is_empty() {
if !snapshot.as_serialized().packages.is_empty() { self.fill_npm_vfs(&mut vfs).context("Building npm vfs.")?;
let npm_vfs_builder = self Some(snapshot)
.build_npm_vfs(&root_path) } else {
.context("Building npm vfs.")?; None
(
Some(npm_vfs_builder),
Some(NodeModules::Managed {
node_modules_dir: self
.npm_resolver
.root_node_modules_path()
.map(|path| {
root_dir_url
.specifier_key(
&ModuleSpecifier::from_directory_path(path).unwrap(),
)
.into_owned()
}),
}),
Some(snapshot),
)
} else {
(None, None, None)
}
} }
InnerCliNpmResolverRef::Byonm(resolver) => { }
let npm_vfs_builder = self.build_npm_vfs(&root_path)?; InnerCliNpmResolverRef::Byonm(_) => {
( self.fill_npm_vfs(&mut vfs)?;
Some(npm_vfs_builder), None
Some(NodeModules::Byonm { }
root_node_modules_dir: resolver.root_node_modules_path().map(
|node_modules_dir| {
root_dir_url
.specifier_key(
&ModuleSpecifier::from_directory_path(node_modules_dir)
.unwrap(),
)
.into_owned()
},
),
}),
None,
)
}
};
let mut vfs = if let Some(npm_vfs) = maybe_npm_vfs {
npm_vfs
} else {
VfsBuilder::new(root_path.clone())?
}; };
for include_file in include_files { for include_file in include_files {
let path = deno_path_util::url_to_file_path(include_file)?; let path = deno_path_util::url_to_file_path(include_file)?;
if path.is_dir() { vfs
// TODO(#26941): we should analyze if any of these are .add_file_at_path(&path)
// modules in order to include their dependencies .with_context(|| format!("Including {}", path.display()))?;
vfs
.add_dir_recursive(&path)
.with_context(|| format!("Including {}", path.display()))?;
} else {
vfs
.add_file_at_path(&path)
.with_context(|| format!("Including {}", path.display()))?;
}
} }
let mut remote_modules_store = RemoteModulesStoreBuilder::default(); let mut remote_modules_store = RemoteModulesStoreBuilder::default();
let mut code_cache_key_hasher = if self.cli_options.code_cache_enabled() { let mut code_cache_key_hasher = if self.cli_options.code_cache_enabled() {
@ -707,6 +667,62 @@ impl<'a> DenoCompileBinaryWriter<'a> {
} }
remote_modules_store.add_redirects(&graph.redirects); remote_modules_store.add_redirects(&graph.redirects);
if let Some(import_map) = self.workspace_resolver.maybe_import_map() {
if let Ok(file_path) = url_to_file_path(import_map.base_url()) {
if let Some(import_map_parent_dir) = file_path.parent() {
// tell the vfs about the import map's parent directory in case it
// falls outside what the root of where the VFS will be based
vfs.add_possible_min_root_dir(import_map_parent_dir);
}
}
}
if let Some(node_modules_dir) = self.npm_resolver.root_node_modules_path() {
// ensure the vfs doesn't go below the node_modules directory's parent
if let Some(parent) = node_modules_dir.parent() {
vfs.add_possible_min_root_dir(parent);
}
}
let vfs = self.build_vfs_consolidating_global_npm_cache(vfs);
let root_dir_url = match &vfs.root_path {
WindowsSystemRootablePath::Path(dir) => {
Some(url_from_directory_path(dir)?)
}
WindowsSystemRootablePath::WindowSystemRoot => None,
};
let root_dir_url = match &root_dir_url {
Some(url) => StandaloneRelativeFileBaseUrl::Path(url),
None => StandaloneRelativeFileBaseUrl::WindowsSystemRoot,
};
let node_modules = match self.npm_resolver.as_inner() {
InnerCliNpmResolverRef::Managed(_) => {
npm_snapshot.as_ref().map(|_| NodeModules::Managed {
node_modules_dir: self.npm_resolver.root_node_modules_path().map(
|path| {
root_dir_url
.specifier_key(
&ModuleSpecifier::from_directory_path(path).unwrap(),
)
.into_owned()
},
),
})
}
InnerCliNpmResolverRef::Byonm(resolver) => Some(NodeModules::Byonm {
root_node_modules_dir: resolver.root_node_modules_path().map(
|node_modules_dir| {
root_dir_url
.specifier_key(
&ModuleSpecifier::from_directory_path(node_modules_dir)
.unwrap(),
)
.into_owned()
},
),
}),
};
let env_vars_from_env_file = match self.cli_options.env_file_name() { let env_vars_from_env_file = match self.cli_options.env_file_name() {
Some(env_filenames) => { Some(env_filenames) => {
let mut aggregated_env_vars = IndexMap::new(); let mut aggregated_env_vars = IndexMap::new();
@ -721,6 +737,8 @@ impl<'a> DenoCompileBinaryWriter<'a> {
None => Default::default(), None => Default::default(),
}; };
output_vfs(&vfs, display_output_filename);
let metadata = Metadata { let metadata = Metadata {
argv: compile_flags.args.clone(), argv: compile_flags.args.clone(),
seed: self.cli_options.seed(), seed: self.cli_options.seed(),
@ -785,21 +803,19 @@ impl<'a> DenoCompileBinaryWriter<'a> {
otel_config: self.cli_options.otel_config(), otel_config: self.cli_options.otel_config(),
}; };
output_vfs(&vfs, display_output_filename);
write_binary_bytes( write_binary_bytes(
writer, writer,
original_bin, original_bin,
&metadata, &metadata,
npm_snapshot.map(|s| s.into_serialized()), npm_snapshot.map(|s| s.into_serialized()),
&remote_modules_store, &remote_modules_store,
vfs, &vfs,
compile_flags, compile_flags,
) )
.context("Writing binary bytes") .context("Writing binary bytes")
} }
fn build_npm_vfs(&self, root_path: &Path) -> Result<VfsBuilder, AnyError> { fn fill_npm_vfs(&self, builder: &mut VfsBuilder) -> Result<(), AnyError> {
fn maybe_warn_different_system(system_info: &NpmSystemInfo) { fn maybe_warn_different_system(system_info: &NpmSystemInfo) {
if system_info != &NpmSystemInfo::default() { if system_info != &NpmSystemInfo::default() {
log::warn!("{} The node_modules directory may be incompatible with the target system.", crate::colors::yellow("Warning")); log::warn!("{} The node_modules directory may be incompatible with the target system.", crate::colors::yellow("Warning"));
@ -810,15 +826,10 @@ impl<'a> DenoCompileBinaryWriter<'a> {
InnerCliNpmResolverRef::Managed(npm_resolver) => { InnerCliNpmResolverRef::Managed(npm_resolver) => {
if let Some(node_modules_path) = npm_resolver.root_node_modules_path() { if let Some(node_modules_path) = npm_resolver.root_node_modules_path() {
maybe_warn_different_system(&self.npm_system_info); maybe_warn_different_system(&self.npm_system_info);
let mut builder = VfsBuilder::new(root_path.to_path_buf())?;
builder.add_dir_recursive(node_modules_path)?; builder.add_dir_recursive(node_modules_path)?;
Ok(builder) Ok(())
} else { } else {
// DO NOT include the user's registry url as it may contain credentials, // we'll flatten to remove any custom registries later
// but also don't make this dependent on the registry url
let global_cache_root_path = npm_resolver.global_cache_root_path();
let mut builder =
VfsBuilder::new(global_cache_root_path.to_path_buf())?;
let mut packages = let mut packages =
npm_resolver.all_system_packages(&self.npm_system_info); npm_resolver.all_system_packages(&self.npm_system_info);
packages.sort_by(|a, b| a.id.cmp(&b.id)); // determinism packages.sort_by(|a, b| a.id.cmp(&b.id)); // determinism
@ -827,55 +838,11 @@ impl<'a> DenoCompileBinaryWriter<'a> {
npm_resolver.resolve_pkg_folder_from_pkg_id(&package.id)?; npm_resolver.resolve_pkg_folder_from_pkg_id(&package.id)?;
builder.add_dir_recursive(&folder)?; builder.add_dir_recursive(&folder)?;
} }
Ok(())
// Flatten all the registries folders into a single ".deno_compile_node_modules/localhost" folder
// that will be used by denort when loading the npm cache. This avoids us exposing
// the user's private registry information and means we don't have to bother
// serializing all the different registry config into the binary.
builder.with_root_dir(|root_dir| {
root_dir.name = ".deno_compile_node_modules".to_string();
let mut new_entries = Vec::with_capacity(root_dir.entries.len());
let mut localhost_entries = IndexMap::new();
for entry in std::mem::take(&mut root_dir.entries) {
match entry {
VfsEntry::Dir(dir) => {
for entry in dir.entries {
log::debug!(
"Flattening {} into node_modules",
entry.name()
);
if let Some(existing) =
localhost_entries.insert(entry.name().to_string(), entry)
{
panic!(
"Unhandled scenario where a duplicate entry was found: {:?}",
existing
);
}
}
}
VfsEntry::File(_) | VfsEntry::Symlink(_) => {
new_entries.push(entry);
}
}
}
new_entries.push(VfsEntry::Dir(VirtualDirectory {
name: "localhost".to_string(),
entries: localhost_entries.into_iter().map(|(_, v)| v).collect(),
}));
// needs to be sorted by name
new_entries.sort_by(|a, b| a.name().cmp(b.name()));
root_dir.entries = new_entries;
});
builder.set_new_root_path(root_path.to_path_buf())?;
Ok(builder)
} }
} }
InnerCliNpmResolverRef::Byonm(_) => { InnerCliNpmResolverRef::Byonm(_) => {
maybe_warn_different_system(&self.npm_system_info); maybe_warn_different_system(&self.npm_system_info);
let mut builder = VfsBuilder::new(root_path.to_path_buf())?;
for pkg_json in self.cli_options.workspace().package_jsons() { for pkg_json in self.cli_options.workspace().package_jsons() {
builder.add_file_at_path(&pkg_json.path)?; builder.add_file_at_path(&pkg_json.path)?;
} }
@ -908,10 +875,102 @@ impl<'a> DenoCompileBinaryWriter<'a> {
} }
} }
} }
Ok(builder) Ok(())
} }
} }
} }
fn build_vfs_consolidating_global_npm_cache(
&self,
mut vfs: VfsBuilder,
) -> BuiltVfs {
match self.npm_resolver.as_inner() {
InnerCliNpmResolverRef::Managed(npm_resolver) => {
if npm_resolver.root_node_modules_path().is_some() {
return vfs.build();
}
let global_cache_root_path = npm_resolver.global_cache_root_path();
// Flatten all the registries folders into a single ".deno_compile_node_modules/localhost" folder
// that will be used by denort when loading the npm cache. This avoids us exposing
// the user's private registry information and means we don't have to bother
// serializing all the different registry config into the binary.
let Some(root_dir) = vfs.get_dir_mut(global_cache_root_path) else {
return vfs.build();
};
root_dir.name = DENO_COMPILE_GLOBAL_NODE_MODULES_DIR_NAME.to_string();
let mut new_entries = Vec::with_capacity(root_dir.entries.len());
let mut localhost_entries = IndexMap::new();
for entry in std::mem::take(&mut root_dir.entries) {
match entry {
VfsEntry::Dir(dir) => {
for entry in dir.entries {
log::debug!("Flattening {} into node_modules", entry.name());
if let Some(existing) =
localhost_entries.insert(entry.name().to_string(), entry)
{
panic!(
"Unhandled scenario where a duplicate entry was found: {:?}",
existing
);
}
}
}
VfsEntry::File(_) | VfsEntry::Symlink(_) => {
new_entries.push(entry);
}
}
}
new_entries.push(VfsEntry::Dir(VirtualDirectory {
name: "localhost".to_string(),
entries: localhost_entries.into_iter().map(|(_, v)| v).collect(),
}));
// needs to be sorted by name
new_entries.sort_by(|a, b| a.name().cmp(b.name()));
root_dir.entries = new_entries;
// it's better to not expose the user's cache directory, so take it out
// of there
let parent = global_cache_root_path.parent().unwrap();
let parent_dir = vfs.get_dir_mut(parent).unwrap();
let index = parent_dir
.entries
.iter()
.position(|entry| {
entry.name() == DENO_COMPILE_GLOBAL_NODE_MODULES_DIR_NAME
})
.unwrap();
let npm_global_cache_dir_entry = parent_dir.entries.remove(index);
// go up from the ancestors removing empty directories...
// this is not as optimized as it could be
let mut last_name =
Cow::Borrowed(DENO_COMPILE_GLOBAL_NODE_MODULES_DIR_NAME);
for ancestor in parent.ancestors() {
let dir = vfs.get_dir_mut(ancestor).unwrap();
if let Some(index) = dir
.entries
.iter()
.position(|entry| entry.name() == last_name)
{
dir.entries.remove(index);
}
last_name = Cow::Owned(dir.name.clone());
if !dir.entries.is_empty() {
break;
}
}
// now build the vfs and add the global cache dir entry there
let mut built_vfs = vfs.build();
built_vfs.root.insert_entry(npm_global_cache_dir_entry);
built_vfs
}
InnerCliNpmResolverRef::Byonm(_) => vfs.build(),
}
}
} }
fn get_denort_path(deno_exe: PathBuf) -> Option<OsString> { fn get_denort_path(deno_exe: PathBuf) -> Option<OsString> {


@@ -23,6 +23,7 @@ use deno_semver::package::PackageReq;
 use crate::standalone::virtual_fs::VirtualDirectory;
 
 use super::binary::Metadata;
+use super::virtual_fs::BuiltVfs;
 use super::virtual_fs::VfsBuilder;
 
 const MAGIC_BYTES: &[u8; 8] = b"d3n0l4nd";
@@ -39,7 +40,7 @@ pub fn serialize_binary_data_section(
   metadata: &Metadata,
   npm_snapshot: Option<SerializedNpmResolutionSnapshot>,
   remote_modules: &RemoteModulesStoreBuilder,
-  vfs: VfsBuilder,
+  vfs: &BuiltVfs,
 ) -> Result<Vec<u8>, AnyError> {
   fn write_bytes_with_len(bytes: &mut Vec<u8>, data: &[u8]) {
     bytes.extend_from_slice(&(data.len() as u64).to_le_bytes());
@@ -73,12 +74,11 @@ pub fn serialize_binary_data_section(
   }
   // 4. VFS
   {
-    let (vfs, vfs_files) = vfs.into_dir_and_files();
-    let vfs = serde_json::to_string(&vfs)?;
-    write_bytes_with_len(&mut bytes, vfs.as_bytes());
-    let vfs_bytes_len = vfs_files.iter().map(|f| f.len() as u64).sum::<u64>();
+    let serialized_vfs = serde_json::to_string(&vfs.root)?;
+    write_bytes_with_len(&mut bytes, serialized_vfs.as_bytes());
+    let vfs_bytes_len = vfs.files.iter().map(|f| f.len() as u64).sum::<u64>();
     bytes.extend_from_slice(&vfs_bytes_len.to_le_bytes());
-    for file in &vfs_files {
+    for file in &vfs.files {
       bytes.extend_from_slice(file);
     }
   }
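The `write_bytes_with_len` helper in this hunk frames each data section as a little-endian `u64` byte length followed by the raw bytes, so a reader can slice or skip sections without parsing them. A hedged, self-contained sketch of that framing follows; `read_bytes_with_len` is an illustrative counterpart, not part of the deno codebase.

```rust
// Write: length prefix (u64, little-endian) then the payload.
fn write_bytes_with_len(bytes: &mut Vec<u8>, data: &[u8]) {
    bytes.extend_from_slice(&(data.len() as u64).to_le_bytes());
    bytes.extend_from_slice(data);
}

// Read: returns (section, rest); panics on malformed input for brevity.
fn read_bytes_with_len(input: &[u8]) -> (&[u8], &[u8]) {
    let (len_bytes, rest) = input.split_at(8);
    let len = u64::from_le_bytes(len_bytes.try_into().unwrap()) as usize;
    rest.split_at(len)
}

fn main() {
    let mut buf = Vec::new();
    write_bytes_with_len(&mut buf, br#"{"name":"/"}"#); // e.g. a serialized VFS tree
    write_bytes_with_len(&mut buf, b"file contents");   // followed by another section
    let (vfs_json, rest) = read_bytes_with_len(&buf);
    let (file_data, _) = read_bytes_with_len(rest);
    assert_eq!(vfs_json, br#"{"name":"/"}"#);
    assert_eq!(file_data, b"file contents");
    println!("round trip ok");
}
```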


@ -15,17 +15,21 @@ use std::rc::Rc;
use std::sync::Arc; use std::sync::Arc;
use deno_core::anyhow::anyhow; use deno_core::anyhow::anyhow;
use deno_core::anyhow::bail;
use deno_core::anyhow::Context; use deno_core::anyhow::Context;
use deno_core::error::AnyError; use deno_core::error::AnyError;
use deno_core::parking_lot::Mutex; use deno_core::parking_lot::Mutex;
use deno_core::BufMutView; use deno_core::BufMutView;
use deno_core::BufView; use deno_core::BufView;
use deno_core::ResourceHandleFd; use deno_core::ResourceHandleFd;
use deno_path_util::normalize_path;
use deno_path_util::strip_unc_prefix;
use deno_runtime::deno_fs::FsDirEntry; use deno_runtime::deno_fs::FsDirEntry;
use deno_runtime::deno_io; use deno_runtime::deno_io;
use deno_runtime::deno_io::fs::FsError; use deno_runtime::deno_io::fs::FsError;
use deno_runtime::deno_io::fs::FsResult; use deno_runtime::deno_io::fs::FsResult;
use deno_runtime::deno_io::fs::FsStat; use deno_runtime::deno_io::fs::FsStat;
use indexmap::IndexSet;
use serde::Deserialize; use serde::Deserialize;
use serde::Serialize; use serde::Serialize;
use thiserror::Error; use thiserror::Error;
@ -34,6 +38,38 @@ use crate::util;
use crate::util::display::DisplayTreeNode; use crate::util::display::DisplayTreeNode;
use crate::util::fs::canonicalize_path; use crate::util::fs::canonicalize_path;
use super::binary::DENO_COMPILE_GLOBAL_NODE_MODULES_DIR_NAME;
#[derive(Debug, PartialEq, Eq)]
pub enum WindowsSystemRootablePath {
/// The root of the system above any drive letters.
WindowSystemRoot,
Path(PathBuf),
}
impl WindowsSystemRootablePath {
pub fn join(&self, name_component: &str) -> PathBuf {
// this method doesn't handle multiple components
debug_assert!(!name_component.contains('\\'));
debug_assert!(!name_component.contains('/'));
match self {
WindowsSystemRootablePath::WindowSystemRoot => {
// windows drive letter
PathBuf::from(&format!("{}\\", name_component))
}
WindowsSystemRootablePath::Path(path) => path.join(name_component),
}
}
}
#[derive(Debug)]
pub struct BuiltVfs {
pub root_path: WindowsSystemRootablePath,
pub root: VirtualDirectory,
pub files: Vec<Vec<u8>>,
}
#[derive(Debug, Copy, Clone)] #[derive(Debug, Copy, Clone)]
pub enum VfsFileSubDataKind { pub enum VfsFileSubDataKind {
/// Raw bytes of the file. /// Raw bytes of the file.
@ -43,84 +79,84 @@ pub enum VfsFileSubDataKind {
ModuleGraph, ModuleGraph,
} }
#[derive(Error, Debug)]
#[error(
"Failed to strip prefix '{}' from '{}'", root_path.display(), target.display()
)]
pub struct StripRootError {
root_path: PathBuf,
target: PathBuf,
}
#[derive(Debug)] #[derive(Debug)]
pub struct VfsBuilder { pub struct VfsBuilder {
root_path: PathBuf, executable_root: VirtualDirectory,
root_dir: VirtualDirectory,
files: Vec<Vec<u8>>, files: Vec<Vec<u8>>,
current_offset: u64, current_offset: u64,
file_offsets: HashMap<String, u64>, file_offsets: HashMap<String, u64>,
/// The minimum root directory that should be included in the VFS.
min_root_dir: Option<WindowsSystemRootablePath>,
} }
impl VfsBuilder { impl VfsBuilder {
pub fn new(root_path: PathBuf) -> Result<Self, AnyError> { pub fn new() -> Self {
let root_path = canonicalize_path(&root_path) Self {
.with_context(|| format!("Canonicalizing {}", root_path.display()))?; executable_root: VirtualDirectory {
log::debug!("Building vfs with root '{}'", root_path.display()); name: "/".to_string(),
Ok(Self {
root_dir: VirtualDirectory {
name: root_path
.file_stem()
.map(|s| s.to_string_lossy().into_owned())
.unwrap_or("root".to_string()),
entries: Vec::new(), entries: Vec::new(),
}, },
root_path,
files: Vec::new(), files: Vec::new(),
current_offset: 0, current_offset: 0,
file_offsets: Default::default(), file_offsets: Default::default(),
}) min_root_dir: Default::default(),
}
} }
pub fn set_new_root_path( /// Add a directory that might be the minimum root directory
&mut self, /// of the VFS.
root_path: PathBuf, ///
) -> Result<(), AnyError> { /// For example, say the user has a deno.json and specifies an
let root_path = canonicalize_path(&root_path)?; /// import map in a parent directory. The import map won't be
self.root_path = root_path; /// included in the VFS, but its base will meaning we need to
self.root_dir = VirtualDirectory { /// tell the VFS builder to include the base of the import map
name: self /// by calling this method.
.root_path pub fn add_possible_min_root_dir(&mut self, path: &Path) {
.file_stem() self.add_dir_raw(path);
.map(|s| s.to_string_lossy().into_owned())
.unwrap_or("root".to_string()),
entries: vec![VfsEntry::Dir(VirtualDirectory {
name: std::mem::take(&mut self.root_dir.name),
entries: std::mem::take(&mut self.root_dir.entries),
})],
};
Ok(())
}
pub fn with_root_dir<R>( match &self.min_root_dir {
&mut self, Some(WindowsSystemRootablePath::WindowSystemRoot) => {
with_root: impl FnOnce(&mut VirtualDirectory) -> R, // already the root dir
) -> R { }
with_root(&mut self.root_dir) Some(WindowsSystemRootablePath::Path(current_path)) => {
let mut common_components = Vec::new();
for (a, b) in current_path.components().zip(path.components()) {
if a != b {
break;
}
common_components.push(a);
}
if common_components.is_empty() {
if cfg!(windows) {
self.min_root_dir =
Some(WindowsSystemRootablePath::WindowSystemRoot);
} else {
self.min_root_dir =
Some(WindowsSystemRootablePath::Path(PathBuf::from("/")));
}
} else {
self.min_root_dir = Some(WindowsSystemRootablePath::Path(
common_components.iter().collect(),
));
}
}
None => {
self.min_root_dir =
Some(WindowsSystemRootablePath::Path(path.to_path_buf()));
}
}
} }
pub fn add_dir_recursive(&mut self, path: &Path) -> Result<(), AnyError> { pub fn add_dir_recursive(&mut self, path: &Path) -> Result<(), AnyError> {
let target_path = canonicalize_path(path)?; let target_path = self.resolve_target_path(path)?;
if path != target_path { self.add_dir_recursive_not_symlink(&target_path)
self.add_symlink(path, &target_path)?;
}
self.add_dir_recursive_internal(&target_path)
} }
fn add_dir_recursive_internal( fn add_dir_recursive_not_symlink(
&mut self, &mut self,
path: &Path, path: &Path,
) -> Result<(), AnyError> { ) -> Result<(), AnyError> {
self.add_dir(path)?; self.add_dir_raw(path);
let read_dir = std::fs::read_dir(path) let read_dir = std::fs::read_dir(path)
.with_context(|| format!("Reading {}", path.display()))?; .with_context(|| format!("Reading {}", path.display()))?;
@ -133,49 +169,26 @@ impl VfsBuilder {
let path = entry.path(); let path = entry.path();
if file_type.is_dir() { if file_type.is_dir() {
self.add_dir_recursive_internal(&path)?; self.add_dir_recursive_not_symlink(&path)?;
} else if file_type.is_file() { } else if file_type.is_file() {
self.add_file_at_path_not_symlink(&path)?; self.add_file_at_path_not_symlink(&path)?;
} else if file_type.is_symlink() { } else if file_type.is_symlink() {
match util::fs::canonicalize_path(&path) { match self.add_symlink(&path) {
Ok(target) => { Ok(target) => match target {
if let Err(StripRootError { .. }) = self.add_symlink(&path, &target) SymlinkTarget::File(target) => {
{ self.add_file_at_path_not_symlink(&target)?
if target.is_file() {
// this may change behavior, so warn the user about it
log::warn!(
"{} Symlink target is outside '{}'. Inlining symlink at '{}' to '{}' as file.",
crate::colors::yellow("Warning"),
self.root_path.display(),
path.display(),
target.display(),
);
// inline the symlink and make the target file
let file_bytes = std::fs::read(&target)
.with_context(|| format!("Reading {}", path.display()))?;
self.add_file_with_data_inner(
&path,
file_bytes,
VfsFileSubDataKind::Raw,
)?;
} else {
log::warn!(
"{} Symlink target is outside '{}'. Excluding symlink at '{}' with target '{}'.",
crate::colors::yellow("Warning"),
self.root_path.display(),
path.display(),
target.display(),
);
}
} }
} SymlinkTarget::Dir(target) => {
self.add_dir_recursive_not_symlink(&target)?;
}
},
Err(err) => { Err(err) => {
log::warn!( log::warn!(
"{} Failed resolving symlink. Ignoring.\n Path: {}\n Message: {:#}", "{} Failed resolving symlink. Ignoring.\n Path: {}\n Message: {:#}",
crate::colors::yellow("Warning"), crate::colors::yellow("Warning"),
path.display(), path.display(),
err err
); );
} }
} }
} }
@ -184,15 +197,15 @@ impl VfsBuilder {
Ok(()) Ok(())
} }
fn add_dir( fn add_dir_raw(&mut self, path: &Path) -> &mut VirtualDirectory {
&mut self,
path: &Path,
) -> Result<&mut VirtualDirectory, StripRootError> {
log::debug!("Ensuring directory '{}'", path.display()); log::debug!("Ensuring directory '{}'", path.display());
let path = self.path_relative_root(path)?; debug_assert!(path.is_absolute());
let mut current_dir = &mut self.root_dir; let mut current_dir = &mut self.executable_root;
for component in path.components() { for component in path.components() {
if matches!(component, std::path::Component::RootDir) {
continue;
}
let name = component.as_os_str().to_string_lossy(); let name = component.as_os_str().to_string_lossy();
let index = match current_dir let index = match current_dir
.entries .entries
@ -218,15 +231,44 @@ impl VfsBuilder {
}; };
} }
Ok(current_dir) current_dir
}
pub fn get_system_root_dir_mut(&mut self) -> &mut VirtualDirectory {
&mut self.executable_root
}
pub fn get_dir_mut(&mut self, path: &Path) -> Option<&mut VirtualDirectory> {
debug_assert!(path.is_absolute());
let mut current_dir = &mut self.executable_root;
for component in path.components() {
if matches!(component, std::path::Component::RootDir) {
continue;
}
let name = component.as_os_str().to_string_lossy();
let index = match current_dir
.entries
.binary_search_by(|e| e.name().cmp(&name))
{
Ok(index) => index,
Err(_) => return None,
};
match &mut current_dir.entries[index] {
VfsEntry::Dir(dir) => {
current_dir = dir;
}
_ => unreachable!(),
};
}
Some(current_dir)
} }
pub fn add_file_at_path(&mut self, path: &Path) -> Result<(), AnyError> { pub fn add_file_at_path(&mut self, path: &Path) -> Result<(), AnyError> {
let target_path = canonicalize_path(path)?; let file_bytes = std::fs::read(path)
if target_path != path { .with_context(|| format!("Reading {}", path.display()))?;
self.add_symlink(path, &target_path)?; self.add_file_with_data(path, file_bytes, VfsFileSubDataKind::Raw)
}
self.add_file_at_path_not_symlink(&target_path)
} }
fn add_file_at_path_not_symlink( fn add_file_at_path_not_symlink(
@ -244,11 +286,15 @@ impl VfsBuilder {
data: Vec<u8>, data: Vec<u8>,
sub_data_kind: VfsFileSubDataKind, sub_data_kind: VfsFileSubDataKind,
) -> Result<(), AnyError> { ) -> Result<(), AnyError> {
let target_path = canonicalize_path(path)?; let metadata = std::fs::symlink_metadata(path).with_context(|| {
if target_path != path { format!("Resolving target path for '{}'", path.display())
self.add_symlink(path, &target_path)?; })?;
if metadata.is_symlink() {
let target = self.add_symlink(path)?.into_path_buf();
self.add_file_with_data_inner(&target, data, sub_data_kind)
} else {
self.add_file_with_data_inner(path, data, sub_data_kind)
} }
self.add_file_with_data_inner(&target_path, data, sub_data_kind)
} }
fn add_file_with_data_inner( fn add_file_with_data_inner(
@ -267,7 +313,7 @@ impl VfsBuilder {
self.current_offset self.current_offset
}; };
let dir = self.add_dir(path.parent().unwrap())?; let dir = self.add_dir_raw(path.parent().unwrap());
let name = path.file_name().unwrap().to_string_lossy(); let name = path.file_name().unwrap().to_string_lossy();
let offset_and_len = OffsetWithLength { let offset_and_len = OffsetWithLength {
offset, offset,
@ -309,74 +355,162 @@ impl VfsBuilder {
Ok(()) Ok(())
} }
fn add_symlink( fn resolve_target_path(&mut self, path: &Path) -> Result<PathBuf, AnyError> {
let metadata = std::fs::symlink_metadata(path).with_context(|| {
format!("Resolving target path for '{}'", path.display())
})?;
if metadata.is_symlink() {
Ok(self.add_symlink(path)?.into_path_buf())
} else {
Ok(path.to_path_buf())
}
}
fn add_symlink(&mut self, path: &Path) -> Result<SymlinkTarget, AnyError> {
self.add_symlink_inner(path, &mut IndexSet::new())
}
fn add_symlink_inner(
&mut self, &mut self,
path: &Path, path: &Path,
target: &Path, visited: &mut IndexSet<PathBuf>,
) -> Result<(), StripRootError> { ) -> Result<SymlinkTarget, AnyError> {
log::debug!( log::debug!("Adding symlink '{}'", path.display());
"Adding symlink '{}' to '{}'", let target = strip_unc_prefix(
path.display(), std::fs::read_link(path)
target.display() .with_context(|| format!("Reading symlink '{}'", path.display()))?,
); );
let relative_target = self.path_relative_root(target)?; let target = normalize_path(path.parent().unwrap().join(&target));
let relative_path = match self.path_relative_root(path) { let dir = self.add_dir_raw(path.parent().unwrap());
Ok(path) => path,
Err(StripRootError { .. }) => {
// ignore if the original path is outside the root directory
return Ok(());
}
};
if relative_target == relative_path {
// it's the same, ignore
return Ok(());
}
let dir = self.add_dir(path.parent().unwrap())?;
let name = path.file_name().unwrap().to_string_lossy(); let name = path.file_name().unwrap().to_string_lossy();
match dir.entries.binary_search_by(|e| e.name().cmp(&name)) { match dir.entries.binary_search_by(|e| e.name().cmp(&name)) {
Ok(_) => Ok(()), // previously inserted Ok(_) => {} // previously inserted
Err(insert_index) => { Err(insert_index) => {
dir.entries.insert( dir.entries.insert(
insert_index, insert_index,
VfsEntry::Symlink(VirtualSymlink { VfsEntry::Symlink(VirtualSymlink {
name: name.to_string(), name: name.to_string(),
dest_parts: relative_target dest_parts: VirtualSymlinkParts::from_path(&target),
.components()
.map(|c| c.as_os_str().to_string_lossy().to_string())
.collect::<Vec<_>>(),
}), }),
); );
Ok(())
} }
} }
let target_metadata =
std::fs::symlink_metadata(&target).with_context(|| {
format!("Reading symlink target '{}'", target.display())
})?;
if target_metadata.is_symlink() {
if !visited.insert(target.clone()) {
// todo: probably don't error in this scenario
bail!(
"Circular symlink detected: {} -> {}",
visited
.iter()
.map(|p| p.display().to_string())
.collect::<Vec<_>>()
.join(" -> "),
target.display()
);
}
self.add_symlink_inner(&target, visited)
} else if target_metadata.is_dir() {
Ok(SymlinkTarget::Dir(target))
} else {
Ok(SymlinkTarget::File(target))
}
} }
pub fn into_dir_and_files(self) -> (VirtualDirectory, Vec<Vec<u8>>) { pub fn build(self) -> BuiltVfs {
(self.root_dir, self.files) fn strip_prefix_from_symlinks(
} dir: &mut VirtualDirectory,
parts: &[String],
) {
for entry in &mut dir.entries {
match entry {
VfsEntry::Dir(dir) => {
strip_prefix_from_symlinks(dir, parts);
}
VfsEntry::File(_) => {}
VfsEntry::Symlink(symlink) => {
let old_parts = std::mem::take(&mut symlink.dest_parts.0);
symlink.dest_parts.0 =
old_parts.into_iter().skip(parts.len()).collect();
}
}
}
}
fn path_relative_root(&self, path: &Path) -> Result<PathBuf, StripRootError> { let mut current_dir = self.executable_root;
match path.strip_prefix(&self.root_path) { let mut current_path = if cfg!(windows) {
Ok(p) => Ok(p.to_path_buf()), WindowsSystemRootablePath::WindowSystemRoot
Err(_) => Err(StripRootError { } else {
root_path: self.root_path.clone(), WindowsSystemRootablePath::Path(PathBuf::from("/"))
target: path.to_path_buf(), };
}), loop {
if current_dir.entries.len() != 1 {
break;
}
if self.min_root_dir.as_ref() == Some(&current_path) {
break;
}
match &current_dir.entries[0] {
VfsEntry::Dir(dir) => {
if dir.name == DENO_COMPILE_GLOBAL_NODE_MODULES_DIR_NAME {
// special directory we want to maintain
break;
}
match current_dir.entries.remove(0) {
VfsEntry::Dir(dir) => {
current_path =
WindowsSystemRootablePath::Path(current_path.join(&dir.name));
current_dir = dir;
}
_ => unreachable!(),
};
}
VfsEntry::File(_) | VfsEntry::Symlink(_) => break,
}
}
if let WindowsSystemRootablePath::Path(path) = &current_path {
strip_prefix_from_symlinks(
&mut current_dir,
&VirtualSymlinkParts::from_path(path).0,
);
}
BuiltVfs {
root_path: current_path,
root: current_dir,
files: self.files,
} }
} }
} }
pub fn output_vfs(builder: &VfsBuilder, executable_name: &str) { #[derive(Debug)]
enum SymlinkTarget {
File(PathBuf),
Dir(PathBuf),
}
impl SymlinkTarget {
pub fn into_path_buf(self) -> PathBuf {
match self {
Self::File(path) => path,
Self::Dir(path) => path,
}
}
}
pub fn output_vfs(vfs: &BuiltVfs, executable_name: &str) {
if !log::log_enabled!(log::Level::Info) { if !log::log_enabled!(log::Level::Info) {
return; // no need to compute if won't output return; // no need to compute if won't output
} }
if builder.root_dir.entries.is_empty() { if vfs.root.entries.is_empty() {
return; // nothing to output return; // nothing to output
} }
let mut text = String::new(); let mut text = String::new();
let display_tree = vfs_as_display_tree(builder, executable_name); let display_tree = vfs_as_display_tree(vfs, executable_name);
display_tree.print(&mut text).unwrap(); // unwrap ok because it's writing to a string display_tree.print(&mut text).unwrap(); // unwrap ok because it's writing to a string
log::info!( log::info!(
"\n{}\n", "\n{}\n",
@ -386,7 +520,7 @@ pub fn output_vfs(builder: &VfsBuilder, executable_name: &str) {
} }
fn vfs_as_display_tree( fn vfs_as_display_tree(
builder: &VfsBuilder, vfs: &BuiltVfs,
executable_name: &str, executable_name: &str,
) -> DisplayTreeNode { ) -> DisplayTreeNode {
enum EntryOutput<'a> { enum EntryOutput<'a> {
@ -398,20 +532,38 @@ fn vfs_as_display_tree(
impl<'a> EntryOutput<'a> { impl<'a> EntryOutput<'a> {
pub fn as_display_tree(&self, name: String) -> DisplayTreeNode { pub fn as_display_tree(&self, name: String) -> DisplayTreeNode {
let mut children = match self {
EntryOutput::Subset(vec) => vec
.iter()
.map(|e| e.output.as_display_tree(e.name.to_string()))
.collect(),
EntryOutput::All | EntryOutput::File | EntryOutput::Symlink(_) => {
vec![]
}
};
// we only want to collapse leafs so that nodes of the
// same depth have the same indentation
let collapse_single_child =
children.len() == 1 && children[0].children.is_empty();
DisplayTreeNode { DisplayTreeNode {
text: match self { text: match self {
EntryOutput::All | EntryOutput::Subset(_) | EntryOutput::File => name, EntryOutput::All => format!("{}/*", name),
EntryOutput::Subset(_) => {
if collapse_single_child {
format!("{}/{}", name, children[0].text)
} else {
name
}
}
EntryOutput::File => name,
EntryOutput::Symlink(parts) => { EntryOutput::Symlink(parts) => {
format!("{} --> {}", name, parts.join("/")) format!("{} --> {}", name, parts.join("/"))
} }
}, },
children: match self { children: if collapse_single_child {
EntryOutput::All => vec![DisplayTreeNode::from_text("*".to_string())], children.remove(0).children
EntryOutput::Subset(vec) => vec } else {
.iter() children
.map(|e| e.output.as_display_tree(e.name.to_string()))
.collect(),
EntryOutput::File | EntryOutput::Symlink(_) => vec![],
}, },
} }
} }
@ -422,37 +574,81 @@ fn vfs_as_display_tree(
output: EntryOutput<'a>, output: EntryOutput<'a>,
} }
fn include_all_entries<'a>( fn show_global_node_modules_dir(
dir: &Path, vfs_dir: &VirtualDirectory,
vfs_dir: &'a VirtualDirectory, ) -> Vec<DirEntryOutput> {
) -> EntryOutput<'a> { fn show_subset_deep(
EntryOutput::Subset( vfs_dir: &VirtualDirectory,
depth: usize,
) -> EntryOutput {
if depth == 0 {
EntryOutput::All
} else {
EntryOutput::Subset(show_subset(vfs_dir, depth))
}
}
fn show_subset(
vfs_dir: &VirtualDirectory,
depth: usize,
) -> Vec<DirEntryOutput> {
vfs_dir vfs_dir
.entries .entries
.iter() .iter()
.map(|entry| DirEntryOutput { .map(|entry| DirEntryOutput {
name: entry.name(), name: entry.name(),
output: analyze_entry(&dir.join(entry.name()), entry), output: match entry {
VfsEntry::Dir(virtual_directory) => {
show_subset_deep(virtual_directory, depth - 1)
}
VfsEntry::File(_) => EntryOutput::File,
VfsEntry::Symlink(virtual_symlink) => {
EntryOutput::Symlink(&virtual_symlink.dest_parts.0)
}
},
}) })
.collect(), .collect()
) }
// in this scenario, we want to show
// .deno_compile_node_modules/localhost/<package_name>/<version>/*
show_subset(vfs_dir, 3)
} }
fn analyze_entry<'a>(path: &Path, entry: &'a VfsEntry) -> EntryOutput<'a> { fn include_all_entries<'a>(
dir_path: &WindowsSystemRootablePath,
vfs_dir: &'a VirtualDirectory,
) -> Vec<DirEntryOutput<'a>> {
if vfs_dir.name == DENO_COMPILE_GLOBAL_NODE_MODULES_DIR_NAME {
return show_global_node_modules_dir(vfs_dir);
}
vfs_dir
.entries
.iter()
.map(|entry| DirEntryOutput {
name: entry.name(),
output: analyze_entry(dir_path.join(entry.name()), entry),
})
.collect()
}
fn analyze_entry(path: PathBuf, entry: &VfsEntry) -> EntryOutput {
match entry { match entry {
VfsEntry::Dir(virtual_directory) => analyze_dir(path, virtual_directory), VfsEntry::Dir(virtual_directory) => analyze_dir(path, virtual_directory),
VfsEntry::File(_) => EntryOutput::File, VfsEntry::File(_) => EntryOutput::File,
VfsEntry::Symlink(virtual_symlink) => { VfsEntry::Symlink(virtual_symlink) => {
EntryOutput::Symlink(&virtual_symlink.dest_parts) EntryOutput::Symlink(&virtual_symlink.dest_parts.0)
} }
} }
} }
fn analyze_dir<'a>( fn analyze_dir(dir: PathBuf, vfs_dir: &VirtualDirectory) -> EntryOutput {
dir: &Path, if vfs_dir.name == DENO_COMPILE_GLOBAL_NODE_MODULES_DIR_NAME {
vfs_dir: &'a VirtualDirectory, return EntryOutput::Subset(show_global_node_modules_dir(vfs_dir));
) -> EntryOutput<'a> { }
let real_entry_count = std::fs::read_dir(dir)
let real_entry_count = std::fs::read_dir(&dir)
.ok() .ok()
.map(|entries| entries.flat_map(|e| e.ok()).count()) .map(|entries| entries.flat_map(|e| e.ok()).count())
.unwrap_or(0); .unwrap_or(0);
@ -462,7 +658,7 @@ fn vfs_as_display_tree(
.iter() .iter()
.map(|entry| DirEntryOutput { .map(|entry| DirEntryOutput {
name: entry.name(), name: entry.name(),
output: analyze_entry(&dir.join(entry.name()), entry), output: analyze_entry(dir.join(entry.name()), entry),
}) })
.collect::<Vec<_>>(); .collect::<Vec<_>>();
if children if children
@ -474,15 +670,23 @@ fn vfs_as_display_tree(
EntryOutput::Subset(children) EntryOutput::Subset(children)
} }
} else { } else {
include_all_entries(dir, vfs_dir) EntryOutput::Subset(include_all_entries(
&WindowsSystemRootablePath::Path(dir),
vfs_dir,
))
} }
} }
// always include all the entries for the root directory, otherwise the // always include all the entries for the root directory, otherwise the
// user might not have context about what's being shown // user might not have context about what's being shown
let output = include_all_entries(&builder.root_path, &builder.root_dir); let child_entries = include_all_entries(&vfs.root_path, &vfs.root);
output DisplayTreeNode {
.as_display_tree(deno_terminal::colors::italic(executable_name).to_string()) text: deno_terminal::colors::italic(executable_name).to_string(),
children: child_entries
.iter()
.map(|entry| entry.output.as_display_tree(entry.name.to_string()))
.collect(),
}
} }
#[derive(Debug)] #[derive(Debug)]
@@ -603,6 +807,20 @@ pub struct VirtualDirectory {
   pub entries: Vec<VfsEntry>,
 }
 
+impl VirtualDirectory {
+  pub fn insert_entry(&mut self, entry: VfsEntry) {
+    let name = entry.name();
+    match self.entries.binary_search_by(|e| e.name().cmp(name)) {
+      Ok(index) => {
+        self.entries[index] = entry;
+      }
+      Err(insert_index) => {
+        self.entries.insert(insert_index, entry);
+      }
+    }
+  }
+}
+
 #[derive(Debug, Clone, Copy, Serialize, Deserialize)]
 pub struct OffsetWithLength {
   #[serde(rename = "o")]
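The new `insert_entry` keeps `entries` sorted by name and uses `binary_search_by` to either overwrite an existing entry or find the insertion index. A tiny standalone sketch of the same upsert-into-sorted-Vec pattern (`upsert_sorted` is a hypothetical helper for illustration):

```rust
// Keep a Vec sorted by key; replace on hit, insert at the sorted position on miss.
fn upsert_sorted(entries: &mut Vec<(String, u32)>, name: &str, value: u32) {
    match entries.binary_search_by(|(n, _)| n.as_str().cmp(name)) {
        Ok(index) => entries[index] = (name.to_string(), value), // replace existing
        Err(insert_index) => entries.insert(insert_index, (name.to_string(), value)),
    }
}

fn main() {
    let mut entries = Vec::new();
    upsert_sorted(&mut entries, "node_modules", 1);
    upsert_sorted(&mut entries, "deno.json", 2);
    upsert_sorted(&mut entries, "node_modules", 3); // replaces, does not duplicate
    assert_eq!(
        entries,
        vec![("deno.json".to_string(), 2), ("node_modules".to_string(), 3)]
    );
}
```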
@ -626,18 +844,33 @@ pub struct VirtualFile {
pub module_graph_offset: OffsetWithLength, pub module_graph_offset: OffsetWithLength,
} }
#[derive(Debug, Serialize, Deserialize)]
pub struct VirtualSymlinkParts(Vec<String>);
impl VirtualSymlinkParts {
pub fn from_path(path: &Path) -> Self {
Self(
path
.components()
.filter(|c| !matches!(c, std::path::Component::RootDir))
.map(|c| c.as_os_str().to_string_lossy().to_string())
.collect(),
)
}
}
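A quick standalone sketch of what this component splitting produces (the helper below is a simplified stand-in, assuming Unix-style paths):

use std::path::{Component, Path};

fn symlink_parts(path: &Path) -> Vec<String> {
  // Drop the root component and keep every remaining component as a String.
  path
    .components()
    .filter(|c| !matches!(c, Component::RootDir))
    .map(|c| c.as_os_str().to_string_lossy().to_string())
    .collect()
}

fn main() {
  assert_eq!(symlink_parts(Path::new("/src/sub_dir/e.txt")), ["src", "sub_dir", "e.txt"]);
}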
#[derive(Debug, Serialize, Deserialize)]
pub struct VirtualSymlink {
  #[serde(rename = "n")]
  pub name: String,
  #[serde(rename = "p")]
  pub dest_parts: VirtualSymlinkParts,
}

impl VirtualSymlink {
  pub fn resolve_dest_from_root(&self, root: &Path) -> PathBuf {
    let mut dest = root.to_path_buf();
    for part in &self.dest_parts.0 {
      dest.push(part);
    }
    dest
@ -709,10 +942,10 @@ impl VfsRoot {
    let mut final_path = self.root_path.clone();
    let mut current_entry = VfsEntryRef::Dir(&self.dir);
    for component in relative_path.components() {
      let component = component.as_os_str();
      let current_dir = match current_entry {
        VfsEntryRef::Dir(dir) => {
          final_path.push(component);
          dir
        }
        VfsEntryRef::Symlink(symlink) => {

@ -721,7 +954,7 @@ impl VfsRoot {
          final_path = resolved_path; // overwrite with the new resolved path
          match entry {
            VfsEntryRef::Dir(dir) => {
              final_path.push(component);
              dir
            }
            _ => {

@ -739,6 +972,7 @@ impl VfsRoot {
          ));
        }
      };
      let component = component.to_string_lossy();
      match current_dir
        .entries
        .binary_search_by(|e| e.name().cmp(&component))
@ -1136,6 +1370,7 @@ impl FileBackedVfs {
mod test {
  use console_static_text::ansi::strip_ansi_codes;
  use std::io::Write;
  use test_util::assert_contains;
  use test_util::TempDir;

  use super::*;
@ -1159,8 +1394,11 @@ mod test {
    // will canonicalize the root path
    let src_path = temp_dir.path().canonicalize().join("src");
    src_path.create_dir_all();
    src_path.join("sub_dir").create_dir_all();
    src_path.join("e.txt").write("e");
    src_path.symlink_file("e.txt", "sub_dir/e.txt");
    let src_path = src_path.to_path_buf();
    let mut builder = VfsBuilder::new();
    builder
      .add_file_with_data_inner(
        &src_path.join("a.txt"),

@ -1190,18 +1428,9 @@ mod test {
        VfsFileSubDataKind::Raw,
      )
      .unwrap();
    builder.add_file_at_path(&src_path.join("e.txt")).unwrap();
    builder
      .add_symlink(&src_path.join("sub_dir").join("e.txt"))
      .unwrap();
    // get the virtual fs

@ -1262,7 +1491,7 @@ mod test {
    // build and create the virtual fs
    let src_path = temp_dir_path.join("src").to_path_buf();
    let mut builder = VfsBuilder::new();
    builder.add_dir_recursive(&src_path).unwrap();
    let (dest_path, virtual_fs) = into_virtual_fs(builder, &temp_dir);

@ -1300,10 +1529,10 @@ mod test {
    temp_dir: &TempDir,
  ) -> (PathBuf, FileBackedVfs) {
    let virtual_fs_file = temp_dir.path().join("virtual_fs");
    let vfs = builder.build();
    {
      let mut file = std::fs::File::create(&virtual_fs_file).unwrap();
      for file_data in &vfs.files {
        file.write_all(file_data).unwrap();
      }
    }

@ -1314,7 +1543,7 @@ mod test {
    FileBackedVfs::new(
      Cow::Owned(data),
      VfsRoot {
        dir: vfs.root,
        root_path: dest_path.to_path_buf(),
        start_file_offset: 0,
      },
@ -1327,41 +1556,22 @@ mod test {
    let temp_dir = TempDir::new();
    let src_path = temp_dir.path().canonicalize().join("src");
    src_path.create_dir_all();
    src_path.symlink_file("a.txt", "b.txt");
    src_path.symlink_file("b.txt", "c.txt");
    src_path.symlink_file("c.txt", "a.txt");
    let src_path = src_path.to_path_buf();
    let mut builder = VfsBuilder::new();
    let err = builder
      .add_symlink(src_path.join("a.txt").as_path())
      .unwrap_err();
    assert_contains!(err.to_string(), "Circular symlink detected",);
  }
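For context, the error asserted above comes from the builder noticing it has revisited a path while following symlink targets. The standalone sketch below illustrates that kind of cycle detection with a hypothetical link-lookup closure; it is not the builder's actual implementation:

use std::collections::{HashMap, HashSet};
use std::path::{Path, PathBuf};

fn resolve_symlink(
  start: &Path,
  read_link_target: impl Fn(&Path) -> Option<PathBuf>,
) -> Result<PathBuf, String> {
  let mut seen = HashSet::new();
  let mut current = start.to_path_buf();
  // Follow link targets until a non-link is reached; error out on a cycle.
  while let Some(target) = read_link_target(&current) {
    if !seen.insert(current.clone()) {
      return Err(format!("Circular symlink detected: {}", start.display()));
    }
    current = target;
  }
  Ok(current)
}

fn main() {
  let links: HashMap<PathBuf, PathBuf> = [
    (PathBuf::from("a.txt"), PathBuf::from("b.txt")),
    (PathBuf::from("b.txt"), PathBuf::from("c.txt")),
    (PathBuf::from("c.txt"), PathBuf::from("a.txt")),
  ]
  .into_iter()
  .collect();
  let err = resolve_symlink(Path::new("a.txt"), |p| links.get(p).cloned()).unwrap_err();
  assert!(err.contains("Circular symlink detected"));
}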
  #[tokio::test]
  async fn test_open_file() {
    let temp_dir = TempDir::new();
    let temp_path = temp_dir.path().canonicalize();
    let mut builder = VfsBuilder::new();
    builder
      .add_file_with_data_inner(
        temp_path.join("a.txt").as_path(),

@ -1436,8 +1646,7 @@ mod test {
    temp_dir.write("c/a.txt", "contents");
    temp_dir.symlink_file("c/a.txt", "c/b.txt");
    assert_eq!(temp_dir.read_to_string("c/b.txt"), "contents"); // ensure the symlink works
    let mut vfs_builder = VfsBuilder::new();
    // full dir
    vfs_builder
      .add_dir_recursive(temp_dir.path().join("a").as_path())

@ -1451,16 +1660,14 @@ mod test {
      .add_dir_recursive(temp_dir.path().join("c").as_path())
      .unwrap();
    temp_dir.write("c/c.txt", ""); // write an extra file so it shows the whole directory
    let node = vfs_as_display_tree(&vfs_builder.build(), "executable");
    let mut text = String::new();
    node.print(&mut text).unwrap();
    assert_eq!(
      strip_ansi_codes(&text),
      r#"executable
  a/*
  b/a.txt
  c
    a.txt
    b.txt --> c/a.txt
View file
@ -5,7 +5,6 @@ use crate::args::CompileFlags;
use crate::args::Flags;
use crate::factory::CliFactory;
use crate::http_util::HttpClientProvider;
use crate::standalone::binary::WriteBinOptions;
use crate::standalone::is_standalone_binary;
use deno_ast::MediaType;

@ -17,8 +16,11 @@ use deno_core::error::AnyError;
use deno_core::resolve_url_or_path;
use deno_graph::GraphKind;
use deno_path_util::url_from_file_path;
use deno_path_util::url_to_file_path;
use deno_terminal::colors;
use rand::Rng;
use std::collections::HashSet;
use std::collections::VecDeque;
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
@ -84,29 +86,6 @@ pub async fn compile(
  let ts_config_for_emit = cli_options
    .resolve_ts_config_for_emit(deno_config::deno_json::TsConfigType::Emit)?;
  check_warn_tsconfig(&ts_config_for_emit);
let root_dir_url = resolve_root_dir_from_specifiers(
cli_options.workspace().root_dir(),
graph
.specifiers()
.map(|(s, _)| s)
.chain(
cli_options
.node_modules_dir_path()
.and_then(|p| ModuleSpecifier::from_directory_path(p).ok())
.iter(),
)
.chain(include_files.iter())
.chain(
// sometimes the import map path is outside the root dir
cli_options
.workspace()
.to_import_map_path()
.ok()
.and_then(|p| p.and_then(|p| url_from_file_path(&p).ok()))
.iter(),
),
);
log::debug!("Binary root dir: {}", root_dir_url);
  log::info!(
    "{} {} to {}",
    colors::green("Compile"),

@ -138,7 +117,6 @@ pub async fn compile(
      .unwrap()
      .to_string_lossy(),
    graph: &graph,
    entrypoint,
    include_files: &include_files,
    compile_flags: &compile_flags,
@ -261,15 +239,58 @@ fn get_module_roots_and_include_files(
    }
  }

  fn analyze_path(
    url: &ModuleSpecifier,
    module_roots: &mut Vec<ModuleSpecifier>,
    include_files: &mut Vec<ModuleSpecifier>,
    searched_paths: &mut HashSet<PathBuf>,
  ) -> Result<(), AnyError> {
    let Ok(path) = url_to_file_path(url) else {
      return Ok(());
    };
    let mut pending = VecDeque::from([path]);
    while let Some(path) = pending.pop_front() {
      if !searched_paths.insert(path.clone()) {
        continue;
      }
      if !path.is_dir() {
        let url = url_from_file_path(&path)?;
        include_files.push(url.clone());
        if is_module_graph_module(&url) {
          module_roots.push(url);
        }
        continue;
      }
      for entry in std::fs::read_dir(&path).with_context(|| {
        format!("Failed reading directory '{}'", path.display())
      })? {
        let entry = entry.with_context(|| {
          format!("Failed reading entry in directory '{}'", path.display())
        })?;
        pending.push_back(entry.path());
      }
    }
    Ok(())
  }

  let mut searched_paths = HashSet::new();
  let mut module_roots = Vec::new();
  let mut include_files = Vec::new();
  module_roots.push(entrypoint.clone());
  for side_module in &compile_flags.include {
    let url = resolve_url_or_path(side_module, initial_cwd)?;
    if is_module_graph_module(&url) {
      module_roots.push(url.clone());
      if url.scheme() == "file" {
        include_files.push(url);
      }
    } else {
      analyze_path(
        &url,
        &mut module_roots,
        &mut include_files,
        &mut searched_paths,
      )?;
    }
  }
  Ok((module_roots, include_files))
@ -335,57 +356,6 @@ fn get_os_specific_filepath(
  }
}
fn resolve_root_dir_from_specifiers<'a>(
starting_dir: &ModuleSpecifier,
specifiers: impl Iterator<Item = &'a ModuleSpecifier>,
) -> ModuleSpecifier {
fn select_common_root<'a>(a: &'a str, b: &'a str) -> &'a str {
let min_length = a.len().min(b.len());
let mut last_slash = 0;
for i in 0..min_length {
if a.as_bytes()[i] == b.as_bytes()[i] && a.as_bytes()[i] == b'/' {
last_slash = i;
} else if a.as_bytes()[i] != b.as_bytes()[i] {
break;
}
}
// Return the common root path up to the last common slash.
// This returns a slice of the original string 'a', up to and including the last matching '/'.
let common = &a[..=last_slash];
if cfg!(windows) && common == "file:///" {
a
} else {
common
}
}
fn is_file_system_root(url: &str) -> bool {
let Some(path) = url.strip_prefix("file:///") else {
return false;
};
if cfg!(windows) {
let Some((_drive, path)) = path.split_once('/') else {
return true;
};
path.is_empty()
} else {
path.is_empty()
}
}
let mut found_dir = starting_dir.as_str();
if !is_file_system_root(found_dir) {
for specifier in specifiers {
if specifier.scheme() == "file" {
found_dir = select_common_root(found_dir, specifier.as_str());
}
}
}
ModuleSpecifier::parse(found_dir).unwrap()
}
#[cfg(test)]
mod test {
  pub use super::*;

@ -462,41 +432,4 @@ mod test {
    run_test("C:\\my-exe.0.1.2", Some("windows"), "C:\\my-exe.0.1.2.exe");
    run_test("my-exe-0.1.2", Some("linux"), "my-exe-0.1.2");
  }
#[test]
fn test_resolve_root_dir_from_specifiers() {
fn resolve(start: &str, specifiers: &[&str]) -> String {
let specifiers = specifiers
.iter()
.map(|s| ModuleSpecifier::parse(s).unwrap())
.collect::<Vec<_>>();
resolve_root_dir_from_specifiers(
&ModuleSpecifier::parse(start).unwrap(),
specifiers.iter(),
)
.to_string()
}
assert_eq!(
resolve("file:///a/b/e", &["file:///a/b/c/d"]),
"file:///a/b/"
);
assert_eq!(
resolve("file:///a/b/c/", &["file:///a/b/c/d"]),
"file:///a/b/c/"
);
assert_eq!(
resolve("file:///a/b/c/", &["file:///a/b/c/d", "file:///a/b/c/e"]),
"file:///a/b/c/"
);
assert_eq!(resolve("file:///", &["file:///a/b/c/d"]), "file:///");
if cfg!(windows) {
assert_eq!(resolve("file:///c:/", &["file:///c:/test"]), "file:///c:/");
// this will ignore the other one because it's on a separate drive
assert_eq!(
resolve("file:///c:/a/b/c/", &["file:///v:/a/b/c/d"]),
"file:///c:/a/b/c/"
);
}
}
}
View file
@ -1,24 +1,34 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.

use super::draw_thread::DrawThread;
use deno_telemetry::OtelConfig;
use deno_telemetry::OtelConsoleConfig;
use std::io::Write;

struct CliLogger {
  otel_console_config: OtelConsoleConfig,
  logger: env_logger::Logger,
}

impl CliLogger {
  pub fn new(
    logger: env_logger::Logger,
    otel_console_config: OtelConsoleConfig,
  ) -> Self {
    Self {
      logger,
      otel_console_config,
    }
  }

  pub fn filter(&self) -> log::LevelFilter {
    self.logger.filter()
  }
}

impl log::Log for CliLogger {
  fn enabled(&self, metadata: &log::Metadata) -> bool {
    self.logger.enabled(metadata)
  }
  fn log(&self, record: &log::Record) {

@ -28,18 +38,30 @@ impl log::Log for CliLogger {
      // could potentially block other threads that access the draw
      // thread's state
      DrawThread::hide();

      match self.otel_console_config {
        OtelConsoleConfig::Ignore => {
          self.logger.log(record);
        }
        OtelConsoleConfig::Capture => {
          self.logger.log(record);
          deno_telemetry::handle_log(record);
        }
        OtelConsoleConfig::Replace => {
          deno_telemetry::handle_log(record);
        }
      }

      DrawThread::show();
    }
  }

  fn flush(&self) {
    self.logger.flush();
  }
}

pub fn init(maybe_level: Option<log::Level>, otel_config: Option<OtelConfig>) {
  let log_level = maybe_level.unwrap_or(log::Level::Info);
  let logger = env_logger::Builder::from_env(
    env_logger::Env::new()

@ -93,7 +115,12 @@ pub fn init(maybe_level: Option<log::Level>) {
    })
    .build();

  let cli_logger = CliLogger::new(
    logger,
    otel_config
      .map(|c| c.console)
      .unwrap_or(OtelConsoleConfig::Ignore),
  );
  let max_level = cli_logger.filter();
  let r = log::set_boxed_logger(Box::new(cli_logger));
  if r.is_ok() {
View file
@ -154,7 +154,7 @@ struct SharedWorkerState {
  storage_key_resolver: StorageKeyResolver,
  options: CliMainWorkerOptions,
  subcommand: DenoSubcommand,
  otel_config: OtelConfig,
  default_npm_caching_strategy: NpmCachingStrategy,
}

@ -426,7 +426,7 @@ impl CliMainWorkerFactory {
    storage_key_resolver: StorageKeyResolver,
    subcommand: DenoSubcommand,
    options: CliMainWorkerOptions,
    otel_config: OtelConfig,
    default_npm_caching_strategy: NpmCachingStrategy,
  ) -> Self {
    Self {
View file
@ -206,9 +206,6 @@ pub enum FetchError {
  RequestBuilderHook(deno_core::error::AnyError),
  #[error(transparent)]
  Io(#[from] std::io::Error),
}

pub type CancelableResponseFuture =
View file
@ -364,9 +364,9 @@ deno_core::extension!(deno_node,
    ops::zlib::brotli::op_create_brotli_decompress,
    ops::zlib::brotli::op_brotli_decompress_stream,
    ops::zlib::brotli::op_brotli_decompress_stream_end,
    ops::http::op_node_http_fetch_response_upgrade,
    ops::http::op_node_http_request_with_conn<P>,
    ops::http::op_node_http_await_response,
    ops::http2::op_http2_connect,
    ops::http2::op_http2_poll_client_connection,
    ops::http2::op_http2_client_request,
View file
@ -2,18 +2,20 @@

use std::borrow::Cow;
use std::cell::RefCell;
use std::fmt::Debug;
use std::pin::Pin;
use std::rc::Rc;
use std::task::Context;
use std::task::Poll;

use bytes::Bytes;
use deno_core::error::bad_resource;
use deno_core::error::type_error;
use deno_core::futures::stream::Peekable;
use deno_core::futures::Future;
use deno_core::futures::FutureExt;
use deno_core::futures::Stream;
use deno_core::futures::StreamExt;
use deno_core::op2;
use deno_core::serde::Serialize;
use deno_core::unsync::spawn;

@ -25,17 +27,17 @@ use deno_core::ByteString;
use deno_core::CancelFuture;
use deno_core::CancelHandle;
use deno_core::CancelTryFuture;
use deno_core::Canceled;
use deno_core::OpState;
use deno_core::RcRef;
use deno_core::Resource;
use deno_core::ResourceId;
use deno_fetch::FetchCancelHandle;
use deno_fetch::FetchReturn;
use deno_fetch::ResBody;
use deno_net::io::TcpStreamResource;
use deno_net::ops_tls::TlsStreamResource;
use deno_permissions::PermissionCheckError;
use http::header::HeaderMap;
use http::header::HeaderName;
use http::header::HeaderValue;

@ -44,41 +46,140 @@ use http::header::CONTENT_LENGTH;
use http::Method;
use http_body_util::BodyExt;
use hyper::body::Frame;
use hyper::body::Incoming;
use hyper_util::rt::TokioIo;
use std::cmp::min;
use tokio::io::AsyncReadExt;
use tokio::io::AsyncWriteExt;
#[derive(Default, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct NodeHttpResponse {
  pub status: u16,
  pub status_text: String,
  pub headers: Vec<(ByteString, ByteString)>,
  pub url: String,
  pub response_rid: ResourceId,
  pub content_length: Option<u64>,
  pub remote_addr_ip: Option<String>,
  pub remote_addr_port: Option<u16>,
  pub error: Option<String>,
}

type CancelableResponseResult =
  Result<Result<http::Response<Incoming>, hyper::Error>, Canceled>;

pub struct NodeHttpClientResponse {
  response: Pin<Box<dyn Future<Output = CancelableResponseResult>>>,
  url: String,
}

impl Debug for NodeHttpClientResponse {
  fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
    f.debug_struct("NodeHttpClientResponse")
      .field("url", &self.url)
      .finish()
  }
}

impl deno_core::Resource for NodeHttpClientResponse {
  fn name(&self) -> Cow<str> {
    "nodeHttpClientResponse".into()
  }
}

#[derive(Debug, thiserror::Error)]
pub enum ConnError {
  #[error(transparent)]
  Resource(deno_core::error::AnyError),
  #[error(transparent)]
  Permission(#[from] PermissionCheckError),
  #[error("Invalid URL {0}")]
  InvalidUrl(Url),
  #[error(transparent)]
  InvalidHeaderName(#[from] http::header::InvalidHeaderName),
  #[error(transparent)]
  InvalidHeaderValue(#[from] http::header::InvalidHeaderValue),
  #[error(transparent)]
  Url(#[from] url::ParseError),
  #[error(transparent)]
  Method(#[from] http::method::InvalidMethod),
  #[error(transparent)]
  Io(#[from] std::io::Error),
  #[error("TLS stream is currently in use")]
  TlsStreamBusy,
  #[error("TCP stream is currently in use")]
  TcpStreamBusy,
  #[error(transparent)]
  ReuniteTcp(#[from] tokio::net::tcp::ReuniteError),
  #[error(transparent)]
  Canceled(#[from] deno_core::Canceled),
  #[error(transparent)]
  Hyper(#[from] hyper::Error),
}

#[op2(async, stack_trace)]
#[serde]
pub async fn op_node_http_request_with_conn<P>(
  state: Rc<RefCell<OpState>>,
  #[serde] method: ByteString,
  #[string] url: String,
  #[serde] headers: Vec<(ByteString, ByteString)>,
  #[smi] body: Option<ResourceId>,
  #[smi] conn_rid: ResourceId,
  encrypted: bool,
) -> Result<FetchReturn, ConnError>
where
  P: crate::NodePermissions + 'static,
{
  let (_handle, mut sender) = if encrypted {
    let resource_rc = state
      .borrow_mut()
      .resource_table
      .take::<TlsStreamResource>(conn_rid)
      .map_err(ConnError::Resource)?;
    let resource =
      Rc::try_unwrap(resource_rc).map_err(|_e| ConnError::TlsStreamBusy)?;
    let (read_half, write_half) = resource.into_inner();
    let tcp_stream = read_half.unsplit(write_half);
    let io = TokioIo::new(tcp_stream);
    let (sender, conn) = hyper::client::conn::http1::handshake(io).await?;
    (
      tokio::task::spawn(async move { conn.with_upgrades().await }),
      sender,
    )
  } else {
    let resource_rc = state
      .borrow_mut()
      .resource_table
      .take::<TcpStreamResource>(conn_rid)
      .map_err(ConnError::Resource)?;
    let resource =
      Rc::try_unwrap(resource_rc).map_err(|_| ConnError::TcpStreamBusy)?;
    let (read_half, write_half) = resource.into_inner();
    let tcp_stream = read_half.reunite(write_half)?;
    let io = TokioIo::new(tcp_stream);
    let (sender, conn) = hyper::client::conn::http1::handshake(io).await?;

    // Spawn a task to poll the connection, driving the HTTP state
    (
      tokio::task::spawn(async move {
        conn.with_upgrades().await?;
        Ok::<_, _>(())
      }),
      sender,
    )
  };
  // Create the request.
  let method = Method::from_bytes(&method)?;
  let mut url_parsed = Url::parse(&url)?;
  let maybe_authority = deno_fetch::extract_authority(&mut url_parsed);

  {
    let mut state_ = state.borrow_mut();
    let permissions = state_.borrow_mut::<P>();
    permissions.check_net_url(&url_parsed, "ClientRequest")?;
  }

  let mut header_map = HeaderMap::new();

@ -93,9 +194,10 @@ where
      (
        BodyExt::boxed(NodeHttpResourceToBodyAdapter::new(
          state
            .borrow_mut()
            .resource_table
            .take_any(body)
            .map_err(ConnError::Resource)?,
        )),
        None,
      )

@ -117,10 +219,13 @@ where
  let mut request = http::Request::new(body);
  *request.method_mut() = method.clone();
  let path = url_parsed.path();
  let query = url_parsed.query();
  *request.uri_mut() = query
    .map(|q| format!("{}?{}", path, q))
    .unwrap_or_else(|| path.to_string())
    .parse()
    .map_err(|_| ConnError::InvalidUrl(url_parsed.clone()))?;
  *request.headers_mut() = header_map;

  if let Some((username, password)) = maybe_authority {

@ -136,86 +241,44 @@ where
  let cancel_handle = CancelHandle::new_rc();
  let cancel_handle_ = cancel_handle.clone();

  let fut =
    async move { sender.send_request(request).or_cancel(cancel_handle_).await };

  let rid = state
    .borrow_mut()
    .resource_table
    .add(NodeHttpClientResponse {
      response: Box::pin(fut),
      url: url.clone(),
    });

  let cancel_handle_rid = state
    .borrow_mut()
    .resource_table
    .add(FetchCancelHandle(cancel_handle));

  Ok(FetchReturn {
    request_rid: rid,
    cancel_handle_rid: Some(cancel_handle_rid),
  })
}
#[derive(Default, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct NodeHttpFetchResponse {
pub status: u16,
pub status_text: String,
pub headers: Vec<(ByteString, ByteString)>,
pub url: String,
pub response_rid: ResourceId,
pub content_length: Option<u64>,
pub remote_addr_ip: Option<String>,
pub remote_addr_port: Option<u16>,
pub error: Option<String>,
}
#[op2(async)]
#[serde]
pub async fn op_node_http_await_response(
  state: Rc<RefCell<OpState>>,
  #[smi] rid: ResourceId,
) -> Result<NodeHttpResponse, ConnError> {
  let resource = state
    .borrow_mut()
    .resource_table
    .take::<NodeHttpClientResponse>(rid)
    .map_err(ConnError::Resource)?;
  let resource = Rc::try_unwrap(resource)
    .map_err(|_| ConnError::Resource(bad_resource("NodeHttpClientResponse")))?;
let res = match request.future.await {
Ok(Ok(res)) => res,
Ok(Err(err)) => {
// We're going to try and rescue the error cause from a stream and return it from this fetch.
// If any error in the chain is a hyper body error, return that as a special result we can use to
// reconstruct an error chain (eg: `new TypeError(..., { cause: new Error(...) })`).
// TODO(mmastrac): it would be a lot easier if we just passed a v8::Global through here instead
if let FetchError::ClientSend(err_src) = &err {
if let Some(client_err) = std::error::Error::source(&err_src.source) {
if let Some(err_src) = client_err.downcast_ref::<hyper::Error>() {
if let Some(err_src) = std::error::Error::source(err_src) {
return Ok(NodeHttpFetchResponse {
error: Some(err_src.to_string()),
..Default::default()
});
}
}
}
}
return Err(err);
}
Err(_) => return Err(FetchError::RequestCanceled),
};
  let res = resource.response.await??;

  let status = res.status();
  let mut res_headers = Vec::new();
  for (key, val) in res.headers().iter() {
    res_headers.push((key.as_str().into(), val.as_bytes().into()));
@ -232,16 +295,22 @@ pub async fn op_node_http_fetch_send(
    (None, None)
  };

  let (parts, body) = res.into_parts();
  let body = body.map_err(deno_core::anyhow::Error::from);
  let body = body.boxed();
  let res = http::Response::from_parts(parts, body);

  let response_rid = state
    .borrow_mut()
    .resource_table
    .add(NodeHttpResponseResource::new(res, content_length));

  Ok(NodeHttpResponse {
    status: status.as_u16(),
    status_text: status.canonical_reason().unwrap_or("").to_string(),
    headers: res_headers,
    url: resource.url,
    response_rid,
    content_length,
    remote_addr_ip,

@ -255,12 +324,12 @@ pub async fn op_node_http_fetch_send(
pub async fn op_node_http_fetch_response_upgrade(
  state: Rc<RefCell<OpState>>,
  #[smi] rid: ResourceId,
) -> Result<ResourceId, ConnError> {
  let raw_response = state
    .borrow_mut()
    .resource_table
    .take::<NodeHttpResponseResource>(rid)
    .map_err(ConnError::Resource)?;
  let raw_response = Rc::try_unwrap(raw_response)
    .expect("Someone is holding onto NodeHttpFetchResponseResource");
@ -283,7 +352,7 @@ pub async fn op_node_http_fetch_response_upgrade(
        }
        read_tx.write_all(&buf[..read]).await?;
      }
      Ok::<_, ConnError>(())
    });

    spawn(async move {
      let mut buf = [0; 1024];

@ -294,7 +363,7 @@ pub async fn op_node_http_fetch_response_upgrade(
        }
        upgraded_tx.write_all(&buf[..read]).await?;
      }
      Ok::<_, ConnError>(())
    });
  }
@ -379,13 +448,13 @@ impl Default for NodeHttpFetchResponseReader {
}

#[derive(Debug)]
pub struct NodeHttpResponseResource {
  pub response_reader: AsyncRefCell<NodeHttpFetchResponseReader>,
  pub cancel: CancelHandle,
  pub size: Option<u64>,
}

impl NodeHttpResponseResource {
  pub fn new(response: http::Response<ResBody>, size: Option<u64>) -> Self {
    Self {
      response_reader: AsyncRefCell::new(NodeHttpFetchResponseReader::Start(

@ -400,14 +469,14 @@ impl NodeHttpFetchResponseResource {
    let reader = self.response_reader.into_inner();
    match reader {
      NodeHttpFetchResponseReader::Start(resp) => {
        hyper::upgrade::on(resp).await
      }
      _ => unreachable!(),
    }
  }
}

impl Resource for NodeHttpResponseResource {
  fn name(&self) -> Cow<str> {
    "fetchResponse".into()
  }
@ -454,9 +523,7 @@ impl Resource for NodeHttpFetchResponseResource {
          // safely call `await` on it without creating a race condition.
          Some(_) => match reader.as_mut().next().await.unwrap() {
            Ok(chunk) => assert!(chunk.is_empty()),
            Err(err) => break Err(type_error(err.to_string())),
          },
          None => break Ok(BufView::empty()),
        }

@ -464,7 +531,7 @@ impl Resource for NodeHttpFetchResponseResource {
      };

      let cancel_handle = RcRef::map(self, |r| &r.cancel);
      fut.try_or_cancel(cancel_handle).await
    })
  }
@ -514,8 +581,9 @@ impl Stream for NodeHttpResourceToBodyAdapter {
      Poll::Ready(res) => match res {
        Ok(buf) if buf.is_empty() => Poll::Ready(None),
        Ok(buf) => {
          let bytes: Bytes = buf.to_vec().into();
          this.1 = Some(this.0.clone().read(64 * 1024));
          Poll::Ready(Some(Ok(bytes)))
        }
        Err(err) => Poll::Ready(Some(Err(err))),
      },
View file
@ -491,19 +491,53 @@ Object.defineProperties(
      return ret;
    },

    /** Right after socket is ready, we need to writeHeader() to setup the request and
     * client. This is invoked by onSocket(). */
    _flushHeaders() {
      if (!this._headerSent) {
        this._headerSent = true;
        this._writeHeader();
      }
    },

    // deno-lint-ignore no-explicit-any
    _send(data: any, encoding?: string | null, callback?: () => void) {
      // if socket is ready, write the data after headers are written.
      // if socket is not ready, buffer data in outputbuffer.
      if (
        this.socket && !this.socket.connecting && this.outputData.length === 0
      ) {
        if (!this._headerSent) {
          this._writeHeader();
          this._headerSent = true;
        }
        return this._writeRaw(data, encoding, callback);
      } else {
        this.outputData.push({ data, encoding, callback });
      }
      return false;
    },

    _writeHeader() {
      throw new ERR_METHOD_NOT_IMPLEMENTED("_writeHeader()");
    },

    _flushBuffer() {
      const outputLength = this.outputData.length;
      if (outputLength <= 0 || !this.socket || !this._bodyWriter) {
        return undefined;
      }

      const { data, encoding, callback } = this.outputData.shift();
      const ret = this._writeRaw(data, encoding, callback);
      if (this.outputData.length > 0) {
        this.once("drain", this._flushBuffer);
      }

      return ret;
    },
    _writeRaw(
      // deno-lint-ignore no-explicit-any
      data: any,

@ -517,11 +551,15 @@ Object.defineProperties(
        data = new Uint8Array(data.buffer, data.byteOffset, data.byteLength);
      }
      if (data.buffer.byteLength > 0) {
        this._bodyWriter.ready.then(() => {
          if (this._bodyWriter.desiredSize > 0) {
            this._bodyWriter.write(data).then(() => {
              callback?.();
              this.emit("drain");
            }).catch((e) => {
              this._requestSendError = e;
            });
          }
        });
      }
      return false;

@ -658,7 +696,6 @@ Object.defineProperties(
      const { header } = state;
      this._header = header + "\r\n";

      // Wait until the first body chunk, or close(), is sent to flush,
      // UNLESS we're sending Expect: 100-continue.
View file
@ -154,6 +154,13 @@ export class TLSSocket extends net.Socket {
    const afterConnect = handle.afterConnect;
    handle.afterConnect = async (req: any, status: number) => {
      options.hostname ??= undefined; // coerce to undefined if null, startTls expects hostname to be undefined

      if (tlssock._isNpmAgent) {
        // skips the TLS handshake for @npmcli/agent as it's handled by
        // onSocket handler of ClientRequest object.
        tlssock.emit("secure");
        tlssock.removeListener("end", onConnectEnd);
        return afterConnect.call(handle, req, status);
      }

      try {
        const conn = await Deno.startTls(handle[kStreamBaseField], options);
View file
@ -5,16 +5,17 @@

import { core, primordials } from "ext:core/mod.js";
import {
  op_node_http_await_response,
  op_node_http_fetch_response_upgrade,
  op_node_http_request_with_conn,
  op_tls_start,
} from "ext:core/ops";
import { TextEncoder } from "ext:deno_web/08_text_encoding.js";
import { setTimeout } from "ext:deno_web/02_timers.js";
import {
  _normalizeArgs,
  createConnection,
  ListenOptions,
  Socket,
} from "node:net";

@ -48,9 +49,10 @@ import { kOutHeaders } from "ext:deno_node/internal/http.ts";
import { _checkIsHttpToken as checkIsHttpToken } from "node:_http_common";
import { Agent, globalAgent } from "node:_http_agent";
import { urlToHttpOptions } from "ext:deno_node/internal/url.ts";
import { kEmptyObject, once } from "ext:deno_node/internal/util.mjs";
import { constants, TCP } from "ext:deno_node/internal_binding/tcp_wrap.ts";
import { kStreamBaseField } from "ext:deno_node/internal_binding/stream_wrap.ts";
import { notImplemented } from "ext:deno_node/_utils.ts";
import {
  connResetException,
  ERR_HTTP_HEADERS_SENT,

@ -62,7 +64,6 @@ import {
} from "ext:deno_node/internal/errors.ts";
import { getTimerDuration } from "ext:deno_node/internal/timers.mjs";
import { serve, upgradeHttpRaw } from "ext:deno_http/00_serve.ts";
import { headersEntries } from "ext:deno_fetch/20_headers.js";
import { timerId } from "ext:deno_web/03_abort_signal.js";
import { clearTimeout as webClearTimeout } from "ext:deno_web/02_timers.js";
@ -148,6 +149,10 @@ class FakeSocket extends EventEmitter {
  }
}

function emitErrorEvent(request, error) {
  request.emit("error", error);
}

/** ClientRequest represents the http(s) request from the client */
class ClientRequest extends OutgoingMessage {
  defaultProtocol = "http:";

@ -160,6 +165,8 @@ class ClientRequest extends OutgoingMessage {
  useChunkedEncodingByDefault: boolean;
  path: string;
  _req: { requestRid: number; cancelHandleRid: number | null } | undefined;
  _encrypted = false;
  socket: Socket;

  constructor(
    input: string | URL,
@ -382,17 +389,11 @@ class ClientRequest extends OutgoingMessage {
      delete optsWithoutSignal.signal;
    }

    if (options!.lookup) {
      notImplemented("ClientRequest.options.lookup");
    }

    if (this.agent) {
      this.agent.addRequest(this, optsWithoutSignal);
    } else {
      // No agent, default to Connection:close.

@ -422,8 +423,7 @@ class ClientRequest extends OutgoingMessage {
        debug("CLIENT use net.createConnection", optsWithoutSignal);
        this.onSocket(createConnection(optsWithoutSignal));
      }
    }
  }
  _writeHeader() {

@ -437,9 +437,6 @@ class ClientRequest extends OutgoingMessage {
      }
    }

    if (
      this.method === "POST" || this.method === "PATCH" || this.method === "PUT"
    ) {
@ -455,17 +452,29 @@ class ClientRequest extends OutgoingMessage {
      this._bodyWriteRid = resourceForReadableStream(readable);
    }

    (async () => {
      try {
        const parsedUrl = new URL(url);
        let baseConnRid =
          this.socket._handle[kStreamBaseField][internalRidSymbol];
        if (this._encrypted) {
          [baseConnRid] = op_tls_start({
            rid: baseConnRid,
            hostname: parsedUrl.hostname,
            caCerts: [],
            alpnProtocols: ["http/1.0", "http/1.1"],
          });
        }
        this._req = await op_node_http_request_with_conn(
          this.method,
          url,
          headers,
          this._bodyWriteRid,
          baseConnRid,
          this._encrypted,
        );
        this._flushBuffer();
        const res = await op_node_http_await_response(this._req!.requestRid);
        if (this._req.cancelHandleRid !== null) {
          core.tryClose(this._req.cancelHandleRid);
        }

@ -473,7 +482,6 @@ class ClientRequest extends OutgoingMessage {
          this._timeout.removeEventListener("abort", this._timeoutCb);
          webClearTimeout(this._timeout[timerId]);
        }
        const incoming = new IncomingMessageForClient(this.socket);
        incoming.req = this;
        this.res = incoming;

@ -512,12 +520,9 @@ class ClientRequest extends OutgoingMessage {
          if (this.method === "CONNECT") {
            throw new Error("not implemented CONNECT");
          }

          const upgradeRid = await op_node_http_fetch_response_upgrade(
            res.responseRid,
          );
          const conn = new UpgradedConn(
            upgradeRid,
            {

@ -543,13 +548,11 @@ class ClientRequest extends OutgoingMessage {
          this._closed = true;
          this.emit("close");
        } else {
          incoming._bodyRid = res.responseRid;
          this.emit("response", incoming);
        }
      } catch (err) {
        if (this._req && this._req.cancelHandleRid !== null) {
          core.tryClose(this._req.cancelHandleRid);
        }
@ -592,11 +595,54 @@ class ClientRequest extends OutgoingMessage {
    return undefined;
  }

  onSocket(socket, err) {
    nextTick(() => {
      // deno-lint-ignore no-this-alias
      const req = this;
      if (req.destroyed || err) {
        req.destroyed = true;

        // deno-lint-ignore no-inner-declarations
        function _destroy(req, err) {
          if (!req.aborted && !err) {
            err = new connResetException("socket hang up");
          }
          if (err) {
            emitErrorEvent(req, err);
          }
          req._closed = true;
          req.emit("close");
        }

        if (socket) {
          if (!err && req.agent && !socket.destroyed) {
            socket.emit("free");
          } else {
            finished(socket.destroy(err || req[kError]), (er) => {
              if (er?.code === "ERR_STREAM_PREMATURE_CLOSE") {
                er = null;
              }
              _destroy(req, er || err);
            });
            return;
          }
        }

        _destroy(req, err || req[kError]);
      } else {
        // Note: this code is specific to deno to initiate a request.
        const onConnect = () => {
          // Flush the internal buffers once socket is ready.
          this._flushHeaders();
        };
        this.socket = socket;
        this.emit("socket", socket);
        if (socket.readyState === "opening") {
          socket.on("connect", onConnect);
        } else {
          onConnect();
        }
      }
    });
  }
@ -618,14 +664,20 @@ class ClientRequest extends OutgoingMessage {
    if (chunk) {
      this.write_(chunk, encoding, null, true);
    } else if (!this._headerSent) {
      if (
        (this.socket && !this.socket.connecting) || // socket is not connecting, or
        (!this.socket && this.outputData.length === 0) // no data to send
      ) {
        this._contentLength = 0;
        this._implicitHeader();
        this._send("", "latin1");
      }
    }
    const finish = async () => {
      try {
        await this._bodyWriter.ready;
        await this._bodyWriter?.close();
      } catch {
        // The readable stream resource is dropped right after
        // read is complete closing the writable stream resource.
        // If we try to close the writer again, it will result in an

@ -633,10 +685,20 @@ class ClientRequest extends OutgoingMessage {
      }
      try {
        cb?.();
      } catch {
        //
      }
    };

    if (this.socket && this._bodyWriter) {
      finish();
    } else {
      this.on("drain", () => {
        if (this.outputData.length === 0) {
          finish();
        }
      });
    }

    return this;
  }
@ -658,11 +720,6 @@ class ClientRequest extends OutgoingMessage {
    }
    this.destroyed = true;

    // Request might be closed before we actually made it
    if (this._req !== undefined && this._req.cancelHandleRid !== null) {
      core.tryClose(this._req.cancelHandleRid);
View file
@ -112,7 +112,7 @@ export const globalAgent = new Agent({
/** HttpsClientRequest class loosely follows http.ClientRequest class API. */
class HttpsClientRequest extends ClientRequest {
  override _encrypted = true;
  override defaultProtocol = "https:";
  override _getClient(): Deno.HttpClient | undefined {
    if (caCerts === null) {
View file
@ -36,7 +36,6 @@ import {
} from "ext:deno_node/internal_binding/async_wrap.ts";
import { ares_strerror } from "ext:deno_node/internal_binding/ares.ts";
import { notImplemented } from "ext:deno_node/_utils.ts";

interface LookupAddress {
  address: string;

@ -68,7 +67,7 @@ export function getaddrinfo(
  _hints: number,
  verbatim: boolean,
): number {
  const addresses: string[] = [];

  // TODO(cmorten): use hints
  // REF: https://nodejs.org/api/dns.html#dns_supported_getaddrinfo_flags
@ -107,13 +106,6 @@ export function getaddrinfo(
    });
  }

  req.oncomplete(error, addresses);
})();
View file
@ -986,16 +986,20 @@ function _lookupAndConnect(
      } else {
        self._unrefTimer();

        defaultTriggerAsyncIdScope(self[asyncIdSymbol], nextTick, () => {
          if (self.connecting) {
            defaultTriggerAsyncIdScope(
              self[asyncIdSymbol],
              _internalConnect,
              self,
              ip,
              port,
              addressType,
              localAddress,
              localPort,
            );
          }
        });
      }
    },
  );
@ -1197,6 +1201,9 @@ export class Socket extends Duplex {
  _host: string | null = null;
  // deno-lint-ignore no-explicit-any
  _parent: any = null;
  // The flag for detecting if it's called in @npmcli/agent
  // See discussions in https://github.com/denoland/deno/pull/25470 for more details.
  _isNpmAgent = false;
  autoSelectFamilyAttemptedAddresses: AddressInfo[] | undefined = undefined;

  constructor(options: SocketOptions | number) {

@ -1217,6 +1224,19 @@ export class Socket extends Duplex {
    super(options);

    // Note: If the socket is created from @npmcli/agent, the 'socket' event
    // on ClientRequest object happens after 'connect' event on Socket object.
    // That swaps the sequence of op_node_http_request_with_conn() call and
    // initial socket read. That causes op_node_http_request_with_conn() not
    // working.
    // To avoid the above situation, we detect the socket created from
    // @npmcli/agent and pause the socket (and also skips the startTls call
    // if it's TLSSocket)
    this._isNpmAgent = new Error().stack?.includes("@npmcli/agent") || false;
    if (this._isNpmAgent) {
      this.pause();
    }

    if (options.handle) {
      this._handle = options.handle;
      this[asyncIdSymbol] = _getNewAsyncId(this._handle);
View file
@ -97,13 +97,28 @@ deno_core::extension!(
);

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct OtelRuntimeConfig {
  pub runtime_name: Cow<'static, str>,
  pub runtime_version: Cow<'static, str>,
}

#[derive(Default, Debug, Clone, Serialize, Deserialize)]
pub struct OtelConfig {
  pub tracing_enabled: bool,
  pub console: OtelConsoleConfig,
  pub deterministic: bool,
}

impl OtelConfig {
  pub fn as_v8(&self) -> Box<[u8]> {
    Box::new([
      self.tracing_enabled as u8,
      self.console as u8,
      self.deterministic as u8,
    ])
  }
}
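The three bytes produced by as_v8 are consumed positionally by the JS bootstrap further below (tracing flag, console mode, deterministic flag). A minimal standalone sketch of the same positional encoding, using simplified stand-in types rather than the crate's own:

#[allow(dead_code)]
#[derive(Clone, Copy)]
enum ConsoleConfig {
  Ignore = 0,
  Capture = 1,
  Replace = 2,
}

// Same layout as the `as_v8` method above: [tracing_enabled, console, deterministic].
fn as_v8(tracing_enabled: bool, console: ConsoleConfig, deterministic: bool) -> Box<[u8]> {
  Box::new([tracing_enabled as u8, console as u8, deterministic as u8])
}

fn main() {
  assert_eq!(&*as_v8(true, ConsoleConfig::Capture, false), &[1u8, 1, 0][..]);
}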
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
#[repr(u8)]
pub enum OtelConsoleConfig {

@ -112,14 +127,9 @@ pub enum OtelConsoleConfig {
  Replace = 2,
}

impl Default for OtelConsoleConfig {
  fn default() -> Self {
    Self::Ignore
  }
}
@ -411,16 +421,14 @@ static BUILT_IN_INSTRUMENTATION_SCOPE: OnceCell<
  opentelemetry::InstrumentationScope,
> = OnceCell::new();

pub fn init(config: OtelRuntimeConfig) -> anyhow::Result<()> {
  // Parse the `OTEL_EXPORTER_OTLP_PROTOCOL` variable. The opentelemetry_*
  // crates don't do this automatically.
  // TODO(piscisaureus): enable GRPC support.
  let protocol = match env::var("OTEL_EXPORTER_OTLP_PROTOCOL").as_deref() {
    Ok("http/protobuf") => Protocol::HttpBinary,
    Ok("http/json") => Protocol::HttpJson,
    Ok("") | Err(env::VarError::NotPresent) => Protocol::HttpBinary,
    Ok(protocol) => {
      return Err(anyhow!(
        "Env var OTEL_EXPORTER_OTLP_PROTOCOL specifies an unsupported protocol: {}",

View file

@ -950,15 +950,15 @@ const otelConsoleConfig = {
}; };
export function bootstrap( export function bootstrap(
config: [] | [ config: [
0 | 1,
typeof otelConsoleConfig[keyof typeof otelConsoleConfig], typeof otelConsoleConfig[keyof typeof otelConsoleConfig],
number, 0 | 1,
], ],
): void { ): void {
if (config.length === 0) return; const { 0: tracingEnabled, 1: consoleConfig, 2: deterministic } = config;
const { 0: consoleConfig, 1: deterministic } = config;
TRACING_ENABLED = true; TRACING_ENABLED = tracingEnabled === 1;
DETERMINISTIC = deterministic === 1; DETERMINISTIC = deterministic === 1;
switch (consoleConfig) { switch (consoleConfig) {
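To make the new wire format concrete: the Rust side serializes [tracing_enabled, console, deterministic] into three bytes via as_v8, and this bootstrap destructures them positionally. A rough illustration with made-up values (Replace = 2 comes from the Rust enum shown earlier):

// Illustration only; the concrete values below are hypothetical.
const config: [0 | 1, number, 0 | 1] = [1, 2, 0];
const { 0: tracingEnabled, 1: consoleConfig, 2: deterministic } = config;
// tracingEnabled === 1 -> tracing is enabled
// consoleConfig === 2  -> console output is replaced (OtelConsoleConfig::Replace)
// deterministic === 0  -> deterministic mode stays off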

View file

@ -355,16 +355,16 @@ impl<
}) })
.map_err(|err| { .map_err(|err| {
match err.into_kind() { match err.into_kind() {
ResolveReqWithSubPathErrorKind::MissingPackageNodeModulesFolder( ResolveReqWithSubPathErrorKind::MissingPackageNodeModulesFolder(
err, err,
) => err.into(), ) => err.into(),
ResolveReqWithSubPathErrorKind::ResolvePkgFolderFromDenoReq( ResolveReqWithSubPathErrorKind::ResolvePkgFolderFromDenoReq(
err, err,
) => err.into(), ) => err.into(),
ResolveReqWithSubPathErrorKind::PackageSubpathResolve(err) => { ResolveReqWithSubPathErrorKind::PackageSubpathResolve(err) => {
err.into() err.into()
}
} }
}
}); });
} }
} }

View file

@ -205,9 +205,9 @@ impl<Fs: DenoResolverFs, TEnv: NodeResolverEnv> ByonmNpmResolver<Fs, TEnv> {
} }
// attempt to resolve the npm specifier from the referrer's package.json, // attempt to resolve the npm specifier from the referrer's package.json,
if let Ok(file_path) = url_to_file_path(referrer) { let maybe_referrer_path = url_to_file_path(referrer).ok();
let mut current_path = file_path.as_path(); if let Some(file_path) = maybe_referrer_path {
while let Some(dir_path) = current_path.parent() { for dir_path in file_path.as_path().ancestors().skip(1) {
let package_json_path = dir_path.join("package.json"); let package_json_path = dir_path.join("package.json");
if let Some(pkg_json) = self.load_pkg_json(&package_json_path)? { if let Some(pkg_json) = self.load_pkg_json(&package_json_path)? {
if let Some(alias) = if let Some(alias) =
@ -216,11 +216,10 @@ impl<Fs: DenoResolverFs, TEnv: NodeResolverEnv> ByonmNpmResolver<Fs, TEnv> {
return Ok(Some((pkg_json, alias))); return Ok(Some((pkg_json, alias)));
} }
} }
current_path = dir_path;
} }
} }
// otherwise, fall back to the project's package.json // fall back to the project's package.json
if let Some(root_node_modules_dir) = &self.root_node_modules_dir { if let Some(root_node_modules_dir) = &self.root_node_modules_dir {
let root_pkg_json_path = let root_pkg_json_path =
root_node_modules_dir.parent().unwrap().join("package.json"); root_node_modules_dir.parent().unwrap().join("package.json");
@ -232,6 +231,58 @@ impl<Fs: DenoResolverFs, TEnv: NodeResolverEnv> ByonmNpmResolver<Fs, TEnv> {
} }
} }
// now try to resolve based on the closest node_modules directory
let maybe_referrer_path = url_to_file_path(referrer).ok();
let search_node_modules = |node_modules: &Path| {
if req.version_req.tag().is_some() {
return None;
}
let pkg_folder = node_modules.join(&req.name);
if let Ok(Some(dep_pkg_json)) =
self.load_pkg_json(&pkg_folder.join("package.json"))
{
if dep_pkg_json.name.as_ref() == Some(&req.name) {
let matches_req = dep_pkg_json
.version
.as_ref()
.and_then(|v| Version::parse_from_npm(v).ok())
.map(|version| req.version_req.matches(&version))
.unwrap_or(true);
if matches_req {
return Some((dep_pkg_json, req.name.clone()));
}
}
}
None
};
if let Some(file_path) = &maybe_referrer_path {
for dir_path in file_path.as_path().ancestors().skip(1) {
if let Some(result) =
search_node_modules(&dir_path.join("node_modules"))
{
return Ok(Some(result));
}
}
}
// and finally check the root node_modules directory
if let Some(root_node_modules_dir) = &self.root_node_modules_dir {
let already_searched = maybe_referrer_path
.as_ref()
.and_then(|referrer_path| {
root_node_modules_dir
.parent()
.map(|root_dir| referrer_path.starts_with(root_dir))
})
.unwrap_or(false);
if !already_searched {
if let Some(result) = search_node_modules(root_node_modules_dir) {
return Ok(Some(result));
}
}
}
Ok(None) Ok(None)
} }
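For readers who prefer TypeScript, here is a hedged sketch of the resolution order the Rust code above implements: walk the referrer's ancestor directories, checking each node_modules for a package whose package.json name (and, when present, version) matches the request, then fall back to the root node_modules unless the walk already covered it. The helper names are invented for the sketch.

import * as fs from "node:fs";
import * as path from "node:path";

// Hypothetical helper; mirrors the search_node_modules closure above.
function searchNodeModules(nodeModules: string, name: string): string | undefined {
  const pkgDir = path.join(nodeModules, name);
  const pkgJsonPath = path.join(pkgDir, "package.json");
  if (!fs.existsSync(pkgJsonPath)) return undefined;
  const pkg = JSON.parse(fs.readFileSync(pkgJsonPath, "utf8"));
  // The real code also verifies the version against the requested semver range.
  return pkg.name === name ? pkgDir : undefined;
}

// Hypothetical helper; mirrors the ancestor walk plus the root fallback.
function resolvePackageFolder(
  referrerFile: string,
  name: string,
  rootNodeModules?: string,
): string | undefined {
  let dir = path.dirname(referrerFile);
  while (true) {
    const found = searchNodeModules(path.join(dir, "node_modules"), name);
    if (found) return found;
    const parent = path.dirname(dir);
    if (parent === dir) break; // reached the filesystem root
    dir = parent;
  }
  if (rootNodeModules && !referrerFile.startsWith(path.dirname(rootNodeModules))) {
    return searchNodeModules(rootNodeModules, name);
  }
  return undefined;
}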

View file

@ -712,7 +712,6 @@ fn get_fetch_error(error: &FetchError) -> &'static str {
FetchError::ClientSend(_) => "TypeError", FetchError::ClientSend(_) => "TypeError",
FetchError::RequestBuilderHook(_) => "TypeError", FetchError::RequestBuilderHook(_) => "TypeError",
FetchError::Io(e) => get_io_error_class(e), FetchError::Io(e) => get_io_error_class(e),
FetchError::Hyper(e) => get_hyper_error_class(e),
} }
} }
@ -1083,6 +1082,7 @@ mod node {
pub use deno_node::ops::crypto::SignEd25519Error; pub use deno_node::ops::crypto::SignEd25519Error;
pub use deno_node::ops::crypto::VerifyEd25519Error; pub use deno_node::ops::crypto::VerifyEd25519Error;
pub use deno_node::ops::fs::FsError; pub use deno_node::ops::fs::FsError;
pub use deno_node::ops::http::ConnError;
pub use deno_node::ops::http2::Http2Error; pub use deno_node::ops::http2::Http2Error;
pub use deno_node::ops::idna::IdnaError; pub use deno_node::ops::idna::IdnaError;
pub use deno_node::ops::ipc::IpcError; pub use deno_node::ops::ipc::IpcError;
@ -1538,6 +1538,24 @@ mod node {
pub fn get_verify_ed25519_error(_: &VerifyEd25519Error) -> &'static str { pub fn get_verify_ed25519_error(_: &VerifyEd25519Error) -> &'static str {
"TypeError" "TypeError"
} }
pub fn get_conn_error(e: &ConnError) -> &'static str {
match e {
ConnError::Resource(e) => get_error_class_name(e).unwrap_or("Error"),
ConnError::Permission(e) => get_permission_check_error_class(e),
ConnError::InvalidUrl(_) => "TypeError",
ConnError::InvalidHeaderName(_) => "TypeError",
ConnError::InvalidHeaderValue(_) => "TypeError",
ConnError::Url(e) => get_url_parse_error_class(e),
ConnError::Method(_) => "TypeError",
ConnError::Io(e) => get_io_error_class(e),
ConnError::Hyper(e) => super::get_hyper_error_class(e),
ConnError::TlsStreamBusy => "Busy",
ConnError::TcpStreamBusy => "Busy",
ConnError::ReuniteTcp(_) => "Error",
ConnError::Canceled(_) => "Error",
}
}
} }
fn get_os_error(error: &OsError) -> &'static str { fn get_os_error(error: &OsError) -> &'static str {
@ -1730,6 +1748,10 @@ pub fn get_error_class_name(e: &AnyError) -> Option<&'static str> {
e.downcast_ref::<node::VerifyEd25519Error>() e.downcast_ref::<node::VerifyEd25519Error>()
.map(node::get_verify_ed25519_error) .map(node::get_verify_ed25519_error)
}) })
.or_else(|| {
e.downcast_ref::<node::ConnError>()
.map(node::get_conn_error)
})
.or_else(|| e.downcast_ref::<NApiError>().map(get_napi_error_class)) .or_else(|| e.downcast_ref::<NApiError>().map(get_napi_error_class))
.or_else(|| e.downcast_ref::<WebError>().map(get_web_error_class)) .or_else(|| e.downcast_ref::<WebError>().map(get_web_error_class))
.or_else(|| { .or_else(|| {

View file

@ -119,8 +119,7 @@ pub struct BootstrapOptions {
// Used by `deno serve` // Used by `deno serve`
pub serve_port: Option<u16>, pub serve_port: Option<u16>,
pub serve_host: Option<String>, pub serve_host: Option<String>,
// OpenTelemetry output options. If `None`, OpenTelemetry is disabled. pub otel_config: OtelConfig,
pub otel_config: Option<OtelConfig>,
} }
impl Default for BootstrapOptions { impl Default for BootstrapOptions {
@ -155,7 +154,7 @@ impl Default for BootstrapOptions {
mode: WorkerExecutionMode::None, mode: WorkerExecutionMode::None,
serve_port: Default::default(), serve_port: Default::default(),
serve_host: Default::default(), serve_host: Default::default(),
otel_config: None, otel_config: Default::default(),
} }
} }
} }
@ -225,11 +224,7 @@ impl BootstrapOptions {
self.serve_host.as_deref(), self.serve_host.as_deref(),
serve_is_main, serve_is_main,
serve_worker_count, serve_worker_count,
if let Some(otel_config) = self.otel_config.as_ref() { self.otel_config.as_v8(),
Box::new([otel_config.console as u8, otel_config.deterministic as u8])
} else {
Box::new([])
},
); );
bootstrap.serialize(ser).unwrap() bootstrap.serialize(ser).unwrap()

View file

@ -846,21 +846,6 @@ testing[WILDCARD]this
.assert_matches_text("2\n"); .assert_matches_text("2\n");
} }
#[test]
fn compile_npm_file_system() {
run_npm_bin_compile_test(RunNpmBinCompileOptions {
input_specifier: "compile/npm_fs/main.ts",
copy_temp_dir: Some("compile/npm_fs"),
compile_args: vec!["-A"],
run_args: vec![],
output_file: "compile/npm_fs/main.out",
node_modules_local: true,
input_name: Some("binary"),
expected_name: "binary",
exit_code: 0,
});
}
#[test] #[test]
fn compile_npm_bin_esm() { fn compile_npm_bin_esm() {
run_npm_bin_compile_test(RunNpmBinCompileOptions { run_npm_bin_compile_test(RunNpmBinCompileOptions {
@ -906,21 +891,6 @@ fn compile_npm_cowsay_main() {
}); });
} }
#[test]
fn compile_npm_vfs_implicit_read_permissions() {
run_npm_bin_compile_test(RunNpmBinCompileOptions {
input_specifier: "compile/vfs_implicit_read_permission/main.ts",
copy_temp_dir: Some("compile/vfs_implicit_read_permission"),
compile_args: vec![],
run_args: vec![],
output_file: "compile/vfs_implicit_read_permission/main.out",
node_modules_local: false,
input_name: Some("binary"),
expected_name: "binary",
exit_code: 0,
});
}
#[test] #[test]
fn compile_npm_no_permissions() { fn compile_npm_no_permissions() {
run_npm_bin_compile_test(RunNpmBinCompileOptions { run_npm_bin_compile_test(RunNpmBinCompileOptions {
@ -1045,6 +1015,7 @@ fn compile_node_modules_symlink_outside() {
let symlink_target_dir = temp_dir.path().join("some_folder"); let symlink_target_dir = temp_dir.path().join("some_folder");
project_dir.join("node_modules").create_dir_all(); project_dir.join("node_modules").create_dir_all();
symlink_target_dir.create_dir_all(); symlink_target_dir.create_dir_all();
symlink_target_dir.join("file.txt").write("5");
let symlink_target_file = temp_dir.path().join("target.txt"); let symlink_target_file = temp_dir.path().join("target.txt");
symlink_target_file.write("5"); symlink_target_file.write("5");
let symlink_dir = project_dir.join("node_modules").join("symlink_dir"); let symlink_dir = project_dir.join("node_modules").join("symlink_dir");

View file

@ -565,9 +565,7 @@
"test-handle-wrap-close-abort.js", "test-handle-wrap-close-abort.js",
"test-http-abort-before-end.js", "test-http-abort-before-end.js",
"test-http-addrequest-localaddress.js", "test-http-addrequest-localaddress.js",
"test-http-agent-false.js",
"test-http-agent-getname.js", "test-http-agent-getname.js",
"test-http-agent-keepalive-delay.js",
"test-http-agent-maxtotalsockets.js", "test-http-agent-maxtotalsockets.js",
"test-http-agent-no-protocol.js", "test-http-agent-no-protocol.js",
"test-http-agent-null.js", "test-http-agent-null.js",
@ -590,7 +588,6 @@
"test-http-client-race.js", "test-http-client-race.js",
"test-http-client-read-in-error.js", "test-http-client-read-in-error.js",
"test-http-client-reject-unexpected-agent.js", "test-http-client-reject-unexpected-agent.js",
"test-http-client-timeout-connect-listener.js",
"test-http-client-timeout-with-data.js", "test-http-client-timeout-with-data.js",
"test-http-client-unescaped-path.js", "test-http-client-unescaped-path.js",
"test-http-client-upload-buf.js", "test-http-client-upload-buf.js",
@ -604,7 +601,6 @@
"test-http-date-header.js", "test-http-date-header.js",
"test-http-decoded-auth.js", "test-http-decoded-auth.js",
"test-http-default-encoding.js", "test-http-default-encoding.js",
"test-http-dump-req-when-res-ends.js",
"test-http-end-throw-socket-handling.js", "test-http-end-throw-socket-handling.js",
"test-http-eof-on-connect.js", "test-http-eof-on-connect.js",
"test-http-extra-response.js", "test-http-extra-response.js",
@ -622,7 +618,6 @@
"test-http-hex-write.js", "test-http-hex-write.js",
"test-http-highwatermark.js", "test-http-highwatermark.js",
"test-http-host-headers.js", "test-http-host-headers.js",
"test-http-hostname-typechecking.js",
"test-http-incoming-message-destroy.js", "test-http-incoming-message-destroy.js",
"test-http-invalid-path-chars.js", "test-http-invalid-path-chars.js",
"test-http-invalidheaderfield.js", "test-http-invalidheaderfield.js",
@ -1292,10 +1287,7 @@
"test-buffer-creation-regression.js", "test-buffer-creation-regression.js",
"test-child-process-exit.js", "test-child-process-exit.js",
"test-http-server-keep-alive-timeout-slow-server.js", "test-http-server-keep-alive-timeout-slow-server.js",
"test-net-better-error-messages-port.js",
"test-net-connect-handle-econnrefused.js",
"test-net-connect-local-error.js", "test-net-connect-local-error.js",
"test-net-reconnect-error.js",
"test-net-response-size.js", "test-net-response-size.js",
"test-net-server-bind.js", "test-net-server-bind.js",
"test-tls-lookup.js", "test-tls-lookup.js",

View file

@ -1,7 +1,7 @@
<!-- deno-fmt-ignore-file --> <!-- deno-fmt-ignore-file -->
# Remaining Node Tests # Remaining Node Tests
1163 tests out of 3681 have been ported from Node 20.11.1 (31.59% ported, 68.92% remaining). 1155 tests out of 3681 have been ported from Node 20.11.1 (31.38% ported, 69.14% remaining).
NOTE: This file should not be manually edited. Please edit `tests/node_compat/config.json` and run `deno task setup` in `tests/node_compat/runner` dir instead. NOTE: This file should not be manually edited. Please edit `tests/node_compat/config.json` and run `deno task setup` in `tests/node_compat/runner` dir instead.
@ -792,6 +792,8 @@ NOTE: This file should not be manually edited. Please edit `tests/node_compat/co
- [parallel/test-http-agent-destroyed-socket.js](https://github.com/nodejs/node/tree/v20.11.1/test/parallel/test-http-agent-destroyed-socket.js) - [parallel/test-http-agent-destroyed-socket.js](https://github.com/nodejs/node/tree/v20.11.1/test/parallel/test-http-agent-destroyed-socket.js)
- [parallel/test-http-agent-domain-reused-gc.js](https://github.com/nodejs/node/tree/v20.11.1/test/parallel/test-http-agent-domain-reused-gc.js) - [parallel/test-http-agent-domain-reused-gc.js](https://github.com/nodejs/node/tree/v20.11.1/test/parallel/test-http-agent-domain-reused-gc.js)
- [parallel/test-http-agent-error-on-idle.js](https://github.com/nodejs/node/tree/v20.11.1/test/parallel/test-http-agent-error-on-idle.js) - [parallel/test-http-agent-error-on-idle.js](https://github.com/nodejs/node/tree/v20.11.1/test/parallel/test-http-agent-error-on-idle.js)
- [parallel/test-http-agent-false.js](https://github.com/nodejs/node/tree/v20.11.1/test/parallel/test-http-agent-false.js)
- [parallel/test-http-agent-keepalive-delay.js](https://github.com/nodejs/node/tree/v20.11.1/test/parallel/test-http-agent-keepalive-delay.js)
- [parallel/test-http-agent-keepalive.js](https://github.com/nodejs/node/tree/v20.11.1/test/parallel/test-http-agent-keepalive.js) - [parallel/test-http-agent-keepalive.js](https://github.com/nodejs/node/tree/v20.11.1/test/parallel/test-http-agent-keepalive.js)
- [parallel/test-http-agent-maxsockets-respected.js](https://github.com/nodejs/node/tree/v20.11.1/test/parallel/test-http-agent-maxsockets-respected.js) - [parallel/test-http-agent-maxsockets-respected.js](https://github.com/nodejs/node/tree/v20.11.1/test/parallel/test-http-agent-maxsockets-respected.js)
- [parallel/test-http-agent-maxsockets.js](https://github.com/nodejs/node/tree/v20.11.1/test/parallel/test-http-agent-maxsockets.js) - [parallel/test-http-agent-maxsockets.js](https://github.com/nodejs/node/tree/v20.11.1/test/parallel/test-http-agent-maxsockets.js)
@ -848,6 +850,7 @@ NOTE: This file should not be manually edited. Please edit `tests/node_compat/co
- [parallel/test-http-client-set-timeout.js](https://github.com/nodejs/node/tree/v20.11.1/test/parallel/test-http-client-set-timeout.js) - [parallel/test-http-client-set-timeout.js](https://github.com/nodejs/node/tree/v20.11.1/test/parallel/test-http-client-set-timeout.js)
- [parallel/test-http-client-spurious-aborted.js](https://github.com/nodejs/node/tree/v20.11.1/test/parallel/test-http-client-spurious-aborted.js) - [parallel/test-http-client-spurious-aborted.js](https://github.com/nodejs/node/tree/v20.11.1/test/parallel/test-http-client-spurious-aborted.js)
- [parallel/test-http-client-timeout-agent.js](https://github.com/nodejs/node/tree/v20.11.1/test/parallel/test-http-client-timeout-agent.js) - [parallel/test-http-client-timeout-agent.js](https://github.com/nodejs/node/tree/v20.11.1/test/parallel/test-http-client-timeout-agent.js)
- [parallel/test-http-client-timeout-connect-listener.js](https://github.com/nodejs/node/tree/v20.11.1/test/parallel/test-http-client-timeout-connect-listener.js)
- [parallel/test-http-client-timeout-event.js](https://github.com/nodejs/node/tree/v20.11.1/test/parallel/test-http-client-timeout-event.js) - [parallel/test-http-client-timeout-event.js](https://github.com/nodejs/node/tree/v20.11.1/test/parallel/test-http-client-timeout-event.js)
- [parallel/test-http-client-timeout-on-connect.js](https://github.com/nodejs/node/tree/v20.11.1/test/parallel/test-http-client-timeout-on-connect.js) - [parallel/test-http-client-timeout-on-connect.js](https://github.com/nodejs/node/tree/v20.11.1/test/parallel/test-http-client-timeout-on-connect.js)
- [parallel/test-http-client-timeout-option-listeners.js](https://github.com/nodejs/node/tree/v20.11.1/test/parallel/test-http-client-timeout-option-listeners.js) - [parallel/test-http-client-timeout-option-listeners.js](https://github.com/nodejs/node/tree/v20.11.1/test/parallel/test-http-client-timeout-option-listeners.js)
@ -865,6 +868,7 @@ NOTE: This file should not be manually edited. Please edit `tests/node_compat/co
- [parallel/test-http-destroyed-socket-write2.js](https://github.com/nodejs/node/tree/v20.11.1/test/parallel/test-http-destroyed-socket-write2.js) - [parallel/test-http-destroyed-socket-write2.js](https://github.com/nodejs/node/tree/v20.11.1/test/parallel/test-http-destroyed-socket-write2.js)
- [parallel/test-http-dns-error.js](https://github.com/nodejs/node/tree/v20.11.1/test/parallel/test-http-dns-error.js) - [parallel/test-http-dns-error.js](https://github.com/nodejs/node/tree/v20.11.1/test/parallel/test-http-dns-error.js)
- [parallel/test-http-double-content-length.js](https://github.com/nodejs/node/tree/v20.11.1/test/parallel/test-http-double-content-length.js) - [parallel/test-http-double-content-length.js](https://github.com/nodejs/node/tree/v20.11.1/test/parallel/test-http-double-content-length.js)
- [parallel/test-http-dump-req-when-res-ends.js](https://github.com/nodejs/node/tree/v20.11.1/test/parallel/test-http-dump-req-when-res-ends.js)
- [parallel/test-http-early-hints-invalid-argument.js](https://github.com/nodejs/node/tree/v20.11.1/test/parallel/test-http-early-hints-invalid-argument.js) - [parallel/test-http-early-hints-invalid-argument.js](https://github.com/nodejs/node/tree/v20.11.1/test/parallel/test-http-early-hints-invalid-argument.js)
- [parallel/test-http-early-hints.js](https://github.com/nodejs/node/tree/v20.11.1/test/parallel/test-http-early-hints.js) - [parallel/test-http-early-hints.js](https://github.com/nodejs/node/tree/v20.11.1/test/parallel/test-http-early-hints.js)
- [parallel/test-http-exceptions.js](https://github.com/nodejs/node/tree/v20.11.1/test/parallel/test-http-exceptions.js) - [parallel/test-http-exceptions.js](https://github.com/nodejs/node/tree/v20.11.1/test/parallel/test-http-exceptions.js)
@ -876,6 +880,7 @@ NOTE: This file should not be manually edited. Please edit `tests/node_compat/co
- [parallel/test-http-header-badrequest.js](https://github.com/nodejs/node/tree/v20.11.1/test/parallel/test-http-header-badrequest.js) - [parallel/test-http-header-badrequest.js](https://github.com/nodejs/node/tree/v20.11.1/test/parallel/test-http-header-badrequest.js)
- [parallel/test-http-header-overflow.js](https://github.com/nodejs/node/tree/v20.11.1/test/parallel/test-http-header-overflow.js) - [parallel/test-http-header-overflow.js](https://github.com/nodejs/node/tree/v20.11.1/test/parallel/test-http-header-overflow.js)
- [parallel/test-http-host-header-ipv6-fail.js](https://github.com/nodejs/node/tree/v20.11.1/test/parallel/test-http-host-header-ipv6-fail.js) - [parallel/test-http-host-header-ipv6-fail.js](https://github.com/nodejs/node/tree/v20.11.1/test/parallel/test-http-host-header-ipv6-fail.js)
- [parallel/test-http-hostname-typechecking.js](https://github.com/nodejs/node/tree/v20.11.1/test/parallel/test-http-hostname-typechecking.js)
- [parallel/test-http-incoming-matchKnownFields.js](https://github.com/nodejs/node/tree/v20.11.1/test/parallel/test-http-incoming-matchKnownFields.js) - [parallel/test-http-incoming-matchKnownFields.js](https://github.com/nodejs/node/tree/v20.11.1/test/parallel/test-http-incoming-matchKnownFields.js)
- [parallel/test-http-incoming-message-connection-setter.js](https://github.com/nodejs/node/tree/v20.11.1/test/parallel/test-http-incoming-message-connection-setter.js) - [parallel/test-http-incoming-message-connection-setter.js](https://github.com/nodejs/node/tree/v20.11.1/test/parallel/test-http-incoming-message-connection-setter.js)
- [parallel/test-http-incoming-message-options.js](https://github.com/nodejs/node/tree/v20.11.1/test/parallel/test-http-incoming-message-options.js) - [parallel/test-http-incoming-message-options.js](https://github.com/nodejs/node/tree/v20.11.1/test/parallel/test-http-incoming-message-options.js)
@ -2508,9 +2513,12 @@ NOTE: This file should not be manually edited. Please edit `tests/node_compat/co
- [sequential/test-inspector-port-cluster.js](https://github.com/nodejs/node/tree/v20.11.1/test/sequential/test-inspector-port-cluster.js) - [sequential/test-inspector-port-cluster.js](https://github.com/nodejs/node/tree/v20.11.1/test/sequential/test-inspector-port-cluster.js)
- [sequential/test-module-loading.js](https://github.com/nodejs/node/tree/v20.11.1/test/sequential/test-module-loading.js) - [sequential/test-module-loading.js](https://github.com/nodejs/node/tree/v20.11.1/test/sequential/test-module-loading.js)
- [sequential/test-net-GH-5504.js](https://github.com/nodejs/node/tree/v20.11.1/test/sequential/test-net-GH-5504.js) - [sequential/test-net-GH-5504.js](https://github.com/nodejs/node/tree/v20.11.1/test/sequential/test-net-GH-5504.js)
- [sequential/test-net-better-error-messages-port.js](https://github.com/nodejs/node/tree/v20.11.1/test/sequential/test-net-better-error-messages-port.js)
- [sequential/test-net-connect-econnrefused.js](https://github.com/nodejs/node/tree/v20.11.1/test/sequential/test-net-connect-econnrefused.js) - [sequential/test-net-connect-econnrefused.js](https://github.com/nodejs/node/tree/v20.11.1/test/sequential/test-net-connect-econnrefused.js)
- [sequential/test-net-connect-handle-econnrefused.js](https://github.com/nodejs/node/tree/v20.11.1/test/sequential/test-net-connect-handle-econnrefused.js)
- [sequential/test-net-listen-shared-ports.js](https://github.com/nodejs/node/tree/v20.11.1/test/sequential/test-net-listen-shared-ports.js) - [sequential/test-net-listen-shared-ports.js](https://github.com/nodejs/node/tree/v20.11.1/test/sequential/test-net-listen-shared-ports.js)
- [sequential/test-net-localport.js](https://github.com/nodejs/node/tree/v20.11.1/test/sequential/test-net-localport.js) - [sequential/test-net-localport.js](https://github.com/nodejs/node/tree/v20.11.1/test/sequential/test-net-localport.js)
- [sequential/test-net-reconnect-error.js](https://github.com/nodejs/node/tree/v20.11.1/test/sequential/test-net-reconnect-error.js)
- [sequential/test-net-server-address.js](https://github.com/nodejs/node/tree/v20.11.1/test/sequential/test-net-server-address.js) - [sequential/test-net-server-address.js](https://github.com/nodejs/node/tree/v20.11.1/test/sequential/test-net-server-address.js)
- [sequential/test-next-tick-error-spin.js](https://github.com/nodejs/node/tree/v20.11.1/test/sequential/test-next-tick-error-spin.js) - [sequential/test-next-tick-error-spin.js](https://github.com/nodejs/node/tree/v20.11.1/test/sequential/test-next-tick-error-spin.js)
- [sequential/test-perf-hooks.js](https://github.com/nodejs/node/tree/v20.11.1/test/sequential/test-perf-hooks.js) - [sequential/test-perf-hooks.js](https://github.com/nodejs/node/tree/v20.11.1/test/sequential/test-perf-hooks.js)

View file

@ -1,53 +0,0 @@
// deno-fmt-ignore-file
// deno-lint-ignore-file
// Copyright Joyent and Node contributors. All rights reserved. MIT license.
// Taken from Node 20.11.1
// This file is automatically generated by `tests/node_compat/runner/setup.ts`. Do not modify this file manually.
// Copyright Joyent, Inc. and other Node contributors.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to permit
// persons to whom the Software is furnished to do so, subject to the
// following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
// USE OR OTHER DEALINGS IN THE SOFTWARE.
'use strict';
const common = require('../common');
const http = require('http');
// Sending `agent: false` when `port: null` is also passed in (i.e. the result
// of a `url.parse()` call with the default port used, 80 or 443), should not
// result in an assertion error...
const opts = {
host: '127.0.0.1',
port: null,
path: '/',
method: 'GET',
agent: false
};
// We just want an "error" (no local HTTP server on port 80) or "response"
// to happen (user happens to have an HTTP server running on port 80).
// As long as the process doesn't crash from a C++ assertion then we're good.
const req = http.request(opts);
// Will be called by either the response event or error event, not both
const oneResponse = common.mustCall();
req.on('response', oneResponse);
req.on('error', oneResponse);
req.end();

View file

@ -1,43 +0,0 @@
// deno-fmt-ignore-file
// deno-lint-ignore-file
// Copyright Joyent and Node contributors. All rights reserved. MIT license.
// Taken from Node 20.11.1
// This file is automatically generated by `tests/node_compat/runner/setup.ts`. Do not modify this file manually.
'use strict';
const common = require('../common');
const assert = require('assert');
const http = require('http');
const { Agent } = require('_http_agent');
const agent = new Agent({
keepAlive: true,
keepAliveMsecs: 1000,
});
const server = http.createServer(common.mustCall((req, res) => {
res.end('ok');
}));
server.listen(0, common.mustCall(() => {
const createConnection = agent.createConnection;
agent.createConnection = (options, ...args) => {
assert.strictEqual(options.keepAlive, true);
assert.strictEqual(options.keepAliveInitialDelay, agent.keepAliveMsecs);
return createConnection.call(agent, options, ...args);
};
http.get({
host: 'localhost',
port: server.address().port,
agent: agent,
path: '/'
}, common.mustCall((res) => {
// for emit end event
res.on('data', () => {});
res.on('end', () => {
server.close();
});
}));
}));

View file

@ -1,49 +0,0 @@
// deno-fmt-ignore-file
// deno-lint-ignore-file
// Copyright Joyent and Node contributors. All rights reserved. MIT license.
// Taken from Node 20.11.1
// This file is automatically generated by `tests/node_compat/runner/setup.ts`. Do not modify this file manually.
'use strict';
const common = require('../common');
// This test ensures that `ClientRequest.prototype.setTimeout()` does
// not add a listener for the `'connect'` event to the socket if the
// socket is already connected.
const assert = require('assert');
const http = require('http');
// Maximum allowed value for timeouts.
const timeout = 2 ** 31 - 1;
const server = http.createServer((req, res) => {
res.end();
});
server.listen(0, common.mustCall(() => {
const agent = new http.Agent({ keepAlive: true, maxSockets: 1 });
const options = { port: server.address().port, agent: agent };
doRequest(options, common.mustCall(() => {
const req = doRequest(options, common.mustCall(() => {
agent.destroy();
server.close();
}));
req.on('socket', common.mustCall((socket) => {
assert.strictEqual(socket.listenerCount('connect'), 0);
}));
}));
}));
function doRequest(options, callback) {
const req = http.get(options, (res) => {
res.on('end', callback);
res.resume();
});
req.setTimeout(timeout);
return req;
}

View file

@ -1,73 +0,0 @@
// deno-fmt-ignore-file
// deno-lint-ignore-file
// Copyright Joyent and Node contributors. All rights reserved. MIT license.
// Taken from Node 20.11.1
// This file is automatically generated by `tests/node_compat/runner/setup.ts`. Do not modify this file manually.
'use strict';
const { mustCall } = require('../common');
const fs = require('fs');
const http = require('http');
const { strictEqual } = require('assert');
const server = http.createServer(mustCall(function(req, res) {
strictEqual(req.socket.listenerCount('data'), 1);
req.socket.once('data', mustCall(function() {
// Ensure that a chunk of data is received before calling `res.end()`.
res.end('hello world');
}));
// This checks if the request gets dumped
// resume will be triggered by res.end().
req.on('resume', mustCall(function() {
// There is no 'data' event handler anymore
// it gets automatically removed when dumping the request.
strictEqual(req.listenerCount('data'), 0);
req.on('data', mustCall());
}));
// We explicitly pause the stream
// so that the following on('data') does not cause
// a resume.
req.pause();
req.on('data', function() {});
// Start sending the response.
res.flushHeaders();
}));
server.listen(0, mustCall(function() {
const req = http.request({
method: 'POST',
port: server.address().port
});
// Send the http request without waiting
// for the body.
req.flushHeaders();
req.on('response', mustCall(function(res) {
// Pipe the body as soon as we get the headers of the
// response back.
fs.createReadStream(__filename).pipe(req);
res.resume();
// On some platforms the `'end'` event might not be emitted because the
// socket could be destroyed by the other peer while data is still being
// sent. In this case the `'aborted'` event is emitted instead of `'end'`.
// `'close'` is used here because it is always emitted and does not
// invalidate the test.
res.on('close', function() {
server.close();
});
}));
req.on('error', function() {
// An error can happen if there is some data still
// being sent, as the other side is calling .destroy()
// this is safe to ignore.
});
}));

View file

@ -1,49 +0,0 @@
// deno-fmt-ignore-file
// deno-lint-ignore-file
// Copyright Joyent and Node contributors. All rights reserved. MIT license.
// Taken from Node 20.11.1
// This file is automatically generated by `tests/node_compat/runner/setup.ts`. Do not modify this file manually.
'use strict';
const common = require('../common');
const assert = require('assert');
const http = require('http');
// All of these values should cause http.request() to throw synchronously
// when passed as the value of either options.hostname or options.host
const vals = [{}, [], NaN, Infinity, -Infinity, true, false, 1, 0, new Date()];
vals.forEach((v) => {
const received = common.invalidArgTypeHelper(v);
assert.throws(
() => http.request({ hostname: v }),
{
code: 'ERR_INVALID_ARG_TYPE',
name: 'TypeError',
message: 'The "options.hostname" property must be of ' +
'type string or one of undefined or null.' +
received
}
);
assert.throws(
() => http.request({ host: v }),
{
code: 'ERR_INVALID_ARG_TYPE',
name: 'TypeError',
message: 'The "options.host" property must be of ' +
'type string or one of undefined or null.' +
received
}
);
});
// These values are OK and should not throw synchronously.
// Only testing for 'hostname' validation so ignore connection errors.
const dontCare = () => {};
['', undefined, null].forEach((v) => {
http.request({ hostname: v }).on('error', dontCare).end();
http.request({ host: v }).on('error', dontCare).end();
});

View file

@ -1,24 +0,0 @@
// deno-fmt-ignore-file
// deno-lint-ignore-file
// Copyright Joyent and Node contributors. All rights reserved. MIT license.
// Taken from Node 20.11.1
// This file is automatically generated by `tests/node_compat/runner/setup.ts`. Do not modify this file manually.
'use strict';
const common = require('../common');
const net = require('net');
const assert = require('assert');
const c = net.createConnection(common.PORT);
c.on('connect', common.mustNotCall());
c.on('error', common.mustCall(function(error) {
// Family autoselection might be skipped if only a single address is returned by DNS.
const failedAttempt = Array.isArray(error.errors) ? error.errors[0] : error;
assert.strictEqual(failedAttempt.code, 'ECONNREFUSED');
assert.strictEqual(failedAttempt.port, common.PORT);
assert.match(failedAttempt.address, /^(127\.0\.0\.1|::1)$/);
}));

View file

@ -1,39 +0,0 @@
// deno-fmt-ignore-file
// deno-lint-ignore-file
// Copyright Joyent and Node contributors. All rights reserved. MIT license.
// Taken from Node 20.11.1
// This file is automatically generated by `tests/node_compat/runner/setup.ts`. Do not modify this file manually.
// Copyright Joyent, Inc. and other Node contributors.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to permit
// persons to whom the Software is furnished to do so, subject to the
// following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
// USE OR OTHER DEALINGS IN THE SOFTWARE.
'use strict';
const common = require('../common');
const net = require('net');
const assert = require('assert');
const c = net.createConnection(common.PORT);
c.on('connect', common.mustNotCall());
c.on('error', common.mustCall((e) => {
assert.strictEqual(c.connecting, false);
assert.strictEqual(e.code, 'ECONNREFUSED');
}));

View file

@ -1,50 +0,0 @@
// deno-fmt-ignore-file
// deno-lint-ignore-file
// Copyright Joyent and Node contributors. All rights reserved. MIT license.
// Taken from Node 20.11.1
// This file is automatically generated by `tests/node_compat/runner/setup.ts`. Do not modify this file manually.
// Copyright Joyent, Inc. and other Node contributors.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to permit
// persons to whom the Software is furnished to do so, subject to the
// following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
// USE OR OTHER DEALINGS IN THE SOFTWARE.
'use strict';
const common = require('../common');
const net = require('net');
const assert = require('assert');
const N = 20;
let disconnectCount = 0;
const c = net.createConnection(common.PORT);
c.on('connect', common.mustNotCall('client should not have connected'));
c.on('error', common.mustCall((error) => {
// Family autoselection might be skipped if only a single address is returned by DNS.
const actualError = Array.isArray(error.errors) ? error.errors[0] : error;
assert.strictEqual(actualError.code, 'ECONNREFUSED');
}, N + 1));
c.on('close', common.mustCall(() => {
if (disconnectCount++ < N)
c.connect(common.PORT); // reconnect
}, N + 1));

View file

@ -13,6 +13,7 @@ const server = Deno.serve(
const command = new Deno.Command(Deno.execPath(), { const command = new Deno.Command(Deno.execPath(), {
args: ["run", "-A", "-q", "--unstable-otel", Deno.args[0]], args: ["run", "-A", "-q", "--unstable-otel", Deno.args[0]],
env: { env: {
OTEL_DENO: "true",
DENO_UNSTABLE_OTEL_DETERMINISTIC: "1", DENO_UNSTABLE_OTEL_DETERMINISTIC: "1",
OTEL_EXPORTER_OTLP_PROTOCOL: "http/json", OTEL_EXPORTER_OTLP_PROTOCOL: "http/json",
OTEL_EXPORTER_OTLP_ENDPOINT: `http://localhost:${port}`, OTEL_EXPORTER_OTLP_ENDPOINT: `http://localhost:${port}`,

View file

@ -0,0 +1,22 @@
{
"tempDir": true,
"steps": [{
"if": "unix",
"args": "compile --output main main.ts",
"output": "compile.out"
}, {
"if": "unix",
"commandName": "./main",
"args": [],
"output": "main.out"
}, {
"if": "windows",
"args": "compile --output main.exe main.ts",
"output": "compile.out"
}, {
"if": "windows",
"commandName": "./main.exe",
"args": [],
"output": "main.out"
}]
}

View file

@ -0,0 +1,47 @@
[WILDCARD]
Compile file:///[WILDLINE]/main.ts to [WILDLINE]
Embedded File System
main[WILDLINE]
├─┬ .deno_compile_node_modules
│ └─┬ localhost
│ ├─┬ ansi-regex
│ │ ├── 3.0.1/*
│ │ └── 5.0.1/*
│ ├── ansi-styles/4.3.0/*
│ ├── camelcase/5.3.1/*
│ ├── cliui/6.0.0/*
│ ├── color-convert/2.0.1/*
│ ├── color-name/1.1.4/*
│ ├── cowsay/1.5.0/*
│ ├── decamelize/1.2.0/*
│ ├── emoji-regex/8.0.0/*
│ ├── find-up/4.1.0/*
│ ├── get-caller-file/2.0.5/*
│ ├── get-stdin/8.0.0/*
│ ├─┬ is-fullwidth-code-point
│ │ ├── 2.0.0/*
│ │ └── 3.0.0/*
│ ├── locate-path/5.0.0/*
│ ├── p-limit/2.3.0/*
│ ├── p-locate/4.1.0/*
│ ├── p-try/2.2.0/*
│ ├── path-exists/4.0.0/*
│ ├── require-directory/2.1.1/*
│ ├── require-main-filename/2.0.0/*
│ ├── set-blocking/2.0.0/*
│ ├─┬ string-width
│ │ ├── 2.1.1/*
│ │ └── 4.2.3/*
│ ├─┬ strip-ansi
│ │ ├── 4.0.0/*
│ │ └── 6.0.1/*
│ ├── strip-final-newline/2.0.0/*
│ ├── which-module/2.0.0/*
│ ├── wrap-ansi/6.2.0/*
│ ├── y18n/4.0.3/*
│ ├── yargs/15.4.1/*
│ └── yargs-parser/18.1.3/*
└── main.ts

View file

@ -3,4 +3,5 @@ error: Writing deno compile executable to temporary file 'main[WILDLINE]'
Caused by: Caused by:
0: Including [WILDLINE]does_not_exist.txt 0: Including [WILDLINE]does_not_exist.txt
1: [WILDLINE] 1: Reading [WILDLINE]does_not_exist.txt
2: [WILDLINE]

View file

@ -0,0 +1,25 @@
{
"tempDir": true,
"steps": [{
"if": "unix",
// notice how the math folder is not included
"args": "compile --allow-read=data --include src --output main main.js",
"output": "[WILDCARD]"
}, {
"if": "unix",
"commandName": "./main",
"args": [],
"output": "output.out",
"exitCode": 0
}, {
"if": "windows",
"args": "compile --allow-read=data --include src --output main.exe main.js",
"output": "[WILDCARD]"
}, {
"if": "windows",
"commandName": "./main.exe",
"args": [],
"output": "output.out",
"exitCode": 0
}]
}

View file

@ -0,0 +1,14 @@
const mathDir = import.meta.dirname + "/math";
const files = Array.from(
Deno.readDirSync(mathDir).map((entry) => mathDir + "/" + entry.name),
);
files.sort();
for (const file of files) {
console.log(file);
}
function nonAnalyzable() {
return "./src/main.ts";
}
await import(nonAnalyzable());

View file

@ -0,0 +1,3 @@
export function add(a: number, b: number) {
return a + b;
}

View file

@ -0,0 +1,2 @@
[WILDLINE]add.ts
3

View file

@ -0,0 +1,2 @@
import { add } from "../math/add.ts";
console.log(add(1, 2));

View file

@ -6,7 +6,7 @@
}, { }, {
"if": "unix", "if": "unix",
"args": "compile --allow-read=data --include . --output main link.js", "args": "compile --allow-read=data --include . --output main link.js",
"output": "[WILDCARD]" "output": "compile.out"
}, { }, {
"if": "unix", "if": "unix",
"commandName": "./main", "commandName": "./main",
@ -16,7 +16,7 @@
}, { }, {
"if": "windows", "if": "windows",
"args": "compile --allow-read=data --include . --output main.exe link.js", "args": "compile --allow-read=data --include . --output main.exe link.js",
"output": "[WILDCARD]" "output": "compile.out"
}, { }, {
"if": "windows", "if": "windows",
"commandName": "./main.exe", "commandName": "./main.exe",

View file

@ -0,0 +1,9 @@
Compile [WILDLINE]
Embedded File System
main[WILDLINE]
├── index.js
├── link.js --> index.js
└── setup.js

View file

@ -1,3 +1,2 @@
Deno.mkdirSync("data");
Deno.writeTextFileSync("index.js", "console.log(1);"); Deno.writeTextFileSync("index.js", "console.log(1);");
Deno.symlinkSync("index.js", "link.js"); Deno.symlinkSync("index.js", "link.js");

View file

@ -0,0 +1,24 @@
{
"tempDir": true,
// use this so the vfs output is all in the same folder
"canonicalizedTempDir": true,
"steps": [{
"if": "unix",
"args": "compile -A --output main main.ts",
"output": "compile.out"
}, {
"if": "unix",
"commandName": "./main",
"args": [],
"output": "main.out"
}, {
"if": "windows",
"args": "compile -A --output main.exe main.ts",
"output": "compile.out"
}, {
"if": "windows",
"commandName": "./main.exe",
"args": [],
"output": "main.out"
}]
}

View file

@ -0,0 +1,8 @@
[WILDCARD]
Embedded File System
main[WILDLINE]
├── main.ts
└── node_modules/*

View file

@ -0,0 +1,3 @@
{
"nodeModulesDir": "auto"
}

View file

@ -118,6 +118,12 @@ struct MultiStepMetaData {
/// steps. /// steps.
#[serde(default)] #[serde(default)]
pub temp_dir: bool, pub temp_dir: bool,
/// Whether the temporary directory should be canonicalized.
///
/// This should be used sparingly, but is sometimes necessary
/// on the CI.
#[serde(default)]
pub canonicalized_temp_dir: bool,
/// Whether the temporary directory should be symlinked to another path. /// Whether the temporary directory should be symlinked to another path.
#[serde(default)] #[serde(default)]
pub symlinked_temp_dir: bool, pub symlinked_temp_dir: bool,
@ -144,6 +150,8 @@ struct SingleTestMetaData {
#[serde(default)] #[serde(default)]
pub temp_dir: bool, pub temp_dir: bool,
#[serde(default)] #[serde(default)]
pub canonicalized_temp_dir: bool,
#[serde(default)]
pub symlinked_temp_dir: bool, pub symlinked_temp_dir: bool,
#[serde(default)] #[serde(default)]
pub repeat: Option<usize>, pub repeat: Option<usize>,
@ -159,6 +167,7 @@ impl SingleTestMetaData {
base: self.base, base: self.base,
cwd: None, cwd: None,
temp_dir: self.temp_dir, temp_dir: self.temp_dir,
canonicalized_temp_dir: self.canonicalized_temp_dir,
symlinked_temp_dir: self.symlinked_temp_dir, symlinked_temp_dir: self.symlinked_temp_dir,
repeat: self.repeat, repeat: self.repeat,
envs: Default::default(), envs: Default::default(),
@ -326,6 +335,13 @@ fn test_context_from_metadata(
builder = builder.cwd(cwd.to_string_lossy()); builder = builder.cwd(cwd.to_string_lossy());
} }
if metadata.canonicalized_temp_dir {
// not actually deprecated, we just want to discourage its use
#[allow(deprecated)]
{
builder = builder.use_canonicalized_temp_dir();
}
}
if metadata.symlinked_temp_dir { if metadata.symlinked_temp_dir {
// not actually deprecated, we just want to discourage its use // not actually deprecated, we just want to discourage its use
// because it's mostly used for testing purposes locally // because it's mostly used for testing purposes locally

View file

@ -0,0 +1,18 @@
{
"tests": {
"matches": {
"args": "run -A matches.ts",
"output": "5\n"
},
"not_matches": {
"args": "run -A not_matches.ts",
"output": "not_matches.out",
"exitCode": 1
},
"not_matches_aliased": {
"args": "run -A not_matches_aliased.ts",
"output": "not_matches_aliased.out",
"exitCode": 1
}
}
}

View file

@ -0,0 +1,3 @@
import { add } from "npm:package@1";
console.log(add(2, 3));

View file

@ -0,0 +1,3 @@
module.exports.add = function(a, b) {
return a + b;
};

View file

@ -0,0 +1,4 @@
{
"name": "not-same-name",
"version": "1.0.0"
}

View file

@ -0,0 +1,3 @@
module.exports.add = function(a, b) {
return a + b;
};

View file

@ -0,0 +1,4 @@
{
"name": "package",
"version": "1.0.0"
}

View file

@ -0,0 +1,2 @@
error: Could not find a matching package for 'npm:package@2' in the node_modules directory. Ensure you have all your JSR and npm dependencies listed in your deno.json or package.json, then run `deno install`. Alternatively, turn on auto-install by specifying `"nodeModulesDir": "auto"` in your deno.json file.
at file:///[WILDLINE]

View file

@ -0,0 +1,3 @@
import { add } from "npm:package@2"; // won't match 2
console.log(add(2, 3));

View file

@ -0,0 +1,2 @@
error: Could not find a matching package for 'npm:aliased@1' in the node_modules directory. Ensure you have all your JSR and npm dependencies listed in your deno.json or package.json, then run `deno install`. Alternatively, turn on auto-install by specifying `"nodeModulesDir": "auto"` in your deno.json file.
at file:///[WILDLINE]

View file

@ -0,0 +1,3 @@
import { add } from "npm:aliased@1";
console.log(add(2, 3));

View file

@ -0,0 +1,2 @@
{
}

View file

@ -36,6 +36,9 @@
"flaky": { "flaky": {
"type": "boolean" "type": "boolean"
}, },
"canonicalizedTempDir": {
"type": "boolean"
},
"symlinkedTempDir": { "symlinkedTempDir": {
"type": "boolean" "type": "boolean"
}, },
@ -66,6 +69,12 @@
"tempDir": { "tempDir": {
"type": "boolean" "type": "boolean"
}, },
"canonicalizedTempDir": {
"type": "boolean"
},
"symlinkedTempDir": {
"type": "boolean"
},
"base": { "base": {
"type": "string" "type": "string"
}, },
@ -94,6 +103,12 @@
"tempDir": { "tempDir": {
"type": "boolean" "type": "boolean"
}, },
"canonicalizedTempDir": {
"type": "boolean"
},
"symlinkedTempDir": {
"type": "boolean"
},
"base": { "base": {
"type": "string" "type": "string"
}, },

View file

@ -1,5 +1,4 @@
Compile file:///[WILDCARD]/node_modules_symlink_outside/main.ts to [WILDCARD] Compile file:///[WILDCARD]/node_modules_symlink_outside/main.ts to [WILDCARD]
Warning Symlink target is outside '[WILDCARD]node_modules_symlink_outside'. Inlining symlink at '[WILDCARD]node_modules_symlink_outside[WILDCARD]node_modules[WILDCARD]test.txt' to '[WILDCARD]target.txt' as file.
Embedded File System Embedded File System

View file

@ -3,8 +3,13 @@ Download http://localhost:4260/@denotest/esm-basic/1.0.0.tgz
Initialize @denotest/esm-basic@1.0.0 Initialize @denotest/esm-basic@1.0.0
Check file:///[WILDCARD]/node_modules_symlink_outside/main.ts Check file:///[WILDCARD]/node_modules_symlink_outside/main.ts
Compile file:///[WILDCARD]/node_modules_symlink_outside/main.ts to [WILDLINE] Compile file:///[WILDCARD]/node_modules_symlink_outside/main.ts to [WILDLINE]
Warning Symlink target is outside '[WILDLINE]node_modules_symlink_outside'. Excluding symlink at '[WILDLINE]node_modules_symlink_outside[WILDLINE]node_modules[WILDLINE]symlink_dir' with target '[WILDLINE]some_folder'.
Embedded File System Embedded File System
[WILDCARD] bin[WILDLINE]
├─┬ compile
│ └─┬ node_modules_symlink_outside
│ ├── main.ts
│ └── node_modules/*
└── some_folder/*

View file

@ -499,7 +499,6 @@ Deno.test("[node/http] send request with non-chunked body", async () => {
assert(socket.writable); assert(socket.writable);
assert(socket.readable); assert(socket.readable);
socket.setKeepAlive(); socket.setKeepAlive();
socket.destroy();
socket.setTimeout(100); socket.setTimeout(100);
}); });
req.write("hello "); req.write("hello ");
@ -512,6 +511,11 @@ Deno.test("[node/http] send request with non-chunked body", async () => {
// in order to not cause a flaky test sanitizer failure // in order to not cause a flaky test sanitizer failure
await new Promise((resolve) => setTimeout(resolve, 100)), await new Promise((resolve) => setTimeout(resolve, 100)),
]); ]);
if (Deno.build.os === "windows") {
// FIXME(kt3k): This is necessary for preventing op leak on windows
await new Promise((resolve) => setTimeout(resolve, 4000));
}
}); });
Deno.test("[node/http] send request with chunked body", async () => { Deno.test("[node/http] send request with chunked body", async () => {
@ -559,6 +563,11 @@ Deno.test("[node/http] send request with chunked body", async () => {
req.end(); req.end();
await servePromise; await servePromise;
if (Deno.build.os === "windows") {
// FIXME(kt3k): This is necessary for preventing op leak on windows
await new Promise((resolve) => setTimeout(resolve, 4000));
}
}); });
Deno.test("[node/http] send request with chunked body as default", async () => { Deno.test("[node/http] send request with chunked body as default", async () => {
@ -604,6 +613,11 @@ Deno.test("[node/http] send request with chunked body as default", async () => {
req.end(); req.end();
await servePromise; await servePromise;
if (Deno.build.os === "windows") {
// FIXME(kt3k): This is necessary for preventing op leak on windows
await new Promise((resolve) => setTimeout(resolve, 4000));
}
}); });
Deno.test("[node/http] ServerResponse _implicitHeader", async () => { Deno.test("[node/http] ServerResponse _implicitHeader", async () => {
@ -689,7 +703,7 @@ Deno.test("[node/http] ClientRequest handle non-string headers", async () => {
assertEquals(headers!["1"], "2"); assertEquals(headers!["1"], "2");
}); });
Deno.test("[node/http] ClientRequest uses HTTP/1.1", async () => { Deno.test("[node/https] ClientRequest uses HTTP/1.1", async () => {
let body = ""; let body = "";
const { promise, resolve, reject } = Promise.withResolvers<void>(); const { promise, resolve, reject } = Promise.withResolvers<void>();
const req = https.request("https://localhost:5545/http_version", { const req = https.request("https://localhost:5545/http_version", {
@ -800,8 +814,9 @@ Deno.test("[node/http] ClientRequest search params", async () => {
let body = ""; let body = "";
const { promise, resolve, reject } = Promise.withResolvers<void>(); const { promise, resolve, reject } = Promise.withResolvers<void>();
const req = http.request({ const req = http.request({
host: "localhost:4545", host: "localhost",
path: "search_params?foo=bar", port: 4545,
path: "/search_params?foo=bar",
}, (resp) => { }, (resp) => {
resp.on("data", (chunk) => { resp.on("data", (chunk) => {
body += chunk; body += chunk;
@ -1011,28 +1026,50 @@ Deno.test(
Deno.test( Deno.test(
"[node/http] client destroy before sending request should not error", "[node/http] client destroy before sending request should not error",
() => { async () => {
const { resolve, promise } = Promise.withResolvers<void>();
const request = http.request("http://localhost:5929/"); const request = http.request("http://localhost:5929/");
// Calling this would throw // Calling this would throw
request.destroy(); request.destroy();
request.on("error", (e) => {
assertEquals(e.message, "socket hang up");
});
request.on("close", () => resolve());
await promise;
if (Deno.build.os === "windows") {
// FIXME(kt3k): This is necessary for preventing op leak on windows
await new Promise((resolve) => setTimeout(resolve, 4000));
}
}, },
); );
const isWindows = Deno.build.os === "windows";
Deno.test( Deno.test(
"[node/http] destroyed requests should not be sent", "[node/http] destroyed requests should not be sent",
{ sanitizeResources: !isWindows, sanitizeOps: !isWindows },
async () => { async () => {
let receivedRequest = false; let receivedRequest = false;
const server = Deno.serve(() => { const requestClosed = Promise.withResolvers<void>();
const ac = new AbortController();
const server = Deno.serve({ port: 0, signal: ac.signal }, () => {
receivedRequest = true; receivedRequest = true;
return new Response(null); return new Response(null);
}); });
const request = http.request(`http://localhost:${server.addr.port}/`); const request = http.request(`http://localhost:${server.addr.port}/`);
request.destroy(); request.destroy();
request.end("hello"); request.end("hello");
request.on("error", (err) => {
await new Promise((r) => setTimeout(r, 500)); assert(err.message.includes("socket hang up"));
ac.abort();
});
request.on("close", () => {
requestClosed.resolve();
});
await requestClosed.promise;
assertEquals(receivedRequest, false); assertEquals(receivedRequest, false);
await server.shutdown(); await server.finished;
}, },
); );
@ -1060,22 +1097,33 @@ Deno.test("[node/https] node:https exports globalAgent", async () => {
); );
}); });
Deno.test("[node/http] node:http request.setHeader(header, null) doesn't throw", () => { Deno.test("[node/http] node:http request.setHeader(header, null) doesn't throw", async () => {
{ {
const req = http.request("http://localhost:4545/"); const { promise, resolve } = Promise.withResolvers<void>();
req.on("error", () => {}); const req = http.request("http://localhost:4545/", (res) => {
res.on("data", () => {});
res.on("end", () => {
resolve();
});
});
// @ts-expect-error - null is not a valid header value // @ts-expect-error - null is not a valid header value
req.setHeader("foo", null); req.setHeader("foo", null);
req.end(); req.end();
req.destroy(); await promise;
} }
{ {
const req = https.request("https://localhost:4545/"); const { promise, resolve } = Promise.withResolvers<void>();
req.on("error", () => {}); const req = http.request("http://localhost:4545/", (res) => {
res.on("data", () => {});
res.on("end", () => {
resolve();
});
});
// @ts-expect-error - null is not a valid header value // @ts-expect-error - null is not a valid header value
req.setHeader("foo", null); req.setHeader("foo", null);
req.end(); req.end();
req.destroy();
await promise;
} }
}); });

View file

@ -816,15 +816,17 @@ pub fn wildcard_match_detailed(
} }
let actual_next_text = let actual_next_text =
&current_text[max_current_text_found_index..]; &current_text[max_current_text_found_index..];
let max_next_text_len = 40; let next_text_len = actual_next_text
let next_text_len = .chars()
std::cmp::min(max_next_text_len, actual_next_text.len()); .take(40)
.map(|c| c.len_utf8())
.sum::<usize>();
output_lines.push(format!( output_lines.push(format!(
"==== NEXT ACTUAL TEXT ====\n{}{}", "==== NEXT ACTUAL TEXT ====\n{}{}",
colors::red(annotate_whitespace( colors::red(annotate_whitespace(
&actual_next_text[..next_text_len] &actual_next_text[..next_text_len]
)), )),
if actual_next_text.len() > max_next_text_len { if actual_next_text.len() > next_text_len {
"[TRUNCATED]" "[TRUNCATED]"
} else { } else {
"" ""

View file

@ -1,81 +1,365 @@
{ {
"version": "3", "version": "4",
"packages": { "specifiers": {
"specifiers": { "jsr:@david/dax@0.41.0": "0.41.0",
"jsr:@david/dax@0.41.0": "jsr:@david/dax@0.41.0", "jsr:@david/dax@0.42": "0.42.0",
"jsr:@david/which@^0.4.1": "jsr:@david/which@0.4.1", "jsr:@david/path@0.2": "0.2.0",
"jsr:@deno/patchver@0.1.0": "jsr:@deno/patchver@0.1.0", "jsr:@david/which@~0.4.1": "0.4.1",
"jsr:@std/assert@^0.221.0": "jsr:@std/assert@0.221.0", "jsr:@deno/patchver@0.1.0": "0.1.0",
"jsr:@std/bytes@^0.221.0": "jsr:@std/bytes@0.221.0", "jsr:@std/assert@0.221": "0.221.0",
"jsr:@std/fmt@1": "jsr:@std/fmt@1.0.0", "jsr:@std/bytes@0.221": "0.221.0",
"jsr:@std/fmt@^0.221.0": "jsr:@std/fmt@0.221.0", "jsr:@std/fmt@0.221": "0.221.0",
"jsr:@std/fs@0.221.0": "jsr:@std/fs@0.221.0", "jsr:@std/fmt@1": "1.0.0",
"jsr:@std/io@0.221.0": "jsr:@std/io@0.221.0", "jsr:@std/fs@0.221.0": "0.221.0",
"jsr:@std/io@^0.221.0": "jsr:@std/io@0.221.0", "jsr:@std/fs@1": "1.0.5",
"jsr:@std/path@0.221.0": "jsr:@std/path@0.221.0", "jsr:@std/io@0.221": "0.221.0",
"jsr:@std/path@^0.221.0": "jsr:@std/path@0.221.0", "jsr:@std/io@0.221.0": "0.221.0",
"jsr:@std/streams@0.221.0": "jsr:@std/streams@0.221.0", "jsr:@std/path@0.221": "0.221.0",
"jsr:@std/yaml@^0.221": "jsr:@std/yaml@0.221.0" "jsr:@std/path@0.221.0": "0.221.0",
"jsr:@std/path@1": "1.0.8",
"jsr:@std/path@^1.0.7": "1.0.8",
"jsr:@std/streams@0.221": "0.221.0",
"jsr:@std/streams@0.221.0": "0.221.0",
"jsr:@std/yaml@0.221": "0.221.0",
"npm:decompress@4.2.1": "4.2.1"
},
"jsr": {
"@david/dax@0.41.0": {
"integrity": "9e1ecf66a0415962cc8ad3ba4e3fa93ce0f1a1cc797dd95c36fdfb6977dc7fc8",
"dependencies": [
"jsr:@david/which",
"jsr:@std/fmt@0.221",
"jsr:@std/fs@0.221.0",
"jsr:@std/io@0.221.0",
"jsr:@std/path@0.221.0",
"jsr:@std/streams@0.221.0"
]
}, },
"jsr": { "@david/dax@0.42.0": {
"@david/dax@0.41.0": { "integrity": "0c547c9a20577a6072b90def194c159c9ddab82280285ebfd8268a4ebefbd80b",
"integrity": "9e1ecf66a0415962cc8ad3ba4e3fa93ce0f1a1cc797dd95c36fdfb6977dc7fc8", "dependencies": [
"dependencies": [ "jsr:@david/path",
"jsr:@david/which@^0.4.1", "jsr:@david/which",
"jsr:@std/fmt@^0.221.0", "jsr:@std/fmt@1",
"jsr:@std/fs@0.221.0", "jsr:@std/fs@1",
"jsr:@std/io@0.221.0", "jsr:@std/io@0.221",
"jsr:@std/path@0.221.0", "jsr:@std/path@1",
"jsr:@std/streams@0.221.0" "jsr:@std/streams@0.221"
] ]
}, },
"@david/which@0.4.1": { "@david/path@0.2.0": {
"integrity": "896a682b111f92ab866cc70c5b4afab2f5899d2f9bde31ed00203b9c250f225e" "integrity": "f2d7aa7f02ce5a55e27c09f9f1381794acb09d328f8d3c8a2e3ab3ffc294dccd",
}, "dependencies": [
"@deno/patchver@0.1.0": { "jsr:@std/fs@1",
"integrity": "3102aa1b751a9fb85ef6cf7d4c0a1ec6624c85a77facc140c5748d82126d66a6" "jsr:@std/path@1"
}, ]
"@std/assert@0.221.0": { },
"integrity": "a5f1aa6e7909dbea271754fd4ab3f4e687aeff4873b4cef9a320af813adb489a" "@david/which@0.4.1": {
}, "integrity": "896a682b111f92ab866cc70c5b4afab2f5899d2f9bde31ed00203b9c250f225e"
"@std/bytes@0.221.0": { },
"integrity": "64a047011cf833890a4a2ab7293ac55a1b4f5a050624ebc6a0159c357de91966" "@deno/patchver@0.1.0": {
}, "integrity": "3102aa1b751a9fb85ef6cf7d4c0a1ec6624c85a77facc140c5748d82126d66a6"
"@std/fmt@0.221.0": { },
"integrity": "379fed69bdd9731110f26b9085aeb740606b20428ce6af31ef6bd45ef8efa62a" "@std/assert@0.221.0": {
}, "integrity": "a5f1aa6e7909dbea271754fd4ab3f4e687aeff4873b4cef9a320af813adb489a"
"@std/fmt@1.0.0": { },
"integrity": "8a95c9fdbb61559418ccbc0f536080cf43341655e1444f9d375a66886ceaaa3d" "@std/bytes@0.221.0": {
}, "integrity": "64a047011cf833890a4a2ab7293ac55a1b4f5a050624ebc6a0159c357de91966"
"@std/fs@0.221.0": { },
"integrity": "028044450299de8ed5a716ade4e6d524399f035513b85913794f4e81f07da286", "@std/fmt@0.221.0": {
"dependencies": [ "integrity": "379fed69bdd9731110f26b9085aeb740606b20428ce6af31ef6bd45ef8efa62a"
"jsr:@std/assert@^0.221.0", },
"jsr:@std/path@^0.221.0" "@std/fmt@1.0.0": {
] "integrity": "8a95c9fdbb61559418ccbc0f536080cf43341655e1444f9d375a66886ceaaa3d"
}, },
"@std/io@0.221.0": { "@std/fs@0.221.0": {
"integrity": "faf7f8700d46ab527fa05cc6167f4b97701a06c413024431c6b4d207caa010da", "integrity": "028044450299de8ed5a716ade4e6d524399f035513b85913794f4e81f07da286",
"dependencies": [ "dependencies": [
"jsr:@std/assert@^0.221.0", "jsr:@std/assert",
"jsr:@std/bytes@^0.221.0" "jsr:@std/path@0.221"
] ]
}, },
"@std/path@0.221.0": { "@std/fs@1.0.5": {
"integrity": "0a36f6b17314ef653a3a1649740cc8db51b25a133ecfe838f20b79a56ebe0095", "integrity": "41806ad6823d0b5f275f9849a2640d87e4ef67c51ee1b8fb02426f55e02fd44e",
"dependencies": [ "dependencies": [
"jsr:@std/assert@^0.221.0" "jsr:@std/path@^1.0.7"
] ]
}, },
"@std/streams@0.221.0": { "@std/io@0.221.0": {
"integrity": "47f2f74634b47449277c0ee79fe878da4424b66bd8975c032e3afdca88986e61", "integrity": "faf7f8700d46ab527fa05cc6167f4b97701a06c413024431c6b4d207caa010da",
"dependencies": [ "dependencies": [
"jsr:@std/io@^0.221.0" "jsr:@std/assert",
] "jsr:@std/bytes"
}, ]
"@std/yaml@0.221.0": { },
"integrity": "bac8913ee4f6fc600d4b92cc020f755070e22687ad242341f31d123ff690ae98" "@std/path@0.221.0": {
} "integrity": "0a36f6b17314ef653a3a1649740cc8db51b25a133ecfe838f20b79a56ebe0095",
"dependencies": [
"jsr:@std/assert"
]
},
"@std/path@1.0.8": {
"integrity": "548fa456bb6a04d3c1a1e7477986b6cffbce95102d0bb447c67c4ee70e0364be"
},
"@std/streams@0.221.0": {
"integrity": "47f2f74634b47449277c0ee79fe878da4424b66bd8975c032e3afdca88986e61",
"dependencies": [
"jsr:@std/io@0.221"
]
},
"@std/yaml@0.221.0": {
"integrity": "bac8913ee4f6fc600d4b92cc020f755070e22687ad242341f31d123ff690ae98"
}
},
"npm": {
"base64-js@1.5.1": {
"integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA=="
},
"bl@1.2.3": {
"integrity": "sha512-pvcNpa0UU69UT341rO6AYy4FVAIkUHuZXRIWbq+zHnsVcRzDDjIAhGuuYoi0d//cwIwtt4pkpKycWEfjdV+vww==",
"dependencies": [
"readable-stream",
"safe-buffer@5.2.1"
]
},
"buffer-alloc-unsafe@1.1.0": {
"integrity": "sha512-TEM2iMIEQdJ2yjPJoSIsldnleVaAk1oW3DBVUykyOLsEsFmEc9kn+SFFPz+gl54KQNxlDnAwCXosOS9Okx2xAg=="
},
"buffer-alloc@1.2.0": {
"integrity": "sha512-CFsHQgjtW1UChdXgbyJGtnm+O/uLQeZdtbDo8mfUgYXCHSM1wgrVxXm6bSyrUuErEb+4sYVGCzASBRot7zyrow==",
"dependencies": [
"buffer-alloc-unsafe",
"buffer-fill"
]
},
"buffer-crc32@0.2.13": {
"integrity": "sha512-VO9Ht/+p3SN7SKWqcrgEzjGbRSJYTx+Q1pTQC0wrWqHx0vpJraQ6GtHx8tvcg1rlK1byhU5gccxgOgj7B0TDkQ=="
},
"buffer-fill@1.0.0": {
"integrity": "sha512-T7zexNBwiiaCOGDg9xNX9PBmjrubblRkENuptryuI64URkXDFum9il/JGL8Lm8wYfAXpredVXXZz7eMHilimiQ=="
},
"buffer@5.7.1": {
"integrity": "sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==",
"dependencies": [
"base64-js",
"ieee754"
]
},
"commander@2.20.3": {
"integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ=="
},
"core-util-is@1.0.3": {
"integrity": "sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ=="
},
"decompress-tar@4.1.1": {
"integrity": "sha512-JdJMaCrGpB5fESVyxwpCx4Jdj2AagLmv3y58Qy4GE6HMVjWz1FeVQk1Ct4Kye7PftcdOo/7U7UKzYBJgqnGeUQ==",
"dependencies": [
"file-type@5.2.0",
"is-stream",
"tar-stream"
]
},
"decompress-tarbz2@4.1.1": {
"integrity": "sha512-s88xLzf1r81ICXLAVQVzaN6ZmX4A6U4z2nMbOwobxkLoIIfjVMBg7TeguTUXkKeXni795B6y5rnvDw7rxhAq9A==",
"dependencies": [
"decompress-tar",
"file-type@6.2.0",
"is-stream",
"seek-bzip",
"unbzip2-stream"
]
},
"decompress-targz@4.1.1": {
"integrity": "sha512-4z81Znfr6chWnRDNfFNqLwPvm4db3WuZkqV+UgXQzSngG3CEKdBkw5jrv3axjjL96glyiiKjsxJG3X6WBZwX3w==",
"dependencies": [
"decompress-tar",
"file-type@5.2.0",
"is-stream"
]
},
"decompress-unzip@4.0.1": {
"integrity": "sha512-1fqeluvxgnn86MOh66u8FjbtJpAFv5wgCT9Iw8rcBqQcCo5tO8eiJw7NNTrvt9n4CRBVq7CstiS922oPgyGLrw==",
"dependencies": [
"file-type@3.9.0",
"get-stream",
"pify@2.3.0",
"yauzl"
]
},
"decompress@4.2.1": {
"integrity": "sha512-e48kc2IjU+2Zw8cTb6VZcJQ3lgVbS4uuB1TfCHbiZIP/haNXm+SVyhu+87jts5/3ROpd82GSVCoNs/z8l4ZOaQ==",
"dependencies": [
"decompress-tar",
"decompress-tarbz2",
"decompress-targz",
"decompress-unzip",
"graceful-fs",
"make-dir",
"pify@2.3.0",
"strip-dirs"
]
},
"end-of-stream@1.4.4": {
"integrity": "sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==",
"dependencies": [
"once"
]
},
"fd-slicer@1.1.0": {
"integrity": "sha512-cE1qsB/VwyQozZ+q1dGxR8LBYNZeofhEdUNGSMbQD3Gw2lAzX9Zb3uIU6Ebc/Fmyjo9AWWfnn0AUCHqtevs/8g==",
"dependencies": [
"pend"
]
},
"file-type@3.9.0": {
"integrity": "sha512-RLoqTXE8/vPmMuTI88DAzhMYC99I8BWv7zYP4A1puo5HIjEJ5EX48ighy4ZyKMG9EDXxBgW6e++cn7d1xuFghA=="
},
"file-type@5.2.0": {
"integrity": "sha512-Iq1nJ6D2+yIO4c8HHg4fyVb8mAJieo1Oloy1mLLaB2PvezNedhBVm+QU7g0qM42aiMbRXTxKKwGD17rjKNJYVQ=="
},
"file-type@6.2.0": {
"integrity": "sha512-YPcTBDV+2Tm0VqjybVd32MHdlEGAtuxS3VAYsumFokDSMG+ROT5wawGlnHDoz7bfMcMDt9hxuXvXwoKUx2fkOg=="
},
"fs-constants@1.0.0": {
"integrity": "sha512-y6OAwoSIf7FyjMIv94u+b5rdheZEjzR63GTyZJm5qh4Bi+2YgwLCcI/fPFZkL5PSixOt6ZNKm+w+Hfp/Bciwow=="
},
"get-stream@2.3.1": {
"integrity": "sha512-AUGhbbemXxrZJRD5cDvKtQxLuYaIbNtDTK8YqupCI393Q2KSTreEsLUN3ZxAWFGiKTzL6nKuzfcIvieflUX9qA==",
"dependencies": [
"object-assign",
"pinkie-promise"
]
},
"graceful-fs@4.2.11": {
"integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ=="
},
"ieee754@1.2.1": {
"integrity": "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA=="
},
"inherits@2.0.4": {
"integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ=="
},
"is-natural-number@4.0.1": {
"integrity": "sha512-Y4LTamMe0DDQIIAlaer9eKebAlDSV6huy+TWhJVPlzZh2o4tRP5SQWFlLn5N0To4mDD22/qdOq+veo1cSISLgQ=="
},
"is-stream@1.1.0": {
"integrity": "sha512-uQPm8kcs47jx38atAcWTVxyltQYoPT68y9aWYdV6yWXSyW8mzSat0TL6CiWdZeCdF3KrAvpVtnHbTv4RN+rqdQ=="
},
"isarray@1.0.0": {
"integrity": "sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ=="
},
"make-dir@1.3.0": {
"integrity": "sha512-2w31R7SJtieJJnQtGc7RVL2StM2vGYVfqUOvUDxH6bC6aJTxPxTF0GnIgCyu7tjockiUWAYQRbxa7vKn34s5sQ==",
"dependencies": [
"pify@3.0.0"
]
},
"object-assign@4.1.1": {
"integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg=="
},
"once@1.4.0": {
"integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==",
"dependencies": [
"wrappy"
]
},
"pend@1.2.0": {
"integrity": "sha512-F3asv42UuXchdzt+xXqfW1OGlVBe+mxa2mqI0pg5yAHZPvFmY3Y6drSf/GQ1A86WgWEN9Kzh/WrgKa6iGcHXLg=="
},
"pify@2.3.0": {
"integrity": "sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog=="
},
"pify@3.0.0": {
"integrity": "sha512-C3FsVNH1udSEX48gGX1xfvwTWfsYWj5U+8/uK15BGzIGrKoUpghX8hWZwa/OFnakBiiVNmBvemTJR5mcy7iPcg=="
},
"pinkie-promise@2.0.1": {
"integrity": "sha512-0Gni6D4UcLTbv9c57DfxDGdr41XfgUjqWZu492f0cIGr16zDU06BWP/RAEvOuo7CQ0CNjHaLlM59YJJFm3NWlw==",
"dependencies": [
"pinkie"
]
},
"pinkie@2.0.4": {
"integrity": "sha512-MnUuEycAemtSaeFSjXKW/aroV7akBbY+Sv+RkyqFjgAe73F+MR0TBWKBRDkmfWq/HiFmdavfZ1G7h4SPZXaCSg=="
},
"process-nextick-args@2.0.1": {
"integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag=="
},
"readable-stream@2.3.8": {
"integrity": "sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==",
"dependencies": [
"core-util-is",
"inherits",
"isarray",
"process-nextick-args",
"safe-buffer@5.1.2",
"string_decoder",
"util-deprecate"
]
},
"safe-buffer@5.1.2": {
"integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g=="
},
"safe-buffer@5.2.1": {
"integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ=="
},
"seek-bzip@1.0.6": {
"integrity": "sha512-e1QtP3YL5tWww8uKaOCQ18UxIT2laNBXHjV/S2WYCiK4udiv8lkG89KRIoCjUagnAmCBurjF4zEVX2ByBbnCjQ==",
"dependencies": [
"commander"
]
},
"string_decoder@1.1.1": {
"integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==",
"dependencies": [
"safe-buffer@5.1.2"
]
},
"strip-dirs@2.1.0": {
"integrity": "sha512-JOCxOeKLm2CAS73y/U4ZeZPTkE+gNVCzKt7Eox84Iej1LT/2pTWYpZKJuxwQpvX1LiZb1xokNR7RLfuBAa7T3g==",
"dependencies": [
"is-natural-number"
]
},
"tar-stream@1.6.2": {
"integrity": "sha512-rzS0heiNf8Xn7/mpdSVVSMAWAoy9bfb1WOTYC78Z0UQKeKa/CWS8FOq0lKGNa8DWKAn9gxjCvMLYc5PGXYlK2A==",
"dependencies": [
"bl",
"buffer-alloc",
"end-of-stream",
"fs-constants",
"readable-stream",
"to-buffer",
"xtend"
]
},
"through@2.3.8": {
"integrity": "sha512-w89qg7PI8wAdvX60bMDP+bFoD5Dvhm9oLheFp5O4a2QF0cSBGsBX4qZmadPMvVqlLJBBci+WqGGOAPvcDeNSVg=="
},
"to-buffer@1.1.1": {
"integrity": "sha512-lx9B5iv7msuFYE3dytT+KE5tap+rNYw+K4jVkb9R/asAb+pbBSM17jtunHplhBe6RRJdZx3Pn2Jph24O32mOVg=="
},
"unbzip2-stream@1.4.3": {
"integrity": "sha512-mlExGW4w71ebDJviH16lQLtZS32VKqsSfk80GCfUlwT/4/hNRFsoscrF/c++9xinkMzECL1uL9DDwXqFWkruPg==",
"dependencies": [
"buffer",
"through"
]
},
"util-deprecate@1.0.2": {
"integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw=="
},
"wrappy@1.0.2": {
"integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ=="
},
"xtend@4.0.2": {
"integrity": "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ=="
},
"yauzl@2.10.0": {
"integrity": "sha512-p4a9I6X6nu6IhoGmBqAcbJy1mlC4j27vEPZX9F4L4/vZT3Lyq1VkFHw/V/PUcB9Buo+DG3iHkT0x3Qya58zc3g==",
"dependencies": [
"buffer-crc32",
"fd-slicer"
]
} }
}, },
"remote": { "remote": {

tools/release/npm/.gitignore (new file)

@ -0,0 +1 @@
dist

tools/release/npm/bin.cjs (new file)

@ -0,0 +1,54 @@
#!/usr/bin/env node
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
// @ts-check
const path = require("path");
const child_process = require("child_process");
const os = require("os");
const fs = require("fs");
const exePath = path.join(
__dirname,
os.platform() === "win32" ? "deno.exe" : "deno",
);
if (!fs.existsSync(exePath)) {
try {
const resolvedExePath = require("./install_api.cjs").runInstall();
runDenoExe(resolvedExePath);
} catch (err) {
if (err !== undefined && typeof err.message === "string") {
console.error(err.message);
} else {
console.error(err);
}
process.exit(1);
}
} else {
runDenoExe(exePath);
}
/** @param exePath {string} */
function runDenoExe(exePath) {
const result = child_process.spawnSync(
exePath,
process.argv.slice(2),
{ stdio: "inherit" },
);
if (result.error) {
throw result.error;
}
throwIfNoExePath();
process.exitCode = result.status;
function throwIfNoExePath() {
if (!fs.existsSync(exePath)) {
throw new Error(
"Could not find exe at path '" + exePath +
"'. Maybe try running deno again.",
);
}
}
}

tools/release/npm/build.ts (new executable file)

@ -0,0 +1,237 @@
#!/usr/bin/env -S deno run -A --lock=tools/deno.lock.json
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
// NOTICE: This deployment/npm folder was lifted from https://github.com/dprint/dprint/blob/0ba79811cc96d2dee8e0cf766a8c8c0fc44879c2/deployment/npm/
// with permission (Copyright 2019-2023 David Sherret)
import $ from "jsr:@david/dax@^0.42.0";
// @ts-types="npm:@types/decompress@4.2.7"
import decompress from "npm:decompress@4.2.1";
import { parseArgs } from "@std/cli/parse-args";
interface Package {
zipFileName: string;
os: "win32" | "darwin" | "linux";
cpu: "x64" | "arm64";
libc?: "glibc" | "musl";
}
const args = parseArgs(Deno.args, {
boolean: ["publish"],
});
const packages: Package[] = [{
zipFileName: "deno-x86_64-pc-windows-msvc.zip",
os: "win32",
cpu: "x64",
}, {
// use x86_64 until there's an arm64 build
zipFileName: "deno-x86_64-pc-windows-msvc.zip",
os: "win32",
cpu: "arm64",
}, {
zipFileName: "deno-x86_64-apple-darwin.zip",
os: "darwin",
cpu: "x64",
}, {
zipFileName: "deno-aarch64-apple-darwin.zip",
os: "darwin",
cpu: "arm64",
}, {
zipFileName: "deno-x86_64-unknown-linux-gnu.zip",
os: "linux",
cpu: "x64",
libc: "glibc",
}, {
zipFileName: "deno-aarch64-unknown-linux-gnu.zip",
os: "linux",
cpu: "arm64",
libc: "glibc",
}];
const markdownText = `# Deno
[Deno](https://www.deno.com)
([/ˈdiːnoʊ/](http://ipa-reader.xyz/?text=%CB%88di%CB%90no%CA%8A), pronounced
\`dee-no\`) is a JavaScript, TypeScript, and WebAssembly runtime with secure
defaults and a great developer experience. It's built on [V8](https://v8.dev/),
[Rust](https://www.rust-lang.org/), and [Tokio](https://tokio.rs/).
Learn more about the Deno runtime
[in the documentation](https://docs.deno.com/runtime/manual).
`;
const currentDir = $.path(import.meta.url).parentOrThrow();
const rootDir = currentDir.parentOrThrow().parentOrThrow().parentOrThrow();
const outputDir = currentDir.join("./dist");
const scopeDir = outputDir.join("@deno");
const denoDir = outputDir.join("deno");
const version = resolveVersion();
$.logStep(`Publishing ${version}...`);
await $`rm -rf ${outputDir}`;
await $`mkdir -p ${denoDir} ${scopeDir}`;
// setup Deno packages
{
$.logStep(`Setting up deno ${version}...`);
const pkgJson = {
"name": "deno",
"version": version,
"description": "A modern runtime for JavaScript and TypeScript.",
"bin": "bin.cjs",
"repository": {
"type": "git",
"url": "git+https://github.com/denoland/deno.git",
},
"keywords": [
"runtime",
"typescript",
],
"author": "the Deno authors",
"license": "MIT",
"bugs": {
"url": "https://github.com/denoland/deno/issues",
},
"homepage": "https://deno.com",
// for yarn berry (https://github.com/dprint/dprint/issues/686)
"preferUnplugged": true,
"scripts": {
"postinstall": "node ./install.cjs",
},
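// one optionalDependency per platform package, pinned to the same version,
// so npm only installs the binary package that matches the host platform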
optionalDependencies: packages
.map((pkg) => `@deno/${getPackageNameNoScope(pkg)}`)
.reduce((obj, pkgName) => ({ ...obj, [pkgName]: version }), {}),
};
currentDir.join("bin.cjs").copyFileToDirSync(denoDir);
currentDir.join("install_api.cjs").copyFileToDirSync(denoDir);
currentDir.join("install.cjs").copyFileToDirSync(denoDir);
denoDir.join("package.json").writeJsonPrettySync(pkgJson);
rootDir.join("LICENSE.md").copyFileSync(denoDir.join("LICENSE"));
denoDir.join("README.md").writeTextSync(markdownText);
// ensure the test files don't get published
denoDir.join(".npmignore").writeTextSync("deno\ndeno.exe\n");
// setup each binary package
for (const pkg of packages) {
const pkgName = getPackageNameNoScope(pkg);
$.logStep(`Setting up @deno/${pkgName}...`);
const pkgDir = scopeDir.join(pkgName);
const zipPath = pkgDir.join("output.zip");
await $`mkdir -p ${pkgDir}`;
// download and extract the zip file
const zipUrl =
`https://github.com/denoland/deno/releases/download/v${version}/${pkg.zipFileName}`;
await $.request(zipUrl).showProgress().pipeToPath(zipPath);
await decompress(zipPath.toString(), pkgDir.toString());
zipPath.removeSync();
// create the package.json and readme
pkgDir.join("README.md").writeTextSync(
`# @deno/${pkgName}\n\n${pkgName} distribution of [Deno](https://deno.land).\n`,
);
pkgDir.join("package.json").writeJsonPrettySync({
"name": `@deno/${pkgName}`,
"version": version,
"description": `${pkgName} distribution of Deno`,
"repository": {
"type": "git",
"url": "git+https://github.com/denoland/deno.git",
},
// force yarn to unpack
"preferUnplugged": true,
"author": "David Sherret",
"license": "MIT",
"bugs": {
"url": "https://github.com/denoland/deno/issues",
},
"homepage": "https://deno.land",
"os": [pkg.os],
"cpu": [pkg.cpu],
libc: pkg.libc == null ? undefined : [pkg.libc],
});
}
}
// verify that the package is created correctly
{
$.logStep("Verifying packages...");
const testPlatform = Deno.build.os == "windows"
? (Deno.build.arch === "x86_64" ? "@deno/win32-x64" : "@deno/win32-arm64")
: Deno.build.os === "darwin"
? (Deno.build.arch === "x86_64" ? "@deno/darwin-x64" : "@deno/darwin-arm64")
: "@deno/linux-x64-glibc";
outputDir.join("package.json").writeJsonPrettySync({
workspaces: [
"deno",
// There seems to be a bug with npm workspaces where this doesn't
// work, so for now make some assumptions and only include the package
// that works on the CI for the current operating system
// ...packages.map(p => `@deno/${getPackageNameNoScope(p)}`),
testPlatform,
],
});
const denoExe = Deno.build.os === "windows" ? "deno.exe" : "deno";
await $`npm install`.cwd(denoDir);
// ensure the post-install script adds the executable to the deno package,
// which is necessary for faster caching and to ensure the vscode extension
// picks it up
if (!denoDir.join(denoExe).existsSync()) {
throw new Error("Deno executable did not exist after post install");
}
// run once after post install created deno, once with a simulated readonly
// file system, once creating the cache, and once with the cache in place
await $`node bin.cjs -v && rm ${denoExe} && DENO_SIMULATED_READONLY_FILE_SYSTEM=1 node bin.cjs -v && node bin.cjs -v && node bin.cjs -v`
.cwd(denoDir);
if (!denoDir.join(denoExe).existsSync()) {
throw new Error("Deno executable did not exist when lazily initialized");
}
}
// publish if necessary
if (args.publish) {
for (const pkg of packages) {
const pkgName = getPackageNameNoScope(pkg);
$.logStep(`Publishing @deno/${pkgName}...`);
if (await checkPackagePublished(`@deno/${pkgName}`)) {
$.logLight(" Already published.");
continue;
}
const pkgDir = scopeDir.join(pkgName);
await $`cd ${pkgDir} && npm publish --provenance --access public`;
}
$.logStep(`Publishing deno...`);
await $`cd ${denoDir} && npm publish --provenance --access public`;
}
function getPackageNameNoScope(name: Package) {
const libc = name.libc == null ? "" : `-${name.libc}`;
return `${name.os}-${name.cpu}${libc}`;
}
function resolveVersion() {
const firstArg = args._[0];
if (
firstArg != null &&
typeof firstArg === "string" &&
firstArg.trim().length > 0
) {
return firstArg;
}
const version = (rootDir.join("cli/Cargo.toml").readTextSync().match(
/version = "(.*?)"/,
))?.[1];
if (version == null) {
throw new Error("Could not resolve version.");
}
return version;
}
async function checkPackagePublished(pkgName: string) {
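// `npm info <pkg>@<version>` exits with a non-zero code when that exact
// version has not been published yet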
const result = await $`npm info ${pkgName}@${version}`.quiet().noThrow();
return result.code === 0;
}


@ -0,0 +1,5 @@
// @ts-check
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
"use strict";
require("./install_api.cjs").runInstall();


@ -0,0 +1,196 @@
// @ts-check
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
"use strict";
const fs = require("fs");
const os = require("os");
const path = require("path");
/** @type {string | undefined} */
let cachedIsMusl = undefined;
module.exports = {
runInstall() {
const denoFileName = os.platform() === "win32" ? "deno.exe" : "deno";
const targetExecutablePath = path.join(
__dirname,
denoFileName,
);
if (fs.existsSync(targetExecutablePath)) {
return targetExecutablePath;
}
const target = getTarget();
const sourcePackagePath = path.dirname(
require.resolve("@deno/" + target + "/package.json"),
);
const sourceExecutablePath = path.join(sourcePackagePath, denoFileName);
if (!fs.existsSync(sourceExecutablePath)) {
throw new Error(
"Could not find executable for @deno/" + target + " at " +
sourceExecutablePath,
);
}
try {
if (process.env.DENO_SIMULATED_READONLY_FILE_SYSTEM === "1") {
console.warn("Simulating readonly file system for testing.");
throw new Error("Throwing for testing purposes.");
}
// in order to make things faster the next time we run and to allow the
// deno vscode extension to easily pick this up, copy the executable
// into the deno package folder
hardLinkOrCopy(sourceExecutablePath, targetExecutablePath);
if (os.platform() !== "win32") {
// chmod +x
chmodX(targetExecutablePath);
}
return targetExecutablePath;
} catch (err) {
// this may fail on readonly file systems... in this case, fall
// back to using the resolved package path
if (process.env.DENO_DEBUG === "1") {
console.warn(
"Failed to copy executable from " +
sourceExecutablePath + " to " + targetExecutablePath +
". Using resolved package path instead.",
err,
);
}
// use the path found in the specific package
try {
chmodX(sourceExecutablePath);
} catch (_err) {
// ignore
}
return sourceExecutablePath;
}
},
};
/** @param filePath {string} */
function chmodX(filePath) {
const perms = fs.statSync(filePath).mode;
fs.chmodSync(filePath, perms | 0o111);
}
function getTarget() {
const platform = os.platform();
if (platform === "linux") {
return platform + "-" + getArch() + "-" + getLinuxFamily();
} else {
return platform + "-" + getArch();
}
}
function getArch() {
const arch = os.arch();
if (arch !== "arm64" && arch !== "x64") {
throw new Error(
"Unsupported architecture " + os.arch() +
". Only x64 and aarch64 binaries are available.",
);
}
return arch;
}
function getLinuxFamily() {
if (getIsMusl()) {
throw new Error(
"Musl is not supported. It's one of our priorities. Please upvote this issue: https://github.com/denoland/deno/issues/3711",
);
// return "musl";
}
return "glibc";
function getIsMusl() {
// code adapted from https://github.com/lovell/detect-libc
// Copyright Apache 2.0 license, the detect-libc maintainers
if (cachedIsMusl == null) {
cachedIsMusl = innerGet();
}
return cachedIsMusl;
function innerGet() {
try {
if (os.platform() !== "linux") {
return false;
}
return isProcessReportMusl() || isConfMusl();
} catch (err) {
// just in case
console.warn("Error checking if musl.", err);
return false;
}
}
function isProcessReportMusl() {
if (!process.report) {
return false;
}
const rawReport = process.report.getReport();
const report = typeof rawReport === "string"
? JSON.parse(rawReport)
: rawReport;
if (!report || !(report.sharedObjects instanceof Array)) {
return false;
}
return report.sharedObjects.some((o) =>
o.includes("libc.musl-") || o.includes("ld-musl-")
);
}
function isConfMusl() {
const output = getCommandOutput();
const [_, ldd1] = output.split(/[\r\n]+/);
return ldd1 && ldd1.includes("musl");
}
function getCommandOutput() {
try {
const command =
"getconf GNU_LIBC_VERSION 2>&1 || true; ldd --version 2>&1 || true";
return require("child_process").execSync(command, { encoding: "utf8" });
} catch (_err) {
return "";
}
}
}
}
/**
* @param sourcePath {string}
* @param destinationPath {string}
*/
function hardLinkOrCopy(sourcePath, destinationPath) {
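  // prefer a hard link to avoid duplicating the binary on disk; if linking
  // fails (e.g. on file systems that don't support it), fall back to a copy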
try {
fs.linkSync(sourcePath, destinationPath);
} catch {
atomicCopyFile(sourcePath, destinationPath);
}
}
/**
* @param sourcePath {string}
* @param destinationPath {string}
*/
function atomicCopyFile(sourcePath, destinationPath) {
const crypto = require("crypto");
const rand = crypto.randomBytes(4).toString("hex");
const tempFilePath = destinationPath + "." + rand;
fs.copyFileSync(sourcePath, tempFilePath);
try {
fs.renameSync(tempFilePath, destinationPath);
} catch (err) {
// the rename may fail if another process has already copied the file,
// so delete the temporary file before rethrowing
try {
fs.unlinkSync(tempFilePath);
} catch (_err2) {
// ignore
}
throw err;
}
}