
Merge branch 'main' into jsdocs-consoleInterface

Phil Hawksworth committed c1262e7d85 on 2025-02-13 15:13:17 +00:00 (committed by GitHub)
Signature: no known key found for this signature in database; GPG key ID: B5690EEEBB952194
200 changed files with 9645 additions and 12679 deletions


@ -5,7 +5,7 @@ import { stringify } from "jsr:@std/yaml@^0.221/stringify";
// Bump this number when you want to purge the cache.
// Note: the tools/release/01_bump_crate_versions.ts script will update this version
// automatically via regex, so ensure that this line maintains this format.
const cacheVersion = 39;
const cacheVersion = 40;
const ubuntuX86Runner = "ubuntu-24.04";
const ubuntuX86XlRunner = "ubuntu-24.04-xl";


@ -184,8 +184,8 @@ jobs:
~/.cargo/registry/index
~/.cargo/registry/cache
~/.cargo/git/db
key: '39-cargo-home-${{ matrix.os }}-${{ matrix.arch }}-${{ hashFiles(''Cargo.lock'') }}'
restore-keys: '39-cargo-home-${{ matrix.os }}-${{ matrix.arch }}-'
key: '40-cargo-home-${{ matrix.os }}-${{ matrix.arch }}-${{ hashFiles(''Cargo.lock'') }}'
restore-keys: '40-cargo-home-${{ matrix.os }}-${{ matrix.arch }}-'
if: '!(matrix.skip)'
- uses: dsherret/rust-toolchain-file@v1
if: '!(matrix.skip)'
@ -377,7 +377,7 @@ jobs:
!./target/*/*.zip
!./target/*/*.tar.gz
key: never_saved
restore-keys: '39-cargo-target-${{ matrix.os }}-${{ matrix.arch }}-${{ matrix.profile }}-${{ matrix.job }}-'
restore-keys: '40-cargo-target-${{ matrix.os }}-${{ matrix.arch }}-${{ matrix.profile }}-${{ matrix.job }}-'
- name: Apply and update mtime cache
if: '!(matrix.skip) && (!startsWith(github.ref, ''refs/tags/''))'
uses: ./.github/mtime_cache
@ -693,7 +693,7 @@ jobs:
!./target/*/gn_root
!./target/*/*.zip
!./target/*/*.tar.gz
key: '39-cargo-target-${{ matrix.os }}-${{ matrix.arch }}-${{ matrix.profile }}-${{ matrix.job }}-${{ github.sha }}'
key: '40-cargo-target-${{ matrix.os }}-${{ matrix.arch }}-${{ matrix.profile }}-${{ matrix.job }}-${{ github.sha }}'
wasm:
name: build wasm32
needs:

Cargo.lock (generated) — 481 changed lines; file diff suppressed because it is too large.


@ -53,17 +53,17 @@ repository = "https://github.com/denoland/deno"
deno_ast = { version = "=0.44.0", features = ["transpiling"] }
deno_core = { version = "0.336.0" }
deno_bench_util = { version = "0.183.0", path = "./bench_util" }
deno_config = { version = "=0.48.0", features = ["workspace"] }
deno_bench_util = { version = "0.184.0", path = "./bench_util" }
deno_config = { version = "=0.46.0", features = ["workspace"] }
deno_lockfile = "=0.24.0"
deno_media_type = { version = "=0.2.5", features = ["module_specifier"] }
deno_npm = "=0.27.2"
deno_path_util = "=0.3.1"
deno_permissions = { version = "0.48.0", path = "./runtime/permissions" }
deno_runtime = { version = "0.197.0", path = "./runtime" }
deno_permissions = { version = "0.49.0", path = "./runtime/permissions" }
deno_runtime = { version = "0.198.0", path = "./runtime" }
deno_semver = "=0.7.1"
deno_terminal = "0.2.0"
napi_sym = { version = "0.119.0", path = "./ext/napi/sym" }
napi_sym = { version = "0.120.0", path = "./ext/napi/sym" }
test_util = { package = "test_server", path = "./tests/util/server" }
denokv_proto = "0.9.0"
@ -72,38 +72,38 @@ denokv_remote = "0.9.0"
denokv_sqlite = { default-features = false, version = "0.9.0" }
# exts
deno_broadcast_channel = { version = "0.183.0", path = "./ext/broadcast_channel" }
deno_cache = { version = "0.121.0", path = "./ext/cache" }
deno_canvas = { version = "0.58.0", path = "./ext/canvas" }
deno_console = { version = "0.189.0", path = "./ext/console" }
deno_cron = { version = "0.69.0", path = "./ext/cron" }
deno_crypto = { version = "0.203.0", path = "./ext/crypto" }
deno_fetch = { version = "0.213.0", path = "./ext/fetch" }
deno_ffi = { version = "0.176.0", path = "./ext/ffi" }
deno_fs = { version = "0.99.0", path = "./ext/fs" }
deno_http = { version = "0.187.0", path = "./ext/http" }
deno_io = { version = "0.99.0", path = "./ext/io" }
deno_kv = { version = "0.97.0", path = "./ext/kv" }
deno_napi = { version = "0.120.0", path = "./ext/napi" }
deno_net = { version = "0.181.0", path = "./ext/net" }
deno_node = { version = "0.127.0", path = "./ext/node" }
deno_os = { version = "0.6.0", path = "./ext/os" }
deno_process = { version = "0.4.0", path = "./ext/process" }
deno_telemetry = { version = "0.11.0", path = "./ext/telemetry" }
deno_tls = { version = "0.176.0", path = "./ext/tls" }
deno_url = { version = "0.189.0", path = "./ext/url" }
deno_web = { version = "0.220.0", path = "./ext/web" }
deno_webgpu = { version = "0.156.0", path = "./ext/webgpu" }
deno_webidl = { version = "0.189.0", path = "./ext/webidl" }
deno_websocket = { version = "0.194.0", path = "./ext/websocket" }
deno_webstorage = { version = "0.184.0", path = "./ext/webstorage" }
deno_broadcast_channel = { version = "0.184.0", path = "./ext/broadcast_channel" }
deno_cache = { version = "0.122.0", path = "./ext/cache" }
deno_canvas = { version = "0.59.0", path = "./ext/canvas" }
deno_console = { version = "0.190.0", path = "./ext/console" }
deno_cron = { version = "0.70.0", path = "./ext/cron" }
deno_crypto = { version = "0.204.0", path = "./ext/crypto" }
deno_fetch = { version = "0.214.0", path = "./ext/fetch" }
deno_ffi = { version = "0.177.0", path = "./ext/ffi" }
deno_fs = { version = "0.100.0", path = "./ext/fs" }
deno_http = { version = "0.188.0", path = "./ext/http" }
deno_io = { version = "0.100.0", path = "./ext/io" }
deno_kv = { version = "0.98.0", path = "./ext/kv" }
deno_napi = { version = "0.121.0", path = "./ext/napi" }
deno_net = { version = "0.182.0", path = "./ext/net" }
deno_node = { version = "0.128.0", path = "./ext/node" }
deno_os = { version = "0.7.0", path = "./ext/os" }
deno_process = { version = "0.5.0", path = "./ext/process" }
deno_telemetry = { version = "0.12.0", path = "./ext/telemetry" }
deno_tls = { version = "0.177.0", path = "./ext/tls" }
deno_url = { version = "0.190.0", path = "./ext/url" }
deno_web = { version = "0.221.0", path = "./ext/web" }
deno_webgpu = { version = "0.157.0", path = "./ext/webgpu" }
deno_webidl = { version = "0.190.0", path = "./ext/webidl" }
deno_websocket = { version = "0.195.0", path = "./ext/websocket" }
deno_webstorage = { version = "0.185.0", path = "./ext/webstorage" }
# workspace libraries
deno_lib = { version = "0.5.0", path = "./cli/lib" }
deno_npm_cache = { version = "0.8.0", path = "./resolvers/npm_cache" }
deno_resolver = { version = "0.20.0", path = "./resolvers/deno" }
deno_snapshots = { version = "0.4.0", path = "./cli/snapshot" }
node_resolver = { version = "0.27.0", path = "./resolvers/node" }
deno_lib = { version = "0.6.0", path = "./cli/lib" }
deno_npm_cache = { version = "0.9.0", path = "./resolvers/npm_cache" }
deno_resolver = { version = "0.21.0", path = "./resolvers/deno" }
deno_snapshots = { version = "0.5.0", path = "./cli/snapshot" }
node_resolver = { version = "0.28.0", path = "./resolvers/node" }
aes = "=0.8.3"
anyhow = "1.0.57"
@ -218,7 +218,7 @@ tokio-socks = "0.5.1"
tokio-util = "0.7.4"
tower = { version = "0.5.2", default-features = false, features = ["retry", "util"] }
tower-http = { version = "0.6.1", features = ["decompression-br", "decompression-gzip"] }
tower-lsp = { package = "deno_tower_lsp", version = "0.1.0", features = ["proposed"] }
tower-lsp = { package = "deno_tower_lsp", version = "=0.3.0", features = ["proposed"] }
tower-service = "0.3.2"
twox-hash = "=1.6.3"
url = { version = "2.5", features = ["serde", "expose_internals"] }
@ -235,7 +235,7 @@ opentelemetry = "0.27.0"
opentelemetry-http = "0.27.0"
opentelemetry-otlp = { version = "0.27.0", features = ["logs", "http-proto", "http-json"] }
opentelemetry-semantic-conventions = { version = "0.27.0", features = ["semconv_experimental"] }
opentelemetry_sdk = "0.27.0"
opentelemetry_sdk = { version = "0.27.0", features = ["rt-tokio", "trace"] }
# crypto
hkdf = "0.12.3"
@ -243,8 +243,8 @@ rsa = { version = "0.9.3", default-features = false, features = ["std", "pem", "
# webgpu
raw-window-handle = "0.6.0"
wgpu-core = "0.21.1"
wgpu-types = "0.20"
wgpu-core = "24.0.0"
wgpu-types = "24.0.0"
# macros
quote = "1"


@ -6,6 +6,35 @@ https://github.com/denoland/deno/releases
We also have one-line install commands at:
https://github.com/denoland/deno_install
### 2.1.10 / 2025.02.13
- Revert "fix(lsp): silence debug error for 'move to a new file' action
(#27780)" (#27903)
- fix(cli): Fix panic in `load_native_certs` (#27863)
- fix(compile): never include the specified output executable in itself (#27877)
- fix(ext/napi): napi_is_buffer tests for ArrayBufferView (#27956)
- fix(ext/node): expose brotli stream APIs (#27943)
- fix(ext/node): fix missing privateKey.x in curve25519 JWK (#27990)
- fix(ext/node): fix twitter-api-v2 compatibility (#27971)
- fix(ext/node): handle non-ws upgrade headers (#27931)
- fix(ext/node): set process fields on own instance (#27927)
- fix(ext/node): set process.env as own property (#27891)
- fix(ext/node): support proxy http request (#27871)
- fix(lsp): ignore a few more diagnostics for ambient modules (#27949)
- fix(node): resolve module as maybe CJS when it's missing a file extension
(#27904)
- fix(node): show directory import and missing extension suggestions (#27905)
- fix(otel): custom span start + end times are fractional ms (#27995)
- fix(publish): correct coloring in --help (#27939)
- fix(streams): handle Resource stream error (#27975)
- fix: allow creating TSC host without a snapshot (#28058)
- fix: do special file permission check for `check_read_path` (#27989)
- fix: panic with js lint plugins and invalid js syntax (#28006)
- perf(compile): use bytes already in memory after downloading executable
(#28000)
- perf(lsp): cancellation checks in blocking code (#27997)
- perf: node resolution cache (#27838)
### 2.1.9 / 2025.01.30
- fix(ext/node): add http information support (#27381)


@ -2,7 +2,7 @@
[package]
name = "deno_bench_util"
version = "0.183.0"
version = "0.184.0"
authors.workspace = true
edition.workspace = true
license.workspace = true


@ -2,7 +2,7 @@
[package]
name = "deno"
version = "2.1.9"
version = "2.1.10"
authors.workspace = true
default-run = "deno"
edition.workspace = true
@ -72,7 +72,7 @@ deno_config = { workspace = true, features = ["sync", "workspace"] }
deno_core = { workspace = true, features = ["include_js_files_for_snapshotting"] }
deno_doc = { version = "=0.164.0", features = ["rust", "comrak"] }
deno_error.workspace = true
deno_graph = { version = "=0.87.2" }
deno_graph = { version = "=0.87.3" }
deno_lib.workspace = true
deno_lint = { version = "0.72.0" }
deno_lockfile.workspace = true
@ -141,6 +141,10 @@ monch.workspace = true
notify.workspace = true
once_cell.workspace = true
open = "5.0.1"
opentelemetry.workspace = true
opentelemetry-otlp.workspace = true
opentelemetry-semantic-conventions.workspace = true
opentelemetry_sdk.workspace = true
p256.workspace = true
pathdiff = "0.2.1"
percent-encoding.workspace = true
@ -166,8 +170,11 @@ text_lines = "=0.6.0"
thiserror.workspace = true
tokio.workspace = true
tokio-util.workspace = true
tower.workspace = true
tower-lsp.workspace = true
tracing = { version = "0.1", features = ["log", "default"] }
tracing = { version = "0.1", features = ["log"] }
tracing-opentelemetry = "0.28.0"
tracing-subscriber = { version = "0.3.19", features = ["env-filter"] }
twox-hash.workspace = true
typed-arena = "=2.0.2"
unicode-width = "0.1.3"


@ -1772,7 +1772,7 @@ If you specify a directory instead of a file, the path is expanded to all contai
}
fn bundle_subcommand() -> Command {
command("bundle", "⚠️ `deno bundle` was removed in Deno 2.
command("bundle", "`deno bundle` was removed in Deno 2.
See the Deno 1.x to 2.x Migration Guide for migration instructions: https://docs.deno.com/runtime/manual/advanced/migrate_deprecations", UnstableArgsConfig::ResolutionOnly)
.hide(true)
@ -1828,6 +1828,7 @@ Unless --reload is specified, this command will not re-download already cached d
)
.defer(|cmd| {
compile_args_without_check_args(cmd)
.arg(no_code_cache_arg())
.arg(
Arg::new("all")
.long("all")
@ -3025,6 +3026,7 @@ Evaluate a task from string
.allow_external_subcommands(true)
.subcommand_value_name("TASK")
.arg(config_arg())
.arg(frozen_lockfile_arg())
.arg(
Arg::new("cwd")
.long("cwd")
@ -3320,7 +3322,7 @@ different location, use the <c>--output</> flag:
fn vendor_subcommand() -> Command {
command("vendor",
"⚠️ `deno vendor` was removed in Deno 2.
"`deno vendor` was removed in Deno 2.
See the Deno 1.x to 2.x Migration Guide for migration instructions: https://docs.deno.com/runtime/manual/advanced/migrate_deprecations",
UnstableArgsConfig::ResolutionOnly
@ -4572,6 +4574,7 @@ fn check_parse(
doc: matches.get_flag("doc"),
doc_only: matches.get_flag("doc-only"),
});
flags.code_cache_enabled = !matches.get_flag("no-code-cache");
allow_import_parse(flags, matches);
Ok(())
}
@ -5256,6 +5259,7 @@ fn task_parse(
unstable_args_parse(flags, matches, UnstableArgsConfig::ResolutionAndRuntime);
node_modules_arg_parse(flags, matches);
frozen_lockfile_arg_parse(flags, matches);
let mut recursive = matches.get_flag("recursive");
let filter = if let Some(filter) = matches.remove_one::<String>("filter") {
@ -7412,6 +7416,7 @@ mod tests {
doc_only: false,
}),
type_check_mode: TypeCheckMode::Local,
code_cache_enabled: true,
..Flags::default()
}
);
@ -7426,6 +7431,7 @@ mod tests {
doc_only: false,
}),
type_check_mode: TypeCheckMode::Local,
code_cache_enabled: true,
..Flags::default()
}
);
@ -7440,6 +7446,7 @@ mod tests {
doc_only: true,
}),
type_check_mode: TypeCheckMode::Local,
code_cache_enabled: true,
..Flags::default()
}
);
@ -7468,6 +7475,7 @@ mod tests {
doc_only: false,
}),
type_check_mode: TypeCheckMode::All,
code_cache_enabled: true,
..Flags::default()
}
);


@ -71,7 +71,7 @@ fn patch_uris<'a>(
};
if let Some(new_req) = new_req {
*req = new_req;
*req = new_req.request;
}
}
}


@ -202,7 +202,7 @@ fn main() {
// To debug snapshot issues uncomment:
// op_fetch_asset::trace_serializer();
if !cfg!(debug_assertions) {
if !cfg!(debug_assertions) && std::env::var("CARGO_FEATURE_HMR").is_err() {
let out_dir =
std::path::PathBuf::from(std::env::var_os("OUT_DIR").unwrap());
compress_sources(&out_dir);


@ -873,6 +873,11 @@ impl CliFactory {
self.npm_resolver().await?.clone(),
self.sys(),
self.tsconfig_resolver()?.clone(),
if cli_options.code_cache_enabled() {
Some(self.code_cache()?.clone())
} else {
None
},
)))
})
.await
@ -1194,7 +1199,7 @@ impl CliFactory {
serve_port: cli_options.serve_port(),
serve_host: cli_options.serve_host(),
otel_config: self.cli_options()?.otel_config(),
startup_snapshot: crate::js::deno_isolate_init(),
startup_snapshot: deno_snapshots::CLI_SNAPSHOT,
})
}


@ -1,8 +0,0 @@
// Copyright 2018-2025 the Deno authors. MIT license.
use log::debug;
pub fn deno_isolate_init() -> Option<&'static [u8]> {
debug!("Deno isolate init with snapshots.");
deno_snapshots::CLI_SNAPSHOT
}


@ -192,6 +192,84 @@ class Fixer {
}
}
/**
* @implements {Deno.lint.SourceCode}
*/
export class SourceCode {
/** @type {string | null} */
#source = null;
/** @type {AstContext} */
#ctx;
/**
* @param {AstContext} ctx
*/
constructor(ctx) {
this.#ctx = ctx;
}
get text() {
return this.#getSource();
}
get ast() {
const program = /** @type {*} */ (getNode(
this.#ctx,
this.#ctx.rootOffset,
));
return program;
}
/**
* @param {Deno.lint.Node} [node]
* @returns {string}
*/
getText(node) {
const source = this.#getSource();
if (node === undefined) {
return source;
}
return source.slice(node.range[0], node.range[1]);
}
/**
* @param {Deno.lint.Node} node
*/
getAncestors(node) {
const { buf } = this.#ctx;
/** @type {Deno.lint.Node[]} */
const ancestors = [];
let parent = /** @type {*} */ (node)[INTERNAL_IDX];
while ((parent = readParent(buf, parent)) > AST_IDX_INVALID) {
if (readType(buf, parent) === AST_GROUP_TYPE) continue;
const parentNode = /** @type {*} */ (getNode(this.#ctx, parent));
if (parentNode !== null) {
ancestors.push(parentNode);
}
}
ancestors.reverse();
return ancestors;
}
/**
* @returns {string}
*/
#getSource() {
if (this.#source === null) {
this.#source = op_lint_get_source();
}
return /** @type {string} */ (this.#source);
}
}
/**
* Every rule gets their own instance of this class. This is the main
* API lint rules interact with.
@ -199,25 +277,27 @@ class Fixer {
*/
export class Context {
id;
fileName;
#source = null;
// ESLint uses lowercase
filename;
sourceCode;
/**
* @param {AstContext} ctx
* @param {string} id
* @param {string} fileName
*/
constructor(id, fileName) {
constructor(ctx, id, fileName) {
this.id = id;
this.fileName = fileName;
this.filename = fileName;
this.sourceCode = new SourceCode(ctx);
}
source() {
if (this.#source === null) {
this.#source = op_lint_get_source();
}
return /** @type {*} */ (this.#source);
getFilename() {
return this.filename;
}
getSourceCode() {
return this.sourceCode;
}
/**
@ -961,8 +1041,8 @@ export function runPluginsForFile(fileName, serializedAst) {
continue;
}
const ctx = new Context(id, fileName);
const visitor = rule.create(ctx);
const ruleCtx = new Context(ctx, id, fileName);
const visitor = rule.create(ruleCtx);
// deno-lint-ignore guard-for-in
for (let key in visitor) {
@ -1016,7 +1096,7 @@ export function runPluginsForFile(fileName, serializedAst) {
const destroyFn = rule.destroy.bind(rule);
destroyFns.push(() => {
try {
destroyFn(ctx);
destroyFn(ruleCtx);
} catch (err) {
throw new Error(`Destroy hook of "${id}" errored`, { cause: err });
}
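
For plugin authors, the net effect of the `Context` changes above is an ESLint-compatible surface: `context.filename`, `context.getFilename()`, and `context.sourceCode` (with `text`, `getText(node)`, and `getAncestors(node)`) replace the old `context.source()`. A minimal sketch of a rule using the new members (the `Deno.lint.Plugin` shape and the `context.report()` call are assumed from the existing plugin API, not shown in this diff):

```ts
// Sketch only: a rule exercising the new Context members. The plugin/rule
// registration shape and `context.report()` come from the existing
// Deno.lint plugin API and are assumed here, not part of this diff.
const plugin: Deno.lint.Plugin = {
  name: "example-plugin",
  rules: {
    "no-debugger": {
      create(context) {
        return {
          DebuggerStatement(node) {
            // ESLint-compatible accessors added by this change:
            const fileName = context.getFilename(); // same value as context.filename
            const snippet = context.sourceCode.getText(node); // slice of the file source
            context.report({
              node,
              message: `remove \`${snippet}\` (${fileName})`,
            });
          },
        };
      },
    },
  },
};
export default plugin;
```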


@ -66,13 +66,11 @@ impl JsrFetchResolver {
}
let fetch_package_info = || async {
let meta_url = jsr_url().join(&format!("{}/meta.json", name)).ok()?;
let file_fetcher = self.file_fetcher.clone();
// spawn due to the lsp's `Send` requirement
let file = deno_core::unsync::spawn(async move {
file_fetcher.fetch_bypass_permissions(&meta_url).await.ok()
})
.await
.ok()??;
let file = self
.file_fetcher
.fetch_bypass_permissions(&meta_url)
.await
.ok()?;
serde_json::from_slice::<JsrPackageInfo>(&file.source).ok()
};
let info = fetch_package_info().await.map(Arc::new);
@ -92,12 +90,10 @@ impl JsrFetchResolver {
.join(&format!("{}/{}_meta.json", &nv.name, &nv.version))
.ok()?;
let file_fetcher = self.file_fetcher.clone();
// spawn due to the lsp's `Send` requirement
let file = deno_core::unsync::spawn(async move {
file_fetcher.fetch_bypass_permissions(&meta_url).await.ok()
})
.await
.ok()??;
let file = file_fetcher
.fetch_bypass_permissions(&meta_url)
.await
.ok()?;
partial_jsr_package_version_info_from_slice(&file.source).ok()
};
let info = fetch_package_version_info().await.map(Arc::new);


@ -2,7 +2,7 @@
[package]
name = "deno_lib"
version = "0.5.0"
version = "0.6.0"
authors.workspace = true
edition.workspace = true
license.workspace = true


@ -91,6 +91,8 @@ pub fn init<
.filter_module("swc_ecma_parser", log::LevelFilter::Error)
// Suppress span lifecycle logs since they are too verbose
.filter_module("tracing::span", log::LevelFilter::Off)
.filter_module("tower_lsp", log::LevelFilter::Trace)
.filter_module("opentelemetry_sdk", log::LevelFilter::Off)
// for deno_compile, this is too verbose
.filter_module("editpe", log::LevelFilter::Error)
.format(|buf, record| {


@ -1 +1 @@
2.1.9
2.1.10


@ -150,6 +150,7 @@ fn to_narrow_lsp_range(
/// completion response, which will be valid import completions for the specific
/// context.
#[allow(clippy::too_many_arguments)]
#[tracing::instrument(skip_all)]
pub async fn get_import_completions(
specifier: &ModuleSpecifier,
position: &lsp::Position,


@ -619,6 +619,9 @@ pub struct WorkspaceSettings {
#[serde(default)]
pub typescript: LanguageWorkspaceSettings,
#[serde(default)]
pub tracing: Option<super::trace::TracingConfigOrEnabled>,
}
impl Default for WorkspaceSettings {
@ -645,6 +648,7 @@ impl Default for WorkspaceSettings {
unstable: Default::default(),
javascript: Default::default(),
typescript: Default::default(),
tracing: Default::default(),
}
}
}
@ -1534,17 +1538,11 @@ impl ConfigData {
import_map_url.clone(),
ConfigWatchedFileType::ImportMap,
);
// spawn due to the lsp's `Send` requirement
let fetch_result =
deno_core::unsync::spawn({
let file_fetcher = file_fetcher.cloned().unwrap();
let import_map_url = import_map_url.clone();
async move {
file_fetcher.fetch_bypass_permissions(&import_map_url).await
}
})
.await
.unwrap();
let fetch_result = file_fetcher
.as_ref()
.unwrap()
.fetch_bypass_permissions(import_map_url)
.await;
let value_result = fetch_result.and_then(|f| {
serde_json::from_slice::<Value>(&f.source).map_err(|e| e.into())
@ -2315,8 +2313,9 @@ mod tests {
suggestion_actions: SuggestionActionsSettings { enabled: true },
update_imports_on_file_move: UpdateImportsOnFileMoveOptions {
enabled: UpdateImportsOnFileMoveEnabled::Prompt
}
},
},
tracing: Default::default()
}
);
}


@ -1297,6 +1297,7 @@ impl Documents {
/// For a given set of string specifiers, resolve each one from the graph,
/// for a given referrer. This is used to provide resolution information to
/// tsc when type checking.
#[tracing::instrument(skip_all)]
pub fn resolve(
&self,
// (is_cjs: bool, raw_specifier: String)
@ -1555,6 +1556,7 @@ impl Documents {
self.dirty = false;
}
#[tracing::instrument(skip_all)]
pub fn resolve_dependency(
&self,
specifier: &ModuleSpecifier,


@ -307,7 +307,7 @@ impl CliJsrSearchApi {
}
}
#[async_trait::async_trait]
#[async_trait::async_trait(?Send)]
impl PackageSearchApi for CliJsrSearchApi {
async fn search(&self, query: &str) -> Result<Arc<Vec<String>>, AnyError> {
if let Some(names) = self.search_cache.get(query) {
@ -316,12 +316,10 @@ impl PackageSearchApi for CliJsrSearchApi {
let mut search_url = jsr_api_url().join("packages")?;
search_url.query_pairs_mut().append_pair("query", query);
let file_fetcher = self.file_fetcher.clone();
// spawn due to the lsp's `Send` requirement
let file = deno_core::unsync::spawn(async move {
let file = {
let file = file_fetcher.fetch_bypass_permissions(&search_url).await?;
TextDecodedFile::decode(file)
})
.await??;
TextDecodedFile::decode(file)?
};
let names = Arc::new(parse_jsr_search_response(&file.source)?);
self.search_cache.insert(query.to_string(), names.clone());
Ok(names)

File diff suppressed because it is too large.


@ -36,6 +36,7 @@ mod search;
mod semantic_tokens;
mod testing;
mod text;
mod trace;
mod tsc;
mod urls;
@ -74,7 +75,7 @@ pub async fn start() -> Result<(), AnyError> {
builder
};
let (service, socket) = builder.finish();
let (service, socket, pending) = builder.finish();
// TODO(nayeemrmn): This shutdown flag is a workaround for
// https://github.com/denoland/deno/issues/20700. Remove when
@ -82,7 +83,7 @@ pub async fn start() -> Result<(), AnyError> {
// Force end the server 8 seconds after receiving a shutdown request.
tokio::select! {
biased;
_ = Server::new(stdin, stdout, socket).concurrency_level(32).serve(service) => {}
_ = Server::new(stdin, stdout, socket, pending).concurrency_level(32).serve(service) => {}
_ = spawn(async move {
shutdown_flag.wait_raised().await;
tokio::time::sleep(std::time::Duration::from_secs(8)).await;


@ -48,7 +48,7 @@ impl CliNpmSearchApi {
}
}
#[async_trait::async_trait]
#[async_trait::async_trait(?Send)]
impl PackageSearchApi for CliNpmSearchApi {
async fn search(&self, query: &str) -> Result<Arc<Vec<String>>, AnyError> {
if let Some(names) = self.search_cache.get(query) {


@ -74,14 +74,15 @@ impl From<PerformanceMark> for PerformanceMeasure {
#[derive(Debug)]
pub struct PerformanceScopeMark {
performance_inner: Arc<Mutex<PerformanceInner>>,
performance: Arc<Performance>,
inner: Option<PerformanceMark>,
}
impl Drop for PerformanceScopeMark {
fn drop(&mut self) {
self
.performance_inner
.performance
.0
.lock()
.measure(self.inner.take().unwrap());
}
@ -139,7 +140,7 @@ impl Default for PerformanceInner {
/// The structure will limit the size of measurements to the most recent 1000,
/// and will roll off when that limit is reached.
#[derive(Debug, Default)]
pub struct Performance(Arc<Mutex<PerformanceInner>>);
pub struct Performance(Mutex<PerformanceInner>);
impl Performance {
/// Return the count and average duration of a measurement identified by name.
@ -279,9 +280,12 @@ impl Performance {
/// // ❌
/// let _ = self.performance.measure_scope("foo");
/// ```
pub fn measure_scope<S: AsRef<str>>(&self, name: S) -> PerformanceScopeMark {
pub fn measure_scope<S: AsRef<str>>(
self: &Arc<Self>,
name: S,
) -> PerformanceScopeMark {
PerformanceScopeMark {
performance_inner: self.0.clone(),
performance: self.clone(),
inner: Some(self.mark(name)),
}
}


@ -474,24 +474,17 @@ impl ModuleRegistry {
&self,
specifier: &ModuleSpecifier,
) -> Result<Vec<RegistryConfiguration>, AnyError> {
// spawn due to the lsp's `Send` requirement
let fetch_result = deno_core::unsync::spawn({
let file_fetcher = self.file_fetcher.clone();
let specifier = specifier.clone();
async move {
file_fetcher
.fetch_with_options(
&specifier,
FetchPermissionsOptionRef::AllowAll,
FetchOptions {
maybe_auth: None,
maybe_accept: Some("application/vnd.deno.reg.v2+json, application/vnd.deno.reg.v1+json;q=0.9, application/json;q=0.8"),
maybe_cache_setting: None,
}
)
.await
}
}).await?;
let fetch_result = self.file_fetcher
.fetch_with_options(
specifier,
FetchPermissionsOptionRef::AllowAll,
FetchOptions {
maybe_auth: None,
maybe_accept: Some("application/vnd.deno.reg.v2+json, application/vnd.deno.reg.v1+json;q=0.9, application/json;q=0.8"),
maybe_cache_setting: None,
}
)
.await;
// if there is an error fetching, we will cache an empty file, so that
// subsequent requests they are just an empty doc which will error without
// needing to connect to the remote URL. We will cache it for 1 week.
@ -584,18 +577,13 @@ FetchPermissionsOptionRef::AllowAll,
)
.ok()?;
let file_fetcher = self.file_fetcher.clone();
// spawn due to the lsp's `Send` requirement
let file = deno_core::unsync::spawn({
async move {
let file = file_fetcher
.fetch_bypass_permissions(&endpoint)
.await
.ok()?;
TextDecodedFile::decode(file).ok()
}
})
.await
.ok()??;
let file = {
let file = file_fetcher
.fetch_bypass_permissions(&endpoint)
.await
.ok()?;
TextDecodedFile::decode(file).ok()?
};
let documentation: lsp::Documentation =
serde_json::from_str(&file.source).ok()?;
return match documentation {
@ -613,6 +601,7 @@ FetchPermissionsOptionRef::AllowAll,
/// For a string specifier from the client, provide a set of completions, if
/// any, for the specifier.
#[tracing::instrument(skip_all)]
pub async fn get_completions(
&self,
text: &str,
@ -983,16 +972,13 @@ FetchPermissionsOptionRef::AllowAll,
) -> Option<lsp::Documentation> {
let specifier = Url::parse(url).ok()?;
let file_fetcher = self.file_fetcher.clone();
// spawn due to the lsp's `Send` requirement
let file = deno_core::unsync::spawn(async move {
let file = {
let file = file_fetcher
.fetch_bypass_permissions(&specifier)
.await
.ok()?;
TextDecodedFile::decode(file).ok()
})
.await
.ok()??;
TextDecodedFile::decode(file).ok()?
};
serde_json::from_str(&file.source).ok()
}
@ -1045,26 +1031,20 @@ FetchPermissionsOptionRef::AllowAll,
async fn get_items(&self, url: &str) -> Option<VariableItems> {
let specifier = ModuleSpecifier::parse(url).ok()?;
// spawn due to the lsp's `Send` requirement
let file = deno_core::unsync::spawn({
let file_fetcher = self.file_fetcher.clone();
let specifier = specifier.clone();
async move {
let file = file_fetcher
.fetch_bypass_permissions(&specifier)
.await
.map_err(|err| {
error!(
"Internal error fetching endpoint \"{}\". {}",
specifier, err
);
})
.ok()?;
TextDecodedFile::decode(file).ok()
}
})
.await
.ok()??;
let file = {
let file = self
.file_fetcher
.fetch_bypass_permissions(&specifier)
.await
.map_err(|err| {
error!(
"Internal error fetching endpoint \"{}\". {}",
specifier, err
);
})
.ok()?;
TextDecodedFile::decode(file).ok()?
};
let items: VariableItems = serde_json::from_str(&file.source)
.map_err(|err| {
error!(
@ -1090,26 +1070,20 @@ FetchPermissionsOptionRef::AllowAll,
error!("Internal error mapping endpoint \"{}\". {}", url, err);
})
.ok()?;
// spawn due to the lsp's `Send` requirement
let file = deno_core::unsync::spawn({
let file_fetcher = self.file_fetcher.clone();
let specifier = specifier.clone();
async move {
let file = file_fetcher
.fetch_bypass_permissions(&specifier)
.await
.map_err(|err| {
error!(
"Internal error fetching endpoint \"{}\". {}",
specifier, err
);
})
.ok()?;
TextDecodedFile::decode(file).ok()
}
})
.await
.ok()??;
let file = {
let file = self
.file_fetcher
.fetch_bypass_permissions(&specifier)
.await
.map_err(|err| {
error!(
"Internal error fetching endpoint \"{}\". {}",
specifier, err
);
})
.ok()?;
TextDecodedFile::decode(file).ok()?
};
let items: VariableItems = serde_json::from_str(&file.source)
.map_err(|err| {
error!(


@ -9,6 +9,7 @@ use deno_core::anyhow::anyhow;
use deno_core::error::AnyError;
use deno_core::serde_json;
use lsp_types::Uri;
use tokio_util::sync::CancellationToken;
use tower_lsp::lsp_types::ClientCapabilities;
use tower_lsp::lsp_types::ClientInfo;
use tower_lsp::lsp_types::CompletionContext;
@ -124,6 +125,7 @@ impl ReplLanguageServer {
&mut self,
line_text: &str,
position: usize,
token: CancellationToken,
) -> Vec<ReplCompletionItem> {
self.did_change(line_text).await;
let text_info = deno_ast::SourceTextInfo::from_string(format!(
@ -135,27 +137,30 @@ impl ReplLanguageServer {
let line_and_column = text_info.line_and_column_index(position);
let response = self
.language_server
.completion(CompletionParams {
text_document_position: TextDocumentPositionParams {
text_document: TextDocumentIdentifier {
uri: self.get_document_uri(),
.completion(
CompletionParams {
text_document_position: TextDocumentPositionParams {
text_document: TextDocumentIdentifier {
uri: self.get_document_uri(),
},
position: Position {
line: line_and_column.line_index as u32,
character: line_and_column.column_index as u32,
},
},
position: Position {
line: line_and_column.line_index as u32,
character: line_and_column.column_index as u32,
work_done_progress_params: WorkDoneProgressParams {
work_done_token: None,
},
partial_result_params: PartialResultParams {
partial_result_token: None,
},
context: Some(CompletionContext {
trigger_kind: CompletionTriggerKind::INVOKED,
trigger_character: None,
}),
},
work_done_progress_params: WorkDoneProgressParams {
work_done_token: None,
},
partial_result_params: PartialResultParams {
partial_result_token: None,
},
context: Some(CompletionContext {
trigger_kind: CompletionTriggerKind::INVOKED,
trigger_character: None,
}),
})
token,
)
.await
.ok()
.unwrap_or_default();
@ -352,5 +357,6 @@ pub fn get_repl_workspace_settings() -> WorkspaceSettings {
},
..Default::default()
},
tracing: Default::default(),
}
}


@ -796,15 +796,9 @@ impl<'a> ResolverFactory<'a> {
NpmSystemInfo::default(),
));
self.set_npm_installer(npm_installer);
// spawn due to the lsp's `Send` requirement
deno_core::unsync::spawn(async move {
if let Err(err) = npm_resolution_initializer.ensure_initialized().await
{
log::warn!("failed to initialize npm resolution: {}", err);
}
})
.await
.unwrap();
if let Err(err) = npm_resolution_initializer.ensure_initialized().await {
log::warn!("failed to initialize npm resolution: {}", err);
}
CliNpmResolverCreateOptions::Managed(CliManagedNpmResolverCreateOptions {
sys: CliSys::default(),


@ -6,7 +6,7 @@ use deno_core::error::AnyError;
use deno_semver::package::PackageNv;
use deno_semver::Version;
#[async_trait::async_trait]
#[async_trait::async_trait(?Send)]
pub trait PackageSearchApi {
async fn search(&self, query: &str) -> Result<Arc<Vec<String>>, AnyError>;
async fn versions(&self, name: &str) -> Result<Arc<Vec<Version>>, AnyError>;
@ -45,7 +45,7 @@ pub mod tests {
}
}
#[async_trait::async_trait]
#[async_trait::async_trait(?Send)]
impl PackageSearchApi for TestPackageSearchApi {
async fn search(&self, query: &str) -> Result<Arc<Vec<String>>, AnyError> {
let names = self

cli/lsp/trace.rs (new file, 159 lines)

@ -0,0 +1,159 @@
// Copyright 2018-2025 the Deno authors. MIT license.
use std::fmt;
use deno_core::anyhow;
use opentelemetry::trace::TracerProvider;
use opentelemetry::KeyValue;
use opentelemetry_otlp::WithExportConfig;
use opentelemetry_sdk::Resource;
use opentelemetry_semantic_conventions::resource::SERVICE_NAME;
use serde::Deserialize;
use serde::Serialize;
use tracing::level_filters::LevelFilter;
use tracing_opentelemetry::OpenTelemetryLayer;
use tracing_subscriber::fmt::format::FmtSpan;
use tracing_subscriber::layer::SubscriberExt;
use crate::lsp::logging::lsp_debug;
pub(crate) fn make_tracer(
endpoint: Option<&str>,
) -> Result<opentelemetry_sdk::trace::Tracer, anyhow::Error> {
let endpoint = endpoint.unwrap_or("http://localhost:4317");
let exporter = opentelemetry_otlp::SpanExporter::builder()
.with_tonic()
.with_endpoint(endpoint)
.build()?;
let provider = opentelemetry_sdk::trace::Builder::default()
.with_batch_exporter(exporter, opentelemetry_sdk::runtime::Tokio)
.with_resource(Resource::new(vec![KeyValue::new(SERVICE_NAME, "deno-lsp")]))
.build();
opentelemetry::global::set_tracer_provider(provider.clone());
Ok(provider.tracer("deno-lsp-tracer"))
}
pub(crate) struct TracingGuard(
// TODO(nathanwhit): use default guard here so we can change tracing after init
// but needs wiring through the subscriber to the TSC thread, as it can't be a global default
// #[allow(dead_code)] tracing::dispatcher::DefaultGuard,
#[allow(dead_code)] (),
);
impl fmt::Debug for TracingGuard {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("TracingGuard").finish()
}
}
impl Drop for TracingGuard {
fn drop(&mut self) {
lsp_debug!("Shutting down tracing");
tokio::task::spawn_blocking(|| {
opentelemetry::global::shutdown_tracer_provider()
});
}
}
#[derive(
Debug, Clone, Deserialize, Serialize, PartialEq, Eq, Copy, Default,
)]
#[serde(rename_all = "camelCase")]
pub(crate) enum TracingCollector {
#[default]
OpenTelemetry,
Logging,
}
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq, Default)]
#[serde(default, rename_all = "camelCase")]
pub(crate) struct TracingConfig {
/// Enable tracing.
pub(crate) enable: bool,
/// The collector to use. Defaults to `OpenTelemetry`.
/// If `Logging` is used, the collected traces will be written to stderr.
pub(crate) collector: TracingCollector,
/// The filter to use. Defaults to `INFO`.
pub(crate) filter: Option<String>,
/// The endpoint to use for the OpenTelemetry collector.
pub(crate) collector_endpoint: Option<String>,
}
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)]
#[serde(untagged)]
pub(crate) enum TracingConfigOrEnabled {
Config(TracingConfig),
Enabled(bool),
}
impl From<TracingConfig> for TracingConfigOrEnabled {
fn from(value: TracingConfig) -> Self {
TracingConfigOrEnabled::Config(value)
}
}
impl From<TracingConfigOrEnabled> for TracingConfig {
fn from(value: TracingConfigOrEnabled) -> Self {
match value {
TracingConfigOrEnabled::Config(config) => config,
TracingConfigOrEnabled::Enabled(enabled) => TracingConfig {
enable: enabled,
..Default::default()
},
}
}
}
impl TracingConfigOrEnabled {
pub(crate) fn enabled(&self) -> bool {
match self {
TracingConfigOrEnabled::Config(config) => config.enable,
TracingConfigOrEnabled::Enabled(enabled) => *enabled,
}
}
}
pub(crate) fn init_tracing_subscriber(
config: &TracingConfig,
) -> Result<TracingGuard, anyhow::Error> {
if !config.enable {
return Err(anyhow::anyhow!("Tracing is not enabled"));
}
let filter = tracing_subscriber::EnvFilter::builder()
.with_default_directive(LevelFilter::INFO.into());
let filter = if let Some(directive) = config.filter.as_ref() {
filter.parse(directive)?
} else {
filter.with_env_var("DENO_LSP_TRACE").from_env()?
};
let open_telemetry_layer = match config.collector {
TracingCollector::OpenTelemetry => Some(OpenTelemetryLayer::new(
make_tracer(config.collector_endpoint.as_deref())?,
)),
_ => None,
};
let logging_layer = match config.collector {
TracingCollector::Logging => Some(
tracing_subscriber::fmt::layer()
.with_writer(std::io::stderr)
// Include span events in the log output.
// Without this, only events get logged (and at the moment we have none).
.with_span_events(FmtSpan::NEW | FmtSpan::CLOSE),
),
_ => None,
};
tracing::subscriber::set_global_default(
tracing_subscriber::registry()
.with(filter)
.with(logging_layer)
.with(open_telemetry_layer),
)
.unwrap();
let guard = ();
Ok(TracingGuard(guard))
}
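
For reference, the `tracing` value accepted by the `WorkspaceSettings` field added earlier in this diff deserializes into the structs above: either a bare boolean or a camelCase config object. A rough TypeScript model of that settings shape, for illustration only (field names follow the serde attributes above; the example filter directive is an assumption):

```ts
// Models TracingConfigOrEnabled / TracingConfig as they cross the LSP boundary
// (serde: untagged bool-or-object, rename_all = "camelCase", all fields defaulted).
type TracingCollector = "openTelemetry" | "logging";

interface TracingConfig {
  enable?: boolean; // defaults to false (tracing off)
  collector?: TracingCollector; // defaults to "openTelemetry"
  filter?: string; // tracing filter; defaults to INFO, or the DENO_LSP_TRACE env var
  collectorEndpoint?: string; // OTLP endpoint; defaults to "http://localhost:4317"
}

type TracingConfigOrEnabled = TracingConfig | boolean;

// Example workspace settings fragment that logs spans to stderr instead of
// exporting them over OTLP (the surrounding settings key is assumed):
const settings: { tracing?: TracingConfigOrEnabled } = {
  tracing: {
    enable: true,
    collector: "logging",
    filter: "deno=info", // any tracing EnvFilter-style directive
  },
};
```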


@ -1,15 +1,18 @@
// Copyright 2018-2025 the Deno authors. MIT license.
use std::borrow::Cow;
use std::cell::RefCell;
use std::cmp;
use std::collections::BTreeMap;
use std::collections::HashMap;
use std::collections::HashSet;
use std::convert::Infallible;
use std::ffi::c_void;
use std::net::SocketAddr;
use std::ops::Range;
use std::path::Path;
use std::rc::Rc;
use std::sync::atomic::AtomicBool;
use std::sync::Arc;
use std::thread;
@ -65,6 +68,7 @@ use tokio_util::sync::CancellationToken;
use tower_lsp::jsonrpc::Error as LspError;
use tower_lsp::jsonrpc::Result as LspResult;
use tower_lsp::lsp_types as lsp;
use tracing_opentelemetry::OpenTelemetrySpanExt;
use super::analysis::CodeActionData;
use super::code_lens;
@ -124,6 +128,7 @@ type Request = (
oneshot::Sender<Result<String, AnyError>>,
CancellationToken,
Option<PendingChange>,
Option<opentelemetry::Context>,
);
#[derive(Debug, Clone, Copy, Serialize_repr)]
@ -243,6 +248,7 @@ pub struct TsServer {
pub specifier_map: Arc<TscSpecifierMap>,
inspector_server: Mutex<Option<Arc<InspectorServer>>>,
pending_change: Mutex<Option<PendingChange>>,
enable_tracing: Arc<AtomicBool>,
}
impl std::fmt::Debug for TsServer {
@ -401,9 +407,16 @@ impl TsServer {
specifier_map: Arc::new(TscSpecifierMap::new()),
inspector_server: Mutex::new(None),
pending_change: Mutex::new(None),
enable_tracing: Default::default(),
}
}
pub fn set_tracing_enabled(&self, enabled: bool) {
self
.enable_tracing
.store(enabled, std::sync::atomic::Ordering::Relaxed);
}
pub fn start(
&self,
inspector_server_addr: Option<String>,
@ -427,12 +440,14 @@ impl TsServer {
let receiver = self.receiver.lock().take().unwrap();
let performance = self.performance.clone();
let specifier_map = self.specifier_map.clone();
let enable_tracing = self.enable_tracing.clone();
let _join_handle = thread::spawn(move || {
run_tsc_thread(
receiver,
performance,
specifier_map,
maybe_inspector_server,
enable_tracing,
)
});
Ok(())
@ -467,6 +482,7 @@ impl TsServer {
}
}
#[tracing::instrument(skip_all)]
pub async fn get_diagnostics(
&self,
snapshot: Arc<StateSnapshot>,
@ -532,6 +548,7 @@ impl TsServer {
Ok((diagnostics_map, ambient_modules_by_scope))
}
#[tracing::instrument(skip_all)]
pub async fn cleanup_semantic_cache(&self, snapshot: Arc<StateSnapshot>) {
for scope in snapshot
.config
@ -558,6 +575,7 @@ impl TsServer {
}
}
#[tracing::instrument(skip_all)]
pub async fn find_references(
&self,
snapshot: Arc<StateSnapshot>,
@ -612,6 +630,7 @@ impl TsServer {
Ok(Some(all_symbols.into_iter().collect()))
}
#[tracing::instrument(skip_all)]
pub async fn get_navigation_tree(
&self,
snapshot: Arc<StateSnapshot>,
@ -625,6 +644,7 @@ impl TsServer {
self.request(snapshot, req, scope, token).await
}
#[tracing::instrument(skip_all)]
pub async fn get_supported_code_fixes(
&self,
snapshot: Arc<StateSnapshot>,
@ -639,6 +659,7 @@ impl TsServer {
})
}
#[tracing::instrument(skip_all)]
pub async fn get_quick_info(
&self,
snapshot: Arc<StateSnapshot>,
@ -655,6 +676,7 @@ impl TsServer {
}
#[allow(clippy::too_many_arguments)]
#[tracing::instrument(skip_all)]
pub async fn get_code_fixes(
&self,
snapshot: Arc<StateSnapshot>,
@ -686,6 +708,7 @@ impl TsServer {
}
#[allow(clippy::too_many_arguments)]
#[tracing::instrument(skip_all)]
pub async fn get_applicable_refactors(
&self,
snapshot: Arc<StateSnapshot>,
@ -718,6 +741,7 @@ impl TsServer {
})
}
#[tracing::instrument(skip_all)]
pub async fn get_combined_code_fix(
&self,
snapshot: Arc<StateSnapshot>,
@ -746,6 +770,7 @@ impl TsServer {
}
#[allow(clippy::too_many_arguments)]
#[tracing::instrument(skip_all)]
pub async fn get_edits_for_refactor(
&self,
snapshot: Arc<StateSnapshot>,
@ -775,6 +800,7 @@ impl TsServer {
})
}
#[tracing::instrument(skip_all)]
pub async fn get_edits_for_file_rename(
&self,
snapshot: Arc<StateSnapshot>,
@ -833,6 +859,7 @@ impl TsServer {
Ok(all_changes.into_iter().collect())
}
#[tracing::instrument(skip_all)]
pub async fn get_document_highlights(
&self,
snapshot: Arc<StateSnapshot>,
@ -853,6 +880,7 @@ impl TsServer {
self.request(snapshot, req, scope, token).await
}
#[tracing::instrument(skip_all)]
pub async fn get_definition(
&self,
snapshot: Arc<StateSnapshot>,
@ -878,6 +906,7 @@ impl TsServer {
})
}
#[tracing::instrument(skip_all)]
pub async fn get_type_definition(
&self,
snapshot: Arc<StateSnapshot>,
@ -905,6 +934,7 @@ impl TsServer {
}
#[allow(clippy::too_many_arguments)]
#[tracing::instrument(skip_all)]
pub async fn get_completions(
&self,
snapshot: Arc<StateSnapshot>,
@ -932,6 +962,7 @@ impl TsServer {
})
}
#[tracing::instrument(skip_all)]
pub async fn get_completion_details(
&self,
snapshot: Arc<StateSnapshot>,
@ -959,6 +990,7 @@ impl TsServer {
})
}
#[tracing::instrument(skip_all)]
pub async fn get_implementations(
&self,
snapshot: Arc<StateSnapshot>,
@ -1013,6 +1045,7 @@ impl TsServer {
Ok(Some(all_locations.into_iter().collect()))
}
#[tracing::instrument(skip_all)]
pub async fn get_outlining_spans(
&self,
snapshot: Arc<StateSnapshot>,
@ -1026,6 +1059,7 @@ impl TsServer {
self.request(snapshot, req, scope, token).await
}
#[tracing::instrument(skip_all)]
pub async fn provide_call_hierarchy_incoming_calls(
&self,
snapshot: Arc<StateSnapshot>,
@ -1074,6 +1108,7 @@ impl TsServer {
Ok(all_calls.into_iter().collect())
}
#[tracing::instrument(skip_all)]
pub async fn provide_call_hierarchy_outgoing_calls(
&self,
snapshot: Arc<StateSnapshot>,
@ -1100,6 +1135,7 @@ impl TsServer {
})
}
#[tracing::instrument(skip_all)]
pub async fn prepare_call_hierarchy(
&self,
snapshot: Arc<StateSnapshot>,
@ -1133,6 +1169,7 @@ impl TsServer {
})
}
#[tracing::instrument(skip_all)]
pub async fn find_rename_locations(
&self,
snapshot: Arc<StateSnapshot>,
@ -1190,6 +1227,7 @@ impl TsServer {
Ok(Some(all_locations.into_iter().collect()))
}
#[tracing::instrument(skip_all)]
pub async fn get_smart_selection_range(
&self,
snapshot: Arc<StateSnapshot>,
@ -1205,6 +1243,7 @@ impl TsServer {
self.request(snapshot, req, scope, token).await
}
#[tracing::instrument(skip_all)]
pub async fn get_encoded_semantic_classifications(
&self,
snapshot: Arc<StateSnapshot>,
@ -1224,6 +1263,7 @@ impl TsServer {
self.request(snapshot, req, scope, token).await
}
#[tracing::instrument(skip_all)]
pub async fn get_signature_help_items(
&self,
snapshot: Arc<StateSnapshot>,
@ -1241,6 +1281,7 @@ impl TsServer {
self.request(snapshot, req, scope, token).await
}
#[tracing::instrument(skip_all)]
pub async fn get_navigate_to_items(
&self,
snapshot: Arc<StateSnapshot>,
@ -1292,6 +1333,7 @@ impl TsServer {
Ok(all_items.into_iter().collect())
}
#[tracing::instrument(skip_all)]
pub async fn provide_inlay_hints(
&self,
snapshot: Arc<StateSnapshot>,
@ -1319,6 +1361,7 @@ impl TsServer {
where
R: de::DeserializeOwned,
{
let context = tracing::Span::current().context();
let mark = self
.performance
.mark(format!("tsc.request.{}", req.method()));
@ -1327,7 +1370,15 @@ impl TsServer {
if self
.sender
.send((req, scope, snapshot, tx, token.clone(), change))
.send((
req,
scope,
snapshot,
tx,
token.clone(),
change,
Some(context),
))
.is_err()
{
return Err(anyhow!("failed to send request to tsc thread"));
@ -1335,6 +1386,7 @@ impl TsServer {
tokio::select! {
value = &mut rx => {
let value = value??;
let _span = tracing::info_span!("Tsc response deserialization");
let r = Ok(serde_json::from_str(&value)?);
self.performance.measure(mark);
r
@ -3612,7 +3664,26 @@ impl CompletionEntryDetails {
None
};
let mut text_edit = original_item.text_edit.clone();
let mut code_action_descriptions = self
.code_actions
.iter()
.flatten()
.map(|a| Cow::Borrowed(a.description.as_str()))
.collect::<Vec<_>>();
if let Some(specifier_rewrite) = &data.specifier_rewrite {
for description in &mut code_action_descriptions {
let specifier_index = description
.char_indices()
.find_map(|(b, c)| (c == '\'' || c == '"').then_some(b));
if let Some(i) = specifier_index {
let mut specifier_part = description.to_mut().split_off(i);
specifier_part = specifier_part.replace(
&specifier_rewrite.old_specifier,
&specifier_rewrite.new_specifier,
);
description.to_mut().push_str(&specifier_part);
}
}
if let Some(text_edit) = &mut text_edit {
let new_text = match text_edit {
lsp::CompletionTextEdit::Edit(text_edit) => &mut text_edit.new_text,
@ -3639,6 +3710,16 @@ impl CompletionEntryDetails {
}
}
}
let code_action_description =
Some(code_action_descriptions.join("\n\n")).filter(|s| !s.is_empty());
let detail = Some(
[code_action_description, detail]
.into_iter()
.flatten()
.collect::<Vec<_>>()
.join("\n\n"),
)
.filter(|s| !s.is_empty());
let (command, additional_text_edits) = parse_code_actions(
self.code_actions.as_ref(),
data,
@ -3707,6 +3788,7 @@ impl CompletionInfo {
Ok(())
}
#[tracing::instrument(skip_all, fields(entries = %self.entries.len()))]
pub fn as_completion_response(
&self,
line_index: Arc<LineIndex>,
@ -4427,6 +4509,8 @@ struct State {
token: CancellationToken,
pending_requests: Option<UnboundedReceiver<Request>>,
mark: Option<PerformanceMark>,
context: Option<opentelemetry::Context>,
enable_tracing: Arc<AtomicBool>,
}
impl State {
@ -4435,6 +4519,7 @@ impl State {
specifier_map: Arc<TscSpecifierMap>,
performance: Arc<Performance>,
pending_requests: UnboundedReceiver<Request>,
enable_tracing: Arc<AtomicBool>,
) -> Self {
Self {
last_id: 1,
@ -4446,9 +4531,17 @@ impl State {
token: Default::default(),
mark: None,
pending_requests: Some(pending_requests),
context: None,
enable_tracing,
}
}
fn tracing_enabled(&self) -> bool {
self
.enable_tracing
.load(std::sync::atomic::Ordering::Relaxed)
}
fn get_document(&self, specifier: &ModuleSpecifier) -> Option<Arc<Document>> {
self
.state_snapshot
@ -4541,6 +4634,7 @@ fn op_load<'s>(
state: &mut OpState,
#[string] specifier: &str,
) -> Result<v8::Local<'s, v8::Value>, LoadError> {
let _span = tracing::info_span!("op_load").entered();
let state = state.borrow_mut::<State>();
let mark = state
.performance
@ -4572,6 +4666,7 @@ fn op_release(
state: &mut OpState,
#[string] specifier: &str,
) -> Result<(), deno_core::url::ParseError> {
let _span = tracing::info_span!("op_release").entered();
let state = state.borrow_mut::<State>();
let mark = state
.performance
@ -4590,6 +4685,7 @@ fn op_resolve(
#[string] base: String,
#[serde] specifiers: Vec<(bool, String)>,
) -> Result<Vec<Option<(String, Option<String>)>>, deno_core::url::ParseError> {
let _span = tracing::info_span!("op_resolve").entered();
op_resolve_inner(state, ResolveArgs { base, specifiers })
}
@ -4644,7 +4740,7 @@ async fn op_poll_requests(
// clear the resolution cache after each request
NodeResolutionThreadLocalCache::clear();
let Some((request, scope, snapshot, response_tx, token, change)) =
let Some((request, scope, snapshot, response_tx, token, change, context)) =
pending_requests.recv().await
else {
return None.into();
@ -4663,6 +4759,7 @@ async fn op_poll_requests(
.performance
.mark_with_args(format!("tsc.host.{}", request.method()), &request);
state.mark = Some(mark);
state.context = context;
Some(TscRequestArray {
request,
@ -4710,6 +4807,7 @@ fn op_respond(
#[string] response: String,
#[string] error: String,
) {
let _span = tracing::info_span!("op_respond").entered();
let state = state.borrow_mut::<State>();
state.performance.measure(state.mark.take().unwrap());
state.last_scope = None;
@ -4727,6 +4825,58 @@ fn op_respond(
}
}
struct TracingSpan(#[allow(dead_code)] Option<tracing::span::EnteredSpan>);
deno_core::external!(TracingSpan, "lsp::TracingSpan");
fn span_with_context(
state: &State,
span: tracing::Span,
) -> tracing::span::EnteredSpan {
if let Some(context) = &state.context {
span.set_parent(context.clone());
}
span.entered()
}
#[op2(fast)]
fn op_make_span(
op_state: &mut OpState,
#[string] s: &str,
needs_context: bool,
) -> *const c_void {
let state = op_state.borrow_mut::<State>();
if !state.tracing_enabled() {
return deno_core::ExternalPointer::new(TracingSpan(None)).into_raw();
}
let sp = tracing::info_span!("js", otel.name = format!("js::{s}").as_str());
let span = if needs_context {
span_with_context(state, sp)
} else {
sp.entered()
};
deno_core::ExternalPointer::new(TracingSpan(Some(span))).into_raw()
}
#[op2(fast)]
fn op_log_event(op_state: &OpState, #[string] msg: &str) {
let state = op_state.borrow::<State>();
if state.tracing_enabled() {
tracing::info!(msg = msg);
}
}
#[op2(fast)]
fn op_exit_span(op_state: &mut OpState, span: *const c_void, root: bool) {
let ptr = deno_core::ExternalPointer::<TracingSpan>::from_raw(span);
// SAFETY: trust me
let _span = unsafe { ptr.unsafely_take().0 };
let state = op_state.borrow_mut::<State>();
if root {
state.context = None;
}
}
#[derive(Debug, Serialize)]
#[serde(rename_all = "camelCase")]
struct ScriptNames {
@ -4737,6 +4887,7 @@ struct ScriptNames {
#[op2]
#[serde]
fn op_script_names(state: &mut OpState) -> ScriptNames {
let _span = tracing::info_span!("op_script_names").entered();
let state = state.borrow_mut::<State>();
let mark = state.performance.mark("tsc.op.op_script_names");
let mut result = ScriptNames {
@ -4910,19 +5061,21 @@ fn run_tsc_thread(
performance: Arc<Performance>,
specifier_map: Arc<TscSpecifierMap>,
maybe_inspector_server: Option<Arc<InspectorServer>>,
enable_tracing: Arc<AtomicBool>,
) {
let has_inspector_server = maybe_inspector_server.is_some();
// Create and setup a JsRuntime based on a snapshot. It is expected that the
// supplied snapshot is an isolate that contains the TypeScript language
// server.
let mut extensions =
deno_runtime::snapshot_info::get_extensions_in_snapshot();
extensions.push(deno_tsc::init_ops_and_esm(
performance,
specifier_map,
request_rx,
enable_tracing,
));
let mut tsc_runtime = JsRuntime::new(RuntimeOptions {
extensions: vec![deno_tsc::init_ops_and_esm(
performance,
specifier_map,
request_rx,
)],
extensions,
create_params: create_isolate_create_params(),
startup_snapshot: None,
startup_snapshot: deno_snapshots::CLI_SNAPSHOT,
inspector: has_inspector_server,
..Default::default()
});
@ -5004,12 +5157,16 @@ deno_core::extension!(deno_tsc,
op_script_version,
op_project_version,
op_poll_requests,
op_make_span,
op_exit_span,
op_log_event,
op_libs,
],
options = {
performance: Arc<Performance>,
specifier_map: Arc<TscSpecifierMap>,
request_rx: UnboundedReceiver<Request>,
enable_tracing: Arc<AtomicBool>,
},
state = |state, options| {
state.put(State::new(
@ -5017,6 +5174,7 @@ deno_core::extension!(deno_tsc,
options.specifier_map,
options.performance,
options.request_rx,
options.enable_tracing,
));
},
customizer = |ext: &mut deno_core::Extension| {
@ -5730,8 +5888,13 @@ mod tests {
fn setup_op_state(state_snapshot: Arc<StateSnapshot>) -> OpState {
let (_tx, rx) = mpsc::unbounded_channel();
let state =
State::new(state_snapshot, Default::default(), Default::default(), rx);
let state = State::new(
state_snapshot,
Default::default(),
Default::default(),
rx,
Arc::new(AtomicBool::new(true)),
);
let mut op_state = OpState::new(None, None);
op_state.put(state);
op_state


@ -9,7 +9,6 @@ mod file_fetcher;
mod graph_container;
mod graph_util;
mod http_util;
mod js;
mod jsr;
mod lsp;
mod module_loader;


@ -152,24 +152,16 @@ impl NpmFetchResolver {
// todo(#27198): use RegistryInfoProvider instead
let fetch_package_info = || async {
let info_url = deno_npm_cache::get_package_url(&self.npmrc, name);
let file_fetcher = self.file_fetcher.clone();
let registry_config = self.npmrc.get_registry_config(name);
// TODO(bartlomieju): this should error out, not use `.ok()`.
let maybe_auth_header =
deno_npm_cache::maybe_auth_header_for_npm_registry(registry_config)
.ok()?;
// spawn due to the lsp's `Send` requirement
let file = deno_core::unsync::spawn(async move {
file_fetcher
.fetch_bypass_permissions_with_maybe_auth(
&info_url,
maybe_auth_header,
)
.await
.ok()
})
.await
.ok()??;
let file = self
.file_fetcher
.fetch_bypass_permissions_with_maybe_auth(&info_url, maybe_auth_header)
.await
.ok()?;
serde_json::from_slice::<NpmPackageInfo>(&file.source).ok()
};
let info = fetch_package_info().await.map(Arc::new);


@ -2,7 +2,7 @@
[package]
name = "denort"
version = "2.1.9"
version = "2.1.10"
authors.workspace = true
default-run = "denort"
edition.workspace = true


@ -2,7 +2,7 @@
[package]
name = "deno_snapshots"
version = "0.4.0"
version = "0.5.0"
authors.workspace = true
edition.workspace = true
license.workspace = true


@ -28,6 +28,7 @@ use regex::Regex;
use crate::args::deno_json::TsConfigResolver;
use crate::args::CheckFlags;
use crate::args::CliOptions;
use crate::args::DenoSubcommand;
use crate::args::Flags;
use crate::args::TsConfig;
use crate::args::TsTypeLib;
@ -94,8 +95,16 @@ pub async fn check(
#[derive(Debug, thiserror::Error, deno_error::JsError)]
#[class(type)]
#[error("Type checking failed.")]
pub struct FailedTypeCheckingError;
#[error("Type checking failed.{}", if self.can_skip {
color_print::cstr!(
"\n\n <y>info:</y> The program failed type-checking, but it still might work correctly.\n <c>hint:</c> Re-run with <u>--no-check</u> to skip type-checking.",
)
} else {
""
})]
pub struct FailedTypeCheckingError {
can_skip: bool,
}
#[derive(Debug, thiserror::Error, deno_error::JsError)]
pub enum CheckError {
@ -148,6 +157,7 @@ pub struct TypeChecker {
npm_resolver: CliNpmResolver,
sys: CliSys,
tsconfig_resolver: Arc<TsConfigResolver>,
code_cache: Option<Arc<crate::cache::CodeCache>>,
}
impl TypeChecker {
@ -162,6 +172,7 @@ impl TypeChecker {
npm_resolver: CliNpmResolver,
sys: CliSys,
tsconfig_resolver: Arc<TsConfigResolver>,
code_cache: Option<Arc<crate::cache::CodeCache>>,
) -> Self {
Self {
caches,
@ -173,6 +184,7 @@ impl TypeChecker {
npm_resolver,
sys,
tsconfig_resolver,
code_cache,
}
}
@ -196,7 +208,15 @@ impl TypeChecker {
}
}
if failed {
Err(FailedTypeCheckingError.into())
Err(
FailedTypeCheckingError {
can_skip: !matches!(
self.cli_options.sub_command(),
DenoSubcommand::Check(_)
),
}
.into(),
)
} else {
Ok(diagnostics.into_graph())
}
@ -283,6 +303,7 @@ impl TypeChecker {
grouped_roots,
options,
seen_diagnotics: Default::default(),
code_cache: self.code_cache.clone(),
}),
))
}
@ -433,6 +454,7 @@ struct DiagnosticsByFolderRealIterator<'a> {
npm_check_state_hash: Option<u64>,
seen_diagnotics: HashSet<String>,
options: CheckOptions,
code_cache: Option<Arc<crate::cache::CodeCache>>,
}
impl<'a> Iterator for DiagnosticsByFolderRealIterator<'a> {
@ -550,20 +572,27 @@ impl<'a> DiagnosticsByFolderRealIterator<'a> {
let tsconfig_hash_data = FastInsecureHasher::new_deno_versioned()
.write_hashable(ts_config)
.finish();
let response = tsc::exec(tsc::Request {
config: ts_config.clone(),
debug: self.log_level == Some(log::Level::Debug),
graph: self.graph.clone(),
hash_data: tsconfig_hash_data,
maybe_npm: Some(tsc::RequestNpmState {
cjs_tracker: self.cjs_tracker.clone(),
node_resolver: self.node_resolver.clone(),
npm_resolver: self.npm_resolver.clone(),
}),
maybe_tsbuildinfo,
root_names,
check_mode: self.options.type_check_mode,
})?;
let code_cache = self.code_cache.as_ref().map(|c| {
let c: Arc<dyn deno_runtime::code_cache::CodeCache> = c.clone();
c
});
let response = tsc::exec(
tsc::Request {
config: ts_config.clone(),
debug: self.log_level == Some(log::Level::Debug),
graph: self.graph.clone(),
hash_data: tsconfig_hash_data,
maybe_npm: Some(tsc::RequestNpmState {
cjs_tracker: self.cjs_tracker.clone(),
node_resolver: self.node_resolver.clone(),
npm_resolver: self.npm_resolver.clone(),
}),
maybe_tsbuildinfo,
root_names,
check_mode: self.options.type_check_mode,
},
code_cache,
)?;
let mut response_diagnostics = response.diagnostics.filter(|d| {
self.should_include_diagnostic(self.options.type_check_mode, d)


@ -22,6 +22,7 @@ use jupyter_runtime::messaging::StreamContent;
use tokio::sync::mpsc;
use tokio::sync::mpsc::UnboundedSender;
use tokio::sync::oneshot;
use tokio_util::sync::CancellationToken;
use crate::args::Flags;
use crate::args::JupyterFlags;
@ -390,7 +391,9 @@ impl JupyterReplSession {
line_text,
position,
} => JupyterReplResponse::LspCompletions(
self.lsp_completions(&line_text, position).await,
self
.lsp_completions(&line_text, position, CancellationToken::new())
.await,
),
JupyterReplRequest::JsGetProperties { object_id } => {
JupyterReplResponse::JsGetProperties(
@ -432,11 +435,12 @@ impl JupyterReplSession {
&mut self,
line_text: &str,
position: usize,
token: CancellationToken,
) -> Vec<ReplCompletionItem> {
self
.repl_session
.language_server
.completions(line_text, position)
.completions(line_text, position, token)
.await
}

View file

@ -94,6 +94,7 @@ use super::buffer::NodeRef;
use super::ts_estree::AstNode;
use super::ts_estree::MethodKind as TsEstreeMethodKind;
use super::ts_estree::PropertyKind;
use super::ts_estree::SourceKind;
use super::ts_estree::TsEsTreeBuilder;
use super::ts_estree::TsKeywordKind;
use super::ts_estree::TsModuleKind;
@ -120,7 +121,7 @@ pub fn serialize_swc_to_buffer(
})
.collect::<Vec<_>>();
ctx.write_program(&module.span, "module", children);
ctx.write_program(&module.span, SourceKind::Module, children);
}
Program::Script(script) => {
let children = script
@ -129,7 +130,7 @@ pub fn serialize_swc_to_buffer(
.map(|stmt| serialize_stmt(&mut ctx, stmt))
.collect::<Vec<_>>();
ctx.write_program(&script.span, "script", children);
ctx.write_program(&script.span, SourceKind::Script, children);
}
}

View file

@ -499,12 +499,16 @@ impl TsEsTreeBuilder {
pub fn write_program(
&mut self,
span: &Span,
source_kind: &str,
source_kind: SourceKind,
body: Vec<NodeRef>,
) -> NodeRef {
let id = self.ctx.append_node(AstNode::Program, span);
self.ctx.write_str(AstProp::SourceType, source_kind);
let kind = match source_kind {
SourceKind::Module => "module",
SourceKind::Script => "script",
};
self.ctx.write_str(AstProp::SourceType, kind);
self.ctx.write_ref_vec(AstProp::Body, &id, body);
self.ctx.set_root_idx(id.0);
@ -2903,3 +2907,9 @@ pub enum MethodKind {
Method,
Set,
}
#[derive(Debug)]
pub enum SourceKind {
Module,
Script,
}

View file

@ -11,6 +11,7 @@ use deno_core::unsync::spawn_blocking;
use deno_lib::version::DENO_VERSION_INFO;
use deno_runtime::WorkerExecutionMode;
use rustyline::error::ReadlineError;
use tokio_util::sync::CancellationToken;
use crate::args::CliOptions;
use crate::args::Flags;
@ -118,7 +119,11 @@ async fn read_line_and_poll(
line_text,
position,
}) => {
let result = repl_session.language_server.completions(&line_text, position).await;
let result = repl_session.language_server.completions(
&line_text,
position,
CancellationToken::new(),
).await;
message_handler.send(RustylineSyncResponse::LspCompletions(result)).unwrap();
}
None => {}, // channel closed

View file

@ -422,12 +422,7 @@ impl<'a> TaskRunner<'a> {
return Ok(0);
};
if let Some(npm_installer) = self.npm_installer {
npm_installer
.ensure_top_level_package_json_install()
.await?;
npm_installer.cache_packages(PackageCaching::All).await?;
}
self.maybe_npm_install().await?;
let cwd = match &self.task_flags.cwd {
Some(path) => canonicalize_path(&PathBuf::from(path))
@ -461,12 +456,7 @@ impl<'a> TaskRunner<'a> {
argv: &[String],
) -> Result<i32, deno_core::anyhow::Error> {
// ensure the npm packages are installed if using a managed resolver
if let Some(npm_installer) = self.npm_installer {
npm_installer
.ensure_top_level_package_json_install()
.await?;
npm_installer.cache_packages(PackageCaching::All).await?;
}
self.maybe_npm_install().await?;
let cwd = match &self.task_flags.cwd {
Some(path) => canonicalize_path(&PathBuf::from(path))?,
@ -542,6 +532,19 @@ impl<'a> TaskRunner<'a> {
.exit_code,
)
}
async fn maybe_npm_install(&self) -> Result<(), AnyError> {
if let Some(npm_installer) = self.npm_installer {
npm_installer
.ensure_top_level_package_json_install()
.await?;
npm_installer.cache_packages(PackageCaching::All).await?;
if let Some(lockfile) = self.cli_options.maybe_lockfile() {
lockfile.write_if_changed()?;
}
}
Ok(())
}
}
#[derive(Debug)]

View file

@ -11312,15 +11312,46 @@ var Diagnostics = {
var deno_exports = {};
__export(deno_exports, {
createDenoForkContext: () => createDenoForkContext,
enterSpan: () => enterSpan,
exitSpan: () => exitSpan,
parseNpmPackageReference: () => parseNpmPackageReference,
setEnterSpan: () => setEnterSpan,
setExitSpan: () => setExitSpan,
setIsNodeSourceFileCallback: () => setIsNodeSourceFileCallback,
setNodeOnlyGlobalNames: () => setNodeOnlyGlobalNames,
setTypesNodeIgnorableNames: () => setTypesNodeIgnorableNames,
spanned: () => spanned,
tryParseNpmPackageReference: () => tryParseNpmPackageReference
});
var isNodeSourceFile = () => false;
var nodeOnlyGlobalNames = /* @__PURE__ */ new Set();
var typesNodeIgnorableNames = /* @__PURE__ */ new Set();
var enterSpan = () => ({});
var exitSpan = () => {
};
function setEnterSpan(f) {
enterSpan = f;
}
function setExitSpan(f) {
exitSpan = f;
}
function spanned(name, f) {
const span = enterSpan(name);
let needsExit = true;
try {
const result = f();
if (result instanceof Promise) {
needsExit = false;
return result.finally(() => exitSpan(span));
} else {
return result;
}
} finally {
if (needsExit) {
exitSpan(span);
}
}
}
function setIsNodeSourceFileCallback(callback) {
isNodeSourceFile = callback;
}
@ -165737,6 +165768,10 @@ function getDefaultCommitCharacters(isNewIdentifierLocation) {
return allCommitCharacters;
}
function getCompletionsAtPosition(host, program, log, sourceFile, position, preferences, triggerCharacter, completionKind, cancellationToken, formatContext, includeSymbol = false) {
return spanned("Tsc::getCompletionsAtPosition", () => getCompletionsAtPositionInner(host, program, log, sourceFile, position, preferences, triggerCharacter, completionKind, cancellationToken, formatContext, includeSymbol));
}
function getCompletionsAtPositionInner(host, program, log, sourceFile, position, preferences, triggerCharacter, completionKind, cancellationToken, formatContext, includeSymbol = false) {
var _a;
const { previousToken } = getRelevantTokens(position, sourceFile);
if (triggerCharacter && !isInString(sourceFile, position, previousToken) && !isValidTrigger(sourceFile, triggerCharacter, previousToken, position)) {

View file

@ -11,6 +11,18 @@ const ops = core.ops;
let logDebug = false;
let logSource = "JS";
function spanned(name, f) {
if (!ops.op_make_span) {
return f();
}
const span = ops.op_make_span(name, false);
try {
return f();
} finally {
ops.op_exit_span(span);
}
}
// The map from the normalized specifier to the original.
// TypeScript normalizes the specifier in its internal processing,
// but the original specifier is needed when looking up the source from the runtime.
@ -384,7 +396,7 @@ export function clearScriptNamesCache() {
* specific "bindings" to the Deno environment that tsc needs to work.
*
* @type {ts.CompilerHost & ts.LanguageServiceHost} */
export const host = {
const hostImpl = {
fileExists(specifier) {
if (logDebug) {
debug(`host.fileExists("${specifier}")`);
@ -722,6 +734,34 @@ export const host = {
},
};
// these host methods are super noisy (often thousands of calls per TSC request)
const excluded = new Set([
"getScriptVersion",
"fileExists",
"getScriptSnapshot",
"getCompilationSettings",
"getCurrentDirectory",
"useCaseSensitiveFileNames",
"getModuleSpecifierCache",
"getGlobalTypingsCacheLocation",
"getSourceFile",
]);
/** @type {typeof hostImpl} */
export const host = {
log(msg) {
ops.op_log_event(msg);
},
};
for (const [key, value] of Object.entries(hostImpl)) {
if (typeof value === "function" && !excluded.has(key)) {
host[key] = (...args) => {
return spanned(key, () => value.bind(host)(...args));
};
} else {
host[key] = value;
}
}
// @ts-ignore Undocumented function.
const exportMapCache = ts.createCacheableExportInfoMap(host);

View file

@ -286,6 +286,8 @@ let hasStarted = false;
/** @param {boolean} enableDebugLogging */
export async function serverMainLoop(enableDebugLogging) {
ts.deno.setEnterSpan(ops.op_make_span);
ts.deno.setExitSpan(ops.op_exit_span);
if (hasStarted) {
throw new Error("The language server has already been initialized.");
}
@ -382,7 +384,7 @@ function arraysEqual(a, b) {
* @param {string | null} scope
* @param {PendingChange | null} maybeChange
*/
function serverRequest(id, method, args, scope, maybeChange) {
function serverRequestInner(id, method, args, scope, maybeChange) {
debug(`serverRequest()`, id, method, args, scope, maybeChange);
if (maybeChange !== null) {
const changedScripts = maybeChange[0];
@ -517,3 +519,19 @@ function serverRequest(id, method, args, scope, maybeChange) {
);
}
}
/**
* @param {number} id
* @param {string} method
* @param {any[]} args
* @param {string | null} scope
* @param {PendingChange | null} maybeChange
*/
function serverRequest(id, method, args, scope, maybeChange) {
const span = ops.op_make_span(`serverRequest(${method})`, true);
try {
serverRequestInner(id, method, args, scope, maybeChange);
} finally {
ops.op_exit_span(span, true);
}
}

View file

@ -2905,7 +2905,7 @@ declare namespace Deno {
/** Reads and resolves to the entire contents of a file as an array of bytes.
* `TextDecoder` can be used to transform the bytes to string if required.
* Reading a directory returns an empty data array.
* Rejects with an error when reading a directory.
*
* ```ts
* const decoder = new TextDecoder("utf-8");
@ -2925,7 +2925,7 @@ declare namespace Deno {
/** Synchronously reads and returns the entire contents of a file as an array
* of bytes. `TextDecoder` can be used to transform the bytes to string if
* required. Reading a directory returns an empty data array.
* required. Throws an error when reading a directory.
*
* ```ts
* const decoder = new TextDecoder("utf-8");
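A short, hedged sketch of the behavior these doc updates describe: passing a directory to `Deno.readFile` now rejects (and `Deno.readFileSync` throws) instead of yielding an empty array. The `./logs` path is purely illustrative and assumed to be a directory.

```ts
// Illustrative only: assumes "./logs" exists and is a directory.
try {
  const data = await Deno.readFile("./logs");
  console.log("read", data.byteLength, "bytes");
} catch (err) {
  // Expected path under the updated documentation above.
  console.error("reading a directory rejected:", err);
}

try {
  Deno.readFileSync("./logs");
} catch (err) {
  // The synchronous variant throws rather than rejecting.
  console.error("sync read of a directory threw:", err);
}
```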

View file

@ -1391,13 +1391,60 @@ declare namespace Deno {
fix?(fixer: Fixer): FixData | Iterable<FixData>;
}
/**
* @category Linter
* @experimental
*/
export interface SourceCode {
/**
* Get the source text of a node. Omit `node` to get the
* full source code.
*/
getText(node?: Node): string;
/**
* Returns an array of ancestors of the current node, excluding the
* current node.
*/
getAncestors(node: Node): Node[];
/**
* Get the full source code.
*/
text: string;
/**
* Get the root node of the file. It's always the `Program` node.
*/
ast: Program;
}
/**
* @category Linter
* @experimental
*/
export interface RuleContext {
/**
* The running rule id: `<plugin-name>/<rule-name>`
*/
id: string;
/**
* Name of the file that's currently being linted.
*/
filename: string;
/**
* Helper methods for working with the raw source code.
*/
sourceCode: SourceCode;
/**
* Report a lint error.
*/
report(data: ReportData): void;
/**
* @deprecated Use `ctx.filename` instead.
*/
getFilename(): string;
/**
* @deprecated Use `ctx.sourceCode` instead.
*/
getSourceCode(): SourceCode;
}
/**
@ -1473,7 +1520,9 @@ declare namespace Deno {
}
/**
* This API is a noop in `deno run`...
* This API is useful for testing lint plugins.
*
* It throws an error if it's not used in the `deno test` subcommand.
* @category Linter
* @experimental
*/
@ -1483,6 +1532,17 @@ declare namespace Deno {
source: string,
): Diagnostic[];
/**
* @category Linter
* @experimental
*/
export interface Program {
type: "Program";
range: Range;
sourceType: "module" | "script";
body: Statement[];
}
/**
* @category Linter
* @experimental
@ -3843,6 +3903,7 @@ declare namespace Deno {
* @experimental
*/
export type Node =
| Program
| Expression
| Statement
| TypeNode
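Taken together, the `SourceCode`, `RuleContext`, and `Program` additions describe the shape a lint rule sees, and `runPlugin` lets a test drive it. Below is a minimal sketch, not taken from this commit: the plugin name, rule name, and linted snippet are invented, and the surrounding `Deno.lint.Plugin`, visitor, and `Diagnostic` types are assumed from the rest of the declaration file.

```ts
// Hypothetical plugin for illustration only.
const plugin: Deno.lint.Plugin = {
  name: "example-plugin",
  rules: {
    "no-foo": {
      create(ctx) {
        // ctx is a RuleContext: ctx.id, ctx.filename, ctx.sourceCode, ctx.report.
        return {
          Identifier(node) {
            if (node.name === "foo") {
              ctx.report({ node, message: "Do not name things `foo`" });
            }
          },
        };
      },
    },
  },
};

// runPlugin throws outside `deno test`, so exercise it from a test.
Deno.test("no-foo reports `foo` identifiers", () => {
  const diagnostics = Deno.lint.runPlugin(plugin, "main.ts", "const foo = 1;");
  if (diagnostics.length === 0) {
    throw new Error("expected at least one diagnostic");
  }
  console.log(diagnostics.map((d) => d.message));
});
```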

View file

@ -38,7 +38,6 @@ declare class GPUSupportedLimits {
maxBufferSize?: number;
maxVertexAttributes?: number;
maxVertexBufferArrayStride?: number;
maxInterStageShaderComponents?: number;
maxColorAttachments?: number;
maxColorAttachmentBytesPerSample?: number;
maxComputeWorkgroupStorageSize?: number;
@ -73,6 +72,8 @@ declare class GPUAdapterInfo {
readonly architecture: string;
readonly device: string;
readonly description: string;
readonly subgroupMinSize: number;
readonly subgroupMaxSize: number;
}
/** @category GPU */
@ -105,36 +106,55 @@ declare class GPUAdapter {
/** @category GPU */
interface GPUDeviceDescriptor extends GPUObjectDescriptorBase {
requiredFeatures?: GPUFeatureName[];
requiredLimits?: Record<string, number>;
requiredLimits?: Record<string, number | undefined>;
}
/** @category GPU */
type GPUFeatureName =
| "depth-clip-control"
| "depth32float-stencil8"
| "pipeline-statistics-query"
| "texture-compression-bc"
| "texture-compression-etc2"
| "texture-compression-astc"
| "timestamp-query"
| "indirect-first-instance"
| "shader-f16"
| "depth32float-stencil8"
| "texture-compression-bc"
| "texture-compression-bc-sliced-3d"
| "texture-compression-etc2"
| "texture-compression-astc"
| "rg11b10ufloat-renderable"
| "bgra8unorm-storage"
| "float32-filterable"
| "dual-source-blending"
| "subgroups"
// extended from spec
| "texture-format-16-bit-norm"
| "texture-compression-astc-hdr"
| "texture-adapter-specific-format-features"
| "pipeline-statistics-query"
| "timestamp-query-inside-passes"
| "mappable-primary-buffers"
| "sampled-texture-binding-array"
| "sampled-texture-array-dynamic-indexing"
| "sampled-texture-array-non-uniform-indexing"
| "unsized-binding-array"
| "texture-binding-array"
| "buffer-binding-array"
| "storage-resource-binding-array"
| "sampled-texture-and-storage-buffer-array-non-uniform-indexing"
| "uniform-buffer-and-storage-texture-array-non-uniform-indexing"
| "partially-bound-binding-array"
| "multi-draw-indirect"
| "multi-draw-indirect-count"
| "push-constants"
| "address-mode-clamp-to-zero"
| "address-mode-clamp-to-border"
| "texture-adapter-specific-format-features"
| "shader-float64"
| "vertex-attribute-64bit";
| "polygon-mode-line"
| "polygon-mode-point"
| "conservative-rasterization"
| "vertex-writable-storage"
| "clear-texture"
| "spirv-shader-passthrough"
| "multiview"
| "vertex-attribute-64-bit"
| "shader-f64"
| "shader-i16"
| "shader-primitive-index"
| "shader-early-depth-test";
/** @category GPU */
declare class GPUDevice extends EventTarget implements GPUObjectBase {
@ -146,6 +166,7 @@ declare class GPUDevice extends EventTarget implements GPUObjectBase {
readonly features: GPUSupportedFeatures;
readonly limits: GPUSupportedLimits;
readonly adapterInfo: GPUAdapterInfo;
readonly queue: GPUQueue;
destroy(): undefined;
@ -296,6 +317,7 @@ declare class GPUTextureView implements GPUObjectBase {
interface GPUTextureViewDescriptor extends GPUObjectDescriptorBase {
format?: GPUTextureFormat;
dimension?: GPUTextureViewDimension;
usage?: GPUTextureUsageFlags;
aspect?: GPUTextureAspect;
baseMipLevel?: number;
mipLevelCount?: number;
@ -744,7 +766,11 @@ type GPUBlendFactor =
| "one-minus-dst-alpha"
| "src-alpha-saturated"
| "constant"
| "one-minus-constant";
| "one-minus-constant"
| "src1"
| "one-minus-src1"
| "src1-alpha"
| "one-minus-src1-alpha";
/** @category GPU */
type GPUBlendOperation =
@ -758,8 +784,8 @@ type GPUBlendOperation =
interface GPUDepthStencilState {
format: GPUTextureFormat;
depthWriteEnabled: boolean;
depthCompare: GPUCompareFunction;
depthWriteEnabled?: boolean;
depthCompare?: GPUCompareFunction;
stencilFront?: GPUStencilFaceState;
stencilBack?: GPUStencilFaceState;
@ -852,7 +878,7 @@ interface GPUVertexAttribute {
}
/** @category GPU */
interface GPUImageDataLayout {
interface GPUTexelCopyBufferLayout {
offset?: number;
bytesPerRow?: number;
rowsPerImage?: number;
@ -884,20 +910,20 @@ declare class GPUCommandEncoder implements GPUObjectBase {
): undefined;
copyBufferToTexture(
source: GPUImageCopyBuffer,
destination: GPUImageCopyTexture,
source: GPUTexelCopyBufferInfo,
destination: GPUTexelCopyTextureInfo,
copySize: GPUExtent3D,
): undefined;
copyTextureToBuffer(
source: GPUImageCopyTexture,
destination: GPUImageCopyBuffer,
source: GPUTexelCopyTextureInfo,
destination: GPUTexelCopyBufferInfo,
copySize: GPUExtent3D,
): undefined;
copyTextureToTexture(
source: GPUImageCopyTexture,
destination: GPUImageCopyTexture,
source: GPUTexelCopyTextureInfo,
destination: GPUTexelCopyTextureInfo,
copySize: GPUExtent3D,
): undefined;
@ -928,12 +954,12 @@ declare class GPUCommandEncoder implements GPUObjectBase {
interface GPUCommandEncoderDescriptor extends GPUObjectDescriptorBase {}
/** @category GPU */
interface GPUImageCopyBuffer extends GPUImageDataLayout {
interface GPUTexelCopyBufferInfo extends GPUTexelCopyBufferLayout {
buffer: GPUBuffer;
}
/** @category GPU */
interface GPUImageCopyTexture {
interface GPUTexelCopyTextureInfo {
texture: GPUTexture;
mipLevel?: number;
origin?: GPUOrigin3D;
@ -944,13 +970,13 @@ interface GPUImageCopyTexture {
interface GPUProgrammablePassEncoder {
setBindGroup(
index: number,
bindGroup: GPUBindGroup,
bindGroup: GPUBindGroup | null,
dynamicOffsets?: number[],
): undefined;
setBindGroup(
index: number,
bindGroup: GPUBindGroup,
bindGroup: GPUBindGroup | null,
dynamicOffsetsData: Uint32Array,
dynamicOffsetsDataStart: number,
dynamicOffsetsDataLength: number,
@ -967,12 +993,12 @@ declare class GPUComputePassEncoder
label: string;
setBindGroup(
index: number,
bindGroup: GPUBindGroup,
bindGroup: GPUBindGroup | null,
dynamicOffsets?: number[],
): undefined;
setBindGroup(
index: number,
bindGroup: GPUBindGroup,
bindGroup: GPUBindGroup | null,
dynamicOffsetsData: Uint32Array,
dynamicOffsetsDataStart: number,
dynamicOffsetsDataLength: number,
@ -1046,12 +1072,12 @@ declare class GPURenderPassEncoder
label: string;
setBindGroup(
index: number,
bindGroup: GPUBindGroup,
bindGroup: GPUBindGroup | null,
dynamicOffsets?: number[],
): undefined;
setBindGroup(
index: number,
bindGroup: GPUBindGroup,
bindGroup: GPUBindGroup | null,
dynamicOffsetsData: Uint32Array,
dynamicOffsetsDataStart: number,
dynamicOffsetsDataLength: number,
@ -1198,12 +1224,12 @@ declare class GPURenderBundleEncoder
pushDebugGroup(groupLabel: string): undefined;
setBindGroup(
index: number,
bindGroup: GPUBindGroup,
bindGroup: GPUBindGroup | null,
dynamicOffsets?: number[],
): undefined;
setBindGroup(
index: number,
bindGroup: GPUBindGroup,
bindGroup: GPUBindGroup | null,
dynamicOffsetsData: Uint32Array,
dynamicOffsetsDataStart: number,
dynamicOffsetsDataLength: number,
@ -1255,9 +1281,9 @@ declare class GPUQueue implements GPUObjectBase {
): undefined;
writeTexture(
destination: GPUImageCopyTexture,
destination: GPUTexelCopyTextureInfo,
data: BufferSource,
dataLayout: GPUImageDataLayout,
dataLayout: GPUTexelCopyBufferLayout,
size: GPUExtent3D,
): undefined;
}
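The declaration changes above are largely renames and loosened optionality, so a short, hedged sketch of user code against the new surface may help: `requiredLimits` values may now be `undefined`, and the `GPUImageCopy*`/`GPUImageDataLayout` names give way to the `GPUTexelCopy*` family. The sketch assumes a runtime where `requestAdapter()` succeeds; sizes and formats are arbitrary.

```ts
// Illustrative only: adapter/device availability depends on the host GPU.
const adapter = await navigator.gpu.requestAdapter();
if (!adapter) throw new Error("no suitable GPU adapter");

const device = await adapter.requestDevice({
  // Values may now be `undefined`, matching Record<string, number | undefined>.
  requiredLimits: {
    maxBufferSize: adapter.limits.maxBufferSize,
    maxColorAttachments: undefined,
  },
});

const texture = device.createTexture({
  size: { width: 16, height: 16 },
  format: "rgba8unorm",
  usage: GPUTextureUsage.COPY_DST | GPUTextureUsage.TEXTURE_BINDING,
});

// The renamed GPUTexelCopyTextureInfo / GPUTexelCopyBufferLayout shapes are
// structural, so call sites look the same as with the old GPUImageCopy* names.
device.queue.writeTexture(
  { texture }, // GPUTexelCopyTextureInfo
  new Uint8Array(16 * 16 * 4), // BufferSource
  { bytesPerRow: 16 * 4 }, // GPUTexelCopyBufferLayout
  { width: 16, height: 16 }, // GPUExtent3D
);
```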

View file

@ -15,6 +15,9 @@ and limitations under the License.
declare namespace ts {
namespace deno {
function setEnterSpan(f: EnterSpan): void;
function setExitSpan(f: ExitSpan): void;
function spanned<T>(name: string, f: () => T): T;
function setIsNodeSourceFileCallback(callback: IsNodeSourceFileCallback): void;
function setNodeOnlyGlobalNames(names: Set<string>): void;
function setTypesNodeIgnorableNames(names: Set<string>): void;
@ -26,6 +29,10 @@ declare namespace ts {
function tryParseNpmPackageReference(text: string): NpmPackageReference | undefined;
function parseNpmPackageReference(text: string): NpmPackageReference;
type IsNodeSourceFileCallback = (sourceFile: ts.SourceFile) => boolean;
type EnterSpan = (name: string) => object;
type ExitSpan = (span: object) => void;
let enterSpan: EnterSpan;
let exitSpan: ExitSpan;
interface DenoForkContext {
hasNodeSourceFile: (node: ts.Node | undefined) => boolean;
getGlobalsForName: (id: ts.__String) => ts.SymbolTable;

View file

@ -4,6 +4,7 @@ use std::borrow::Cow;
use std::collections::HashMap;
use std::fmt;
use std::path::PathBuf;
use std::rc::Rc;
use std::sync::Arc;
use std::sync::OnceLock;
@ -107,35 +108,32 @@ fn get_asset_texts() -> Result<Vec<AssetText>, AnyError> {
macro_rules! maybe_compressed_source {
($file: expr) => {{
#[cfg(debug_assertions)]
{
StaticAssetSource::Uncompressed(include_str!(concat!(
env!("CARGO_MANIFEST_DIR"),
"/",
$file
)))
}
#[cfg(not(debug_assertions))]
{
StaticAssetSource::Compressed(CompressedSource::new(include_bytes!(
concat!(env!("OUT_DIR"), "/", $file, ".zstd")
)))
}
maybe_compressed_source!(compressed = $file, uncompressed = $file)
}};
(compressed = $comp: expr, uncompressed = $uncomp: expr) => {{
#[cfg(debug_assertions)]
#[cfg(feature = "hmr")]
{
StaticAssetSource::Uncompressed(include_str!(concat!(
env!("CARGO_MANIFEST_DIR"),
"/",
$uncomp
)))
StaticAssetSource::Owned(
concat!(env!("CARGO_MANIFEST_DIR"), "/", $uncomp),
std::sync::OnceLock::new(),
)
}
#[cfg(not(debug_assertions))]
#[cfg(not(feature = "hmr"))]
{
StaticAssetSource::Compressed(CompressedSource::new(include_bytes!(
concat!(env!("OUT_DIR"), "/", $comp, ".zstd")
)))
#[cfg(debug_assertions)]
{
StaticAssetSource::Uncompressed(include_str!(concat!(
env!("CARGO_MANIFEST_DIR"),
"/",
$uncomp
)))
}
#[cfg(not(debug_assertions))]
{
StaticAssetSource::Compressed(CompressedSource::new(include_bytes!(
concat!(env!("OUT_DIR"), "/", $comp, ".zstd")
)))
}
}
}};
}
@ -163,9 +161,11 @@ macro_rules! maybe_compressed_ext_lib {
#[derive(Clone)]
pub enum StaticAssetSource {
#[cfg_attr(debug_assertions, allow(dead_code))]
#[cfg_attr(any(debug_assertions, feature = "hmr"), allow(dead_code))]
Compressed(CompressedSource),
Uncompressed(&'static str),
#[cfg_attr(not(feature = "hmr"), allow(dead_code))]
Owned(&'static str, std::sync::OnceLock<Arc<str>>),
}
/// Like a `Cow` but the owned form is an `Arc<str>` instead of `String`
@ -227,6 +227,11 @@ impl StaticAssetSource {
MaybeStaticSource::Computed(compressed_source.get())
}
StaticAssetSource::Uncompressed(src) => MaybeStaticSource::Static(src),
StaticAssetSource::Owned(path, cell) => {
let str =
cell.get_or_init(|| std::fs::read_to_string(path).unwrap().into());
MaybeStaticSource::Computed((*str).clone())
}
}
}
@ -1301,7 +1306,7 @@ pub(crate) struct CompressedSource {
}
impl CompressedSource {
#[cfg_attr(debug_assertions, allow(dead_code))]
#[cfg_attr(any(debug_assertions, feature = "hmr"), allow(dead_code))]
pub(crate) const fn new(bytes: &'static [u8]) -> Self {
Self {
bytes,
@ -1372,10 +1377,82 @@ deno_core::extension!(deno_cli_tsc,
}
);
pub struct TscExtCodeCache {
cache: Arc<dyn deno_runtime::code_cache::CodeCache>,
}
impl TscExtCodeCache {
pub fn new(cache: Arc<dyn deno_runtime::code_cache::CodeCache>) -> Self {
Self { cache }
}
}
impl deno_core::ExtCodeCache for TscExtCodeCache {
fn get_code_cache_info(
&self,
specifier: &ModuleSpecifier,
code: &deno_core::ModuleSourceCode,
esm: bool,
) -> deno_core::SourceCodeCacheInfo {
use deno_runtime::code_cache::CodeCacheType;
let code_hash = FastInsecureHasher::new_deno_versioned()
.write_hashable(code)
.finish();
let data = self
.cache
.get_sync(
specifier,
if esm {
CodeCacheType::EsModule
} else {
CodeCacheType::Script
},
code_hash,
)
.map(Cow::from)
.inspect(|_| {
log::debug!(
"V8 code cache hit for Extension module: {specifier}, [{code_hash:?}]"
);
});
deno_core::SourceCodeCacheInfo {
hash: code_hash,
data,
}
}
fn code_cache_ready(
&self,
specifier: ModuleSpecifier,
source_hash: u64,
code_cache: &[u8],
esm: bool,
) {
use deno_runtime::code_cache::CodeCacheType;
log::debug!(
"Updating V8 code cache for Extension module: {specifier}, [{source_hash:?}]"
);
self.cache.set_sync(
specifier,
if esm {
CodeCacheType::EsModule
} else {
CodeCacheType::Script
},
source_hash,
code_cache,
);
}
}
/// Execute a request on the supplied snapshot, returning a response which
/// contains information, like any emitted files, diagnostics, statistics and
/// optionally an updated TypeScript build info.
pub fn exec(request: Request) -> Result<Response, ExecError> {
pub fn exec(
request: Request,
code_cache: Option<Arc<dyn deno_runtime::code_cache::CodeCache>>,
) -> Result<Response, ExecError> {
// tsc cannot handle root specifiers that don't have one of the "acceptable"
// extensions. Therefore, we have to check the root modules against their
// extensions and remap any that are unacceptable to tsc and add them to the
@ -1410,13 +1487,21 @@ pub fn exec(request: Request) -> Result<Response, ExecError> {
});
let exec_source = format!("globalThis.exec({request_value})");
let mut extensions =
deno_runtime::snapshot_info::get_extensions_in_snapshot();
extensions.push(deno_cli_tsc::init_ops_and_esm(
request,
root_map,
remapped_specifiers,
));
let extension_code_cache = code_cache.map(|cache| {
Rc::new(TscExtCodeCache::new(cache)) as Rc<dyn deno_core::ExtCodeCache>
});
let mut runtime = JsRuntime::new(RuntimeOptions {
extensions: vec![deno_cli_tsc::init_ops_and_esm(
request,
root_map,
remapped_specifiers,
)],
extensions,
create_params: create_isolate_create_params(),
startup_snapshot: deno_snapshots::CLI_SNAPSHOT,
extension_code_cache,
..Default::default()
});
@ -1446,11 +1531,13 @@ pub fn exec(request: Request) -> Result<Response, ExecError> {
#[cfg(test)]
mod tests {
use deno_core::futures::future;
use deno_core::parking_lot::Mutex;
use deno_core::serde_json;
use deno_core::OpState;
use deno_error::JsErrorBox;
use deno_graph::GraphKind;
use deno_graph::ModuleGraph;
use deno_runtime::code_cache::CodeCacheType;
use test_util::PathRef;
use super::Diagnostic;
@ -1525,6 +1612,12 @@ mod tests {
async fn test_exec(
specifier: &ModuleSpecifier,
) -> Result<Response, ExecError> {
test_exec_with_cache(specifier, None).await
}
async fn test_exec_with_cache(
specifier: &ModuleSpecifier,
code_cache: Option<Arc<dyn deno_runtime::code_cache::CodeCache>>,
) -> Result<Response, ExecError> {
let hash_data = 123; // something random
let fixtures = test_util::testdata_path().join("tsc2");
@ -1559,48 +1652,7 @@ mod tests {
root_names: vec![(specifier.clone(), MediaType::TypeScript)],
check_mode: TypeCheckMode::All,
};
exec(request)
}
// TODO(bartlomieju): this test is segfaulting in V8, saying that there are too
// few external references registered. It seems to be a bug in our snapshotting
// logic. Because when we create TSC snapshot we register a few ops that
// are called during snapshotting time, V8 expects at least as many references
// when it starts up. The thing is that these ops are one-off - ie. they will never
// be used again after the snapshot is taken. We should figure out a mechanism
// to allow removing some of the ops before taking a snapshot.
#[ignore]
#[tokio::test]
async fn test_compiler_snapshot() {
let mut js_runtime = JsRuntime::new(RuntimeOptions {
startup_snapshot: None,
extensions: vec![super::deno_cli_tsc::init_ops_and_esm(
Request {
check_mode: TypeCheckMode::All,
config: Arc::new(TsConfig(json!({}))),
debug: false,
graph: Arc::new(ModuleGraph::new(GraphKind::TypesOnly)),
hash_data: 0,
maybe_npm: None,
maybe_tsbuildinfo: None,
root_names: vec![],
},
HashMap::new(),
HashMap::new(),
)],
..Default::default()
});
js_runtime
.execute_script(
"<anon>",
r#"
if (!(globalThis.exec)) {
throw Error("bad");
}
console.log(`ts version: ${ts.version}`);
"#,
)
.unwrap();
exec(request, code_cache)
}
#[tokio::test]
@ -1821,4 +1873,115 @@ mod tests {
.expect("exec should not have errored");
assert!(!actual.diagnostics.has_diagnostic());
}
pub type SpecifierWithType = (ModuleSpecifier, CodeCacheType);
#[derive(Default)]
struct TestExtCodeCache {
cache: Mutex<HashMap<(SpecifierWithType, u64), Vec<u8>>>,
hits: Mutex<HashMap<SpecifierWithType, usize>>,
misses: Mutex<HashMap<SpecifierWithType, usize>>,
}
impl deno_runtime::code_cache::CodeCache for TestExtCodeCache {
fn get_sync(
&self,
specifier: &ModuleSpecifier,
code_cache_type: CodeCacheType,
source_hash: u64,
) -> Option<Vec<u8>> {
let result = self
.cache
.lock()
.get(&((specifier.clone(), code_cache_type), source_hash))
.cloned();
if result.is_some() {
*self
.hits
.lock()
.entry((specifier.clone(), code_cache_type))
.or_default() += 1;
} else {
*self
.misses
.lock()
.entry((specifier.clone(), code_cache_type))
.or_default() += 1;
}
result
}
fn set_sync(
&self,
specifier: ModuleSpecifier,
code_cache_type: CodeCacheType,
source_hash: u64,
data: &[u8],
) {
self
.cache
.lock()
.insert(((specifier, code_cache_type), source_hash), data.to_vec());
}
}
#[tokio::test]
async fn test_exec_code_cache() {
let code_cache = Arc::new(TestExtCodeCache::default());
let specifier = ModuleSpecifier::parse("https://deno.land/x/a.ts").unwrap();
let actual = test_exec_with_cache(&specifier, Some(code_cache.clone()))
.await
.expect("exec should not have errored");
assert!(!actual.diagnostics.has_diagnostic());
let expect = [
(
"ext:deno_cli_tsc/99_main_compiler.js",
CodeCacheType::EsModule,
),
("ext:deno_cli_tsc/98_lsp.js", CodeCacheType::EsModule),
("ext:deno_cli_tsc/97_ts_host.js", CodeCacheType::EsModule),
("ext:deno_cli_tsc/00_typescript.js", CodeCacheType::Script),
];
{
let mut files = HashMap::new();
for (((specifier, ty), _), _) in code_cache.cache.lock().iter() {
let specifier = specifier.to_string();
if files.contains_key(&specifier) {
panic!("should have only 1 entry per specifier");
}
files.insert(specifier, *ty);
}
// 99_main_compiler, 98_lsp, 97_ts_host, 00_typescript
assert_eq!(files.len(), 4);
assert_eq!(code_cache.hits.lock().len(), 0);
assert_eq!(code_cache.misses.lock().len(), 4);
for (specifier, ty) in &expect {
assert_eq!(files.get(*specifier), Some(ty));
}
code_cache.hits.lock().clear();
code_cache.misses.lock().clear();
}
{
let _ = test_exec_with_cache(&specifier, Some(code_cache.clone()))
.await
.expect("exec should not have errored");
// 99_main_compiler, 98_lsp, 97_ts_host, 00_typescript
assert_eq!(code_cache.hits.lock().len(), 4);
assert_eq!(code_cache.misses.lock().len(), 0);
for (specifier, ty) in expect {
let url = ModuleSpecifier::parse(specifier).unwrap();
assert_eq!(code_cache.hits.lock().get(&(url, ty)), Some(&1));
}
}
}
}

View file

@ -486,7 +486,7 @@ mod tests {
RuntimePermissionDescriptorParser::new(crate::sys::CliSys::default()),
);
let options = WorkerOptions {
startup_snapshot: crate::js::deno_isolate_init(),
startup_snapshot: deno_snapshots::CLI_SNAPSHOT,
..Default::default()
};

View file

@ -2,7 +2,7 @@
[package]
name = "deno_broadcast_channel"
version = "0.183.0"
version = "0.184.0"
authors.workspace = true
edition.workspace = true
license.workspace = true

View file

@ -2,7 +2,7 @@
[package]
name = "deno_cache"
version = "0.121.0"
version = "0.122.0"
authors.workspace = true
edition.workspace = true
license.workspace = true

View file

@ -2,7 +2,7 @@
[package]
name = "deno_canvas"
version = "0.58.0"
version = "0.59.0"
authors.workspace = true
edition.workspace = true
license.workspace = true

View file

@ -2,7 +2,7 @@
[package]
name = "deno_console"
version = "0.189.0"
version = "0.190.0"
authors.workspace = true
edition.workspace = true
license.workspace = true

View file

@ -2,7 +2,7 @@
[package]
name = "deno_cron"
version = "0.69.0"
version = "0.70.0"
authors.workspace = true
edition.workspace = true
license.workspace = true

View file

@ -2,7 +2,7 @@
[package]
name = "deno_crypto"
version = "0.203.0"
version = "0.204.0"
authors.workspace = true
edition.workspace = true
license.workspace = true

View file

@ -2,7 +2,7 @@
[package]
name = "deno_fetch"
version = "0.213.0"
version = "0.214.0"
authors.workspace = true
edition.workspace = true
license.workspace = true
@ -19,6 +19,7 @@ bytes.workspace = true
data-url.workspace = true
deno_core.workspace = true
deno_error.workspace = true
deno_fs.workspace = true
deno_path_util.workspace = true
deno_permissions.workspace = true
deno_tls.workspace = true

View file

@ -9,6 +9,9 @@ use deno_core::url::Url;
use deno_core::CancelFuture;
use deno_core::OpState;
use deno_error::JsErrorBox;
use deno_fs::open_options_with_access_check;
use deno_fs::OpenOptions;
use deno_permissions::PermissionsContainer;
use http::StatusCode;
use http_body_util::BodyExt;
use tokio_util::io::ReaderStream;
@ -16,6 +19,16 @@ use tokio_util::io::ReaderStream;
use crate::CancelHandle;
use crate::CancelableResponseFuture;
use crate::FetchHandler;
use crate::FetchPermissions;
fn sync_permission_check<'a, P: FetchPermissions + 'static>(
permissions: &'a mut P,
api_name: &'static str,
) -> impl deno_fs::AccessCheckFn + 'a {
move |resolved, path, _options| {
permissions.check_read(resolved, path, api_name)
}
}
/// An implementation which tries to read file URLs from the file system via
/// tokio::fs.
@ -25,25 +38,52 @@ pub struct FsFetchHandler;
impl FetchHandler for FsFetchHandler {
fn fetch_file(
&self,
_state: &mut OpState,
state: &mut OpState,
url: &Url,
) -> (CancelableResponseFuture, Option<Rc<CancelHandle>>) {
let mut access_check = sync_permission_check::<PermissionsContainer>(
state.borrow_mut(),
"fetch()",
);
let cancel_handle = CancelHandle::new_rc();
let path_result = url.to_file_path();
let path = match url.to_file_path() {
Ok(path) => path,
Err(_) => {
let fut = async move { Err::<_, _>(()) };
return (
fut
.map_err(move |_| super::FetchError::NetworkError)
.or_cancel(&cancel_handle)
.boxed_local(),
Some(cancel_handle),
);
}
};
let path_and_opts_result = open_options_with_access_check(
OpenOptions {
read: true,
..Default::default()
},
&path,
Some(&mut access_check),
);
let response_fut = async move {
let path = path_result?;
let file = tokio::fs::File::open(path).map_err(|_| ()).await?;
let (path, opts) = path_and_opts_result?;
let file = tokio::fs::OpenOptions::from(opts)
.open(path)
.await
.map_err(|_| super::FetchError::NetworkError)?;
let stream = ReaderStream::new(file)
.map_ok(hyper::body::Frame::data)
.map_err(JsErrorBox::from_err);
let body = http_body_util::StreamBody::new(stream).boxed();
let response = http::Response::builder()
.status(StatusCode::OK)
.body(body)
.map_err(|_| ())?;
Ok::<_, ()>(response)
.map_err(move |_| super::FetchError::NetworkError)?;
Ok::<_, _>(response)
}
.map_err(move |_| super::FetchError::NetworkError)
.or_cancel(&cancel_handle)
.boxed_local();

View file

@ -47,7 +47,7 @@ use deno_core::RcRef;
use deno_core::Resource;
use deno_core::ResourceId;
use deno_error::JsErrorBox;
use deno_path_util::url_from_file_path;
use deno_fs::FsError;
use deno_path_util::PathToUrlError;
use deno_permissions::PermissionCheckError;
use deno_tls::rustls::RootCertStore;
@ -227,6 +227,20 @@ pub enum FetchError {
#[class(generic)]
#[error(transparent)]
Dns(hickory_resolver::ResolveError),
#[class("NotCapable")]
#[error("requires {0} access")]
NotCapable(&'static str),
}
impl From<deno_fs::FsError> for FetchError {
fn from(value: deno_fs::FsError) -> Self {
match value {
deno_fs::FsError::Io(_)
| deno_fs::FsError::FileBusy
| deno_fs::FsError::NotSupported => FetchError::NetworkError,
deno_fs::FsError::NotCapable(err) => FetchError::NotCapable(err),
}
}
}
pub type CancelableResponseFuture =
@ -390,9 +404,10 @@ pub trait FetchPermissions {
#[must_use = "the resolved return value to mitigate time-of-check to time-of-use issues"]
fn check_read<'a>(
&mut self,
resolved: bool,
p: &'a Path,
api_name: &str,
) -> Result<Cow<'a, Path>, PermissionCheckError>;
) -> Result<Cow<'a, Path>, FsError>;
}
impl FetchPermissions for deno_permissions::PermissionsContainer {
@ -408,14 +423,23 @@ impl FetchPermissions for deno_permissions::PermissionsContainer {
#[inline(always)]
fn check_read<'a>(
&mut self,
resolved: bool,
path: &'a Path,
api_name: &str,
) -> Result<Cow<'a, Path>, PermissionCheckError> {
) -> Result<Cow<'a, Path>, FsError> {
if resolved {
self
.check_special_file(path, api_name)
.map_err(FsError::NotCapable)?;
return Ok(Cow::Borrowed(path));
}
deno_permissions::PermissionsContainer::check_read_path(
self,
path,
Some(api_name),
)
.map_err(|_| FsError::NotCapable("read"))
}
}
@ -449,18 +473,9 @@ where
let scheme = url.scheme();
let (request_rid, cancel_handle_rid) = match scheme {
"file" => {
let path = url.to_file_path().map_err(|_| FetchError::NetworkError)?;
let permissions = state.borrow_mut::<FP>();
let path = permissions.check_read(&path, "fetch()")?;
let url = match path {
Cow::Owned(path) => url_from_file_path(&path)?,
Cow::Borrowed(_) => url,
};
if method != Method::GET {
return Err(FetchError::FsNotGet(method));
}
let Options {
file_fetch_handler, ..
} = state.borrow_mut::<Options>();

View file

@ -2,7 +2,7 @@
[package]
name = "deno_ffi"
version = "0.176.0"
version = "0.177.0"
authors.workspace = true
edition.workspace = true
license.workspace = true

View file

@ -2,7 +2,7 @@
[package]
name = "deno_fs"
version = "0.99.0"
version = "0.100.0"
authors.workspace = true
edition.workspace = true
license.workspace = true

View file

@ -9,7 +9,7 @@ use std::borrow::Cow;
use std::path::Path;
use std::path::PathBuf;
use deno_io::fs::FsError;
pub use deno_io::fs::FsError;
use deno_permissions::PermissionCheckError;
pub use crate::interface::AccessCheckCb;
@ -23,6 +23,7 @@ pub use crate::ops::FsOpsError;
pub use crate::ops::FsOpsErrorKind;
pub use crate::ops::OperationError;
use crate::ops::*;
pub use crate::std_fs::open_options_with_access_check;
pub use crate::std_fs::RealFs;
pub use crate::sync::MaybeSend;
pub use crate::sync::MaybeSync;

View file

@ -1063,11 +1063,22 @@ fn open_options(options: OpenOptions) -> fs::OpenOptions {
}
#[inline(always)]
fn open_with_access_check(
pub fn open_with_access_check(
options: OpenOptions,
path: &Path,
access_check: Option<AccessCheckCb>,
) -> FsResult<std::fs::File> {
let (path, opts) =
open_options_with_access_check(options, path, access_check)?;
Ok(opts.open(path)?)
}
#[inline(always)]
pub fn open_options_with_access_check(
options: OpenOptions,
path: &Path,
access_check: Option<AccessCheckCb>,
) -> FsResult<(PathBuf, fs::OpenOptions)> {
if let Some(access_check) = access_check {
let path_bytes = path.as_os_str().as_encoded_bytes();
let is_windows_device_path = cfg!(windows)
@ -1126,7 +1137,7 @@ fn open_with_access_check(
}
}
Ok(opts.open(&path)?)
Ok((path, opts))
} else {
// for unix
#[allow(unused_mut)]
@ -1137,6 +1148,6 @@ fn open_with_access_check(
use std::os::windows::fs::OpenOptionsExt;
opts.custom_flags(winapi::um::winbase::FILE_FLAG_BACKUP_SEMANTICS);
}
Ok(opts.open(path)?)
Ok((path.to_path_buf(), opts))
}
}

View file

@ -2,7 +2,7 @@
[package]
name = "deno_http"
version = "0.187.0"
version = "0.188.0"
authors.workspace = true
edition.workspace = true
license.workspace = true

View file

@ -2,7 +2,7 @@
[package]
name = "deno_io"
version = "0.99.0"
version = "0.100.0"
authors.workspace = true
edition.workspace = true
license.workspace = true

View file

@ -2,7 +2,7 @@
[package]
name = "deno_kv"
version = "0.97.0"
version = "0.98.0"
authors.workspace = true
edition.workspace = true
license.workspace = true

View file

@ -2,7 +2,7 @@
[package]
name = "deno_napi"
version = "0.120.0"
version = "0.121.0"
authors.workspace = true
edition.workspace = true
license.workspace = true

View file

@ -2,7 +2,7 @@
[package]
name = "napi_sym"
version = "0.119.0"
version = "0.120.0"
authors.workspace = true
edition.workspace = true
license.workspace = true

View file

@ -2,7 +2,7 @@
[package]
name = "deno_net"
version = "0.181.0"
version = "0.182.0"
authors.workspace = true
edition.workspace = true
license.workspace = true

View file

@ -2,7 +2,7 @@
[package]
name = "deno_node"
version = "0.127.0"
version = "0.128.0"
authors.workspace = true
edition.workspace = true
license.workspace = true

View file

@ -2,7 +2,7 @@
[package]
name = "deno_os"
version = "0.6.0"
version = "0.7.0"
authors.workspace = true
edition.workspace = true
license.workspace = true

View file

@ -2,7 +2,7 @@
[package]
name = "deno_process"
version = "0.4.0"
version = "0.5.0"
authors.workspace = true
edition.workspace = true
license.workspace = true

View file

@ -2,7 +2,7 @@
[package]
name = "deno_telemetry"
version = "0.11.0"
version = "0.12.0"
authors.workspace = true
edition.workspace = true
license.workspace = true

View file

@ -2,7 +2,7 @@
[package]
name = "deno_tls"
version = "0.176.0"
version = "0.177.0"
authors.workspace = true
edition.workspace = true
license.workspace = true

View file

@ -2,7 +2,7 @@
[package]
name = "deno_url"
version = "0.189.0"
version = "0.190.0"
authors.workspace = true
edition.workspace = true
license.workspace = true

View file

@ -1520,6 +1520,7 @@ export {
ErrorEvent,
Event,
EventTarget,
EventTargetPrototype,
listenerCount,
MessageEvent,
ProgressEvent,

View file

@ -779,37 +779,35 @@ class ResourceStreamResourceSink {
* @param {any} sink
* @param {Uint8Array} chunk
*/
function readableStreamWriteChunkFn(reader, sink, chunk) {
async function readableStreamWriteChunkFn(reader, sink, chunk) {
// Empty chunk. Re-read.
if (chunk.length == 0) {
readableStreamReadFn(reader, sink);
await readableStreamReadFn(reader, sink);
return;
}
const res = op_readable_stream_resource_write_sync(sink.external, chunk);
if (res == 0) {
// Closed
reader.cancel("resource closed");
await reader.cancel("resource closed");
sink.close();
} else if (res == 1) {
// Successfully written (synchronous). Re-read.
readableStreamReadFn(reader, sink);
await readableStreamReadFn(reader, sink);
} else if (res == 2) {
// Full. If the channel is full, we perform an async await until we can write, and then return
// to a synchronous loop.
(async () => {
if (
await op_readable_stream_resource_write_buf(
sink.external,
chunk,
)
) {
readableStreamReadFn(reader, sink);
} else {
reader.cancel("resource closed");
sink.close();
}
})();
if (
await op_readable_stream_resource_write_buf(
sink.external,
chunk,
)
) {
await readableStreamReadFn(reader, sink);
} else {
await reader.cancel("resource closed");
sink.close();
}
}
}
@ -822,17 +820,23 @@ function readableStreamReadFn(reader, sink) {
// real resource.
let reentrant = true;
let gotChunk = undefined;
const promise = new Deferred();
readableStreamDefaultReaderRead(reader, {
chunkSteps(chunk) {
// If the chunk has non-zero length, write it
if (reentrant) {
gotChunk = chunk;
} else {
readableStreamWriteChunkFn(reader, sink, chunk);
PromisePrototypeThen(
readableStreamWriteChunkFn(reader, sink, chunk),
() => promise.resolve(),
(e) => promise.reject(e),
);
}
},
closeSteps() {
sink.close();
promise.resolve();
},
errorSteps(error) {
const success = op_readable_stream_resource_write_error(
@ -842,15 +846,29 @@ function readableStreamReadFn(reader, sink) {
// We don't cancel the reader if there was an error reading. We'll let the downstream
// consumer close the resource after it receives the error.
if (!success) {
reader.cancel("resource closed");
PromisePrototypeThen(
reader.cancel("resource closed"),
() => {
sink.close();
promise.resolve();
},
(e) => promise.reject(e),
);
} else {
sink.close();
promise.resolve();
}
sink.close();
},
});
reentrant = false;
if (gotChunk) {
readableStreamWriteChunkFn(reader, sink, gotChunk);
PromisePrototypeThen(
readableStreamWriteChunkFn(reader, sink, gotChunk),
() => promise.resolve(),
(e) => promise.reject(e),
);
}
return promise.promise;
}
/**
@ -873,7 +891,9 @@ function resourceForReadableStream(stream, length) {
PromisePrototypeCatch(
PromisePrototypeThen(
op_readable_stream_resource_await_close(rid),
() => reader.cancel("resource closed"),
() => {
PromisePrototypeCatch(reader.cancel("resource closed"), () => {});
},
),
() => {},
);
@ -884,7 +904,9 @@ function resourceForReadableStream(stream, length) {
);
// Trigger the first read
readableStreamReadFn(reader, sink);
PromisePrototypeCatch(readableStreamReadFn(reader, sink), (err) => {
PromisePrototypeCatch(reader.cancel(err), () => {});
});
return rid;
}

View file

@ -2,7 +2,7 @@
[package]
name = "deno_web"
version = "0.220.0"
version = "0.221.0"
authors.workspace = true
edition.workspace = true
license.workspace = true

File diff suppressed because it is too large

View file

@ -7,146 +7,17 @@
/// <reference path="./lib.deno_webgpu.d.ts" />
import { primordials } from "ext:core/mod.js";
import {
op_webgpu_surface_configure,
op_webgpu_surface_create,
op_webgpu_surface_get_current_texture,
op_webgpu_surface_present,
} from "ext:core/ops";
import { GPUCanvasContext, UnsafeWindowSurface } from "ext:core/ops";
const {
ObjectDefineProperty,
ObjectPrototypeIsPrototypeOf,
Symbol,
SymbolFor,
TypeError,
} = primordials;
import * as webidl from "ext:deno_webidl/00_webidl.js";
import { createFilteredInspectProxy } from "ext:deno_console/01_console.js";
import { loadWebGPU } from "ext:deno_webgpu/00_init.js";
const _surfaceRid = Symbol("[[surfaceRid]]");
const _configuration = Symbol("[[configuration]]");
const _canvas = Symbol("[[canvas]]");
const _currentTexture = Symbol("[[currentTexture]]");
const _present = Symbol("[[present]]");
const _dim = Symbol("[[dimensions]]");
class GPUCanvasContext {
/** @type {number} */
[_surfaceRid];
[_configuration];
[_canvas];
/** @type {GPUTexture | undefined} */
[_currentTexture];
[_dim];
get canvas() {
webidl.assertBranded(this, GPUCanvasContextPrototype);
return this[_canvas];
}
constructor() {
webidl.illegalConstructor();
}
configure(configuration) {
webidl.assertBranded(this, GPUCanvasContextPrototype);
const prefix = "Failed to execute 'configure' on 'GPUCanvasContext'";
webidl.requiredArguments(arguments.length, 1, { prefix });
configuration = webidl.converters.GPUCanvasConfiguration(configuration, {
prefix,
context: "Argument 1",
});
const { _device, assertDevice } = loadWebGPU();
this[_device] = configuration.device[_device];
this[_configuration] = configuration;
const device = assertDevice(this, {
prefix,
context: "configuration.device",
});
const { err } = op_webgpu_surface_configure({
surfaceRid: this[_surfaceRid],
deviceRid: device.rid,
format: configuration.format,
viewFormats: configuration.viewFormats,
usage: configuration.usage,
width: this[_dim].width,
height: this[_dim].height,
alphaMode: configuration.alphaMode,
});
device.pushError(err);
}
unconfigure() {
const { _device } = loadWebGPU();
webidl.assertBranded(this, GPUCanvasContextPrototype);
this[_configuration] = null;
this[_device] = null;
}
getCurrentTexture() {
webidl.assertBranded(this, GPUCanvasContextPrototype);
const prefix =
"Failed to execute 'getCurrentTexture' on 'GPUCanvasContext'";
if (this[_configuration] === null) {
throw new DOMException("Context is not configured", "InvalidStateError");
}
const { createGPUTexture, assertDevice } = loadWebGPU();
const device = assertDevice(this, { prefix, context: "this" });
if (this[_currentTexture]) {
return this[_currentTexture];
}
const { rid } = op_webgpu_surface_get_current_texture(
device.rid,
this[_surfaceRid],
);
const texture = createGPUTexture(
{
size: {
width: this[_dim].width,
height: this[_dim].height,
depthOrArrayLayers: 1,
},
mipLevelCount: 1,
sampleCount: 1,
dimension: "2d",
format: this[_configuration].format,
usage: this[_configuration].usage,
},
device,
rid,
);
device.trackResource(texture);
this[_currentTexture] = texture;
return texture;
}
// Required to present the texture; browsers don't need this.
[_present]() {
const { assertDevice } = loadWebGPU();
webidl.assertBranded(this, GPUCanvasContextPrototype);
const prefix = "Failed to execute 'present' on 'GPUCanvasContext'";
const device = assertDevice(this[_currentTexture], {
prefix,
context: "this",
});
op_webgpu_surface_present(device.rid, this[_surfaceRid]);
this[_currentTexture].destroy();
this[_currentTexture] = undefined;
}
[SymbolFor("Deno.privateCustomInspect")](inspect, inspectOptions) {
ObjectDefineProperty(GPUCanvasContext, SymbolFor("Deno.privateCustomInspect"), {
__proto__: null,
value(inspect, inspectOptions) {
return inspect(
createFilteredInspectProxy({
object: this,
@ -157,60 +28,8 @@ class GPUCanvasContext {
}),
inspectOptions,
);
}
}
},
});
const GPUCanvasContextPrototype = GPUCanvasContext.prototype;
function createCanvasContext(options) {
// lazy load webgpu if needed
const canvasContext = webidl.createBranded(GPUCanvasContext);
canvasContext[_surfaceRid] = options.surfaceRid;
canvasContext[_canvas] = options.canvas;
canvasContext[_dim] = { width: options.width, height: options.height };
return canvasContext;
}
// External webgpu surfaces
// TODO(@littledivy): This will extend `OffscreenCanvas` when we add it.
class UnsafeWindowSurface {
#ctx;
#surfaceRid;
#options;
constructor(options) {
if (typeof options !== "object") {
throw new TypeError("options must be provided.");
}
if (
typeof options.width !== "number" || typeof options.height !== "number"
) {
throw new TypeError("width and height must be provided.");
}
this.#surfaceRid = op_webgpu_surface_create(
options.system,
options.windowHandle,
options.displayHandle,
);
this.#options = options;
}
getContext(context) {
if (context !== "webgpu") {
throw new TypeError("Only 'webgpu' context is supported");
}
this.#ctx = createCanvasContext({
surfaceRid: this.#surfaceRid,
...this.#options,
});
return this.#ctx;
}
present() {
this.#ctx[_present]();
}
}
export { GPUCanvasContext, UnsafeWindowSurface };

View file

@ -2,7 +2,7 @@
[package]
name = "deno_webgpu"
version = "0.156.0"
version = "0.157.0"
authors = ["the Deno authors"]
edition.workspace = true
license = "MIT"
@ -27,6 +27,9 @@ tokio = { workspace = true, features = ["full"] }
wgpu-types = { workspace = true, features = ["serde"] }
raw-window-handle = { workspace = true }
thiserror.workspace = true
indexmap.workspace = true
serde_json.workspace = true
deno_unsync.workspace = true
[target.'cfg(not(target_arch = "wasm32"))'.dependencies.wgpu-core]
workspace = true

ext/webgpu/adapter.rs (new file, 478 lines)
View file

@ -0,0 +1,478 @@
// Copyright 2018-2025 the Deno authors. MIT license.
use std::collections::HashSet;
use std::rc::Rc;
use std::sync::Arc;
use deno_core::cppgc::SameObject;
use deno_core::op2;
use deno_core::v8;
use deno_core::GarbageCollected;
use deno_core::OpState;
use deno_core::WebIDL;
use tokio::sync::Mutex;
use super::device::GPUDevice;
use crate::webidl::features_to_feature_names;
use crate::webidl::GPUFeatureName;
use crate::Instance;
#[derive(WebIDL)]
#[webidl(dictionary)]
pub(crate) struct GPURequestAdapterOptions {
pub power_preference: Option<GPUPowerPreference>,
#[webidl(default = false)]
pub force_fallback_adapter: bool,
}
#[derive(WebIDL)]
#[webidl(enum)]
pub(crate) enum GPUPowerPreference {
LowPower,
HighPerformance,
}
#[derive(WebIDL)]
#[webidl(dictionary)]
struct GPUDeviceDescriptor {
#[webidl(default = String::new())]
label: String,
#[webidl(default = vec![])]
required_features: Vec<GPUFeatureName>,
#[webidl(default = Default::default())]
#[options(enforce_range = true)]
required_limits: indexmap::IndexMap<String, Option<u64>>,
}
pub struct GPUAdapter {
pub instance: Instance,
pub id: wgpu_core::id::AdapterId,
pub features: SameObject<GPUSupportedFeatures>,
pub limits: SameObject<GPUSupportedLimits>,
pub info: Rc<SameObject<GPUAdapterInfo>>,
}
impl Drop for GPUAdapter {
fn drop(&mut self) {
self.instance.adapter_drop(self.id);
}
}
impl GarbageCollected for GPUAdapter {}
#[op2]
impl GPUAdapter {
#[getter]
#[global]
fn info(&self, scope: &mut v8::HandleScope) -> v8::Global<v8::Object> {
self.info.get(scope, |_| {
let info = self.instance.adapter_get_info(self.id);
let limits = self.instance.adapter_limits(self.id);
GPUAdapterInfo {
info,
subgroup_min_size: limits.min_subgroup_size,
subgroup_max_size: limits.max_subgroup_size,
}
})
}
#[getter]
#[global]
fn features(&self, scope: &mut v8::HandleScope) -> v8::Global<v8::Object> {
self.features.get(scope, |scope| {
let features = self.instance.adapter_features(self.id);
let features = features_to_feature_names(features);
GPUSupportedFeatures::new(scope, features)
})
}
#[getter]
#[global]
fn limits(&self, scope: &mut v8::HandleScope) -> v8::Global<v8::Object> {
self.limits.get(scope, |_| {
let adapter_limits = self.instance.adapter_limits(self.id);
GPUSupportedLimits(adapter_limits)
})
}
#[getter]
fn is_fallback_adapter(&self) -> bool {
// TODO(lucacasonato): report correctly from wgpu
false
}
#[async_method(fake)]
#[global]
fn request_device(
&self,
state: &mut OpState,
isolate_ptr: *mut v8::Isolate,
scope: &mut v8::HandleScope,
#[webidl] descriptor: GPUDeviceDescriptor,
) -> Result<v8::Global<v8::Value>, CreateDeviceError> {
let features = self.instance.adapter_features(self.id);
let supported_features = features_to_feature_names(features);
let required_features = descriptor
.required_features
.iter()
.cloned()
.collect::<HashSet<_>>();
if !required_features.is_subset(&supported_features) {
return Err(CreateDeviceError::RequiredFeaturesNotASubset);
}
let required_limits = serde_json::from_value(serde_json::to_value(
descriptor.required_limits,
)?)?;
let wgpu_descriptor = wgpu_types::DeviceDescriptor {
label: crate::transform_label(descriptor.label.clone()),
required_features: super::webidl::feature_names_to_features(
descriptor.required_features,
),
required_limits,
memory_hints: Default::default(),
};
let (device, queue) = self.instance.adapter_request_device(
self.id,
&wgpu_descriptor,
std::env::var("DENO_WEBGPU_TRACE")
.ok()
.as_ref()
.map(std::path::Path::new),
None,
None,
)?;
let (lost_sender, lost_receiver) = tokio::sync::oneshot::channel();
let (uncaptured_sender, mut uncaptured_receiver) =
tokio::sync::mpsc::unbounded_channel();
let (
uncaptured_sender_is_closed_sender,
mut uncaptured_sender_is_closed_receiver,
) = tokio::sync::oneshot::channel::<()>();
let device = GPUDevice {
instance: self.instance.clone(),
id: device,
queue,
label: descriptor.label,
queue_obj: SameObject::new(),
adapter_info: self.info.clone(),
error_handler: Arc::new(super::error::DeviceErrorHandler::new(
lost_sender,
uncaptured_sender,
uncaptured_sender_is_closed_sender,
)),
adapter: self.id,
lost_receiver: Mutex::new(Some(lost_receiver)),
limits: SameObject::new(),
features: SameObject::new(),
};
let device = deno_core::cppgc::make_cppgc_object(scope, device);
let event_target_setup = state.borrow::<crate::EventTargetSetup>();
let webidl_brand = v8::Local::new(scope, event_target_setup.brand.clone());
device.set(scope, webidl_brand, webidl_brand);
let set_event_target_data =
v8::Local::new(scope, event_target_setup.set_event_target_data.clone())
.cast::<v8::Function>();
let null = v8::null(scope);
set_event_target_data.call(scope, null.into(), &[device.into()]);
let key = v8::String::new(scope, "dispatchEvent").unwrap();
let val = device.get(scope, key.into()).unwrap();
let func = v8::Global::new(scope, val.try_cast::<v8::Function>().unwrap());
let device = v8::Global::new(scope, device.cast::<v8::Value>());
let error_event_class = state.borrow::<crate::ErrorEventClass>().0.clone();
let context = scope.get_current_context();
let context = v8::Global::new(scope, context);
let task_device = device.clone();
deno_unsync::spawn(async move {
loop {
// TODO(@crowlKats): check for uncaptured_receiver.is_closed instead once tokio is upgraded
if !matches!(
uncaptured_sender_is_closed_receiver.try_recv(),
Err(tokio::sync::oneshot::error::TryRecvError::Empty)
) {
break;
}
let Some(error) = uncaptured_receiver.recv().await else {
break;
};
// SAFETY: eh, it's safe
let isolate: &mut v8::Isolate = unsafe { &mut *isolate_ptr };
let scope = &mut v8::HandleScope::with_context(isolate, &context);
let error = deno_core::error::to_v8_error(scope, &error);
let error_event_class =
v8::Local::new(scope, error_event_class.clone());
let constructor =
v8::Local::<v8::Function>::try_from(error_event_class).unwrap();
let kind = v8::String::new(scope, "uncapturederror").unwrap();
let obj = v8::Object::new(scope);
let key = v8::String::new(scope, "error").unwrap();
obj.set(scope, key.into(), error);
let event = constructor
.new_instance(scope, &[kind.into(), obj.into()])
.unwrap();
let recv = v8::Local::new(scope, task_device.clone());
func.open(scope).call(scope, recv, &[event.into()]);
}
});
Ok(device)
}
}
#[derive(Debug, thiserror::Error, deno_error::JsError)]
pub enum CreateDeviceError {
#[class(type)]
#[error("requiredFeatures must be a subset of the adapter features")]
RequiredFeaturesNotASubset,
#[class(inherit)]
#[error(transparent)]
Serde(#[from] serde_json::Error),
#[class(type)]
#[error(transparent)]
Device(#[from] wgpu_core::instance::RequestDeviceError),
}
pub struct GPUSupportedLimits(pub wgpu_types::Limits);
impl GarbageCollected for GPUSupportedLimits {}
#[op2]
impl GPUSupportedLimits {
#[getter]
fn maxTextureDimension1D(&self) -> u32 {
self.0.max_texture_dimension_1d
}
#[getter]
fn maxTextureDimension2D(&self) -> u32 {
self.0.max_texture_dimension_2d
}
#[getter]
fn maxTextureDimension3D(&self) -> u32 {
self.0.max_texture_dimension_3d
}
#[getter]
fn maxTextureArrayLayers(&self) -> u32 {
self.0.max_texture_array_layers
}
#[getter]
fn maxBindGroups(&self) -> u32 {
self.0.max_bind_groups
}
// TODO(@crowlKats): support max_bind_groups_plus_vertex_buffers
#[getter]
fn maxBindingsPerBindGroup(&self) -> u32 {
self.0.max_bindings_per_bind_group
}
#[getter]
fn maxDynamicUniformBuffersPerPipelineLayout(&self) -> u32 {
self.0.max_dynamic_uniform_buffers_per_pipeline_layout
}
#[getter]
fn maxDynamicStorageBuffersPerPipelineLayout(&self) -> u32 {
self.0.max_dynamic_storage_buffers_per_pipeline_layout
}
#[getter]
fn maxSampledTexturesPerShaderStage(&self) -> u32 {
self.0.max_sampled_textures_per_shader_stage
}
#[getter]
fn maxSamplersPerShaderStage(&self) -> u32 {
self.0.max_samplers_per_shader_stage
}
#[getter]
fn maxStorageBuffersPerShaderStage(&self) -> u32 {
self.0.max_storage_buffers_per_shader_stage
}
#[getter]
fn maxStorageTexturesPerShaderStage(&self) -> u32 {
self.0.max_storage_textures_per_shader_stage
}
#[getter]
fn maxUniformBuffersPerShaderStage(&self) -> u32 {
self.0.max_uniform_buffers_per_shader_stage
}
#[getter]
fn maxUniformBufferBindingSize(&self) -> u32 {
self.0.max_uniform_buffer_binding_size
}
#[getter]
fn maxStorageBufferBindingSize(&self) -> u32 {
self.0.max_storage_buffer_binding_size
}
#[getter]
fn minUniformBufferOffsetAlignment(&self) -> u32 {
self.0.min_uniform_buffer_offset_alignment
}
#[getter]
fn minStorageBufferOffsetAlignment(&self) -> u32 {
self.0.min_storage_buffer_offset_alignment
}
#[getter]
fn maxVertexBuffers(&self) -> u32 {
self.0.max_vertex_buffers
}
#[getter]
#[number]
fn maxBufferSize(&self) -> u64 {
self.0.max_buffer_size
}
#[getter]
fn maxVertexAttributes(&self) -> u32 {
self.0.max_vertex_attributes
}
#[getter]
fn maxVertexBufferArrayStride(&self) -> u32 {
self.0.max_vertex_buffer_array_stride
}
// TODO(@crowlKats): support max_inter_stage_shader_variables
#[getter]
fn maxColorAttachments(&self) -> u32 {
self.0.max_color_attachments
}
#[getter]
fn maxColorAttachmentBytesPerSample(&self) -> u32 {
self.0.max_color_attachment_bytes_per_sample
}
#[getter]
fn maxComputeWorkgroupStorageSize(&self) -> u32 {
self.0.max_compute_workgroup_storage_size
}
#[getter]
fn maxComputeInvocationsPerWorkgroup(&self) -> u32 {
self.0.max_compute_invocations_per_workgroup
}
#[getter]
fn maxComputeWorkgroupSizeX(&self) -> u32 {
self.0.max_compute_workgroup_size_x
}
#[getter]
fn maxComputeWorkgroupSizeY(&self) -> u32 {
self.0.max_compute_workgroup_size_y
}
#[getter]
fn maxComputeWorkgroupSizeZ(&self) -> u32 {
self.0.max_compute_workgroup_size_z
}
#[getter]
fn maxComputeWorkgroupsPerDimension(&self) -> u32 {
self.0.max_compute_workgroups_per_dimension
}
}
pub struct GPUSupportedFeatures(v8::Global<v8::Value>);
impl GarbageCollected for GPUSupportedFeatures {}
impl GPUSupportedFeatures {
pub fn new(
scope: &mut v8::HandleScope,
features: HashSet<GPUFeatureName>,
) -> Self {
let set = v8::Set::new(scope);
for feature in features {
let key = v8::String::new(scope, feature.as_str()).unwrap();
set.add(scope, key.into());
}
Self(v8::Global::new(scope, <v8::Local<v8::Value>>::from(set)))
}
}
#[op2]
impl GPUSupportedFeatures {
#[global]
#[symbol("setlike_set")]
fn set(&self) -> v8::Global<v8::Value> {
self.0.clone()
}
}
pub struct GPUAdapterInfo {
pub info: wgpu_types::AdapterInfo,
pub subgroup_min_size: u32,
pub subgroup_max_size: u32,
}
impl GarbageCollected for GPUAdapterInfo {}
#[op2]
impl GPUAdapterInfo {
#[getter]
#[string]
fn vendor(&self) -> String {
self.info.vendor.to_string()
}
#[getter]
#[string]
fn architecture(&self) -> &'static str {
"" // TODO: wgpu#2170
}
#[getter]
#[string]
fn device(&self) -> String {
self.info.device.to_string()
}
#[getter]
#[string]
fn description(&self) -> String {
self.info.name.clone()
}
#[getter]
fn subgroup_min_size(&self) -> u32 {
self.subgroup_min_size
}
#[getter]
fn subgroup_max_size(&self) -> u32 {
self.subgroup_max_size
}
}
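These getters back the spec's GPUSupportedLimits, GPUSupportedFeatures (a set-like), and GPUAdapterInfo interfaces. A hedged TypeScript sketch of reading them through the standard API (nothing here is specific to this diff):

const adapter = await navigator.gpu.requestAdapter();
if (adapter) {
  console.log(adapter.limits.maxBufferSize);             // GPUSupportedLimits getter
  console.log(adapter.features.has("timestamp-query"));  // GPUSupportedFeatures set-like
  // adapter.info per the current spec; older builds exposed requestAdapterInfo()
  console.log(adapter.info.vendor, adapter.info.device);
}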

ext/webgpu/bind_group.rs Normal file
View file

@ -0,0 +1,122 @@
// Copyright 2018-2025 the Deno authors. MIT license.
use std::borrow::Cow;
use deno_core::cppgc::Ptr;
use deno_core::op2;
use deno_core::v8::HandleScope;
use deno_core::v8::Local;
use deno_core::v8::Value;
use deno_core::webidl::ContextFn;
use deno_core::webidl::WebIdlConverter;
use deno_core::webidl::WebIdlError;
use deno_core::webidl::WebIdlInterfaceConverter;
use deno_core::GarbageCollected;
use deno_core::WebIDL;
use crate::buffer::GPUBuffer;
use crate::sampler::GPUSampler;
use crate::texture::GPUTextureView;
use crate::Instance;
pub struct GPUBindGroup {
pub instance: Instance,
pub id: wgpu_core::id::BindGroupId,
pub label: String,
}
impl Drop for GPUBindGroup {
fn drop(&mut self) {
self.instance.bind_group_drop(self.id);
}
}
impl WebIdlInterfaceConverter for GPUBindGroup {
const NAME: &'static str = "GPUBindGroup";
}
impl GarbageCollected for GPUBindGroup {}
#[op2]
impl GPUBindGroup {
#[getter]
#[string]
fn label(&self) -> String {
self.label.clone()
}
#[setter]
#[string]
fn label(&self, #[webidl] _label: String) {
    // TODO(@crowlKats): no-op, needs wgpu to implement changing the label
}
}
#[derive(WebIDL)]
#[webidl(dictionary)]
pub(crate) struct GPUBindGroupDescriptor {
#[webidl(default = String::new())]
pub label: String,
pub layout: Ptr<super::bind_group_layout::GPUBindGroupLayout>,
pub entries: Vec<GPUBindGroupEntry>,
}
#[derive(WebIDL)]
#[webidl(dictionary)]
pub(crate) struct GPUBindGroupEntry {
#[options(enforce_range = true)]
pub binding: u32,
pub resource: GPUBindingResource,
}
#[derive(WebIDL)]
#[webidl(dictionary)]
pub(crate) struct GPUBufferBinding {
pub buffer: Ptr<GPUBuffer>,
#[webidl(default = 0)]
#[options(enforce_range = true)]
pub offset: u64,
#[options(enforce_range = true)]
pub size: Option<u64>,
}
pub(crate) enum GPUBindingResource {
Sampler(Ptr<GPUSampler>),
TextureView(Ptr<GPUTextureView>),
BufferBinding(GPUBufferBinding),
}
impl<'a> WebIdlConverter<'a> for GPUBindingResource {
type Options = ();
fn convert<'b>(
scope: &mut HandleScope<'a>,
value: Local<'a, Value>,
prefix: Cow<'static, str>,
context: ContextFn<'b>,
options: &Self::Options,
) -> Result<Self, WebIdlError> {
<Ptr<GPUSampler>>::convert(
scope,
value,
prefix.clone(),
context.borrowed(),
options,
)
.map(Self::Sampler)
.or_else(|_| {
<Ptr<GPUTextureView>>::convert(
scope,
value,
prefix.clone(),
context.borrowed(),
options,
)
.map(Self::TextureView)
})
.or_else(|_| {
GPUBufferBinding::convert(scope, value, prefix, context, options)
.map(Self::BufferBinding)
})
}
}
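The converter above tries the three GPUBindingResource variants in order: GPUSampler, then GPUTextureView, then the GPUBufferBinding dictionary. A hedged TypeScript sketch of the corresponding createBindGroup call, assuming device, bindGroupLayout, sampler, textureView, and buffer already exist:

const bindGroup = device.createBindGroup({
  layout: bindGroupLayout,
  entries: [
    { binding: 0, resource: sampler },                          // Ptr<GPUSampler>
    { binding: 1, resource: textureView },                      // Ptr<GPUTextureView>
    { binding: 2, resource: { buffer, offset: 0, size: 256 } }, // GPUBufferBinding dictionary
  ],
});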

View file

@ -0,0 +1,180 @@
// Copyright 2018-2025 the Deno authors. MIT license.
use deno_core::op2;
use deno_core::GarbageCollected;
use deno_core::WebIDL;
use crate::texture::GPUTextureViewDimension;
use crate::Instance;
pub struct GPUBindGroupLayout {
pub instance: Instance,
pub id: wgpu_core::id::BindGroupLayoutId,
pub label: String,
}
impl Drop for GPUBindGroupLayout {
fn drop(&mut self) {
self.instance.bind_group_layout_drop(self.id);
}
}
impl deno_core::webidl::WebIdlInterfaceConverter for GPUBindGroupLayout {
const NAME: &'static str = "GPUBindGroupLayout";
}
impl GarbageCollected for GPUBindGroupLayout {}
#[op2]
impl GPUBindGroupLayout {
#[getter]
#[string]
fn label(&self) -> String {
self.label.clone()
}
#[setter]
#[string]
fn label(&self, #[webidl] _label: String) {
    // TODO(@crowlKats): no-op, needs wgpu to implement changing the label
}
}
#[derive(WebIDL)]
#[webidl(dictionary)]
pub(crate) struct GPUBindGroupLayoutDescriptor {
#[webidl(default = String::new())]
pub label: String,
pub entries: Vec<GPUBindGroupLayoutEntry>,
}
#[derive(WebIDL)]
#[webidl(dictionary)]
pub(crate) struct GPUBindGroupLayoutEntry {
#[options(enforce_range = true)]
pub binding: u32,
#[options(enforce_range = true)]
pub visibility: u32,
pub buffer: Option<GPUBufferBindingLayout>,
pub sampler: Option<GPUSamplerBindingLayout>,
pub texture: Option<GPUTextureBindingLayout>,
pub storage_texture: Option<GPUStorageTextureBindingLayout>,
}
#[derive(WebIDL)]
#[webidl(dictionary)]
pub(crate) struct GPUBufferBindingLayout {
#[webidl(default = GPUBufferBindingType::Uniform)]
pub r#type: GPUBufferBindingType,
#[webidl(default = false)]
pub has_dynamic_offset: bool,
#[webidl(default = 0)]
pub min_binding_size: u64,
}
#[derive(WebIDL)]
#[webidl(enum)]
pub(crate) enum GPUBufferBindingType {
Uniform,
Storage,
ReadOnlyStorage,
}
impl From<GPUBufferBindingType> for wgpu_types::BufferBindingType {
fn from(value: GPUBufferBindingType) -> Self {
match value {
GPUBufferBindingType::Uniform => Self::Uniform,
GPUBufferBindingType::Storage => Self::Storage { read_only: false },
GPUBufferBindingType::ReadOnlyStorage => {
Self::Storage { read_only: true }
}
}
}
}
#[derive(WebIDL)]
#[webidl(dictionary)]
pub(crate) struct GPUSamplerBindingLayout {
#[webidl(default = GPUSamplerBindingType::Filtering)]
pub r#type: GPUSamplerBindingType,
}
#[derive(WebIDL)]
#[webidl(enum)]
pub(crate) enum GPUSamplerBindingType {
Filtering,
NonFiltering,
Comparison,
}
impl From<GPUSamplerBindingType> for wgpu_types::SamplerBindingType {
fn from(value: GPUSamplerBindingType) -> Self {
match value {
GPUSamplerBindingType::Filtering => Self::Filtering,
GPUSamplerBindingType::NonFiltering => Self::NonFiltering,
GPUSamplerBindingType::Comparison => Self::Comparison,
}
}
}
#[derive(WebIDL)]
#[webidl(dictionary)]
pub(crate) struct GPUTextureBindingLayout {
#[webidl(default = GPUTextureSampleType::Float)]
pub sample_type: GPUTextureSampleType,
#[webidl(default = GPUTextureViewDimension::D2)]
pub view_dimension: GPUTextureViewDimension,
#[webidl(default = false)]
pub multisampled: bool,
}
#[derive(WebIDL)]
#[webidl(enum)]
pub(crate) enum GPUTextureSampleType {
Float,
UnfilterableFloat,
Depth,
Sint,
Uint,
}
impl From<GPUTextureSampleType> for wgpu_types::TextureSampleType {
fn from(value: GPUTextureSampleType) -> Self {
match value {
GPUTextureSampleType::Float => Self::Float { filterable: true },
GPUTextureSampleType::UnfilterableFloat => {
Self::Float { filterable: false }
}
GPUTextureSampleType::Depth => Self::Depth,
GPUTextureSampleType::Sint => Self::Sint,
GPUTextureSampleType::Uint => Self::Uint,
}
}
}
#[derive(WebIDL)]
#[webidl(dictionary)]
pub(crate) struct GPUStorageTextureBindingLayout {
#[webidl(default = GPUStorageTextureAccess::WriteOnly)]
pub access: GPUStorageTextureAccess,
pub format: super::texture::GPUTextureFormat,
#[webidl(default = GPUTextureViewDimension::D2)]
pub view_dimension: GPUTextureViewDimension,
}
#[derive(WebIDL)]
#[webidl(enum)]
pub(crate) enum GPUStorageTextureAccess {
WriteOnly,
ReadOnly,
ReadWrite,
}
impl From<GPUStorageTextureAccess> for wgpu_types::StorageTextureAccess {
fn from(value: GPUStorageTextureAccess) -> Self {
match value {
GPUStorageTextureAccess::WriteOnly => Self::WriteOnly,
GPUStorageTextureAccess::ReadOnly => Self::ReadOnly,
GPUStorageTextureAccess::ReadWrite => Self::ReadWrite,
}
}
}
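A hedged TypeScript sketch of a bind group layout exercising these dictionaries and their WebIDL defaults; the shader stages and storage format are arbitrary placeholders, and `device` is assumed to exist:

const layout = device.createBindGroupLayout({
  entries: [
    { binding: 0, visibility: GPUShaderStage.COMPUTE, buffer: { type: "uniform" } },
    { binding: 1, visibility: GPUShaderStage.FRAGMENT, sampler: { type: "filtering" } },
    { binding: 2, visibility: GPUShaderStage.FRAGMENT, texture: { sampleType: "float", viewDimension: "2d" } },
    { binding: 3, visibility: GPUShaderStage.COMPUTE, storageTexture: { access: "write-only", format: "rgba8unorm" } },
  ],
});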

View file

@ -1,323 +0,0 @@
// Copyright 2018-2025 the Deno authors. MIT license.
use std::borrow::Cow;
use std::rc::Rc;
use deno_core::error::ResourceError;
use deno_core::op2;
use deno_core::OpState;
use deno_core::Resource;
use deno_core::ResourceId;
use serde::Deserialize;
use super::error::WebGpuResult;
pub(crate) struct WebGpuBindGroupLayout(
pub(crate) crate::Instance,
pub(crate) wgpu_core::id::BindGroupLayoutId,
);
impl Resource for WebGpuBindGroupLayout {
fn name(&self) -> Cow<str> {
"webGPUBindGroupLayout".into()
}
fn close(self: Rc<Self>) {
gfx_select!(self.1 => self.0.bind_group_layout_drop(self.1));
}
}
pub(crate) struct WebGpuBindGroup(
pub(crate) crate::Instance,
pub(crate) wgpu_core::id::BindGroupId,
);
impl Resource for WebGpuBindGroup {
fn name(&self) -> Cow<str> {
"webGPUBindGroup".into()
}
fn close(self: Rc<Self>) {
gfx_select!(self.1 => self.0.bind_group_drop(self.1));
}
}
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct GpuBufferBindingLayout {
r#type: GpuBufferBindingType,
has_dynamic_offset: bool,
min_binding_size: u64,
}
#[derive(Deserialize)]
#[serde(rename_all = "kebab-case")]
enum GpuBufferBindingType {
Uniform,
Storage,
ReadOnlyStorage,
}
impl From<GpuBufferBindingType> for wgpu_types::BufferBindingType {
fn from(binding_type: GpuBufferBindingType) -> Self {
match binding_type {
GpuBufferBindingType::Uniform => wgpu_types::BufferBindingType::Uniform,
GpuBufferBindingType::Storage => {
wgpu_types::BufferBindingType::Storage { read_only: false }
}
GpuBufferBindingType::ReadOnlyStorage => {
wgpu_types::BufferBindingType::Storage { read_only: true }
}
}
}
}
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct GpuSamplerBindingLayout {
r#type: wgpu_types::SamplerBindingType,
}
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct GpuTextureBindingLayout {
sample_type: GpuTextureSampleType,
view_dimension: wgpu_types::TextureViewDimension,
multisampled: bool,
}
#[derive(Deserialize)]
#[serde(rename_all = "kebab-case")]
enum GpuTextureSampleType {
Float,
UnfilterableFloat,
Depth,
Sint,
Uint,
}
impl From<GpuTextureSampleType> for wgpu_types::TextureSampleType {
fn from(sample_type: GpuTextureSampleType) -> Self {
match sample_type {
GpuTextureSampleType::Float => {
wgpu_types::TextureSampleType::Float { filterable: true }
}
GpuTextureSampleType::UnfilterableFloat => {
wgpu_types::TextureSampleType::Float { filterable: false }
}
GpuTextureSampleType::Depth => wgpu_types::TextureSampleType::Depth,
GpuTextureSampleType::Sint => wgpu_types::TextureSampleType::Sint,
GpuTextureSampleType::Uint => wgpu_types::TextureSampleType::Uint,
}
}
}
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct GpuStorageTextureBindingLayout {
access: wgpu_types::StorageTextureAccess,
format: wgpu_types::TextureFormat,
view_dimension: wgpu_types::TextureViewDimension,
}
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct GpuBindGroupLayoutEntry {
binding: u32,
visibility: u32,
#[serde(flatten)]
binding_type: GpuBindingType,
}
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
enum GpuBindingType {
Buffer(GpuBufferBindingLayout),
Sampler(GpuSamplerBindingLayout),
Texture(GpuTextureBindingLayout),
StorageTexture(GpuStorageTextureBindingLayout),
}
impl From<GpuBindingType> for wgpu_types::BindingType {
fn from(binding_type: GpuBindingType) -> wgpu_types::BindingType {
match binding_type {
GpuBindingType::Buffer(buffer) => wgpu_types::BindingType::Buffer {
ty: buffer.r#type.into(),
has_dynamic_offset: buffer.has_dynamic_offset,
min_binding_size: std::num::NonZeroU64::new(buffer.min_binding_size),
},
GpuBindingType::Sampler(sampler) => {
wgpu_types::BindingType::Sampler(sampler.r#type)
}
GpuBindingType::Texture(texture) => wgpu_types::BindingType::Texture {
sample_type: texture.sample_type.into(),
view_dimension: texture.view_dimension,
multisampled: texture.multisampled,
},
GpuBindingType::StorageTexture(storage_texture) => {
wgpu_types::BindingType::StorageTexture {
access: storage_texture.access,
format: storage_texture.format,
view_dimension: storage_texture.view_dimension,
}
}
}
}
}
#[op2]
#[serde]
pub fn op_webgpu_create_bind_group_layout(
state: &mut OpState,
#[smi] device_rid: ResourceId,
#[string] label: Cow<str>,
#[serde] entries: Vec<GpuBindGroupLayoutEntry>,
) -> Result<WebGpuResult, ResourceError> {
let instance = state.borrow::<super::Instance>();
let device_resource = state
.resource_table
.get::<super::WebGpuDevice>(device_rid)?;
let device = device_resource.1;
let entries = entries
.into_iter()
.map(|entry| {
wgpu_types::BindGroupLayoutEntry {
binding: entry.binding,
visibility: wgpu_types::ShaderStages::from_bits(entry.visibility)
.unwrap(),
ty: entry.binding_type.into(),
count: None, // native-only
}
})
.collect::<Vec<_>>();
let descriptor = wgpu_core::binding_model::BindGroupLayoutDescriptor {
label: Some(label),
entries: Cow::from(entries),
};
gfx_put!(device => instance.device_create_bind_group_layout(
device,
&descriptor,
None
) => state, WebGpuBindGroupLayout)
}
#[op2]
#[serde]
pub fn op_webgpu_create_pipeline_layout(
state: &mut OpState,
#[smi] device_rid: ResourceId,
#[string] label: Cow<str>,
#[serde] bind_group_layouts: Vec<u32>,
) -> Result<WebGpuResult, ResourceError> {
let instance = state.borrow::<super::Instance>();
let device_resource = state
.resource_table
.get::<super::WebGpuDevice>(device_rid)?;
let device = device_resource.1;
let bind_group_layouts = bind_group_layouts
.into_iter()
.map(|rid| {
let bind_group_layout =
state.resource_table.get::<WebGpuBindGroupLayout>(rid)?;
Ok(bind_group_layout.1)
})
.collect::<Result<Vec<_>, ResourceError>>()?;
let descriptor = wgpu_core::binding_model::PipelineLayoutDescriptor {
label: Some(label),
bind_group_layouts: Cow::from(bind_group_layouts),
push_constant_ranges: Default::default(),
};
gfx_put!(device => instance.device_create_pipeline_layout(
device,
&descriptor,
None
) => state, super::pipeline::WebGpuPipelineLayout)
}
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct GpuBindGroupEntry {
binding: u32,
kind: String,
resource: ResourceId,
offset: Option<u64>,
size: Option<u64>,
}
#[op2]
#[serde]
pub fn op_webgpu_create_bind_group(
state: &mut OpState,
#[smi] device_rid: ResourceId,
#[string] label: Cow<str>,
#[smi] layout: ResourceId,
#[serde] entries: Vec<GpuBindGroupEntry>,
) -> Result<WebGpuResult, ResourceError> {
let instance = state.borrow::<super::Instance>();
let device_resource = state
.resource_table
.get::<super::WebGpuDevice>(device_rid)?;
let device = device_resource.1;
let entries = entries
.into_iter()
.map(|entry| {
Ok(wgpu_core::binding_model::BindGroupEntry {
binding: entry.binding,
resource: match entry.kind.as_str() {
"GPUSampler" => {
let sampler_resource =
state
.resource_table
.get::<super::sampler::WebGpuSampler>(entry.resource)?;
wgpu_core::binding_model::BindingResource::Sampler(
sampler_resource.1,
)
}
"GPUTextureView" => {
let texture_view_resource =
state
.resource_table
.get::<super::texture::WebGpuTextureView>(entry.resource)?;
wgpu_core::binding_model::BindingResource::TextureView(
texture_view_resource.1,
)
}
"GPUBufferBinding" => {
let buffer_resource =
state
.resource_table
.get::<super::buffer::WebGpuBuffer>(entry.resource)?;
wgpu_core::binding_model::BindingResource::Buffer(
wgpu_core::binding_model::BufferBinding {
buffer_id: buffer_resource.1,
offset: entry.offset.unwrap_or(0),
size: std::num::NonZeroU64::new(entry.size.unwrap_or(0)),
},
)
}
_ => unreachable!(),
},
})
})
.collect::<Result<Vec<_>, ResourceError>>()?;
let bind_group_layout =
state.resource_table.get::<WebGpuBindGroupLayout>(layout)?;
let descriptor = wgpu_core::binding_model::BindGroupDescriptor {
label: Some(label),
layout: bind_group_layout.1,
entries: Cow::from(entries),
};
gfx_put!(device => instance.device_create_bind_group(
device,
&descriptor,
None
) => state, WebGpuBindGroup)
}

View file

@ -1,209 +1,265 @@
// Copyright 2018-2025 the Deno authors. MIT license.
use std::borrow::Cow;
use std::cell::RefCell;
use std::rc::Rc;
use std::sync::Arc;
use std::sync::Mutex;
use std::time::Duration;
use deno_core::futures::channel::oneshot;
use deno_core::op2;
use deno_core::OpState;
use deno_core::Resource;
use deno_core::ResourceId;
use deno_core::v8;
use deno_core::webidl::WebIdlInterfaceConverter;
use deno_core::GarbageCollected;
use deno_core::WebIDL;
use deno_error::JsErrorBox;
use wgpu_core::device::HostMap as MapMode;
use super::error::WebGpuResult;
use crate::Instance;
#[derive(WebIDL)]
#[webidl(dictionary)]
pub(crate) struct GPUBufferDescriptor {
#[webidl(default = String::new())]
pub label: String,
pub size: u64,
#[options(enforce_range = true)]
pub usage: u32,
#[webidl(default = false)]
pub mapped_at_creation: bool,
}
#[derive(Debug, thiserror::Error, deno_error::JsError)]
pub enum BufferError {
#[class(inherit)]
#[class(generic)]
#[error(transparent)]
Resource(
#[from]
#[inherit]
deno_core::error::ResourceError,
),
#[class(type)]
#[error("usage is not valid")]
InvalidUsage,
Canceled(#[from] oneshot::Canceled),
#[class("DOMExceptionOperationError")]
#[error(transparent)]
Access(wgpu_core::resource::BufferAccessError),
Access(#[from] wgpu_core::resource::BufferAccessError),
#[class("DOMExceptionOperationError")]
#[error("{0}")]
Operation(&'static str),
#[class(inherit)]
#[error(transparent)]
Other(#[from] JsErrorBox),
}
pub(crate) struct WebGpuBuffer(
pub(crate) super::Instance,
pub(crate) wgpu_core::id::BufferId,
);
impl Resource for WebGpuBuffer {
fn name(&self) -> Cow<str> {
"webGPUBuffer".into()
}
pub struct GPUBuffer {
pub instance: Instance,
pub error_handler: super::error::ErrorHandler,
fn close(self: Rc<Self>) {
gfx_select!(self.1 => self.0.buffer_drop(self.1, true));
pub id: wgpu_core::id::BufferId,
pub device: wgpu_core::id::DeviceId,
pub label: String,
pub size: u64,
pub usage: u32,
pub map_state: RefCell<&'static str>,
pub map_mode: RefCell<Option<MapMode>>,
pub mapped_js_buffers: RefCell<Vec<v8::Global<v8::ArrayBuffer>>>,
}
impl Drop for GPUBuffer {
fn drop(&mut self) {
self.instance.buffer_drop(self.id);
}
}
struct WebGpuBufferMapped(*mut u8, usize);
impl Resource for WebGpuBufferMapped {
fn name(&self) -> Cow<str> {
"webGPUBufferMapped".into()
}
impl WebIdlInterfaceConverter for GPUBuffer {
const NAME: &'static str = "GPUBuffer";
}
impl GarbageCollected for GPUBuffer {}
#[op2]
#[serde]
pub fn op_webgpu_create_buffer(
state: &mut OpState,
#[smi] device_rid: ResourceId,
#[string] label: Cow<str>,
#[number] size: u64,
usage: u32,
mapped_at_creation: bool,
) -> Result<WebGpuResult, BufferError> {
let instance = state.borrow::<super::Instance>();
let device_resource = state
.resource_table
.get::<super::WebGpuDevice>(device_rid)?;
let device = device_resource.1;
let descriptor = wgpu_core::resource::BufferDescriptor {
label: Some(label),
size,
usage: wgpu_types::BufferUsages::from_bits(usage)
.ok_or(BufferError::InvalidUsage)?,
mapped_at_creation,
};
gfx_put!(device => instance.device_create_buffer(
device,
&descriptor,
None
) => state, WebGpuBuffer)
}
#[op2(async)]
#[serde]
pub async fn op_webgpu_buffer_get_map_async(
state: Rc<RefCell<OpState>>,
#[smi] buffer_rid: ResourceId,
#[smi] device_rid: ResourceId,
mode: u32,
#[number] offset: u64,
#[number] size: u64,
) -> Result<WebGpuResult, BufferError> {
let device;
let done = Arc::new(Mutex::new(None));
{
let state_ = state.borrow();
let instance = state_.borrow::<super::Instance>();
let buffer_resource =
state_.resource_table.get::<WebGpuBuffer>(buffer_rid)?;
let buffer = buffer_resource.1;
let device_resource = state_
.resource_table
.get::<super::WebGpuDevice>(device_rid)?;
device = device_resource.1;
let done_ = done.clone();
let callback = Box::new(move |status| {
*done_.lock().unwrap() = Some(status);
});
let maybe_err = gfx_select!(buffer => instance.buffer_map_async(
buffer,
offset,
Some(size),
wgpu_core::resource::BufferMapOperation {
host: match mode {
1 => wgpu_core::device::HostMap::Read,
2 => wgpu_core::device::HostMap::Write,
_ => unreachable!(),
},
callback: Some(wgpu_core::resource::BufferMapCallback::from_rust(callback)),
}
))
.err();
if maybe_err.is_some() {
return Ok(WebGpuResult::maybe_err(maybe_err));
}
impl GPUBuffer {
#[getter]
#[string]
fn label(&self) -> String {
self.label.clone()
}
#[setter]
#[string]
fn label(&self, #[webidl] _label: String) {
    // TODO(@crowlKats): no-op, needs wgpu to implement changing the label
}
loop {
let result = done.lock().unwrap().take();
match result {
Some(Ok(())) => return Ok(WebGpuResult::empty()),
Some(Err(e)) => return Err(BufferError::Access(e)),
None => {
#[getter]
#[number]
fn size(&self) -> u64 {
self.size
}
#[getter]
fn usage(&self) -> u32 {
self.usage
}
#[getter]
#[string]
fn map_state(&self) -> &'static str {
*self.map_state.borrow()
}
#[async_method]
async fn map_async(
&self,
#[webidl(options(enforce_range = true))] mode: u32,
#[webidl(default = 0)] offset: u64,
#[webidl] size: Option<u64>,
) -> Result<(), BufferError> {
let read_mode = (mode & 0x0001) == 0x0001;
let write_mode = (mode & 0x0002) == 0x0002;
if (read_mode && write_mode) || (!read_mode && !write_mode) {
return Err(BufferError::Operation(
"exactly one of READ or WRITE map mode must be set",
));
}
let mode = if read_mode {
MapMode::Read
} else {
assert!(write_mode);
MapMode::Write
};
{
*self.map_state.borrow_mut() = "pending";
}
let (sender, receiver) =
oneshot::channel::<wgpu_core::resource::BufferAccessResult>();
{
let callback = Box::new(move |status| {
sender.send(status).unwrap();
});
let err = self
.instance
.buffer_map_async(
self.id,
offset,
size,
wgpu_core::resource::BufferMapOperation {
host: mode,
callback: Some(callback),
},
)
.err();
if err.is_some() {
self.error_handler.push_error(err);
return Err(BufferError::Operation("validation error occurred"));
}
}
let done = Rc::new(RefCell::new(false));
let done_ = done.clone();
let device_poll_fut = async move {
while !*done.borrow() {
{
let state = state.borrow();
let instance = state.borrow::<super::Instance>();
gfx_select!(device => instance.device_poll(device, wgpu_types::Maintain::Poll)).unwrap();
self
.instance
.device_poll(self.device, wgpu_types::Maintain::wait())
.unwrap();
}
tokio::time::sleep(Duration::from_millis(10)).await;
}
}
}
}
#[op2]
#[serde]
pub fn op_webgpu_buffer_get_mapped_range(
state: &mut OpState,
#[smi] buffer_rid: ResourceId,
#[number] offset: u64,
#[number] size: Option<u64>,
#[buffer] buf: &mut [u8],
) -> Result<WebGpuResult, BufferError> {
let instance = state.borrow::<super::Instance>();
let buffer_resource = state.resource_table.get::<WebGpuBuffer>(buffer_rid)?;
let buffer = buffer_resource.1;
let (slice_pointer, range_size) =
gfx_select!(buffer => instance.buffer_get_mapped_range(
buffer,
offset,
size
))
.map_err(BufferError::Access)?;
  // SAFETY: guaranteed to be safe by wgpu
let slice = unsafe {
std::slice::from_raw_parts_mut(slice_pointer, range_size as usize)
};
buf.copy_from_slice(slice);
let rid = state
.resource_table
.add(WebGpuBufferMapped(slice_pointer, range_size as usize));
Ok(WebGpuResult::rid(rid))
}
#[op2]
#[serde]
pub fn op_webgpu_buffer_unmap(
state: &mut OpState,
#[smi] buffer_rid: ResourceId,
#[smi] mapped_rid: ResourceId,
#[buffer] buf: Option<&[u8]>,
) -> Result<WebGpuResult, BufferError> {
let mapped_resource = state
.resource_table
.take::<WebGpuBufferMapped>(mapped_rid)?;
let instance = state.borrow::<super::Instance>();
let buffer_resource = state.resource_table.get::<WebGpuBuffer>(buffer_rid)?;
let buffer = buffer_resource.1;
if let Some(buf) = buf {
    // SAFETY: guaranteed to be safe by wgpu
let slice = unsafe {
std::slice::from_raw_parts_mut(mapped_resource.0, mapped_resource.1)
Ok::<(), BufferError>(())
};
slice.copy_from_slice(buf);
let receiver_fut = async move {
receiver.await??;
let mut done = done_.borrow_mut();
*done = true;
Ok::<(), BufferError>(())
};
tokio::try_join!(device_poll_fut, receiver_fut)?;
*self.map_state.borrow_mut() = "mapped";
*self.map_mode.borrow_mut() = Some(mode);
Ok(())
}
gfx_ok!(buffer => instance.buffer_unmap(buffer))
fn get_mapped_range<'s>(
&self,
scope: &mut v8::HandleScope<'s>,
#[webidl(default = 0)] offset: u64,
#[webidl] size: Option<u64>,
) -> Result<v8::Local<'s, v8::ArrayBuffer>, BufferError> {
let (slice_pointer, range_size) = self
.instance
.buffer_get_mapped_range(self.id, offset, size)
.map_err(BufferError::Access)?;
let mode = self.map_mode.borrow();
let mode = mode.as_ref().unwrap();
let bs = if mode == &MapMode::Write {
unsafe extern "C" fn noop_deleter_callback(
_data: *mut std::ffi::c_void,
_byte_length: usize,
_deleter_data: *mut std::ffi::c_void,
) {
}
// SAFETY: creating a backing store from the pointer and length provided by wgpu
unsafe {
v8::ArrayBuffer::new_backing_store_from_ptr(
slice_pointer.as_ptr() as _,
range_size as usize,
noop_deleter_callback,
std::ptr::null_mut(),
)
}
} else {
// SAFETY: creating a vector from the pointer and length provided by wgpu
let slice = unsafe {
std::slice::from_raw_parts(slice_pointer.as_ptr(), range_size as usize)
};
v8::ArrayBuffer::new_backing_store_from_vec(slice.to_vec())
};
let shared_bs = bs.make_shared();
let ab = v8::ArrayBuffer::with_backing_store(scope, &shared_bs);
if mode == &MapMode::Write {
self
.mapped_js_buffers
.borrow_mut()
.push(v8::Global::new(scope, ab));
}
Ok(ab)
}
#[nofast]
fn unmap(&self, scope: &mut v8::HandleScope) -> Result<(), BufferError> {
for ab in self.mapped_js_buffers.replace(vec![]) {
let ab = ab.open(scope);
ab.detach(None);
}
self
.instance
.buffer_unmap(self.id)
.map_err(BufferError::Access)?;
*self.map_state.borrow_mut() = "unmapped";
Ok(())
}
#[fast]
fn destroy(&self) -> Result<(), JsErrorBox> {
self
.instance
.buffer_destroy(self.id)
.map_err(|e| JsErrorBox::generic(e.to_string()))
}
}
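A hedged TypeScript sketch of the map lifecycle implemented above (exactly one of READ or WRITE, and the pending -> mapped -> unmapped state transitions), assuming `device` already exists:

const buffer = device.createBuffer({
  size: 16,
  usage: GPUBufferUsage.MAP_READ | GPUBufferUsage.COPY_DST,
});
await buffer.mapAsync(GPUMapMode.READ);                // map_async validates the mode bits
const bytes = new Uint8Array(buffer.getMappedRange()); // backed by wgpu's mapped range
console.log(buffer.mapState);                          // "mapped"
buffer.unmap();                                        // detaches the ArrayBuffer, back to "unmapped"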

View file

@ -1,413 +0,0 @@
// Copyright 2018-2025 the Deno authors. MIT license.
use std::borrow::Cow;
use std::cell::RefCell;
use std::rc::Rc;
use deno_core::error::ResourceError;
use deno_core::op2;
use deno_core::OpState;
use deno_core::Resource;
use deno_core::ResourceId;
use serde::Deserialize;
use super::error::WebGpuResult;
#[derive(Debug, thiserror::Error, deno_error::JsError)]
pub enum BundleError {
#[class(inherit)]
#[error(transparent)]
Resource(
#[from]
#[inherit]
ResourceError,
),
#[class(type)]
#[error("size must be larger than 0")]
InvalidSize,
}
struct WebGpuRenderBundleEncoder(
RefCell<wgpu_core::command::RenderBundleEncoder>,
);
impl Resource for WebGpuRenderBundleEncoder {
fn name(&self) -> Cow<str> {
"webGPURenderBundleEncoder".into()
}
}
pub(crate) struct WebGpuRenderBundle(
pub(crate) super::Instance,
pub(crate) wgpu_core::id::RenderBundleId,
);
impl Resource for WebGpuRenderBundle {
fn name(&self) -> Cow<str> {
"webGPURenderBundle".into()
}
fn close(self: Rc<Self>) {
gfx_select!(self.1 => self.0.render_bundle_drop(self.1));
}
}
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CreateRenderBundleEncoderArgs {
device_rid: ResourceId,
label: String,
color_formats: Vec<Option<wgpu_types::TextureFormat>>,
depth_stencil_format: Option<wgpu_types::TextureFormat>,
sample_count: u32,
depth_read_only: bool,
stencil_read_only: bool,
}
#[op2]
#[serde]
pub fn op_webgpu_create_render_bundle_encoder(
state: &mut OpState,
#[serde] args: CreateRenderBundleEncoderArgs,
) -> Result<WebGpuResult, ResourceError> {
let device_resource = state
.resource_table
.get::<super::WebGpuDevice>(args.device_rid)?;
let device = device_resource.1;
let depth_stencil = args.depth_stencil_format.map(|format| {
wgpu_types::RenderBundleDepthStencil {
format,
depth_read_only: args.depth_read_only,
stencil_read_only: args.stencil_read_only,
}
});
let descriptor = wgpu_core::command::RenderBundleEncoderDescriptor {
label: Some(Cow::Owned(args.label)),
color_formats: Cow::from(args.color_formats),
sample_count: args.sample_count,
depth_stencil,
multiview: None,
};
let res =
wgpu_core::command::RenderBundleEncoder::new(&descriptor, device, None);
let (render_bundle_encoder, maybe_err) = match res {
Ok(encoder) => (encoder, None),
Err(e) => (
wgpu_core::command::RenderBundleEncoder::dummy(device),
Some(e),
),
};
let rid = state
.resource_table
.add(WebGpuRenderBundleEncoder(RefCell::new(
render_bundle_encoder,
)));
Ok(WebGpuResult::rid_err(rid, maybe_err))
}
#[op2]
#[serde]
pub fn op_webgpu_render_bundle_encoder_finish(
state: &mut OpState,
#[smi] render_bundle_encoder_rid: ResourceId,
#[string] label: Cow<str>,
) -> Result<WebGpuResult, ResourceError> {
let render_bundle_encoder_resource =
state
.resource_table
.take::<WebGpuRenderBundleEncoder>(render_bundle_encoder_rid)?;
let render_bundle_encoder = Rc::try_unwrap(render_bundle_encoder_resource)
.ok()
.expect("unwrapping render_bundle_encoder_resource should succeed")
.0
.into_inner();
let instance = state.borrow::<super::Instance>();
gfx_put!(render_bundle_encoder.parent() => instance.render_bundle_encoder_finish(
render_bundle_encoder,
&wgpu_core::command::RenderBundleDescriptor {
label: Some(label),
},
None
) => state, WebGpuRenderBundle)
}
#[op2]
#[serde]
pub fn op_webgpu_render_bundle_encoder_set_bind_group(
state: &mut OpState,
#[smi] render_bundle_encoder_rid: ResourceId,
index: u32,
#[smi] bind_group: ResourceId,
#[buffer] dynamic_offsets_data: &[u32],
#[number] dynamic_offsets_data_start: usize,
#[number] dynamic_offsets_data_length: usize,
) -> Result<WebGpuResult, ResourceError> {
let bind_group_resource =
state
.resource_table
.get::<super::binding::WebGpuBindGroup>(bind_group)?;
let render_bundle_encoder_resource =
state
.resource_table
.get::<WebGpuRenderBundleEncoder>(render_bundle_encoder_rid)?;
let start = dynamic_offsets_data_start;
let len = dynamic_offsets_data_length;
// Assert that length and start are both in bounds
assert!(start <= dynamic_offsets_data.len());
assert!(len <= dynamic_offsets_data.len() - start);
let dynamic_offsets_data = &dynamic_offsets_data[start..start + len];
// SAFETY: the raw pointer and length are of the same slice, and that slice
// lives longer than the below function invocation.
unsafe {
wgpu_core::command::bundle_ffi::wgpu_render_bundle_set_bind_group(
&mut render_bundle_encoder_resource.0.borrow_mut(),
index,
bind_group_resource.1,
dynamic_offsets_data.as_ptr(),
dynamic_offsets_data.len(),
);
}
Ok(WebGpuResult::empty())
}
#[op2]
#[serde]
pub fn op_webgpu_render_bundle_encoder_push_debug_group(
state: &mut OpState,
#[smi] render_bundle_encoder_rid: ResourceId,
#[string] group_label: &str,
) -> Result<WebGpuResult, ResourceError> {
let render_bundle_encoder_resource =
state
.resource_table
.get::<WebGpuRenderBundleEncoder>(render_bundle_encoder_rid)?;
let label = std::ffi::CString::new(group_label).unwrap();
// SAFETY: the string the raw pointer points to lives longer than the below
// function invocation.
unsafe {
wgpu_core::command::bundle_ffi::wgpu_render_bundle_push_debug_group(
&mut render_bundle_encoder_resource.0.borrow_mut(),
label.as_ptr(),
);
}
Ok(WebGpuResult::empty())
}
#[op2]
#[serde]
pub fn op_webgpu_render_bundle_encoder_pop_debug_group(
state: &mut OpState,
#[smi] render_bundle_encoder_rid: ResourceId,
) -> Result<WebGpuResult, ResourceError> {
let render_bundle_encoder_resource =
state
.resource_table
.get::<WebGpuRenderBundleEncoder>(render_bundle_encoder_rid)?;
wgpu_core::command::bundle_ffi::wgpu_render_bundle_pop_debug_group(
&mut render_bundle_encoder_resource.0.borrow_mut(),
);
Ok(WebGpuResult::empty())
}
#[op2]
#[serde]
pub fn op_webgpu_render_bundle_encoder_insert_debug_marker(
state: &mut OpState,
#[smi] render_bundle_encoder_rid: ResourceId,
#[string] marker_label: &str,
) -> Result<WebGpuResult, ResourceError> {
let render_bundle_encoder_resource =
state
.resource_table
.get::<WebGpuRenderBundleEncoder>(render_bundle_encoder_rid)?;
let label = std::ffi::CString::new(marker_label).unwrap();
// SAFETY: the string the raw pointer points to lives longer than the below
// function invocation.
unsafe {
wgpu_core::command::bundle_ffi::wgpu_render_bundle_insert_debug_marker(
&mut render_bundle_encoder_resource.0.borrow_mut(),
label.as_ptr(),
);
}
Ok(WebGpuResult::empty())
}
#[op2]
#[serde]
pub fn op_webgpu_render_bundle_encoder_set_pipeline(
state: &mut OpState,
#[smi] render_bundle_encoder_rid: ResourceId,
#[smi] pipeline: ResourceId,
) -> Result<WebGpuResult, ResourceError> {
let render_pipeline_resource =
state
.resource_table
.get::<super::pipeline::WebGpuRenderPipeline>(pipeline)?;
let render_bundle_encoder_resource =
state
.resource_table
.get::<WebGpuRenderBundleEncoder>(render_bundle_encoder_rid)?;
wgpu_core::command::bundle_ffi::wgpu_render_bundle_set_pipeline(
&mut render_bundle_encoder_resource.0.borrow_mut(),
render_pipeline_resource.1,
);
Ok(WebGpuResult::empty())
}
#[op2]
#[serde]
pub fn op_webgpu_render_bundle_encoder_set_index_buffer(
state: &mut OpState,
#[smi] render_bundle_encoder_rid: ResourceId,
#[smi] buffer: ResourceId,
#[serde] index_format: wgpu_types::IndexFormat,
#[number] offset: u64,
#[number] size: u64,
) -> Result<WebGpuResult, BundleError> {
let buffer_resource = state
.resource_table
.get::<super::buffer::WebGpuBuffer>(buffer)?;
let render_bundle_encoder_resource =
state
.resource_table
.get::<WebGpuRenderBundleEncoder>(render_bundle_encoder_rid)?;
let size =
Some(std::num::NonZeroU64::new(size).ok_or(BundleError::InvalidSize)?);
render_bundle_encoder_resource
.0
.borrow_mut()
.set_index_buffer(buffer_resource.1, index_format, offset, size);
Ok(WebGpuResult::empty())
}
#[op2]
#[serde]
pub fn op_webgpu_render_bundle_encoder_set_vertex_buffer(
state: &mut OpState,
#[smi] render_bundle_encoder_rid: ResourceId,
slot: u32,
#[smi] buffer: ResourceId,
#[number] offset: u64,
#[number] size: Option<u64>,
) -> Result<WebGpuResult, BundleError> {
let buffer_resource = state
.resource_table
.get::<super::buffer::WebGpuBuffer>(buffer)?;
let render_bundle_encoder_resource =
state
.resource_table
.get::<WebGpuRenderBundleEncoder>(render_bundle_encoder_rid)?;
let size = if let Some(size) = size {
Some(std::num::NonZeroU64::new(size).ok_or(BundleError::InvalidSize)?)
} else {
None
};
wgpu_core::command::bundle_ffi::wgpu_render_bundle_set_vertex_buffer(
&mut render_bundle_encoder_resource.0.borrow_mut(),
slot,
buffer_resource.1,
offset,
size,
);
Ok(WebGpuResult::empty())
}
#[op2]
#[serde]
pub fn op_webgpu_render_bundle_encoder_draw(
state: &mut OpState,
#[smi] render_bundle_encoder_rid: ResourceId,
vertex_count: u32,
instance_count: u32,
first_vertex: u32,
first_instance: u32,
) -> Result<WebGpuResult, ResourceError> {
let render_bundle_encoder_resource =
state
.resource_table
.get::<WebGpuRenderBundleEncoder>(render_bundle_encoder_rid)?;
wgpu_core::command::bundle_ffi::wgpu_render_bundle_draw(
&mut render_bundle_encoder_resource.0.borrow_mut(),
vertex_count,
instance_count,
first_vertex,
first_instance,
);
Ok(WebGpuResult::empty())
}
#[op2]
#[serde]
pub fn op_webgpu_render_bundle_encoder_draw_indexed(
state: &mut OpState,
#[smi] render_bundle_encoder_rid: ResourceId,
index_count: u32,
instance_count: u32,
first_index: u32,
base_vertex: i32,
first_instance: u32,
) -> Result<WebGpuResult, ResourceError> {
let render_bundle_encoder_resource =
state
.resource_table
.get::<WebGpuRenderBundleEncoder>(render_bundle_encoder_rid)?;
wgpu_core::command::bundle_ffi::wgpu_render_bundle_draw_indexed(
&mut render_bundle_encoder_resource.0.borrow_mut(),
index_count,
instance_count,
first_index,
base_vertex,
first_instance,
);
Ok(WebGpuResult::empty())
}
#[op2]
#[serde]
pub fn op_webgpu_render_bundle_encoder_draw_indirect(
state: &mut OpState,
#[smi] render_bundle_encoder_rid: ResourceId,
#[smi] indirect_buffer: ResourceId,
#[number] indirect_offset: u64,
) -> Result<WebGpuResult, ResourceError> {
let buffer_resource = state
.resource_table
.get::<super::buffer::WebGpuBuffer>(indirect_buffer)?;
let render_bundle_encoder_resource =
state
.resource_table
.get::<WebGpuRenderBundleEncoder>(render_bundle_encoder_rid)?;
wgpu_core::command::bundle_ffi::wgpu_render_bundle_draw_indirect(
&mut render_bundle_encoder_resource.0.borrow_mut(),
buffer_resource.1,
indirect_offset,
);
Ok(WebGpuResult::empty())
}

View file

@ -1,5 +1,6 @@
// Copyright 2018-2025 the Deno authors. MIT license.
use std::cell::RefCell;
use std::ffi::c_void;
#[cfg(any(
target_os = "linux",
@ -9,11 +10,17 @@ use std::ffi::c_void;
))]
use std::ptr::NonNull;
use deno_core::cppgc::SameObject;
use deno_core::op2;
use deno_core::v8;
use deno_core::v8::Local;
use deno_core::v8::Value;
use deno_core::FromV8;
use deno_core::GarbageCollected;
use deno_core::OpState;
use deno_core::ResourceId;
use deno_error::JsErrorBox;
use crate::surface::WebGpuSurface;
use crate::surface::GPUCanvasContext;
#[derive(Debug, thiserror::Error, deno_error::JsError)]
pub enum ByowError {
@ -65,46 +72,173 @@ pub enum ByowError {
NSViewDisplay,
}
#[op2(fast)]
#[smi]
pub fn op_webgpu_surface_create(
state: &mut OpState,
#[string] system: &str,
p1: *const c_void,
p2: *const c_void,
) -> Result<ResourceId, ByowError> {
let instance = state
.try_borrow::<super::Instance>()
.ok_or(ByowError::WebGPUNotInitiated)?;
// Security note:
//
// The `p1` and `p2` parameters are pointers to platform-specific window
// handles.
//
// The code below works under the assumption that:
//
// - handles can only be created by the FFI interface which
// enforces --allow-ffi.
//
  //  - `*const c_void` deserializes null and v8::External.
//
// - Only FFI can export v8::External to user code.
if p1.is_null() {
return Err(ByowError::InvalidParameters);
// TODO(@littledivy): This will extend `OffscreenCanvas` when we add it.
pub struct UnsafeWindowSurface {
pub id: wgpu_core::id::SurfaceId,
pub width: RefCell<u32>,
pub height: RefCell<u32>,
pub context: SameObject<GPUCanvasContext>,
}
impl GarbageCollected for UnsafeWindowSurface {}
#[op2]
impl UnsafeWindowSurface {
#[constructor]
#[cppgc]
fn new(
state: &mut OpState,
#[from_v8] options: UnsafeWindowSurfaceOptions,
) -> Result<UnsafeWindowSurface, ByowError> {
let instance = state
.try_borrow::<super::Instance>()
.ok_or(ByowError::WebGPUNotInitiated)?;
// Security note:
//
// The `window_handle` and `display_handle` options are pointers to
// platform-specific window handles.
//
// The code below works under the assumption that:
//
// - handles can only be created by the FFI interface which
// enforces --allow-ffi.
//
    //  - `*const c_void` deserializes null and v8::External.
//
// - Only FFI can export v8::External to user code.
if options.window_handle.is_null() {
return Err(ByowError::InvalidParameters);
}
let (win_handle, display_handle) = raw_window(
options.system,
options.window_handle,
options.display_handle,
)?;
// SAFETY: see above comment
let id = unsafe {
instance
.instance_create_surface(display_handle, win_handle, None)
.map_err(ByowError::CreateSurface)?
};
Ok(UnsafeWindowSurface {
id,
width: RefCell::new(options.width),
height: RefCell::new(options.height),
context: SameObject::new(),
})
}
let (win_handle, display_handle) = raw_window(system, p1, p2)?;
// SAFETY: see above comment
let surface = unsafe {
instance
.instance_create_surface(display_handle, win_handle, None)
.map_err(ByowError::CreateSurface)?
};
#[global]
fn get_context(
&self,
#[this] this: v8::Global<v8::Object>,
scope: &mut v8::HandleScope,
) -> v8::Global<v8::Object> {
self.context.get(scope, |_| GPUCanvasContext {
surface_id: self.id,
width: self.width.clone(),
height: self.height.clone(),
config: RefCell::new(None),
texture: RefCell::new(None),
canvas: this,
})
}
let rid = state
.resource_table
.add(WebGpuSurface(instance.clone(), surface));
Ok(rid)
#[nofast]
fn present(&self, scope: &mut v8::HandleScope) -> Result<(), JsErrorBox> {
let Some(context) = self.context.try_unwrap(scope) else {
return Err(JsErrorBox::type_error("getContext was never called"));
};
context.present().map_err(JsErrorBox::from_err)
}
}
struct UnsafeWindowSurfaceOptions {
system: UnsafeWindowSurfaceSystem,
window_handle: *const c_void,
display_handle: *const c_void,
width: u32,
height: u32,
}
#[derive(Eq, PartialEq)]
enum UnsafeWindowSurfaceSystem {
Cocoa,
Win32,
X11,
Wayland,
}
impl<'a> FromV8<'a> for UnsafeWindowSurfaceOptions {
type Error = JsErrorBox;
fn from_v8(
scope: &mut v8::HandleScope<'a>,
value: Local<'a, Value>,
) -> Result<Self, Self::Error> {
let obj = value
.try_cast::<v8::Object>()
.map_err(|_| JsErrorBox::type_error("is not an object"))?;
let key = v8::String::new(scope, "system").unwrap();
let val = obj
.get(scope, key.into())
.ok_or_else(|| JsErrorBox::type_error("missing field 'system'"))?;
let s = String::from_v8(scope, val).unwrap();
let system = match s.as_str() {
"cocoa" => UnsafeWindowSurfaceSystem::Cocoa,
"win32" => UnsafeWindowSurfaceSystem::Win32,
"x11" => UnsafeWindowSurfaceSystem::X11,
"wayland" => UnsafeWindowSurfaceSystem::Wayland,
_ => {
return Err(JsErrorBox::type_error(format!(
"Invalid system kind '{s}'"
)))
}
};
let key = v8::String::new(scope, "windowHandle").unwrap();
let val = obj
.get(scope, key.into())
.ok_or_else(|| JsErrorBox::type_error("missing field 'windowHandle'"))?;
let Some(window_handle) = deno_core::_ops::to_external_option(&val) else {
return Err(JsErrorBox::type_error("expected external"));
};
let key = v8::String::new(scope, "displayHandle").unwrap();
let val = obj
.get(scope, key.into())
.ok_or_else(|| JsErrorBox::type_error("missing field 'displayHandle'"))?;
let Some(display_handle) = deno_core::_ops::to_external_option(&val) else {
return Err(JsErrorBox::type_error("expected external"));
};
let key = v8::String::new(scope, "width").unwrap();
let val = obj
.get(scope, key.into())
.ok_or_else(|| JsErrorBox::type_error("missing field 'width'"))?;
let width = deno_core::convert::Number::<u32>::from_v8(scope, val)?.0;
let key = v8::String::new(scope, "height").unwrap();
let val = obj
.get(scope, key.into())
.ok_or_else(|| JsErrorBox::type_error("missing field 'height'"))?;
let height = deno_core::convert::Number::<u32>::from_v8(scope, val)?.0;
Ok(Self {
system,
window_handle,
display_handle,
width,
height,
})
}
}
type RawHandles = (
@ -114,11 +248,11 @@ type RawHandles = (
#[cfg(target_os = "macos")]
fn raw_window(
system: &str,
system: UnsafeWindowSurfaceSystem,
_ns_window: *const c_void,
ns_view: *const c_void,
) -> Result<RawHandles, ByowError> {
if system != "cocoa" {
if system != UnsafeWindowSurfaceSystem::Cocoa {
return Err(ByowError::InvalidSystem);
}
@ -136,12 +270,12 @@ fn raw_window(
#[cfg(target_os = "windows")]
fn raw_window(
system: &str,
system: UnsafeWindowSurfaceSystem,
window: *const c_void,
hinstance: *const c_void,
) -> Result<RawHandles, ByowError> {
use raw_window_handle::WindowsDisplayHandle;
if system != "win32" {
if system != UnsafeWindowSurfaceSystem::Win32 {
return Err(ByowError::InvalidSystem);
}
@ -162,12 +296,12 @@ fn raw_window(
#[cfg(any(target_os = "linux", target_os = "freebsd", target_os = "openbsd"))]
fn raw_window(
system: &str,
system: UnsafeWindowSurfaceSystem,
window: *const c_void,
display: *const c_void,
) -> Result<RawHandles, ByowError> {
let (win_handle, display_handle);
if system == "x11" {
if system == UnsafeWindowSurfaceSystem::X11 {
win_handle = raw_window_handle::RawWindowHandle::Xlib(
raw_window_handle::XlibWindowHandle::new(window as *mut c_void as _),
);
@ -178,7 +312,7 @@ fn raw_window(
0,
),
);
} else if system == "wayland" {
} else if system == UnsafeWindowSurfaceSystem::Wayland {
win_handle = raw_window_handle::RawWindowHandle::Wayland(
raw_window_handle::WaylandWindowHandle::new(
NonNull::new(window as *mut c_void).ok_or(ByowError::NullWindow)?,
@ -205,7 +339,7 @@ fn raw_window(
target_os = "openbsd",
)))]
fn raw_window(
_system: &str,
_system: UnsafeWindowSurfaceSystem,
_window: *const c_void,
_display: *const c_void,
) -> Result<RawHandles, deno_error::JsErrorBox> {
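A hedged TypeScript sketch of the options object parsed by UnsafeWindowSurfaceOptions above. The Deno.UnsafeWindowSurface name, getContext, and present follow Deno's pre-existing unstable surface API, and the handle values are assumed to come from FFI (--allow-ffi):

const surface = new Deno.UnsafeWindowSurface({
  system: "x11",                // "cocoa" | "win32" | "x11" | "wayland"
  windowHandle: winHandle,      // v8::External obtained via FFI
  displayHandle: displayHandle, // v8::External obtained via FFI
  width: 800,
  height: 600,
});
const context = surface.getContext("webgpu");
// ... configure the context and render ...
surface.present();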

View file

@ -0,0 +1,52 @@
// Copyright 2018-2025 the Deno authors. MIT license.
use std::cell::OnceCell;
use deno_core::op2;
use deno_core::GarbageCollected;
use deno_core::WebIDL;
use crate::Instance;
pub struct GPUCommandBuffer {
pub instance: Instance,
pub id: wgpu_core::id::CommandBufferId,
pub label: String,
pub consumed: OnceCell<()>,
}
impl Drop for GPUCommandBuffer {
fn drop(&mut self) {
if self.consumed.get().is_none() {
self.instance.command_buffer_drop(self.id);
}
}
}
impl deno_core::webidl::WebIdlInterfaceConverter for GPUCommandBuffer {
const NAME: &'static str = "GPUCommandBuffer";
}
impl GarbageCollected for GPUCommandBuffer {}
#[op2]
impl GPUCommandBuffer {
#[getter]
#[string]
fn label(&self) -> String {
self.label.clone()
}
#[setter]
#[string]
fn label(&self, #[webidl] _label: String) {
    // TODO(@crowlKats): no-op, needs wgpu to implement changing the label
}
}
#[derive(WebIDL)]
#[webidl(dictionary)]
pub(crate) struct GPUCommandBufferDescriptor {
#[webidl(default = String::new())]
pub label: String,
}
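A hedged TypeScript sketch showing where GPUCommandBuffer comes from and why `consumed` is a OnceCell: the buffer is produced by finish() and submitted exactly once; `device` is assumed to exist:

const encoder = device.createCommandEncoder();
const commandBuffer = encoder.finish({ label: "frame" }); // GPUCommandBufferDescriptor
device.queue.submit([commandBuffer]); // consumes the buffer; Drop then skips command_buffer_drop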

View file

@ -2,633 +2,394 @@
use std::borrow::Cow;
use std::cell::RefCell;
use std::rc::Rc;
use deno_core::error::ResourceError;
use deno_core::cppgc::Ptr;
use deno_core::op2;
use deno_core::OpState;
use deno_core::Resource;
use deno_core::ResourceId;
use serde::Deserialize;
use deno_core::GarbageCollected;
use deno_core::WebIDL;
use deno_error::JsErrorBox;
use wgpu_core::command::PassChannel;
use wgpu_types::TexelCopyBufferInfo;
use super::error::WebGpuResult;
use crate::WebGpuQuerySet;
use crate::buffer::GPUBuffer;
use crate::command_buffer::GPUCommandBuffer;
use crate::compute_pass::GPUComputePassEncoder;
use crate::queue::GPUTexelCopyTextureInfo;
use crate::render_pass::GPULoadOp;
use crate::render_pass::GPURenderPassEncoder;
use crate::webidl::GPUExtent3D;
use crate::Instance;
pub(crate) struct WebGpuCommandEncoder(
pub(crate) super::Instance,
pub(crate) wgpu_core::id::CommandEncoderId, // TODO: should maybe be option?
);
impl Resource for WebGpuCommandEncoder {
fn name(&self) -> Cow<str> {
"webGPUCommandEncoder".into()
}
pub struct GPUCommandEncoder {
pub instance: Instance,
pub error_handler: super::error::ErrorHandler,
fn close(self: Rc<Self>) {
gfx_select!(self.1 => self.0.command_encoder_drop(self.1));
pub id: wgpu_core::id::CommandEncoderId,
pub label: String,
}
impl Drop for GPUCommandEncoder {
fn drop(&mut self) {
self.instance.command_encoder_drop(self.id);
}
}
pub(crate) struct WebGpuCommandBuffer(
pub(crate) super::Instance,
pub(crate) RefCell<Option<wgpu_core::id::CommandBufferId>>,
);
impl Resource for WebGpuCommandBuffer {
fn name(&self) -> Cow<str> {
"webGPUCommandBuffer".into()
}
fn close(self: Rc<Self>) {
if let Some(id) = *self.1.borrow() {
gfx_select!(id => self.0.command_buffer_drop(id));
}
}
}
impl GarbageCollected for GPUCommandEncoder {}
#[op2]
#[serde]
pub fn op_webgpu_create_command_encoder(
state: &mut OpState,
#[smi] device_rid: ResourceId,
#[string] label: Cow<str>,
) -> Result<WebGpuResult, ResourceError> {
let instance = state.borrow::<super::Instance>();
let device_resource = state
.resource_table
.get::<super::WebGpuDevice>(device_rid)?;
let device = device_resource.1;
impl GPUCommandEncoder {
#[getter]
#[string]
fn label(&self) -> String {
self.label.clone()
}
#[setter]
#[string]
fn label(&self, #[webidl] _label: String) {
    // TODO(@crowlKats): no-op, needs wgpu to implement changing the label
}
let descriptor = wgpu_types::CommandEncoderDescriptor { label: Some(label) };
gfx_put!(device => instance.device_create_command_encoder(
device,
&descriptor,
None
) => state, WebGpuCommandEncoder)
}
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct GpuRenderPassColorAttachment {
view: ResourceId,
resolve_target: Option<ResourceId>,
clear_value: Option<wgpu_types::Color>,
load_op: wgpu_core::command::LoadOp,
store_op: wgpu_core::command::StoreOp,
}
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct GpuRenderPassDepthStencilAttachment {
view: ResourceId,
depth_clear_value: Option<f32>,
depth_load_op: Option<wgpu_core::command::LoadOp>,
depth_store_op: Option<wgpu_core::command::StoreOp>,
depth_read_only: bool,
stencil_clear_value: u32,
stencil_load_op: Option<wgpu_core::command::LoadOp>,
stencil_store_op: Option<wgpu_core::command::StoreOp>,
stencil_read_only: bool,
}
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct GPURenderPassTimestampWrites {
query_set: ResourceId,
beginning_of_pass_write_index: Option<u32>,
end_of_pass_write_index: Option<u32>,
}
#[op2]
#[serde]
pub fn op_webgpu_command_encoder_begin_render_pass(
state: &mut OpState,
#[smi] command_encoder_rid: ResourceId,
#[string] label: Cow<str>,
#[serde] color_attachments: Vec<Option<GpuRenderPassColorAttachment>>,
#[serde] depth_stencil_attachment: Option<
GpuRenderPassDepthStencilAttachment,
>,
#[smi] occlusion_query_set: Option<ResourceId>,
#[serde] timestamp_writes: Option<GPURenderPassTimestampWrites>,
) -> Result<WebGpuResult, ResourceError> {
let command_encoder_resource = state
.resource_table
.get::<WebGpuCommandEncoder>(command_encoder_rid)?;
let color_attachments = color_attachments
.into_iter()
.map(|color_attachment| {
let rp_at = if let Some(at) = color_attachment.as_ref() {
let texture_view_resource =
state
.resource_table
.get::<super::texture::WebGpuTextureView>(at.view)?;
let resolve_target = at
.resolve_target
.map(|rid| {
state
.resource_table
.get::<super::texture::WebGpuTextureView>(rid)
#[required(1)]
#[cppgc]
fn begin_render_pass(
&self,
#[webidl] descriptor: crate::render_pass::GPURenderPassDescriptor,
) -> Result<GPURenderPassEncoder, JsErrorBox> {
let color_attachments = Cow::Owned(
descriptor
.color_attachments
.into_iter()
.map(|attachment| {
attachment.into_option().map(|attachment| {
wgpu_core::command::RenderPassColorAttachment {
view: attachment.view.id,
resolve_target: attachment.resolve_target.map(|target| target.id),
load_op: attachment
.load_op
.with_default_value(attachment.clear_value.map(Into::into)),
store_op: attachment.store_op.into(),
}
})
.transpose()?
.map(|texture| texture.1);
})
.collect::<Vec<_>>(),
);
Some(wgpu_core::command::RenderPassColorAttachment {
view: texture_view_resource.1,
resolve_target,
channel: wgpu_core::command::PassChannel {
load_op: at.load_op,
store_op: at.store_op,
clear_value: at.clear_value.unwrap_or_default(),
read_only: false,
let depth_stencil_attachment =
descriptor.depth_stencil_attachment.map(|attachment| {
if attachment.depth_load_op.as_ref().is_some_and(|op| matches!(op, GPULoadOp::Clear)) && attachment.depth_clear_value.is_none() {
return Err(JsErrorBox::type_error(r#"'depthClearValue' must be specified when 'depthLoadOp' is "clear""#));
}
Ok(wgpu_core::command::RenderPassDepthStencilAttachment {
view: attachment.view.id,
depth: PassChannel {
load_op: attachment.depth_load_op.map(|load_op| load_op.with_value(attachment.depth_clear_value)),
store_op: attachment.depth_store_op.map(Into::into),
read_only: attachment.depth_read_only,
},
stencil: PassChannel {
load_op: attachment.stencil_load_op.map(|load_op| load_op.with_value(Some(attachment.stencil_clear_value))),
store_op: attachment.stencil_store_op.map(Into::into),
read_only: attachment.stencil_read_only,
},
})
} else {
None
};
Ok(rp_at)
})
.collect::<Result<Vec<_>, ResourceError>>()?;
}).transpose()?;
let mut processed_depth_stencil_attachment = None;
if let Some(attachment) = depth_stencil_attachment {
let texture_view_resource =
state
.resource_table
.get::<super::texture::WebGpuTextureView>(attachment.view)?;
processed_depth_stencil_attachment =
Some(wgpu_core::command::RenderPassDepthStencilAttachment {
view: texture_view_resource.1,
depth: wgpu_core::command::PassChannel {
load_op: attachment
.depth_load_op
.unwrap_or(wgpu_core::command::LoadOp::Load),
store_op: attachment
.depth_store_op
.unwrap_or(wgpu_core::command::StoreOp::Store),
// In "01_webgpu.js", `depthLoadOp` is cheked to ensure its value is not "clear"
// when `depthClearValue` is undefined, so the default 0.0 doesn't matter.
clear_value: attachment.depth_clear_value.unwrap_or(0.0),
read_only: attachment.depth_read_only,
},
stencil: wgpu_core::command::PassChannel {
load_op: attachment
.stencil_load_op
.unwrap_or(wgpu_core::command::LoadOp::Load),
store_op: attachment
.stencil_store_op
.unwrap_or(wgpu_core::command::StoreOp::Store),
clear_value: attachment.stencil_clear_value,
read_only: attachment.stencil_read_only,
},
let timestamp_writes =
descriptor.timestamp_writes.map(|timestamp_writes| {
wgpu_core::command::PassTimestampWrites {
query_set: timestamp_writes.query_set.id,
beginning_of_pass_write_index: timestamp_writes
.beginning_of_pass_write_index,
end_of_pass_write_index: timestamp_writes.end_of_pass_write_index,
}
});
let wgpu_descriptor = wgpu_core::command::RenderPassDescriptor {
label: crate::transform_label(descriptor.label.clone()),
color_attachments,
depth_stencil_attachment: depth_stencil_attachment.as_ref(),
timestamp_writes: timestamp_writes.as_ref(),
occlusion_query_set: descriptor
.occlusion_query_set
.map(|query_set| query_set.id),
};
let (render_pass, err) = self
.instance
.command_encoder_create_render_pass(self.id, &wgpu_descriptor);
self.error_handler.push_error(err);
Ok(GPURenderPassEncoder {
instance: self.instance.clone(),
error_handler: self.error_handler.clone(),
render_pass: RefCell::new(render_pass),
label: descriptor.label,
})
}
let timestamp_writes = if let Some(timestamp_writes) = timestamp_writes {
let query_set_resource = state
.resource_table
.get::<WebGpuQuerySet>(timestamp_writes.query_set)?;
let query_set = query_set_resource.1;
Some(wgpu_core::command::RenderPassTimestampWrites {
query_set,
beginning_of_pass_write_index: timestamp_writes
.beginning_of_pass_write_index,
end_of_pass_write_index: timestamp_writes.end_of_pass_write_index,
})
} else {
None
};
let occlusion_query_set_resource = occlusion_query_set
.map(|rid| state.resource_table.get::<WebGpuQuerySet>(rid))
.transpose()?
.map(|query_set| query_set.1);
let descriptor = wgpu_core::command::RenderPassDescriptor {
label: Some(label),
color_attachments: Cow::from(color_attachments),
depth_stencil_attachment: processed_depth_stencil_attachment.as_ref(),
timestamp_writes: timestamp_writes.as_ref(),
occlusion_query_set: occlusion_query_set_resource,
};
let render_pass = wgpu_core::command::RenderPass::new(
command_encoder_resource.1,
&descriptor,
);
let rid = state
.resource_table
.add(super::render_pass::WebGpuRenderPass(RefCell::new(
render_pass,
)));
Ok(WebGpuResult::rid(rid))
}
#[cppgc]
fn begin_compute_pass(
&self,
#[webidl] descriptor: crate::compute_pass::GPUComputePassDescriptor,
) -> GPUComputePassEncoder {
let timestamp_writes =
descriptor.timestamp_writes.map(|timestamp_writes| {
wgpu_core::command::PassTimestampWrites {
query_set: timestamp_writes.query_set.id,
beginning_of_pass_write_index: timestamp_writes
.beginning_of_pass_write_index,
end_of_pass_write_index: timestamp_writes.end_of_pass_write_index,
}
});
let wgpu_descriptor = wgpu_core::command::ComputePassDescriptor {
label: crate::transform_label(descriptor.label.clone()),
timestamp_writes: timestamp_writes.as_ref(),
};
let (compute_pass, err) = self
.instance
.command_encoder_create_compute_pass(self.id, &wgpu_descriptor);
self.error_handler.push_error(err);
GPUComputePassEncoder {
instance: self.instance.clone(),
error_handler: self.error_handler.clone(),
compute_pass: RefCell::new(compute_pass),
label: descriptor.label,
}
}
#[required(5)]
fn copy_buffer_to_buffer(
&self,
#[webidl] source: Ptr<GPUBuffer>,
#[webidl(options(enforce_range = true))] source_offset: u64,
#[webidl] destination: Ptr<GPUBuffer>,
#[webidl(options(enforce_range = true))] destination_offset: u64,
#[webidl(options(enforce_range = true))] size: u64,
) {
let err = self
.instance
.command_encoder_copy_buffer_to_buffer(
self.id,
source.id,
source_offset,
destination.id,
destination_offset,
size,
)
.err();
self.error_handler.push_error(err);
}
#[required(3)]
fn copy_buffer_to_texture(
&self,
#[webidl] source: GPUTexelCopyBufferInfo,
#[webidl] destination: GPUTexelCopyTextureInfo,
#[webidl] copy_size: GPUExtent3D,
) {
let source = TexelCopyBufferInfo {
buffer: source.buffer.id,
layout: wgpu_types::TexelCopyBufferLayout {
offset: source.offset,
bytes_per_row: source.bytes_per_row,
rows_per_image: source.rows_per_image,
},
};
let destination = wgpu_types::TexelCopyTextureInfo {
texture: destination.texture.id,
mip_level: destination.mip_level,
origin: destination.origin.into(),
aspect: destination.aspect.into(),
};
let err = self
.instance
.command_encoder_copy_buffer_to_texture(
self.id,
&source,
&destination,
&copy_size.into(),
)
.err();
self.error_handler.push_error(err);
}
#[required(3)]
fn copy_texture_to_buffer(
&self,
#[webidl] source: GPUTexelCopyTextureInfo,
#[webidl] destination: GPUTexelCopyBufferInfo,
#[webidl] copy_size: GPUExtent3D,
) {
let source = wgpu_types::TexelCopyTextureInfo {
texture: source.texture.id,
mip_level: source.mip_level,
origin: source.origin.into(),
aspect: source.aspect.into(),
};
let destination = TexelCopyBufferInfo {
buffer: destination.buffer.id,
layout: wgpu_types::TexelCopyBufferLayout {
offset: destination.offset,
bytes_per_row: destination.bytes_per_row,
rows_per_image: destination.rows_per_image,
},
};
let err = self
.instance
.command_encoder_copy_texture_to_buffer(
self.id,
&source,
&destination,
&copy_size.into(),
)
.err();
self.error_handler.push_error(err);
}
#[required(3)]
fn copy_texture_to_texture(
&self,
#[webidl] source: GPUTexelCopyTextureInfo,
#[webidl] destination: GPUTexelCopyTextureInfo,
#[webidl] copy_size: GPUExtent3D,
) {
let source = wgpu_types::TexelCopyTextureInfo {
texture: source.texture.id,
mip_level: source.mip_level,
origin: source.origin.into(),
aspect: source.aspect.into(),
};
let destination = wgpu_types::TexelCopyTextureInfo {
texture: destination.texture.id,
mip_level: destination.mip_level,
origin: destination.origin.into(),
aspect: destination.aspect.into(),
};
let err = self
.instance
.command_encoder_copy_texture_to_texture(
self.id,
&source,
&destination,
&copy_size.into(),
)
.err();
self.error_handler.push_error(err);
}
#[required(1)]
fn clear_buffer(
&self,
#[webidl] buffer: Ptr<GPUBuffer>,
#[webidl(default = 0, options(enforce_range = true))] offset: u64,
#[webidl(options(enforce_range = true))] size: Option<u64>,
) {
let err = self
.instance
.command_encoder_clear_buffer(self.id, buffer.id, offset, size)
.err();
self.error_handler.push_error(err);
}
#[required(5)]
fn resolve_query_set(
&self,
#[webidl] query_set: Ptr<super::query_set::GPUQuerySet>,
#[webidl(options(enforce_range = true))] first_query: u32,
#[webidl(options(enforce_range = true))] query_count: u32,
#[webidl] destination: Ptr<GPUBuffer>,
#[webidl(options(enforce_range = true))] destination_offset: u64,
) {
let err = self
.instance
.command_encoder_resolve_query_set(
self.id,
query_set.id,
first_query,
query_count,
destination.id,
destination_offset,
)
.err();
self.error_handler.push_error(err);
}
#[cppgc]
fn finish(
&self,
#[webidl] descriptor: crate::command_buffer::GPUCommandBufferDescriptor,
) -> GPUCommandBuffer {
let wgpu_descriptor = wgpu_types::CommandBufferDescriptor {
label: crate::transform_label(descriptor.label.clone()),
};
let (id, err) = self
.instance
.command_encoder_finish(self.id, &wgpu_descriptor);
self.error_handler.push_error(err);
GPUCommandBuffer {
instance: self.instance.clone(),
id,
label: descriptor.label,
consumed: Default::default(),
}
}
fn push_debug_group(&self, #[webidl] group_label: String) {
let err = self
.instance
.command_encoder_push_debug_group(self.id, &group_label)
.err();
self.error_handler.push_error(err);
}
#[fast]
fn pop_debug_group(&self) {
let err = self.instance.command_encoder_pop_debug_group(self.id).err();
self.error_handler.push_error(err);
}
fn insert_debug_marker(&self, #[webidl] marker_label: String) {
let err = self
.instance
.command_encoder_insert_debug_marker(self.id, &marker_label)
.err();
self.error_handler.push_error(err);
}
}
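// Every method on GPUCommandEncoder above follows the same shape: forward the
// call to the matching wgpu_core command_encoder_* function, capture any
// returned error, and push it onto the shared device error handler rather than
// throwing, so failures surface through WebGPU error scopes instead of JS
// exceptions.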
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct GPUComputePassTimestampWrites {
query_set: ResourceId,
beginning_of_pass_write_index: Option<u32>,
end_of_pass_write_index: Option<u32>,
}
#[derive(WebIDL)]
#[webidl(dictionary)]
pub(crate) struct GPUCommandEncoderDescriptor {
#[webidl(default = String::new())]
pub label: String,
}
#[op2]
#[serde]
pub fn op_webgpu_command_encoder_begin_compute_pass(
state: &mut OpState,
#[smi] command_encoder_rid: ResourceId,
#[string] label: Cow<str>,
#[serde] timestamp_writes: Option<GPUComputePassTimestampWrites>,
) -> Result<WebGpuResult, ResourceError> {
let command_encoder_resource = state
.resource_table
.get::<WebGpuCommandEncoder>(command_encoder_rid)?;
let timestamp_writes = if let Some(timestamp_writes) = timestamp_writes {
let query_set_resource = state
.resource_table
.get::<WebGpuQuerySet>(timestamp_writes.query_set)?;
let query_set = query_set_resource.1;
Some(wgpu_core::command::ComputePassTimestampWrites {
query_set,
beginning_of_pass_write_index: timestamp_writes
.beginning_of_pass_write_index,
end_of_pass_write_index: timestamp_writes.end_of_pass_write_index,
})
} else {
None
};
let descriptor = wgpu_core::command::ComputePassDescriptor {
label: Some(label),
timestamp_writes: timestamp_writes.as_ref(),
};
let compute_pass = wgpu_core::command::ComputePass::new(
command_encoder_resource.1,
&descriptor,
);
let rid = state
.resource_table
.add(super::compute_pass::WebGpuComputePass(RefCell::new(
compute_pass,
)));
Ok(WebGpuResult::rid(rid))
}
#[op2]
#[serde]
pub fn op_webgpu_command_encoder_copy_buffer_to_buffer(
state: &mut OpState,
#[smi] command_encoder_rid: ResourceId,
#[smi] source: ResourceId,
#[number] source_offset: u64,
#[smi] destination: ResourceId,
#[number] destination_offset: u64,
#[number] size: u64,
) -> Result<WebGpuResult, ResourceError> {
let instance = state.borrow::<super::Instance>();
let command_encoder_resource = state
.resource_table
.get::<WebGpuCommandEncoder>(command_encoder_rid)?;
let command_encoder = command_encoder_resource.1;
let source_buffer_resource = state
.resource_table
.get::<super::buffer::WebGpuBuffer>(source)?;
let source_buffer = source_buffer_resource.1;
let destination_buffer_resource =
state
.resource_table
.get::<super::buffer::WebGpuBuffer>(destination)?;
let destination_buffer = destination_buffer_resource.1;
gfx_ok!(command_encoder => instance.command_encoder_copy_buffer_to_buffer(
command_encoder,
source_buffer,
source_offset,
destination_buffer,
destination_offset,
size
))
}
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct GpuImageCopyBuffer {
buffer: ResourceId,
offset: u64,
bytes_per_row: Option<u32>,
rows_per_image: Option<u32>,
}
#[derive(WebIDL)]
#[webidl(dictionary)]
pub(crate) struct GPUTexelCopyBufferInfo {
pub buffer: Ptr<GPUBuffer>,
#[webidl(default = 0)]
#[options(enforce_range = true)]
offset: u64,
#[options(enforce_range = true)]
bytes_per_row: Option<u32>,
#[options(enforce_range = true)]
rows_per_image: Option<u32>,
}
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct GpuImageCopyTexture {
pub texture: ResourceId,
pub mip_level: u32,
pub origin: wgpu_types::Origin3d,
pub aspect: wgpu_types::TextureAspect,
}
#[op2]
#[serde]
pub fn op_webgpu_command_encoder_copy_buffer_to_texture(
state: &mut OpState,
#[smi] command_encoder_rid: ResourceId,
#[serde] source: GpuImageCopyBuffer,
#[serde] destination: GpuImageCopyTexture,
#[serde] copy_size: wgpu_types::Extent3d,
) -> Result<WebGpuResult, ResourceError> {
let instance = state.borrow::<super::Instance>();
let command_encoder_resource = state
.resource_table
.get::<WebGpuCommandEncoder>(command_encoder_rid)?;
let command_encoder = command_encoder_resource.1;
let source_buffer_resource =
state
.resource_table
.get::<super::buffer::WebGpuBuffer>(source.buffer)?;
let destination_texture_resource =
state
.resource_table
.get::<super::texture::WebGpuTexture>(destination.texture)?;
let source = wgpu_core::command::ImageCopyBuffer {
buffer: source_buffer_resource.1,
layout: wgpu_types::ImageDataLayout {
offset: source.offset,
bytes_per_row: source.bytes_per_row,
rows_per_image: source.rows_per_image,
},
};
let destination = wgpu_core::command::ImageCopyTexture {
texture: destination_texture_resource.id,
mip_level: destination.mip_level,
origin: destination.origin,
aspect: destination.aspect,
};
gfx_ok!(command_encoder => instance.command_encoder_copy_buffer_to_texture(
command_encoder,
&source,
&destination,
&copy_size
))
}
#[op2]
#[serde]
pub fn op_webgpu_command_encoder_copy_texture_to_buffer(
state: &mut OpState,
#[smi] command_encoder_rid: ResourceId,
#[serde] source: GpuImageCopyTexture,
#[serde] destination: GpuImageCopyBuffer,
#[serde] copy_size: wgpu_types::Extent3d,
) -> Result<WebGpuResult, ResourceError> {
let instance = state.borrow::<super::Instance>();
let command_encoder_resource = state
.resource_table
.get::<WebGpuCommandEncoder>(command_encoder_rid)?;
let command_encoder = command_encoder_resource.1;
let source_texture_resource =
state
.resource_table
.get::<super::texture::WebGpuTexture>(source.texture)?;
let destination_buffer_resource =
state
.resource_table
.get::<super::buffer::WebGpuBuffer>(destination.buffer)?;
let source = wgpu_core::command::ImageCopyTexture {
texture: source_texture_resource.id,
mip_level: source.mip_level,
origin: source.origin,
aspect: source.aspect,
};
let destination = wgpu_core::command::ImageCopyBuffer {
buffer: destination_buffer_resource.1,
layout: wgpu_types::ImageDataLayout {
offset: destination.offset,
bytes_per_row: destination.bytes_per_row,
rows_per_image: destination.rows_per_image,
},
};
gfx_ok!(command_encoder => instance.command_encoder_copy_texture_to_buffer(
command_encoder,
&source,
&destination,
&copy_size
))
}
#[op2]
#[serde]
pub fn op_webgpu_command_encoder_copy_texture_to_texture(
state: &mut OpState,
#[smi] command_encoder_rid: ResourceId,
#[serde] source: GpuImageCopyTexture,
#[serde] destination: GpuImageCopyTexture,
#[serde] copy_size: wgpu_types::Extent3d,
) -> Result<WebGpuResult, ResourceError> {
let instance = state.borrow::<super::Instance>();
let command_encoder_resource = state
.resource_table
.get::<WebGpuCommandEncoder>(command_encoder_rid)?;
let command_encoder = command_encoder_resource.1;
let source_texture_resource =
state
.resource_table
.get::<super::texture::WebGpuTexture>(source.texture)?;
let destination_texture_resource =
state
.resource_table
.get::<super::texture::WebGpuTexture>(destination.texture)?;
let source = wgpu_core::command::ImageCopyTexture {
texture: source_texture_resource.id,
mip_level: source.mip_level,
origin: source.origin,
aspect: source.aspect,
};
let destination = wgpu_core::command::ImageCopyTexture {
texture: destination_texture_resource.id,
mip_level: destination.mip_level,
origin: destination.origin,
aspect: destination.aspect,
};
gfx_ok!(command_encoder => instance.command_encoder_copy_texture_to_texture(
command_encoder,
&source,
&destination,
&copy_size
))
}
#[op2]
#[serde]
pub fn op_webgpu_command_encoder_clear_buffer(
state: &mut OpState,
#[smi] command_encoder_rid: ResourceId,
#[smi] buffer_rid: ResourceId,
#[number] offset: u64,
#[number] size: u64,
) -> Result<WebGpuResult, ResourceError> {
let instance = state.borrow::<super::Instance>();
let command_encoder_resource = state
.resource_table
.get::<WebGpuCommandEncoder>(command_encoder_rid)?;
let command_encoder = command_encoder_resource.1;
let destination_resource = state
.resource_table
.get::<super::buffer::WebGpuBuffer>(buffer_rid)?;
gfx_ok!(command_encoder => instance.command_encoder_clear_buffer(
command_encoder,
destination_resource.1,
offset,
Some(size)
))
}
#[op2]
#[serde]
pub fn op_webgpu_command_encoder_push_debug_group(
state: &mut OpState,
#[smi] command_encoder_rid: ResourceId,
#[string] group_label: &str,
) -> Result<WebGpuResult, ResourceError> {
let instance = state.borrow::<super::Instance>();
let command_encoder_resource = state
.resource_table
.get::<WebGpuCommandEncoder>(command_encoder_rid)?;
let command_encoder = command_encoder_resource.1;
gfx_ok!(command_encoder => instance.command_encoder_push_debug_group(command_encoder, group_label))
}
#[op2]
#[serde]
pub fn op_webgpu_command_encoder_pop_debug_group(
state: &mut OpState,
#[smi] command_encoder_rid: ResourceId,
) -> Result<WebGpuResult, ResourceError> {
let instance = state.borrow::<super::Instance>();
let command_encoder_resource = state
.resource_table
.get::<WebGpuCommandEncoder>(command_encoder_rid)?;
let command_encoder = command_encoder_resource.1;
gfx_ok!(command_encoder => instance.command_encoder_pop_debug_group(command_encoder))
}
#[op2]
#[serde]
pub fn op_webgpu_command_encoder_insert_debug_marker(
state: &mut OpState,
#[smi] command_encoder_rid: ResourceId,
#[string] marker_label: &str,
) -> Result<WebGpuResult, ResourceError> {
let instance = state.borrow::<super::Instance>();
let command_encoder_resource = state
.resource_table
.get::<WebGpuCommandEncoder>(command_encoder_rid)?;
let command_encoder = command_encoder_resource.1;
gfx_ok!(command_encoder => instance.command_encoder_insert_debug_marker(
command_encoder,
marker_label
))
}
#[op2]
#[serde]
pub fn op_webgpu_command_encoder_write_timestamp(
state: &mut OpState,
#[smi] command_encoder_rid: ResourceId,
#[smi] query_set: ResourceId,
query_index: u32,
) -> Result<WebGpuResult, ResourceError> {
let instance = state.borrow::<super::Instance>();
let command_encoder_resource = state
.resource_table
.get::<WebGpuCommandEncoder>(command_encoder_rid)?;
let command_encoder = command_encoder_resource.1;
let query_set_resource = state
.resource_table
.get::<super::WebGpuQuerySet>(query_set)?;
gfx_ok!(command_encoder => instance.command_encoder_write_timestamp(
command_encoder,
query_set_resource.1,
query_index
))
}
#[op2]
#[serde]
pub fn op_webgpu_command_encoder_resolve_query_set(
state: &mut OpState,
#[smi] command_encoder_rid: ResourceId,
#[smi] query_set: ResourceId,
first_query: u32,
query_count: u32,
#[smi] destination: ResourceId,
#[number] destination_offset: u64,
) -> Result<WebGpuResult, ResourceError> {
let instance = state.borrow::<super::Instance>();
let command_encoder_resource = state
.resource_table
.get::<WebGpuCommandEncoder>(command_encoder_rid)?;
let command_encoder = command_encoder_resource.1;
let query_set_resource = state
.resource_table
.get::<super::WebGpuQuerySet>(query_set)?;
let destination_resource = state
.resource_table
.get::<super::buffer::WebGpuBuffer>(destination)?;
gfx_ok!(command_encoder => instance.command_encoder_resolve_query_set(
command_encoder,
query_set_resource.1,
first_query,
query_count,
destination_resource.1,
destination_offset
))
}
#[op2]
#[serde]
pub fn op_webgpu_command_encoder_finish(
state: &mut OpState,
#[smi] command_encoder_rid: ResourceId,
#[string] label: Cow<str>,
) -> Result<WebGpuResult, ResourceError> {
let command_encoder_resource = state
.resource_table
.take::<WebGpuCommandEncoder>(command_encoder_rid)?;
let command_encoder = command_encoder_resource.1;
let instance = state.borrow::<super::Instance>();
let descriptor = wgpu_types::CommandBufferDescriptor { label: Some(label) };
let (val, maybe_err) = gfx_select!(command_encoder => instance.command_encoder_finish(
command_encoder,
&descriptor
));
let rid = state.resource_table.add(WebGpuCommandBuffer(
instance.clone(),
RefCell::new(Some(val)),
));
Ok(WebGpuResult::rid_err(rid, maybe_err))
}


@ -3,209 +3,231 @@
use std::borrow::Cow;
use std::cell::RefCell;
use deno_core::error::ResourceError;
use deno_core::cppgc::Ptr;
use deno_core::op2;
use deno_core::OpState;
use deno_core::Resource;
use deno_core::ResourceId;
use deno_core::v8;
use deno_core::webidl::IntOptions;
use deno_core::webidl::Nullable;
use deno_core::webidl::WebIdlConverter;
use deno_core::webidl::WebIdlError;
use deno_core::GarbageCollected;
use deno_core::WebIDL;
use super::error::WebGpuResult;
use crate::Instance;
pub(crate) struct WebGpuComputePass(
pub(crate) RefCell<wgpu_core::command::ComputePass>,
);
impl Resource for WebGpuComputePass {
fn name(&self) -> Cow<str> {
"webGPUComputePass".into()
}
}
pub struct GPUComputePassEncoder {
pub instance: Instance,
pub error_handler: super::error::ErrorHandler,
pub compute_pass: RefCell<wgpu_core::command::ComputePass>,
pub label: String,
}
impl GarbageCollected for GPUComputePassEncoder {}
#[op2]
impl GPUComputePassEncoder {
#[getter]
#[string]
fn label(&self) -> String {
self.label.clone()
}
#[setter]
#[string]
fn label(&self, #[webidl] _label: String) {
// TODO(@crowlKats): no-op, needs wgpu to implement changing the label
}
fn set_pipeline(
&self,
#[webidl] pipeline: Ptr<crate::compute_pipeline::GPUComputePipeline>,
) {
let err = self
.instance
.compute_pass_set_pipeline(
&mut self.compute_pass.borrow_mut(),
pipeline.id,
)
.err();
self.error_handler.push_error(err);
}
fn dispatch_workgroups(
&self,
#[webidl(options(enforce_range = true))] work_group_count_x: u32,
#[webidl(default = 1, options(enforce_range = true))]
work_group_count_y: u32,
#[webidl(default = 1, options(enforce_range = true))]
work_group_count_z: u32,
) {
let err = self
.instance
.compute_pass_dispatch_workgroups(
&mut self.compute_pass.borrow_mut(),
work_group_count_x,
work_group_count_y,
work_group_count_z,
)
.err();
self.error_handler.push_error(err);
}
fn dispatch_workgroups_indirect(
&self,
#[webidl] indirect_buffer: Ptr<crate::buffer::GPUBuffer>,
#[webidl(options(enforce_range = true))] indirect_offset: u64,
) {
let err = self
.instance
.compute_pass_dispatch_workgroups_indirect(
&mut self.compute_pass.borrow_mut(),
indirect_buffer.id,
indirect_offset,
)
.err();
self.error_handler.push_error(err);
}
#[fast]
fn end(&self) {
let err = self
.instance
.compute_pass_end(&mut self.compute_pass.borrow_mut())
.err();
self.error_handler.push_error(err);
}
fn push_debug_group(&self, #[webidl] group_label: String) {
let err = self
.instance
.compute_pass_push_debug_group(
&mut self.compute_pass.borrow_mut(),
&group_label,
0, // wgpu#975
)
.err();
self.error_handler.push_error(err);
}
#[fast]
fn pop_debug_group(&self) {
let err = self
.instance
.compute_pass_pop_debug_group(&mut self.compute_pass.borrow_mut())
.err();
self.error_handler.push_error(err);
}
fn insert_debug_marker(&self, #[webidl] marker_label: String) {
let err = self
.instance
.compute_pass_insert_debug_marker(
&mut self.compute_pass.borrow_mut(),
&marker_label,
0, // wgpu#975
)
.err();
self.error_handler.push_error(err);
}
fn set_bind_group<'a>(
&self,
scope: &mut v8::HandleScope<'a>,
#[webidl(options(enforce_range = true))] index: u32,
#[webidl] bind_group: Nullable<Ptr<crate::bind_group::GPUBindGroup>>,
dynamic_offsets: v8::Local<'a, v8::Value>,
dynamic_offsets_data_start: v8::Local<'a, v8::Value>,
dynamic_offsets_data_length: v8::Local<'a, v8::Value>,
) -> Result<(), WebIdlError> {
const PREFIX: &str =
"Failed to execute 'setBindGroup' on 'GPUComputePassEncoder'";
let err = if let Ok(uint_32) = dynamic_offsets.try_cast::<v8::Uint32Array>()
{
let start = u64::convert(
scope,
dynamic_offsets_data_start,
Cow::Borrowed(PREFIX),
(|| Cow::Borrowed("Argument 4")).into(),
&IntOptions {
clamp: false,
enforce_range: true,
},
)? as usize;
let len = u32::convert(
scope,
dynamic_offsets_data_length,
Cow::Borrowed(PREFIX),
(|| Cow::Borrowed("Argument 5")).into(),
&IntOptions {
clamp: false,
enforce_range: true,
},
)? as usize;
let ab = uint_32.buffer(scope).unwrap();
let ptr = ab.data().unwrap();
let ab_len = ab.byte_length() / 4;
// SAFETY: compute_pass_set_bind_group internally calls extend_from_slice with this slice
let data =
unsafe { std::slice::from_raw_parts(ptr.as_ptr() as _, ab_len) };
let offsets = &data[start..(start + len)];
self
.instance
.compute_pass_set_bind_group(
&mut self.compute_pass.borrow_mut(),
index,
bind_group.into_option().map(|bind_group| bind_group.id),
offsets,
)
.err()
} else {
let offsets = <Option<Vec<u32>>>::convert(
scope,
dynamic_offsets,
Cow::Borrowed(PREFIX),
(|| Cow::Borrowed("Argument 3")).into(),
&IntOptions {
clamp: false,
enforce_range: true,
},
)?
.unwrap_or_default();
self
.instance
.compute_pass_set_bind_group(
&mut self.compute_pass.borrow_mut(),
index,
bind_group.into_option().map(|bind_group| bind_group.id),
&offsets,
)
.err()
};
self.error_handler.push_error(err);
Ok(())
}
}
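// Note on set_bind_group above: when JS passes a Uint32Array plus explicit
// start/length arguments, the dynamic offsets are sliced directly out of the
// array's backing ArrayBuffer; any other value is converted as an optional
// sequence of u32 (defaulting to an empty list). Both branches end in the same
// wgpu_core compute_pass_set_bind_group call.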
#[op2]
#[serde]
pub fn op_webgpu_compute_pass_set_pipeline(
state: &mut OpState,
#[smi] compute_pass_rid: ResourceId,
#[smi] pipeline: ResourceId,
) -> Result<WebGpuResult, ResourceError> {
let compute_pipeline_resource =
state
.resource_table
.get::<super::pipeline::WebGpuComputePipeline>(pipeline)?;
let compute_pass_resource = state
.resource_table
.get::<WebGpuComputePass>(compute_pass_rid)?;
wgpu_core::command::compute_commands::wgpu_compute_pass_set_pipeline(
&mut compute_pass_resource.0.borrow_mut(),
compute_pipeline_resource.1,
);
Ok(WebGpuResult::empty())
}
#[derive(WebIDL)]
#[webidl(dictionary)]
pub(crate) struct GPUComputePassDescriptor {
#[webidl(default = String::new())]
pub label: String,
pub timestamp_writes: Option<GPUComputePassTimestampWrites>,
}
#[op2]
#[serde]
pub fn op_webgpu_compute_pass_dispatch_workgroups(
state: &mut OpState,
#[smi] compute_pass_rid: ResourceId,
x: u32,
y: u32,
z: u32,
) -> Result<WebGpuResult, ResourceError> {
let compute_pass_resource = state
.resource_table
.get::<WebGpuComputePass>(compute_pass_rid)?;
wgpu_core::command::compute_commands::wgpu_compute_pass_dispatch_workgroups(
&mut compute_pass_resource.0.borrow_mut(),
x,
y,
z,
);
Ok(WebGpuResult::empty())
}
#[op2]
#[serde]
pub fn op_webgpu_compute_pass_dispatch_workgroups_indirect(
state: &mut OpState,
#[smi] compute_pass_rid: ResourceId,
#[smi] indirect_buffer: ResourceId,
#[number] indirect_offset: u64,
) -> Result<WebGpuResult, ResourceError> {
let buffer_resource = state
.resource_table
.get::<super::buffer::WebGpuBuffer>(indirect_buffer)?;
let compute_pass_resource = state
.resource_table
.get::<WebGpuComputePass>(compute_pass_rid)?;
wgpu_core::command::compute_commands::wgpu_compute_pass_dispatch_workgroups_indirect(
&mut compute_pass_resource.0.borrow_mut(),
buffer_resource.1,
indirect_offset,
);
Ok(WebGpuResult::empty())
}
#[op2]
#[serde]
pub fn op_webgpu_compute_pass_end(
state: &mut OpState,
#[smi] command_encoder_rid: ResourceId,
#[smi] compute_pass_rid: ResourceId,
) -> Result<WebGpuResult, ResourceError> {
let command_encoder_resource = state
.resource_table
.get::<super::command_encoder::WebGpuCommandEncoder>(
command_encoder_rid,
)?;
let command_encoder = command_encoder_resource.1;
let compute_pass_resource = state
.resource_table
.take::<WebGpuComputePass>(compute_pass_rid)?;
let compute_pass = &compute_pass_resource.0.borrow();
let instance = state.borrow::<super::Instance>();
gfx_ok!(command_encoder => instance.command_encoder_run_compute_pass(
command_encoder,
compute_pass
))
}
#[op2]
#[serde]
pub fn op_webgpu_compute_pass_set_bind_group(
state: &mut OpState,
#[smi] compute_pass_rid: ResourceId,
index: u32,
#[smi] bind_group: ResourceId,
#[buffer] dynamic_offsets_data: &[u32],
#[number] dynamic_offsets_data_start: usize,
#[number] dynamic_offsets_data_length: usize,
) -> Result<WebGpuResult, ResourceError> {
let bind_group_resource =
state
.resource_table
.get::<super::binding::WebGpuBindGroup>(bind_group)?;
let compute_pass_resource = state
.resource_table
.get::<WebGpuComputePass>(compute_pass_rid)?;
let start = dynamic_offsets_data_start;
let len = dynamic_offsets_data_length;
// Assert that length and start are both in bounds
assert!(start <= dynamic_offsets_data.len());
assert!(len <= dynamic_offsets_data.len() - start);
let dynamic_offsets_data: &[u32] = &dynamic_offsets_data[start..start + len];
wgpu_core::command::compute_commands::wgpu_compute_pass_set_bind_group(
&mut compute_pass_resource.0.borrow_mut(),
index,
bind_group_resource.1,
dynamic_offsets_data,
);
Ok(WebGpuResult::empty())
}
#[op2]
#[serde]
pub fn op_webgpu_compute_pass_push_debug_group(
state: &mut OpState,
#[smi] compute_pass_rid: ResourceId,
#[string] group_label: &str,
) -> Result<WebGpuResult, ResourceError> {
let compute_pass_resource = state
.resource_table
.get::<WebGpuComputePass>(compute_pass_rid)?;
wgpu_core::command::compute_commands::wgpu_compute_pass_push_debug_group(
&mut compute_pass_resource.0.borrow_mut(),
group_label,
0, // wgpu#975
);
Ok(WebGpuResult::empty())
}
#[op2]
#[serde]
pub fn op_webgpu_compute_pass_pop_debug_group(
state: &mut OpState,
#[smi] compute_pass_rid: ResourceId,
) -> Result<WebGpuResult, ResourceError> {
let compute_pass_resource = state
.resource_table
.get::<WebGpuComputePass>(compute_pass_rid)?;
wgpu_core::command::compute_commands::wgpu_compute_pass_pop_debug_group(
&mut compute_pass_resource.0.borrow_mut(),
);
Ok(WebGpuResult::empty())
}
#[op2]
#[serde]
pub fn op_webgpu_compute_pass_insert_debug_marker(
state: &mut OpState,
#[smi] compute_pass_rid: ResourceId,
#[string] marker_label: &str,
) -> Result<WebGpuResult, ResourceError> {
let compute_pass_resource = state
.resource_table
.get::<WebGpuComputePass>(compute_pass_rid)?;
wgpu_core::command::compute_commands::wgpu_compute_pass_insert_debug_marker(
&mut compute_pass_resource.0.borrow_mut(),
marker_label,
0, // wgpu#975
);
Ok(WebGpuResult::empty())
}
#[derive(WebIDL)]
#[webidl(dictionary)]
pub(crate) struct GPUComputePassTimestampWrites {
pub query_set: Ptr<crate::query_set::GPUQuerySet>,
#[options(enforce_range = true)]
pub beginning_of_pass_write_index: Option<u32>,
#[options(enforce_range = true)]
pub end_of_pass_write_index: Option<u32>,
}
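// Taken together with the command encoder changes, this file replaces the old
// op-based compute pass surface (the WebGpuComputePass resource and the
// op_webgpu_compute_pass_* ops above) with the cppgc-backed
// GPUComputePassEncoder, whose methods are called directly from JS and report
// failures through the shared error handler instead of WebGpuResult values.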


@ -0,0 +1,82 @@
// Copyright 2018-2025 the Deno authors. MIT license.
use deno_core::cppgc::Ptr;
use deno_core::op2;
use deno_core::webidl::WebIdlInterfaceConverter;
use deno_core::GarbageCollected;
use deno_core::WebIDL;
use indexmap::IndexMap;
use crate::bind_group_layout::GPUBindGroupLayout;
use crate::shader::GPUShaderModule;
use crate::webidl::GPUPipelineLayoutOrGPUAutoLayoutMode;
use crate::Instance;
pub struct GPUComputePipeline {
pub instance: Instance,
pub error_handler: super::error::ErrorHandler,
pub id: wgpu_core::id::ComputePipelineId,
pub label: String,
}
impl Drop for GPUComputePipeline {
fn drop(&mut self) {
self.instance.compute_pipeline_drop(self.id);
}
}
impl WebIdlInterfaceConverter for GPUComputePipeline {
const NAME: &'static str = "GPUComputePipeline";
}
impl GarbageCollected for GPUComputePipeline {}
#[op2]
impl GPUComputePipeline {
#[getter]
#[string]
fn label(&self) -> String {
self.label.clone()
}
#[setter]
#[string]
fn label(&self, #[webidl] _label: String) {
// TODO(@crowlKats): no-op, needs wgpu to implement changing the label
}
#[cppgc]
fn get_bind_group_layout(&self, #[webidl] index: u32) -> GPUBindGroupLayout {
let (id, err) = self
.instance
.compute_pipeline_get_bind_group_layout(self.id, index, None);
self.error_handler.push_error(err);
// TODO(wgpu): needs to support retrieving the label
GPUBindGroupLayout {
instance: self.instance.clone(),
id,
label: "".to_string(),
}
}
}
#[derive(WebIDL)]
#[webidl(dictionary)]
pub(crate) struct GPUComputePipelineDescriptor {
#[webidl(default = String::new())]
pub label: String,
pub compute: GPUProgrammableStage,
pub layout: GPUPipelineLayoutOrGPUAutoLayoutMode,
}
#[derive(WebIDL)]
#[webidl(dictionary)]
pub(crate) struct GPUProgrammableStage {
pub module: Ptr<GPUShaderModule>,
pub entry_point: Option<String>,
#[webidl(default = Default::default())]
pub constants: IndexMap<String, f64>,
}

ext/webgpu/device.rs

@ -0,0 +1,888 @@
// Copyright 2018-2025 the Deno authors. MIT license.
use std::borrow::Cow;
use std::cell::RefCell;
use std::num::NonZeroU64;
use std::rc::Rc;
use deno_core::cppgc::SameObject;
use deno_core::op2;
use deno_core::v8;
use deno_core::webidl::WebIdlInterfaceConverter;
use deno_core::GarbageCollected;
use deno_error::JsErrorBox;
use wgpu_core::binding_model::BindingResource;
use wgpu_core::pipeline::ProgrammableStageDescriptor;
use wgpu_types::BindingType;
use super::bind_group::GPUBindGroup;
use super::bind_group::GPUBindingResource;
use super::bind_group_layout::GPUBindGroupLayout;
use super::buffer::GPUBuffer;
use super::compute_pipeline::GPUComputePipeline;
use super::pipeline_layout::GPUPipelineLayout;
use super::queue::GPUQueue;
use super::sampler::GPUSampler;
use super::shader::GPUShaderModule;
use super::texture::GPUTexture;
use crate::adapter::GPUAdapterInfo;
use crate::adapter::GPUSupportedFeatures;
use crate::adapter::GPUSupportedLimits;
use crate::command_encoder::GPUCommandEncoder;
use crate::query_set::GPUQuerySet;
use crate::render_bundle::GPURenderBundleEncoder;
use crate::render_pipeline::GPURenderPipeline;
use crate::webidl::features_to_feature_names;
use crate::Instance;
pub struct GPUDevice {
pub instance: Instance,
pub id: wgpu_core::id::DeviceId,
pub adapter: wgpu_core::id::AdapterId,
pub queue: wgpu_core::id::QueueId,
pub label: String,
pub features: SameObject<GPUSupportedFeatures>,
pub limits: SameObject<GPUSupportedLimits>,
pub adapter_info: Rc<SameObject<GPUAdapterInfo>>,
pub queue_obj: SameObject<GPUQueue>,
pub error_handler: super::error::ErrorHandler,
pub lost_receiver:
tokio::sync::Mutex<Option<tokio::sync::oneshot::Receiver<()>>>,
}
impl Drop for GPUDevice {
fn drop(&mut self) {
self.instance.device_drop(self.id);
}
}
impl WebIdlInterfaceConverter for GPUDevice {
const NAME: &'static str = "GPUDevice";
}
impl GarbageCollected for GPUDevice {}
// EventTarget is extended in JS
#[op2]
impl GPUDevice {
#[getter]
#[string]
fn label(&self) -> String {
self.label.clone()
}
#[setter]
#[string]
fn label(&self, #[webidl] _label: String) {
// TODO(@crowlKats): no-op, needs wgpu to implement changing the label
}
#[getter]
#[global]
fn features(&self, scope: &mut v8::HandleScope) -> v8::Global<v8::Object> {
self.features.get(scope, |scope| {
let features = self.instance.device_features(self.id);
let features = features_to_feature_names(features);
GPUSupportedFeatures::new(scope, features)
})
}
#[getter]
#[global]
fn limits(&self, scope: &mut v8::HandleScope) -> v8::Global<v8::Object> {
self.limits.get(scope, |_| {
let limits = self.instance.device_limits(self.id);
GPUSupportedLimits(limits)
})
}
#[getter]
#[global]
fn adapter_info(
&self,
scope: &mut v8::HandleScope,
) -> v8::Global<v8::Object> {
self.adapter_info.get(scope, |_| {
let info = self.instance.adapter_get_info(self.adapter);
let limits = self.instance.adapter_limits(self.adapter);
GPUAdapterInfo {
info,
subgroup_min_size: limits.min_subgroup_size,
subgroup_max_size: limits.max_subgroup_size,
}
})
}
#[getter]
#[global]
fn queue(&self, scope: &mut v8::HandleScope) -> v8::Global<v8::Object> {
self.queue_obj.get(scope, |_| GPUQueue {
id: self.queue,
error_handler: self.error_handler.clone(),
instance: self.instance.clone(),
label: self.label.clone(),
})
}
#[fast]
fn destroy(&self) {
self.instance.device_destroy(self.id);
}
#[required(1)]
#[cppgc]
fn create_buffer(
&self,
#[webidl] descriptor: super::buffer::GPUBufferDescriptor,
) -> Result<GPUBuffer, JsErrorBox> {
let wgpu_descriptor = wgpu_core::resource::BufferDescriptor {
label: crate::transform_label(descriptor.label.clone()),
size: descriptor.size,
usage: wgpu_types::BufferUsages::from_bits(descriptor.usage)
.ok_or_else(|| JsErrorBox::type_error("usage is not valid"))?,
mapped_at_creation: descriptor.mapped_at_creation,
};
let (id, err) =
self
.instance
.device_create_buffer(self.id, &wgpu_descriptor, None);
self.error_handler.push_error(err);
Ok(GPUBuffer {
instance: self.instance.clone(),
error_handler: self.error_handler.clone(),
id,
device: self.id,
label: descriptor.label,
size: descriptor.size,
usage: descriptor.usage,
map_state: RefCell::new(if descriptor.mapped_at_creation {
"mapped"
} else {
"unmapped"
}),
map_mode: RefCell::new(if descriptor.mapped_at_creation {
Some(wgpu_core::device::HostMap::Write)
} else {
None
}),
mapped_js_buffers: RefCell::new(vec![]),
})
}
#[required(1)]
#[cppgc]
fn create_texture(
&self,
#[webidl] descriptor: super::texture::GPUTextureDescriptor,
) -> Result<GPUTexture, JsErrorBox> {
let wgpu_descriptor = wgpu_core::resource::TextureDescriptor {
label: crate::transform_label(descriptor.label.clone()),
size: descriptor.size.into(),
mip_level_count: descriptor.mip_level_count,
sample_count: descriptor.sample_count,
dimension: descriptor.dimension.clone().into(),
format: descriptor.format.clone().into(),
usage: wgpu_types::TextureUsages::from_bits(descriptor.usage)
.ok_or_else(|| JsErrorBox::type_error("usage is not valid"))?,
view_formats: descriptor
.view_formats
.into_iter()
.map(Into::into)
.collect(),
};
let (id, err) =
self
.instance
.device_create_texture(self.id, &wgpu_descriptor, None);
self.error_handler.push_error(err);
Ok(GPUTexture {
instance: self.instance.clone(),
error_handler: self.error_handler.clone(),
id,
label: descriptor.label,
size: wgpu_descriptor.size,
mip_level_count: wgpu_descriptor.mip_level_count,
sample_count: wgpu_descriptor.sample_count,
dimension: descriptor.dimension,
format: descriptor.format,
usage: descriptor.usage,
})
}
#[cppgc]
fn create_sampler(
&self,
#[webidl] descriptor: super::sampler::GPUSamplerDescriptor,
) -> Result<GPUSampler, JsErrorBox> {
let wgpu_descriptor = wgpu_core::resource::SamplerDescriptor {
label: crate::transform_label(descriptor.label.clone()),
address_modes: [
descriptor.address_mode_u.into(),
descriptor.address_mode_v.into(),
descriptor.address_mode_w.into(),
],
mag_filter: descriptor.mag_filter.into(),
min_filter: descriptor.min_filter.into(),
mipmap_filter: descriptor.mipmap_filter.into(),
lod_min_clamp: descriptor.lod_min_clamp,
lod_max_clamp: descriptor.lod_max_clamp,
compare: descriptor.compare.map(Into::into),
anisotropy_clamp: descriptor.max_anisotropy,
border_color: None,
};
let (id, err) =
self
.instance
.device_create_sampler(self.id, &wgpu_descriptor, None);
self.error_handler.push_error(err);
Ok(GPUSampler {
instance: self.instance.clone(),
id,
label: descriptor.label,
})
}
#[required(1)]
#[cppgc]
fn create_bind_group_layout(
&self,
#[webidl]
descriptor: super::bind_group_layout::GPUBindGroupLayoutDescriptor,
) -> Result<GPUBindGroupLayout, JsErrorBox> {
let mut entries = Vec::with_capacity(descriptor.entries.len());
for entry in descriptor.entries {
let n_entries = [
entry.buffer.is_some(),
entry.sampler.is_some(),
entry.texture.is_some(),
entry.storage_texture.is_some(),
]
.into_iter()
.filter(|t| *t)
.count();
if n_entries != 1 {
return Err(JsErrorBox::type_error("Only one of 'buffer', 'sampler', 'texture' and 'storageTexture' may be specified"));
}
let ty = if let Some(buffer) = entry.buffer {
BindingType::Buffer {
ty: buffer.r#type.into(),
has_dynamic_offset: buffer.has_dynamic_offset,
min_binding_size: NonZeroU64::new(buffer.min_binding_size),
}
} else if let Some(sampler) = entry.sampler {
BindingType::Sampler(sampler.r#type.into())
} else if let Some(texture) = entry.texture {
BindingType::Texture {
sample_type: texture.sample_type.into(),
view_dimension: texture.view_dimension.into(),
multisampled: texture.multisampled,
}
} else if let Some(storage_texture) = entry.storage_texture {
BindingType::StorageTexture {
access: storage_texture.access.into(),
format: storage_texture.format.into(),
view_dimension: storage_texture.view_dimension.into(),
}
} else {
unreachable!()
};
entries.push(wgpu_types::BindGroupLayoutEntry {
binding: entry.binding,
visibility: wgpu_types::ShaderStages::from_bits(entry.visibility)
.ok_or_else(|| JsErrorBox::type_error("usage is not valid"))?,
ty,
count: None, // native-only
});
}
let wgpu_descriptor = wgpu_core::binding_model::BindGroupLayoutDescriptor {
label: crate::transform_label(descriptor.label.clone()),
entries: Cow::Owned(entries),
};
let (id, err) = self.instance.device_create_bind_group_layout(
self.id,
&wgpu_descriptor,
None,
);
self.error_handler.push_error(err);
Ok(GPUBindGroupLayout {
instance: self.instance.clone(),
id,
label: descriptor.label,
})
}
#[required(1)]
#[cppgc]
fn create_pipeline_layout(
&self,
#[webidl] descriptor: super::pipeline_layout::GPUPipelineLayoutDescriptor,
) -> GPUPipelineLayout {
let bind_group_layouts = descriptor
.bind_group_layouts
.into_iter()
.map(|bind_group_layout| bind_group_layout.id)
.collect();
let wgpu_descriptor = wgpu_core::binding_model::PipelineLayoutDescriptor {
label: crate::transform_label(descriptor.label.clone()),
bind_group_layouts: Cow::Owned(bind_group_layouts),
push_constant_ranges: Default::default(),
};
let (id, err) = self.instance.device_create_pipeline_layout(
self.id,
&wgpu_descriptor,
None,
);
self.error_handler.push_error(err);
GPUPipelineLayout {
instance: self.instance.clone(),
id,
label: descriptor.label,
}
}
#[required(1)]
#[cppgc]
fn create_bind_group(
&self,
#[webidl] descriptor: super::bind_group::GPUBindGroupDescriptor,
) -> GPUBindGroup {
let entries = descriptor
.entries
.into_iter()
.map(|entry| wgpu_core::binding_model::BindGroupEntry {
binding: entry.binding,
resource: match entry.resource {
GPUBindingResource::Sampler(sampler) => {
BindingResource::Sampler(sampler.id)
}
GPUBindingResource::TextureView(texture_view) => {
BindingResource::TextureView(texture_view.id)
}
GPUBindingResource::BufferBinding(buffer_binding) => {
BindingResource::Buffer(wgpu_core::binding_model::BufferBinding {
buffer_id: buffer_binding.buffer.id,
offset: buffer_binding.offset,
size: buffer_binding.size.and_then(NonZeroU64::new),
})
}
},
})
.collect::<Vec<_>>();
let wgpu_descriptor = wgpu_core::binding_model::BindGroupDescriptor {
label: crate::transform_label(descriptor.label.clone()),
layout: descriptor.layout.id,
entries: Cow::Owned(entries),
};
let (id, err) =
self
.instance
.device_create_bind_group(self.id, &wgpu_descriptor, None);
self.error_handler.push_error(err);
GPUBindGroup {
instance: self.instance.clone(),
id,
label: descriptor.label,
}
}
#[required(1)]
#[cppgc]
fn create_shader_module(
&self,
#[webidl] descriptor: super::shader::GPUShaderModuleDescriptor,
) -> GPUShaderModule {
let wgpu_descriptor = wgpu_core::pipeline::ShaderModuleDescriptor {
label: crate::transform_label(descriptor.label.clone()),
runtime_checks: wgpu_types::ShaderRuntimeChecks::default(),
};
let (id, err) = self.instance.device_create_shader_module(
self.id,
&wgpu_descriptor,
wgpu_core::pipeline::ShaderModuleSource::Wgsl(Cow::Owned(
descriptor.code,
)),
None,
);
self.error_handler.push_error(err);
GPUShaderModule {
instance: self.instance.clone(),
id,
label: descriptor.label,
}
}
#[required(1)]
#[cppgc]
fn create_compute_pipeline(
&self,
#[webidl] descriptor: super::compute_pipeline::GPUComputePipelineDescriptor,
) -> GPUComputePipeline {
self.new_compute_pipeline(descriptor)
}
#[required(1)]
#[cppgc]
fn create_render_pipeline(
&self,
#[webidl] descriptor: super::render_pipeline::GPURenderPipelineDescriptor,
) -> Result<GPURenderPipeline, JsErrorBox> {
self.new_render_pipeline(descriptor)
}
#[async_method]
#[required(1)]
#[cppgc]
async fn create_compute_pipeline_async(
&self,
#[webidl] descriptor: super::compute_pipeline::GPUComputePipelineDescriptor,
) -> GPUComputePipeline {
self.new_compute_pipeline(descriptor)
}
#[async_method]
#[required(1)]
#[cppgc]
async fn create_render_pipeline_async(
&self,
#[webidl] descriptor: super::render_pipeline::GPURenderPipelineDescriptor,
) -> Result<GPURenderPipeline, JsErrorBox> {
self.new_render_pipeline(descriptor)
}
#[cppgc]
fn create_command_encoder(
&self,
#[webidl] descriptor: Option<
super::command_encoder::GPUCommandEncoderDescriptor,
>,
) -> GPUCommandEncoder {
let label = descriptor.map(|d| d.label).unwrap_or_default();
let wgpu_descriptor = wgpu_types::CommandEncoderDescriptor {
label: Some(Cow::Owned(label.clone())),
};
let (id, err) = self.instance.device_create_command_encoder(
self.id,
&wgpu_descriptor,
None,
);
self.error_handler.push_error(err);
GPUCommandEncoder {
instance: self.instance.clone(),
error_handler: self.error_handler.clone(),
id,
label,
}
}
#[required(1)]
#[cppgc]
fn create_render_bundle_encoder(
&self,
#[webidl]
descriptor: super::render_bundle::GPURenderBundleEncoderDescriptor,
) -> GPURenderBundleEncoder {
let wgpu_descriptor = wgpu_core::command::RenderBundleEncoderDescriptor {
label: crate::transform_label(descriptor.label.clone()),
color_formats: Cow::Owned(
descriptor
.color_formats
.into_iter()
.map(|format| format.into_option().map(Into::into))
.collect::<Vec<_>>(),
),
depth_stencil: descriptor.depth_stencil_format.map(|format| {
wgpu_types::RenderBundleDepthStencil {
format: format.into(),
depth_read_only: descriptor.depth_read_only,
stencil_read_only: descriptor.stencil_read_only,
}
}),
sample_count: descriptor.sample_count,
multiview: None,
};
let res = wgpu_core::command::RenderBundleEncoder::new(
&wgpu_descriptor,
self.id,
None,
);
let (encoder, err) = match res {
Ok(encoder) => (encoder, None),
Err(e) => (
wgpu_core::command::RenderBundleEncoder::dummy(self.id),
Some(e),
),
};
self.error_handler.push_error(err);
GPURenderBundleEncoder {
instance: self.instance.clone(),
error_handler: self.error_handler.clone(),
encoder: RefCell::new(Some(encoder)),
label: descriptor.label,
}
}
#[required(1)]
#[cppgc]
fn create_query_set(
&self,
#[webidl] descriptor: crate::query_set::GPUQuerySetDescriptor,
) -> GPUQuerySet {
let wgpu_descriptor = wgpu_core::resource::QuerySetDescriptor {
label: crate::transform_label(descriptor.label.clone()),
ty: descriptor.r#type.clone().into(),
count: descriptor.count,
};
let (id, err) =
self
.instance
.device_create_query_set(self.id, &wgpu_descriptor, None);
self.error_handler.push_error(err);
GPUQuerySet {
instance: self.instance.clone(),
id,
r#type: descriptor.r#type,
count: descriptor.count,
label: descriptor.label,
}
}
// TODO(@crowlKats): support returning same promise
#[async_method]
#[getter]
#[cppgc]
async fn lost(&self) -> GPUDeviceLostInfo {
if let Some(lost_receiver) = self.lost_receiver.lock().await.take() {
let _ = lost_receiver.await;
}
GPUDeviceLostInfo
}
#[required(1)]
fn push_error_scope(&self, #[webidl] filter: super::error::GPUErrorFilter) {
self
.error_handler
.scopes
.lock()
.unwrap()
.push((filter, vec![]));
}
#[async_method(fake)]
#[global]
fn pop_error_scope(
&self,
scope: &mut v8::HandleScope,
) -> Result<v8::Global<v8::Value>, JsErrorBox> {
if self.error_handler.is_lost.get().is_some() {
let val = v8::null(scope).cast::<v8::Value>();
return Ok(v8::Global::new(scope, val));
}
let Some((_, errors)) = self.error_handler.scopes.lock().unwrap().pop()
else {
return Err(JsErrorBox::new(
"DOMExceptionOperationError",
"There are no error scopes on the error scope stack",
));
};
let val = if let Some(err) = errors.into_iter().next() {
deno_core::error::to_v8_error(scope, &err)
} else {
v8::null(scope).into()
};
Ok(v8::Global::new(scope, val))
}
#[fast]
fn start_capture(&self) {
self.instance.device_start_capture(self.id);
}
#[fast]
fn stop_capture(&self) {
self
.instance
.device_poll(self.id, wgpu_types::Maintain::wait())
.unwrap();
self.instance.device_stop_capture(self.id);
}
}
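// createComputePipelineAsync / createRenderPipelineAsync above reuse the same
// new_compute_pipeline / new_render_pipeline helpers defined below; the async
// variants perform the identical synchronous work behind a promise, with any
// creation error still routed through the device error handler.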
impl GPUDevice {
fn new_compute_pipeline(
&self,
descriptor: super::compute_pipeline::GPUComputePipelineDescriptor,
) -> GPUComputePipeline {
let wgpu_descriptor = wgpu_core::pipeline::ComputePipelineDescriptor {
label: crate::transform_label(descriptor.label.clone()),
layout: descriptor.layout.into(),
stage: ProgrammableStageDescriptor {
module: descriptor.compute.module.id,
entry_point: descriptor.compute.entry_point.map(Into::into),
constants: Cow::Owned(
descriptor.compute.constants.into_iter().collect(),
),
zero_initialize_workgroup_memory: true,
},
cache: None,
};
let (id, err) = self.instance.device_create_compute_pipeline(
self.id,
&wgpu_descriptor,
None,
None,
);
self.error_handler.push_error(err);
GPUComputePipeline {
instance: self.instance.clone(),
error_handler: self.error_handler.clone(),
id,
label: descriptor.label.clone(),
}
}
fn new_render_pipeline(
&self,
descriptor: super::render_pipeline::GPURenderPipelineDescriptor,
) -> Result<GPURenderPipeline, JsErrorBox> {
let vertex = wgpu_core::pipeline::VertexState {
stage: ProgrammableStageDescriptor {
module: descriptor.vertex.module.id,
entry_point: descriptor.vertex.entry_point.map(Into::into),
constants: Cow::Owned(
descriptor.vertex.constants.into_iter().collect(),
),
zero_initialize_workgroup_memory: true,
},
buffers: Cow::Owned(
descriptor
.vertex
.buffers
.into_iter()
.map(|b| {
let layout = b.into_option().ok_or_else(|| {
JsErrorBox::type_error(
"Nullable GPUVertexBufferLayouts are currently not supported",
)
})?;
Ok(wgpu_core::pipeline::VertexBufferLayout {
array_stride: layout.array_stride,
step_mode: layout.step_mode.into(),
attributes: Cow::Owned(
layout
.attributes
.into_iter()
.map(|attr| wgpu_types::VertexAttribute {
format: attr.format.into(),
offset: attr.offset,
shader_location: attr.shader_location,
})
.collect(),
),
})
})
.collect::<Result<_, JsErrorBox>>()?,
),
};
let primitive = wgpu_types::PrimitiveState {
topology: descriptor.primitive.topology.into(),
strip_index_format: descriptor
.primitive
.strip_index_format
.map(Into::into),
front_face: descriptor.primitive.front_face.into(),
cull_mode: descriptor.primitive.cull_mode.into(),
unclipped_depth: descriptor.primitive.unclipped_depth,
polygon_mode: Default::default(),
conservative: false,
};
let depth_stencil = descriptor.depth_stencil.map(|depth_stencil| {
let front = wgpu_types::StencilFaceState {
compare: depth_stencil.stencil_front.compare.into(),
fail_op: depth_stencil.stencil_front.fail_op.into(),
depth_fail_op: depth_stencil.stencil_front.depth_fail_op.into(),
pass_op: depth_stencil.stencil_front.pass_op.into(),
};
let back = wgpu_types::StencilFaceState {
compare: depth_stencil.stencil_back.compare.into(),
fail_op: depth_stencil.stencil_back.fail_op.into(),
depth_fail_op: depth_stencil.stencil_back.depth_fail_op.into(),
pass_op: depth_stencil.stencil_back.pass_op.into(),
};
wgpu_types::DepthStencilState {
format: depth_stencil.format.into(),
depth_write_enabled: depth_stencil
.depth_write_enabled
.unwrap_or_default(),
depth_compare: depth_stencil
.depth_compare
.map(Into::into)
.unwrap_or(wgpu_types::CompareFunction::Never), // TODO(wgpu): should be optional here
stencil: wgpu_types::StencilState {
front,
back,
read_mask: depth_stencil.stencil_read_mask,
write_mask: depth_stencil.stencil_write_mask,
},
bias: wgpu_types::DepthBiasState {
constant: depth_stencil.depth_bias,
slope_scale: depth_stencil.depth_bias_slope_scale,
clamp: depth_stencil.depth_bias_clamp,
},
}
});
let multisample = wgpu_types::MultisampleState {
count: descriptor.multisample.count,
mask: descriptor.multisample.mask as u64,
alpha_to_coverage_enabled: descriptor
.multisample
.alpha_to_coverage_enabled,
};
let fragment = descriptor
.fragment
.map(|fragment| {
Ok::<_, JsErrorBox>(wgpu_core::pipeline::FragmentState {
stage: ProgrammableStageDescriptor {
module: fragment.module.id,
entry_point: fragment.entry_point.map(Into::into),
constants: Cow::Owned(fragment.constants.into_iter().collect()),
zero_initialize_workgroup_memory: true,
},
targets: Cow::Owned(
fragment
.targets
.into_iter()
.map(|target| {
target
.into_option()
.map(|target| {
Ok(wgpu_types::ColorTargetState {
format: target.format.into(),
blend: target.blend.map(|blend| wgpu_types::BlendState {
color: wgpu_types::BlendComponent {
src_factor: blend.color.src_factor.into(),
dst_factor: blend.color.dst_factor.into(),
operation: blend.color.operation.into(),
},
alpha: wgpu_types::BlendComponent {
src_factor: blend.alpha.src_factor.into(),
dst_factor: blend.alpha.dst_factor.into(),
operation: blend.alpha.operation.into(),
},
}),
write_mask: wgpu_types::ColorWrites::from_bits(
target.write_mask,
)
.ok_or_else(|| {
JsErrorBox::type_error("usage is not valid")
})?,
})
})
.transpose()
})
.collect::<Result<_, JsErrorBox>>()?,
),
})
})
.transpose()?;
let wgpu_descriptor = wgpu_core::pipeline::RenderPipelineDescriptor {
label: crate::transform_label(descriptor.label.clone()),
layout: descriptor.layout.into(),
vertex,
primitive,
depth_stencil,
multisample,
fragment,
cache: None,
multiview: None,
};
let (id, err) = self.instance.device_create_render_pipeline(
self.id,
&wgpu_descriptor,
None,
None,
);
self.error_handler.push_error(err);
Ok(GPURenderPipeline {
instance: self.instance.clone(),
error_handler: self.error_handler.clone(),
id,
label: descriptor.label,
})
}
}
pub struct GPUDeviceLostInfo;
impl GarbageCollected for GPUDeviceLostInfo {}
#[op2]
impl GPUDeviceLostInfo {
#[getter]
#[string]
fn reason(&self) -> &'static str {
"unknown"
}
#[getter]
#[string]
fn message(&self) -> &'static str {
"device was lost"
}
}
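// GPUDeviceLostInfo is currently a stub: reason and message are hardcoded
// constants, handed back once the lost_receiver awaited in GPUDevice::lost
// resolves.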


@ -1,10 +1,10 @@
// Copyright 2018-2025 the Deno authors. MIT license.
use std::convert::From;
use std::error::Error;
use std::fmt::Display;
use std::fmt::Formatter;
use std::sync::Mutex;
use std::sync::OnceLock;
use deno_core::ResourceId;
use serde::Serialize;
use wgpu_core::binding_model::CreateBindGroupError;
use wgpu_core::binding_model::CreateBindGroupLayoutError;
use wgpu_core::binding_model::CreatePipelineLayoutError;
@ -31,7 +31,121 @@ use wgpu_core::resource::CreateSamplerError;
use wgpu_core::resource::CreateTextureError;
use wgpu_core::resource::CreateTextureViewError;
fn fmt_err(err: &(dyn Error + 'static)) -> String {
pub type ErrorHandler = std::sync::Arc<DeviceErrorHandler>;
pub struct DeviceErrorHandler {
pub is_lost: OnceLock<()>,
lost_sender: Mutex<Option<tokio::sync::oneshot::Sender<()>>>,
uncaptured_sender_is_closed: Mutex<Option<tokio::sync::oneshot::Sender<()>>>,
pub uncaptured_sender: tokio::sync::mpsc::UnboundedSender<GPUError>,
pub scopes: Mutex<Vec<(GPUErrorFilter, Vec<GPUError>)>>,
}
impl Drop for DeviceErrorHandler {
fn drop(&mut self) {
if let Some(sender) =
self.uncaptured_sender_is_closed.lock().unwrap().take()
{
let _ = sender.send(());
}
}
}
impl DeviceErrorHandler {
pub fn new(
lost_sender: tokio::sync::oneshot::Sender<()>,
uncaptured_sender: tokio::sync::mpsc::UnboundedSender<GPUError>,
uncaptured_sender_is_closed: tokio::sync::oneshot::Sender<()>,
) -> Self {
Self {
is_lost: Default::default(),
lost_sender: Mutex::new(Some(lost_sender)),
uncaptured_sender,
uncaptured_sender_is_closed: Mutex::new(Some(
uncaptured_sender_is_closed,
)),
scopes: Mutex::new(vec![]),
}
}
pub fn push_error<E: Into<GPUError>>(&self, err: Option<E>) {
let Some(err) = err else {
return;
};
if self.is_lost.get().is_some() {
return;
}
let err = err.into();
if matches!(err, GPUError::Lost) {
let _ = self.is_lost.set(());
if let Some(sender) = self.lost_sender.lock().unwrap().take() {
let _ = sender.send(());
}
return;
}
let error_filter = match err {
GPUError::Lost => unreachable!(),
GPUError::Validation(_) => GPUErrorFilter::Validation,
GPUError::OutOfMemory => GPUErrorFilter::OutOfMemory,
GPUError::Internal => GPUErrorFilter::Internal,
};
let mut scopes = self.scopes.lock().unwrap();
let scope = scopes
.iter_mut()
.rfind(|(filter, _)| filter == &error_filter);
if let Some(scope) = scope {
scope.1.push(err);
} else {
self.uncaptured_sender.send(err).unwrap();
}
}
}
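// push_error routing, in short: a Lost error latches is_lost and fires the
// one-shot lost_sender; any other error lands in the innermost error scope
// whose filter matches, and only falls through to uncaptured_sender (picked up
// by the JS side's uncaptured-error handling) when no scope matches.
//
// A minimal illustration (hypothetical usage, not part of this diff), assuming
// a handler built with DeviceErrorHandler::new:
//
//   handler.scopes.lock().unwrap().push((GPUErrorFilter::Validation, vec![]));
//   handler.push_error(Some(GPUError::Validation("bad usage".into())));
//   // the error now sits in that scope and is returned by popErrorScope()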
#[derive(deno_core::WebIDL, Eq, PartialEq)]
#[webidl(enum)]
pub enum GPUErrorFilter {
Validation,
OutOfMemory,
Internal,
}
#[derive(Debug, deno_error::JsError)]
pub enum GPUError {
// TODO(@crowlKats): consider adding an unreachable value that uses unreachable!()
#[class("UNREACHABLE")]
Lost,
#[class("GPUValidationError")]
Validation(String),
#[class("GPUOutOfMemoryError")]
OutOfMemory,
#[allow(dead_code)]
#[class("GPUInternalError")]
Internal,
}
impl Display for GPUError {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
match self {
GPUError::Lost => Ok(()),
GPUError::Validation(s) => f.write_str(s),
GPUError::OutOfMemory => f.write_str("not enough memory left"),
GPUError::Internal => Ok(()),
}
}
}
impl std::error::Error for GPUError {}
fn fmt_err(err: &(dyn std::error::Error + 'static)) -> String {
let mut output = err.to_string();
let mut e = err.source();
@ -40,248 +154,203 @@ fn fmt_err(err: &(dyn Error + 'static)) -> String {
e = source.source();
}
if output.is_empty() {
output.push_str("validation error");
}
output
}
#[derive(Serialize)]
pub struct WebGpuResult {
pub rid: Option<ResourceId>,
pub err: Option<WebGpuError>,
}
impl WebGpuResult {
pub fn rid(rid: ResourceId) -> Self {
Self {
rid: Some(rid),
err: None,
}
}
pub fn rid_err<T: Into<WebGpuError>>(
rid: ResourceId,
err: Option<T>,
) -> Self {
Self {
rid: Some(rid),
err: err.map(Into::into),
}
}
pub fn maybe_err<T: Into<WebGpuError>>(err: Option<T>) -> Self {
Self {
rid: None,
err: err.map(Into::into),
}
}
pub fn empty() -> Self {
Self {
rid: None,
err: None,
}
}
}
#[derive(Serialize)]
#[serde(tag = "type", content = "value")]
#[serde(rename_all = "kebab-case")]
pub enum WebGpuError {
Lost,
OutOfMemory,
Validation(String),
Internal,
}
impl From<CreateBufferError> for WebGpuError {
impl From<CreateBufferError> for GPUError {
fn from(err: CreateBufferError) -> Self {
match err {
CreateBufferError::Device(err) => err.into(),
CreateBufferError::AccessError(err) => err.into(),
err => WebGpuError::Validation(fmt_err(&err)),
err => GPUError::Validation(fmt_err(&err)),
}
}
}
impl From<DeviceError> for WebGpuError {
impl From<DeviceError> for GPUError {
fn from(err: DeviceError) -> Self {
match err {
DeviceError::Lost => WebGpuError::Lost,
DeviceError::OutOfMemory => WebGpuError::OutOfMemory,
_ => WebGpuError::Validation(fmt_err(&err)),
DeviceError::Lost => GPUError::Lost,
DeviceError::OutOfMemory => GPUError::OutOfMemory,
_ => GPUError::Validation(fmt_err(&err)),
}
}
}
impl From<BufferAccessError> for WebGpuError {
impl From<BufferAccessError> for GPUError {
fn from(err: BufferAccessError) -> Self {
match err {
BufferAccessError::Device(err) => err.into(),
err => WebGpuError::Validation(fmt_err(&err)),
err => GPUError::Validation(fmt_err(&err)),
}
}
}
impl From<CreateBindGroupLayoutError> for WebGpuError {
impl From<CreateBindGroupLayoutError> for GPUError {
fn from(err: CreateBindGroupLayoutError) -> Self {
match err {
CreateBindGroupLayoutError::Device(err) => err.into(),
err => WebGpuError::Validation(fmt_err(&err)),
err => GPUError::Validation(fmt_err(&err)),
}
}
}
impl From<CreatePipelineLayoutError> for WebGpuError {
impl From<CreatePipelineLayoutError> for GPUError {
fn from(err: CreatePipelineLayoutError) -> Self {
match err {
CreatePipelineLayoutError::Device(err) => err.into(),
err => WebGpuError::Validation(fmt_err(&err)),
err => GPUError::Validation(fmt_err(&err)),
}
}
}
impl From<CreateBindGroupError> for WebGpuError {
impl From<CreateBindGroupError> for GPUError {
fn from(err: CreateBindGroupError) -> Self {
match err {
CreateBindGroupError::Device(err) => err.into(),
err => WebGpuError::Validation(fmt_err(&err)),
err => GPUError::Validation(fmt_err(&err)),
}
}
}
impl From<RenderBundleError> for WebGpuError {
impl From<RenderBundleError> for GPUError {
fn from(err: RenderBundleError) -> Self {
WebGpuError::Validation(fmt_err(&err))
GPUError::Validation(fmt_err(&err))
}
}
impl From<CreateRenderBundleError> for WebGpuError {
impl From<CreateRenderBundleError> for GPUError {
fn from(err: CreateRenderBundleError) -> Self {
WebGpuError::Validation(fmt_err(&err))
GPUError::Validation(fmt_err(&err))
}
}
impl From<CopyError> for WebGpuError {
impl From<CopyError> for GPUError {
fn from(err: CopyError) -> Self {
WebGpuError::Validation(fmt_err(&err))
GPUError::Validation(fmt_err(&err))
}
}
impl From<CommandEncoderError> for WebGpuError {
impl From<CommandEncoderError> for GPUError {
fn from(err: CommandEncoderError) -> Self {
WebGpuError::Validation(fmt_err(&err))
GPUError::Validation(fmt_err(&err))
}
}
impl From<QueryError> for WebGpuError {
impl From<QueryError> for GPUError {
fn from(err: QueryError) -> Self {
WebGpuError::Validation(fmt_err(&err))
GPUError::Validation(fmt_err(&err))
}
}
impl From<ComputePassError> for WebGpuError {
impl From<ComputePassError> for GPUError {
fn from(err: ComputePassError) -> Self {
WebGpuError::Validation(fmt_err(&err))
GPUError::Validation(fmt_err(&err))
}
}
impl From<CreateComputePipelineError> for WebGpuError {
impl From<CreateComputePipelineError> for GPUError {
fn from(err: CreateComputePipelineError) -> Self {
match err {
CreateComputePipelineError::Device(err) => err.into(),
err => WebGpuError::Validation(fmt_err(&err)),
err => GPUError::Validation(fmt_err(&err)),
}
}
}
impl From<GetBindGroupLayoutError> for WebGpuError {
impl From<GetBindGroupLayoutError> for GPUError {
fn from(err: GetBindGroupLayoutError) -> Self {
WebGpuError::Validation(fmt_err(&err))
GPUError::Validation(fmt_err(&err))
}
}
impl From<CreateRenderPipelineError> for WebGpuError {
impl From<CreateRenderPipelineError> for GPUError {
fn from(err: CreateRenderPipelineError) -> Self {
match err {
CreateRenderPipelineError::Device(err) => err.into(),
err => WebGpuError::Validation(fmt_err(&err)),
err => GPUError::Validation(fmt_err(&err)),
}
}
}
impl From<RenderPassError> for WebGpuError {
impl From<RenderPassError> for GPUError {
fn from(err: RenderPassError) -> Self {
WebGpuError::Validation(fmt_err(&err))
GPUError::Validation(fmt_err(&err))
}
}
impl From<CreateSamplerError> for WebGpuError {
impl From<CreateSamplerError> for GPUError {
fn from(err: CreateSamplerError) -> Self {
match err {
CreateSamplerError::Device(err) => err.into(),
err => WebGpuError::Validation(fmt_err(&err)),
err => GPUError::Validation(fmt_err(&err)),
}
}
}
impl From<CreateShaderModuleError> for WebGpuError {
impl From<CreateShaderModuleError> for GPUError {
fn from(err: CreateShaderModuleError) -> Self {
match err {
CreateShaderModuleError::Device(err) => err.into(),
err => WebGpuError::Validation(fmt_err(&err)),
err => GPUError::Validation(fmt_err(&err)),
}
}
}
impl From<CreateTextureError> for WebGpuError {
impl From<CreateTextureError> for GPUError {
fn from(err: CreateTextureError) -> Self {
match err {
CreateTextureError::Device(err) => err.into(),
err => WebGpuError::Validation(fmt_err(&err)),
err => GPUError::Validation(fmt_err(&err)),
}
}
}
impl From<CreateTextureViewError> for WebGpuError {
impl From<CreateTextureViewError> for GPUError {
fn from(err: CreateTextureViewError) -> Self {
WebGpuError::Validation(fmt_err(&err))
GPUError::Validation(fmt_err(&err))
}
}
impl From<CreateQuerySetError> for WebGpuError {
impl From<CreateQuerySetError> for GPUError {
fn from(err: CreateQuerySetError) -> Self {
match err {
CreateQuerySetError::Device(err) => err.into(),
err => WebGpuError::Validation(fmt_err(&err)),
err => GPUError::Validation(fmt_err(&err)),
}
}
}
impl From<QueueSubmitError> for WebGpuError {
impl From<QueueSubmitError> for GPUError {
fn from(err: QueueSubmitError) -> Self {
match err {
QueueSubmitError::Queue(err) => err.into(),
err => WebGpuError::Validation(fmt_err(&err)),
err => GPUError::Validation(fmt_err(&err)),
}
}
}
impl From<QueueWriteError> for WebGpuError {
impl From<QueueWriteError> for GPUError {
fn from(err: QueueWriteError) -> Self {
match err {
QueueWriteError::Queue(err) => err.into(),
err => WebGpuError::Validation(fmt_err(&err)),
err => GPUError::Validation(fmt_err(&err)),
}
}
}
impl From<ClearError> for WebGpuError {
impl From<ClearError> for GPUError {
fn from(err: ClearError) -> Self {
WebGpuError::Validation(fmt_err(&err))
GPUError::Validation(fmt_err(&err))
}
}
impl From<ConfigureSurfaceError> for WebGpuError {
impl From<ConfigureSurfaceError> for GPUError {
fn from(err: ConfigureSurfaceError) -> Self {
WebGpuError::Validation(fmt_err(&err))
GPUError::Validation(fmt_err(&err))
}
}
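
The impls above funnel wgpu_core's many operation-specific error types into the single GPUError type, so call sites can rely on .into() / ? and everything surfaces as a validation error unless the underlying cause is device loss or out-of-memory. A minimal, self-contained sketch of the same pattern; the type names below are invented stand-ins, not the real wgpu_core types:

use std::fmt;

#[derive(Debug)]
enum GpuError {
  Lost,
  OutOfMemory,
  Validation(String),
}

#[derive(Debug)]
struct CreateSamplerError(String); // stand-in for a wgpu_core error type

impl fmt::Display for CreateSamplerError {
  fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
    write!(f, "invalid sampler: {}", self.0)
  }
}

// Same shape as the impls above: anything not device-fatal becomes Validation.
impl From<CreateSamplerError> for GpuError {
  fn from(err: CreateSamplerError) -> Self {
    GpuError::Validation(err.to_string())
  }
}

fn validate(valid: bool) -> Result<(), CreateSamplerError> {
  if valid {
    Ok(())
  } else {
    Err(CreateSamplerError("lodMinClamp > lodMaxClamp".into()))
  }
}

fn create_sampler(valid: bool) -> Result<(), GpuError> {
  validate(valid)?; // `?` converts through the From impl
  Ok(())
}

fn main() {
  println!("{:?}", create_sampler(false));
}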


@@ -2,20 +2,41 @@
#![cfg(not(target_arch = "wasm32"))]
#![warn(unsafe_op_in_unsafe_fn)]
use std::borrow::Cow;
use std::cell::RefCell;
use std::collections::HashSet;
use std::rc::Rc;
use std::sync::Arc;
use deno_core::cppgc::SameObject;
use deno_core::op2;
use deno_core::v8;
use deno_core::GarbageCollected;
use deno_core::OpState;
use deno_core::Resource;
use deno_core::ResourceId;
use error::WebGpuResult;
use serde::Deserialize;
use serde::Serialize;
pub use wgpu_core;
pub use wgpu_types;
use wgpu_types::PowerPreference;
mod adapter;
mod bind_group;
mod bind_group_layout;
mod buffer;
mod byow;
mod command_buffer;
mod command_encoder;
mod compute_pass;
mod compute_pipeline;
mod device;
mod error;
mod pipeline_layout;
mod query_set;
mod queue;
mod render_bundle;
mod render_pass;
mod render_pipeline;
mod sampler;
mod shader;
mod surface;
mod texture;
mod webidl;
pub const UNSTABLE_FEATURE_NAME: &str = "webgpu";
@@ -38,793 +59,145 @@ pub fn print_linker_flags(name: &str) {
}
}
#[macro_use]
mod macros {
macro_rules! gfx_select {
($id:expr => $p0:ident.$p1:tt.$method:ident $params:tt) => {
gfx_select!($id => {$p0.$p1}, $method $params)
};
($id:expr => $p0:ident.$method:ident $params:tt) => {
gfx_select!($id => {$p0}, $method $params)
};
($id:expr => {$($c:tt)*}, $method:ident $params:tt) => {
match $id.backend() {
#[cfg(any(
all(not(target_arch = "wasm32"), not(target_os = "ios"), not(target_os = "macos")),
feature = "vulkan-portability"
))]
wgpu_types::Backend::Vulkan => $($c)*.$method::<wgpu_core::api::Vulkan> $params,
#[cfg(all(not(target_arch = "wasm32"), any(target_os = "ios", target_os = "macos")))]
wgpu_types::Backend::Metal => $($c)*.$method::<wgpu_core::api::Metal> $params,
#[cfg(all(not(target_arch = "wasm32"), windows))]
wgpu_types::Backend::Dx12 => $($c)*.$method::<wgpu_core::api::Dx12> $params,
#[cfg(any(
all(not(target_os = "macos"), not(target_os = "ios")),
feature = "angle",
target_arch = "wasm32"
))]
wgpu_types::Backend::Gl => $($c)*.$method::<wgpu_core::api::Gles> $params,
other => panic!("Unexpected backend {:?}", other),
}
};
}
macro_rules! gfx_put {
($id:expr => $global:ident.$method:ident( $($param:expr),* ) => $state:expr, $rc:expr) => {{
let (val, maybe_err) = gfx_select!($id => $global.$method($($param),*));
let rid = $state.resource_table.add($rc($global.clone(), val));
Ok(WebGpuResult::rid_err(rid, maybe_err))
}};
}
macro_rules! gfx_ok {
($id:expr => $global:ident.$method:ident( $($param:expr),* )) => {{
let maybe_err = gfx_select!($id => $global.$method($($param),*)).err();
Ok(WebGpuResult::maybe_err(maybe_err))
}};
}
}
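
The gfx_select! macro above picks a concrete backend API type for a generic wgpu_core call based on a runtime wgpu_types::Backend value. A toy, self-contained sketch of that dispatch pattern; every type and name below is invented for illustration:

trait Api {
  const NAME: &'static str;
}
struct VulkanApi;
impl Api for VulkanApi {
  const NAME: &'static str = "vulkan";
}
struct MetalApi;
impl Api for MetalApi {
  const NAME: &'static str = "metal";
}

#[derive(Clone, Copy)]
enum Backend {
  Vulkan,
  Metal,
}

struct Global;
impl Global {
  fn describe<A: Api>(&self, id: u32) -> String {
    format!("{} resource #{}", A::NAME, id)
  }
}

macro_rules! gfx_select {
  ($backend:expr => $global:ident.$method:ident($($param:expr),*)) => {
    match $backend {
      Backend::Vulkan => $global.$method::<VulkanApi>($($param),*),
      Backend::Metal => $global.$method::<MetalApi>($($param),*),
    }
  };
}

fn main() {
  let global = Global;
  println!("{}", gfx_select!(Backend::Metal => global.describe(7)));
}
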
pub mod binding;
pub mod buffer;
pub mod bundle;
pub mod byow;
pub mod command_encoder;
pub mod compute_pass;
pub mod error;
pub mod pipeline;
pub mod queue;
pub mod render_pass;
pub mod sampler;
pub mod shader;
pub mod surface;
pub mod texture;
#[derive(Debug, thiserror::Error, deno_error::JsError)]
pub enum InitError {
#[class(inherit)]
#[error(transparent)]
Resource(
#[from]
#[inherit]
deno_core::error::ResourceError,
),
#[class(generic)]
#[error(transparent)]
InvalidAdapter(wgpu_core::instance::InvalidAdapter),
#[class("DOMExceptionOperationError")]
#[error(transparent)]
RequestDevice(wgpu_core::instance::RequestDeviceError),
#[class(generic)]
#[error(transparent)]
InvalidDevice(wgpu_core::device::InvalidDevice),
}
pub type Instance = std::sync::Arc<wgpu_core::global::Global>;
struct WebGpuAdapter(Instance, wgpu_core::id::AdapterId);
impl Resource for WebGpuAdapter {
fn name(&self) -> Cow<str> {
"webGPUAdapter".into()
}
fn close(self: Rc<Self>) {
gfx_select!(self.1 => self.0.adapter_drop(self.1));
}
}
struct WebGpuDevice(Instance, wgpu_core::id::DeviceId);
impl Resource for WebGpuDevice {
fn name(&self) -> Cow<str> {
"webGPUDevice".into()
}
fn close(self: Rc<Self>) {
gfx_select!(self.1 => self.0.device_drop(self.1));
}
}
struct WebGpuQuerySet(Instance, wgpu_core::id::QuerySetId);
impl Resource for WebGpuQuerySet {
fn name(&self) -> Cow<str> {
"webGPUQuerySet".into()
}
fn close(self: Rc<Self>) {
gfx_select!(self.1 => self.0.query_set_drop(self.1));
}
}
pub type Instance = Arc<wgpu_core::global::Global>;
deno_core::extension!(
deno_webgpu,
deps = [deno_webidl, deno_web],
ops = [
// Request device/adapter
op_webgpu_request_adapter,
op_webgpu_request_device,
op_webgpu_request_adapter_info,
// Query Set
op_webgpu_create_query_set,
// buffer
buffer::op_webgpu_create_buffer,
buffer::op_webgpu_buffer_get_mapped_range,
buffer::op_webgpu_buffer_unmap,
// buffer async
buffer::op_webgpu_buffer_get_map_async,
// remaining sync ops
// texture
texture::op_webgpu_create_texture,
texture::op_webgpu_create_texture_view,
// sampler
sampler::op_webgpu_create_sampler,
// binding
binding::op_webgpu_create_bind_group_layout,
binding::op_webgpu_create_pipeline_layout,
binding::op_webgpu_create_bind_group,
// pipeline
pipeline::op_webgpu_create_compute_pipeline,
pipeline::op_webgpu_compute_pipeline_get_bind_group_layout,
pipeline::op_webgpu_create_render_pipeline,
pipeline::op_webgpu_render_pipeline_get_bind_group_layout,
// command_encoder
command_encoder::op_webgpu_create_command_encoder,
command_encoder::op_webgpu_command_encoder_begin_render_pass,
command_encoder::op_webgpu_command_encoder_begin_compute_pass,
command_encoder::op_webgpu_command_encoder_copy_buffer_to_buffer,
command_encoder::op_webgpu_command_encoder_copy_buffer_to_texture,
command_encoder::op_webgpu_command_encoder_copy_texture_to_buffer,
command_encoder::op_webgpu_command_encoder_copy_texture_to_texture,
command_encoder::op_webgpu_command_encoder_clear_buffer,
command_encoder::op_webgpu_command_encoder_push_debug_group,
command_encoder::op_webgpu_command_encoder_pop_debug_group,
command_encoder::op_webgpu_command_encoder_insert_debug_marker,
command_encoder::op_webgpu_command_encoder_write_timestamp,
command_encoder::op_webgpu_command_encoder_resolve_query_set,
command_encoder::op_webgpu_command_encoder_finish,
render_pass::op_webgpu_render_pass_set_viewport,
render_pass::op_webgpu_render_pass_set_scissor_rect,
render_pass::op_webgpu_render_pass_set_blend_constant,
render_pass::op_webgpu_render_pass_set_stencil_reference,
render_pass::op_webgpu_render_pass_begin_occlusion_query,
render_pass::op_webgpu_render_pass_end_occlusion_query,
render_pass::op_webgpu_render_pass_execute_bundles,
render_pass::op_webgpu_render_pass_end,
render_pass::op_webgpu_render_pass_set_bind_group,
render_pass::op_webgpu_render_pass_push_debug_group,
render_pass::op_webgpu_render_pass_pop_debug_group,
render_pass::op_webgpu_render_pass_insert_debug_marker,
render_pass::op_webgpu_render_pass_set_pipeline,
render_pass::op_webgpu_render_pass_set_index_buffer,
render_pass::op_webgpu_render_pass_set_vertex_buffer,
render_pass::op_webgpu_render_pass_draw,
render_pass::op_webgpu_render_pass_draw_indexed,
render_pass::op_webgpu_render_pass_draw_indirect,
render_pass::op_webgpu_render_pass_draw_indexed_indirect,
compute_pass::op_webgpu_compute_pass_set_pipeline,
compute_pass::op_webgpu_compute_pass_dispatch_workgroups,
compute_pass::op_webgpu_compute_pass_dispatch_workgroups_indirect,
compute_pass::op_webgpu_compute_pass_end,
compute_pass::op_webgpu_compute_pass_set_bind_group,
compute_pass::op_webgpu_compute_pass_push_debug_group,
compute_pass::op_webgpu_compute_pass_pop_debug_group,
compute_pass::op_webgpu_compute_pass_insert_debug_marker,
// bundle
bundle::op_webgpu_create_render_bundle_encoder,
bundle::op_webgpu_render_bundle_encoder_finish,
bundle::op_webgpu_render_bundle_encoder_set_bind_group,
bundle::op_webgpu_render_bundle_encoder_push_debug_group,
bundle::op_webgpu_render_bundle_encoder_pop_debug_group,
bundle::op_webgpu_render_bundle_encoder_insert_debug_marker,
bundle::op_webgpu_render_bundle_encoder_set_pipeline,
bundle::op_webgpu_render_bundle_encoder_set_index_buffer,
bundle::op_webgpu_render_bundle_encoder_set_vertex_buffer,
bundle::op_webgpu_render_bundle_encoder_draw,
bundle::op_webgpu_render_bundle_encoder_draw_indexed,
bundle::op_webgpu_render_bundle_encoder_draw_indirect,
// queue
queue::op_webgpu_queue_submit,
queue::op_webgpu_write_buffer,
queue::op_webgpu_write_texture,
// shader
shader::op_webgpu_create_shader_module,
// surface
surface::op_webgpu_surface_configure,
surface::op_webgpu_surface_get_current_texture,
surface::op_webgpu_surface_present,
// byow
byow::op_webgpu_surface_create,
ops = [op_create_gpu],
objects = [
GPU,
adapter::GPUAdapter,
adapter::GPUAdapterInfo,
bind_group::GPUBindGroup,
bind_group_layout::GPUBindGroupLayout,
buffer::GPUBuffer,
command_buffer::GPUCommandBuffer,
command_encoder::GPUCommandEncoder,
compute_pass::GPUComputePassEncoder,
compute_pipeline::GPUComputePipeline,
device::GPUDevice,
device::GPUDeviceLostInfo,
pipeline_layout::GPUPipelineLayout,
query_set::GPUQuerySet,
queue::GPUQueue,
render_bundle::GPURenderBundle,
render_bundle::GPURenderBundleEncoder,
render_pass::GPURenderPassEncoder,
render_pipeline::GPURenderPipeline,
sampler::GPUSampler,
shader::GPUShaderModule,
adapter::GPUSupportedFeatures,
adapter::GPUSupportedLimits,
texture::GPUTexture,
texture::GPUTextureView,
byow::UnsafeWindowSurface,
surface::GPUCanvasContext,
],
esm = ["00_init.js", "02_surface.js"],
lazy_loaded_esm = ["01_webgpu.js"],
);
fn deserialize_features(features: &wgpu_types::Features) -> Vec<&'static str> {
let mut return_features: Vec<&'static str> = vec![];
// api
if features.contains(wgpu_types::Features::DEPTH_CLIP_CONTROL) {
return_features.push("depth-clip-control");
}
if features.contains(wgpu_types::Features::TIMESTAMP_QUERY) {
return_features.push("timestamp-query");
}
if features.contains(wgpu_types::Features::INDIRECT_FIRST_INSTANCE) {
return_features.push("indirect-first-instance");
}
// shader
if features.contains(wgpu_types::Features::SHADER_F16) {
return_features.push("shader-f16");
}
// texture formats
if features.contains(wgpu_types::Features::DEPTH32FLOAT_STENCIL8) {
return_features.push("depth32float-stencil8");
}
if features.contains(wgpu_types::Features::TEXTURE_COMPRESSION_BC) {
return_features.push("texture-compression-bc");
}
if features.contains(wgpu_types::Features::TEXTURE_COMPRESSION_ETC2) {
return_features.push("texture-compression-etc2");
}
if features.contains(wgpu_types::Features::TEXTURE_COMPRESSION_ASTC) {
return_features.push("texture-compression-astc");
}
if features.contains(wgpu_types::Features::RG11B10UFLOAT_RENDERABLE) {
return_features.push("rg11b10ufloat-renderable");
}
if features.contains(wgpu_types::Features::BGRA8UNORM_STORAGE) {
return_features.push("bgra8unorm-storage");
}
if features.contains(wgpu_types::Features::FLOAT32_FILTERABLE) {
return_features.push("float32-filterable");
}
// extended from spec
// texture formats
if features.contains(wgpu_types::Features::TEXTURE_FORMAT_16BIT_NORM) {
return_features.push("texture-format-16-bit-norm");
}
if features.contains(wgpu_types::Features::TEXTURE_COMPRESSION_ASTC_HDR) {
return_features.push("texture-compression-astc-hdr");
}
if features
.contains(wgpu_types::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES)
{
return_features.push("texture-adapter-specific-format-features");
}
// api
if features.contains(wgpu_types::Features::PIPELINE_STATISTICS_QUERY) {
return_features.push("pipeline-statistics-query");
}
if features.contains(wgpu_types::Features::TIMESTAMP_QUERY_INSIDE_PASSES) {
return_features.push("timestamp-query-inside-passes");
}
if features.contains(wgpu_types::Features::MAPPABLE_PRIMARY_BUFFERS) {
return_features.push("mappable-primary-buffers");
}
if features.contains(wgpu_types::Features::TEXTURE_BINDING_ARRAY) {
return_features.push("texture-binding-array");
}
if features.contains(wgpu_types::Features::BUFFER_BINDING_ARRAY) {
return_features.push("buffer-binding-array");
}
if features.contains(wgpu_types::Features::STORAGE_RESOURCE_BINDING_ARRAY) {
return_features.push("storage-resource-binding-array");
}
if features.contains(
wgpu_types::Features::SAMPLED_TEXTURE_AND_STORAGE_BUFFER_ARRAY_NON_UNIFORM_INDEXING,
) {
return_features.push("sampled-texture-and-storage-buffer-array-non-uniform-indexing");
}
if features.contains(
wgpu_types::Features::UNIFORM_BUFFER_AND_STORAGE_TEXTURE_ARRAY_NON_UNIFORM_INDEXING,
) {
return_features.push("uniform-buffer-and-storage-texture-array-non-uniform-indexing");
}
if features.contains(wgpu_types::Features::PARTIALLY_BOUND_BINDING_ARRAY) {
return_features.push("partially-bound-binding-array");
}
if features.contains(wgpu_types::Features::MULTI_DRAW_INDIRECT) {
return_features.push("multi-draw-indirect");
}
if features.contains(wgpu_types::Features::MULTI_DRAW_INDIRECT_COUNT) {
return_features.push("multi-draw-indirect-count");
}
if features.contains(wgpu_types::Features::PUSH_CONSTANTS) {
return_features.push("push-constants");
}
if features.contains(wgpu_types::Features::ADDRESS_MODE_CLAMP_TO_ZERO) {
return_features.push("address-mode-clamp-to-zero");
}
if features.contains(wgpu_types::Features::ADDRESS_MODE_CLAMP_TO_BORDER) {
return_features.push("address-mode-clamp-to-border");
}
if features.contains(wgpu_types::Features::POLYGON_MODE_LINE) {
return_features.push("polygon-mode-line");
}
if features.contains(wgpu_types::Features::POLYGON_MODE_POINT) {
return_features.push("polygon-mode-point");
}
if features.contains(wgpu_types::Features::CONSERVATIVE_RASTERIZATION) {
return_features.push("conservative-rasterization");
}
if features.contains(wgpu_types::Features::VERTEX_WRITABLE_STORAGE) {
return_features.push("vertex-writable-storage");
}
if features.contains(wgpu_types::Features::CLEAR_TEXTURE) {
return_features.push("clear-texture");
}
if features.contains(wgpu_types::Features::SPIRV_SHADER_PASSTHROUGH) {
return_features.push("spirv-shader-passthrough");
}
if features.contains(wgpu_types::Features::MULTIVIEW) {
return_features.push("multiview");
}
if features.contains(wgpu_types::Features::VERTEX_ATTRIBUTE_64BIT) {
return_features.push("vertex-attribute-64-bit");
}
// shader
if features.contains(wgpu_types::Features::SHADER_F64) {
return_features.push("shader-f64");
}
if features.contains(wgpu_types::Features::SHADER_I16) {
return_features.push("shader-i16");
}
if features.contains(wgpu_types::Features::SHADER_PRIMITIVE_INDEX) {
return_features.push("shader-primitive-index");
}
if features.contains(wgpu_types::Features::SHADER_EARLY_DEPTH_TEST) {
return_features.push("shader-early-depth-test");
}
if features.contains(wgpu_types::Features::SHADER_UNUSED_VERTEX_OUTPUT) {
return_features.push("shader-unused-vertex-output");
}
return_features
}
#[derive(Serialize)]
#[serde(untagged)]
pub enum GpuAdapterResOrErr {
Error { err: String },
Features(GpuAdapterRes),
}
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
pub struct GpuAdapterRes {
rid: ResourceId,
limits: wgpu_types::Limits,
features: Vec<&'static str>,
is_fallback: bool,
}
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
pub struct GpuDeviceRes {
rid: ResourceId,
queue_rid: ResourceId,
limits: wgpu_types::Limits,
features: Vec<&'static str>,
}
#[op2]
#[serde]
pub fn op_webgpu_request_adapter(
state: Rc<RefCell<OpState>>,
#[serde] power_preference: Option<wgpu_types::PowerPreference>,
force_fallback_adapter: bool,
) -> Result<GpuAdapterResOrErr, InitError> {
let mut state = state.borrow_mut();
let backends = std::env::var("DENO_WEBGPU_BACKEND").map_or_else(
|_| wgpu_types::Backends::all(),
|s| wgpu_core::instance::parse_backends_from_comma_list(&s),
);
let instance = if let Some(instance) = state.try_borrow::<Instance>() {
instance
} else {
state.put(std::sync::Arc::new(wgpu_core::global::Global::new(
"webgpu",
wgpu_types::InstanceDescriptor {
backends,
flags: wgpu_types::InstanceFlags::from_build_config(),
dx12_shader_compiler: wgpu_types::Dx12Compiler::Fxc,
gles_minor_version: wgpu_types::Gles3MinorVersion::default(),
},
)));
state.borrow::<Instance>()
};
let descriptor = wgpu_core::instance::RequestAdapterOptions {
power_preference: power_preference.unwrap_or_default(),
force_fallback_adapter,
compatible_surface: None, // windowless
};
let res = instance.request_adapter(
&descriptor,
wgpu_core::instance::AdapterInputs::Mask(backends, |_| None),
);
let adapter = match res {
Ok(adapter) => adapter,
Err(err) => {
return Ok(GpuAdapterResOrErr::Error {
err: err.to_string(),
})
}
};
let adapter_features =
gfx_select!(adapter => instance.adapter_features(adapter))
.map_err(InitError::InvalidAdapter)?;
let features = deserialize_features(&adapter_features);
let adapter_limits = gfx_select!(adapter => instance.adapter_limits(adapter))
.map_err(InitError::InvalidAdapter)?;
let instance = instance.clone();
let rid = state.resource_table.add(WebGpuAdapter(instance, adapter));
Ok(GpuAdapterResOrErr::Features(GpuAdapterRes {
rid,
features,
limits: adapter_limits,
// TODO(lucacasonato): report correctly from wgpu
is_fallback: false,
}))
}
#[derive(Deserialize)]
pub struct GpuRequiredFeatures(HashSet<String>);
impl From<GpuRequiredFeatures> for wgpu_types::Features {
fn from(required_features: GpuRequiredFeatures) -> wgpu_types::Features {
let mut features: wgpu_types::Features = wgpu_types::Features::empty();
// api
features.set(
wgpu_types::Features::DEPTH_CLIP_CONTROL,
required_features.0.contains("depth-clip-control"),
);
features.set(
wgpu_types::Features::TIMESTAMP_QUERY,
required_features.0.contains("timestamp-query"),
);
features.set(
wgpu_types::Features::INDIRECT_FIRST_INSTANCE,
required_features.0.contains("indirect-first-instance"),
);
// shader
features.set(
wgpu_types::Features::SHADER_F16,
required_features.0.contains("shader-f16"),
);
// texture formats
features.set(
wgpu_types::Features::DEPTH32FLOAT_STENCIL8,
required_features.0.contains("depth32float-stencil8"),
);
features.set(
wgpu_types::Features::TEXTURE_COMPRESSION_BC,
required_features.0.contains("texture-compression-bc"),
);
features.set(
wgpu_types::Features::TEXTURE_COMPRESSION_ETC2,
required_features.0.contains("texture-compression-etc2"),
);
features.set(
wgpu_types::Features::TEXTURE_COMPRESSION_ASTC,
required_features.0.contains("texture-compression-astc"),
);
features.set(
wgpu_types::Features::RG11B10UFLOAT_RENDERABLE,
required_features.0.contains("rg11b10ufloat-renderable"),
);
features.set(
wgpu_types::Features::BGRA8UNORM_STORAGE,
required_features.0.contains("bgra8unorm-storage"),
);
features.set(
wgpu_types::Features::FLOAT32_FILTERABLE,
required_features.0.contains("float32-filterable"),
);
// extended from spec
// texture formats
features.set(
wgpu_types::Features::TEXTURE_FORMAT_16BIT_NORM,
required_features.0.contains("texture-format-16-bit-norm"),
);
features.set(
wgpu_types::Features::TEXTURE_COMPRESSION_ASTC_HDR,
required_features.0.contains("texture-compression-astc-hdr"),
);
features.set(
wgpu_types::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES,
required_features
.0
.contains("texture-adapter-specific-format-features"),
);
// api
features.set(
wgpu_types::Features::PIPELINE_STATISTICS_QUERY,
required_features.0.contains("pipeline-statistics-query"),
);
features.set(
wgpu_types::Features::TIMESTAMP_QUERY_INSIDE_PASSES,
required_features
.0
.contains("timestamp-query-inside-passes"),
);
features.set(
wgpu_types::Features::MAPPABLE_PRIMARY_BUFFERS,
required_features.0.contains("mappable-primary-buffers"),
);
features.set(
wgpu_types::Features::TEXTURE_BINDING_ARRAY,
required_features.0.contains("texture-binding-array"),
);
features.set(
wgpu_types::Features::BUFFER_BINDING_ARRAY,
required_features.0.contains("buffer-binding-array"),
);
features.set(
wgpu_types::Features::STORAGE_RESOURCE_BINDING_ARRAY,
required_features
.0
.contains("storage-resource-binding-array"),
);
features.set(
wgpu_types::Features::SAMPLED_TEXTURE_AND_STORAGE_BUFFER_ARRAY_NON_UNIFORM_INDEXING,
required_features
.0
.contains("sampled-texture-and-storage-buffer-array-non-uniform-indexing"),
);
features.set(
wgpu_types::Features::UNIFORM_BUFFER_AND_STORAGE_TEXTURE_ARRAY_NON_UNIFORM_INDEXING,
required_features
.0
.contains("uniform-buffer-and-storage-texture-array-non-uniform-indexing"),
);
features.set(
wgpu_types::Features::PARTIALLY_BOUND_BINDING_ARRAY,
required_features
.0
.contains("partially-bound-binding-array"),
);
features.set(
wgpu_types::Features::MULTI_DRAW_INDIRECT,
required_features.0.contains("multi-draw-indirect"),
);
features.set(
wgpu_types::Features::MULTI_DRAW_INDIRECT_COUNT,
required_features.0.contains("multi-draw-indirect-count"),
);
features.set(
wgpu_types::Features::PUSH_CONSTANTS,
required_features.0.contains("push-constants"),
);
features.set(
wgpu_types::Features::ADDRESS_MODE_CLAMP_TO_ZERO,
required_features.0.contains("address-mode-clamp-to-zero"),
);
features.set(
wgpu_types::Features::ADDRESS_MODE_CLAMP_TO_BORDER,
required_features.0.contains("address-mode-clamp-to-border"),
);
features.set(
wgpu_types::Features::POLYGON_MODE_LINE,
required_features.0.contains("polygon-mode-line"),
);
features.set(
wgpu_types::Features::POLYGON_MODE_POINT,
required_features.0.contains("polygon-mode-point"),
);
features.set(
wgpu_types::Features::CONSERVATIVE_RASTERIZATION,
required_features.0.contains("conservative-rasterization"),
);
features.set(
wgpu_types::Features::VERTEX_WRITABLE_STORAGE,
required_features.0.contains("vertex-writable-storage"),
);
features.set(
wgpu_types::Features::CLEAR_TEXTURE,
required_features.0.contains("clear-texture"),
);
features.set(
wgpu_types::Features::SPIRV_SHADER_PASSTHROUGH,
required_features.0.contains("spirv-shader-passthrough"),
);
features.set(
wgpu_types::Features::MULTIVIEW,
required_features.0.contains("multiview"),
);
features.set(
wgpu_types::Features::VERTEX_ATTRIBUTE_64BIT,
required_features.0.contains("vertex-attribute-64-bit"),
);
// shader
features.set(
wgpu_types::Features::SHADER_F64,
required_features.0.contains("shader-f64"),
);
features.set(
wgpu_types::Features::SHADER_I16,
required_features.0.contains("shader-i16"),
);
features.set(
wgpu_types::Features::SHADER_PRIMITIVE_INDEX,
required_features.0.contains("shader-primitive-index"),
);
features.set(
wgpu_types::Features::SHADER_EARLY_DEPTH_TEST,
required_features.0.contains("shader-early-depth-test"),
);
features.set(
wgpu_types::Features::SHADER_UNUSED_VERTEX_OUTPUT,
required_features.0.contains("shader-unused-vertex-output"),
);
features
}
}
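
deserialize_features and GpuRequiredFeatures above translate between wgpu_types::Features bit flags and the WebGPU feature-name strings exposed to JavaScript, one direction each. A compact stand-alone sketch of the same round trip, using plain bit masks instead of wgpu_types::Features; the constants and values are invented:

const DEPTH_CLIP_CONTROL: u32 = 1 << 0;
const TIMESTAMP_QUERY: u32 = 1 << 1;
const SHADER_F16: u32 = 1 << 2;

fn features_to_names(bits: u32) -> Vec<&'static str> {
  let mut names = Vec::new();
  if bits & DEPTH_CLIP_CONTROL != 0 {
    names.push("depth-clip-control");
  }
  if bits & TIMESTAMP_QUERY != 0 {
    names.push("timestamp-query");
  }
  if bits & SHADER_F16 != 0 {
    names.push("shader-f16");
  }
  names
}

fn names_to_features(names: &[&str]) -> u32 {
  let mut bits = 0;
  for name in names {
    match *name {
      "depth-clip-control" => bits |= DEPTH_CLIP_CONTROL,
      "timestamp-query" => bits |= TIMESTAMP_QUERY,
      "shader-f16" => bits |= SHADER_F16,
      _ => {} // unknown names are ignored here; real code validates them
    }
  }
  bits
}

fn main() {
  let bits = TIMESTAMP_QUERY | SHADER_F16;
  let names = features_to_names(bits);
  assert_eq!(names, vec!["timestamp-query", "shader-f16"]);
  assert_eq!(names_to_features(&names), bits);
}
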
#[op2]
#[serde]
pub fn op_webgpu_request_device(
state: Rc<RefCell<OpState>>,
#[smi] adapter_rid: ResourceId,
#[string] label: String,
#[serde] required_features: GpuRequiredFeatures,
#[serde] required_limits: Option<wgpu_types::Limits>,
) -> Result<GpuDeviceRes, InitError> {
let mut state = state.borrow_mut();
let adapter_resource =
state.resource_table.take::<WebGpuAdapter>(adapter_rid)?;
let adapter = adapter_resource.1;
let instance = state.borrow::<Instance>();
let descriptor = wgpu_types::DeviceDescriptor {
label: Some(Cow::Owned(label)),
required_features: required_features.into(),
required_limits: required_limits.unwrap_or_default(),
};
let (device, queue, maybe_err) = gfx_select!(adapter => instance.adapter_request_device(
adapter,
&descriptor,
std::env::var("DENO_WEBGPU_TRACE").ok().as_ref().map(std::path::Path::new),
None,
None
));
adapter_resource.close();
if let Some(err) = maybe_err {
return Err(InitError::RequestDevice(err));
}
let device_features = gfx_select!(device => instance.device_features(device))
.map_err(InitError::InvalidDevice)?;
let features = deserialize_features(&device_features);
let limits = gfx_select!(device => instance.device_limits(device))
.map_err(InitError::InvalidDevice)?;
let instance = instance.clone();
let instance2 = instance.clone();
let rid = state.resource_table.add(WebGpuDevice(instance, device));
let queue_rid = state
.resource_table
.add(queue::WebGpuQueue(instance2, queue));
Ok(GpuDeviceRes {
rid,
queue_rid,
features,
limits,
})
}
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
pub struct GPUAdapterInfo {
vendor: String,
architecture: String,
device: String,
description: String,
}
#[op2]
#[serde]
pub fn op_webgpu_request_adapter_info(
state: Rc<RefCell<OpState>>,
#[smi] adapter_rid: ResourceId,
) -> Result<GPUAdapterInfo, InitError> {
let state = state.borrow_mut();
let adapter_resource =
state.resource_table.get::<WebGpuAdapter>(adapter_rid)?;
let adapter = adapter_resource.1;
let instance = state.borrow::<Instance>();
let info = gfx_select!(adapter => instance.adapter_get_info(adapter))
.map_err(InitError::InvalidAdapter)?;
Ok(GPUAdapterInfo {
vendor: info.vendor.to_string(),
architecture: String::new(), // TODO(#2170)
device: info.device.to_string(),
description: info.name,
})
}
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CreateQuerySetArgs {
device_rid: ResourceId,
label: String,
#[serde(flatten)]
r#type: GpuQueryType,
count: u32,
}
#[derive(Deserialize)]
#[serde(rename_all = "kebab-case", tag = "type")]
enum GpuQueryType {
Occlusion,
Timestamp,
}
impl From<GpuQueryType> for wgpu_types::QueryType {
fn from(query_type: GpuQueryType) -> Self {
match query_type {
GpuQueryType::Occlusion => wgpu_types::QueryType::Occlusion,
GpuQueryType::Timestamp => wgpu_types::QueryType::Timestamp,
}
}
}
#[op2]
#[serde]
pub fn op_webgpu_create_query_set(
#[cppgc]
pub fn op_create_gpu(
state: &mut OpState,
#[serde] args: CreateQuerySetArgs,
) -> Result<WebGpuResult, InitError> {
let device_resource =
state.resource_table.get::<WebGpuDevice>(args.device_rid)?;
let device = device_resource.1;
let instance = state.borrow::<Instance>();
let descriptor = wgpu_types::QuerySetDescriptor {
label: Some(Cow::Owned(args.label)),
ty: args.r#type.into(),
count: args.count,
};
gfx_put!(device => instance.device_create_query_set(
device,
&descriptor,
None
) => state, WebGpuQuerySet)
scope: &mut v8::HandleScope,
webidl_brand: v8::Local<v8::Value>,
set_event_target_data: v8::Local<v8::Value>,
error_event_class: v8::Local<v8::Value>,
) -> GPU {
state.put(EventTargetSetup {
brand: v8::Global::new(scope, webidl_brand),
set_event_target_data: v8::Global::new(scope, set_event_target_data),
});
state.put(ErrorEventClass(v8::Global::new(scope, error_event_class)));
GPU
}
struct EventTargetSetup {
brand: v8::Global<v8::Value>,
set_event_target_data: v8::Global<v8::Value>,
}
struct ErrorEventClass(v8::Global<v8::Value>);
pub struct GPU;
impl GarbageCollected for GPU {}
#[op2]
impl GPU {
#[async_method]
#[cppgc]
async fn request_adapter(
&self,
state: Rc<RefCell<OpState>>,
#[webidl] options: adapter::GPURequestAdapterOptions,
) -> Option<adapter::GPUAdapter> {
let mut state = state.borrow_mut();
let backends = std::env::var("DENO_WEBGPU_BACKEND").map_or_else(
|_| wgpu_types::Backends::all(),
|s| wgpu_types::Backends::from_comma_list(&s),
);
let instance = if let Some(instance) = state.try_borrow::<Instance>() {
instance
} else {
state.put(Arc::new(wgpu_core::global::Global::new(
"webgpu",
&wgpu_types::InstanceDescriptor {
backends,
flags: wgpu_types::InstanceFlags::from_build_config(),
backend_options: wgpu_types::BackendOptions {
dx12: wgpu_types::Dx12BackendOptions {
shader_compiler: wgpu_types::Dx12Compiler::Fxc,
},
gl: wgpu_types::GlBackendOptions::default(),
},
},
)));
state.borrow::<Instance>()
};
let descriptor = wgpu_core::instance::RequestAdapterOptions {
power_preference: options
.power_preference
.map(|pp| match pp {
adapter::GPUPowerPreference::LowPower => PowerPreference::LowPower,
adapter::GPUPowerPreference::HighPerformance => {
PowerPreference::HighPerformance
}
})
.unwrap_or_default(),
force_fallback_adapter: options.force_fallback_adapter,
compatible_surface: None, // windowless
};
let id = instance.request_adapter(&descriptor, backends, None).ok()?;
Some(adapter::GPUAdapter {
instance: instance.clone(),
features: SameObject::new(),
limits: SameObject::new(),
info: Rc::new(SameObject::new()),
id,
})
}
#[string]
fn getPreferredCanvasFormat(&self) -> &'static str {
// https://github.com/mozilla/gecko-dev/blob/b75080bb8b11844d18cb5f9ac6e68a866ef8e243/dom/webgpu/Instance.h#L42-L47
if cfg!(target_os = "android") {
texture::GPUTextureFormat::Rgba8unorm.as_str()
} else {
texture::GPUTextureFormat::Bgra8unorm.as_str()
}
}
}
fn transform_label<'a>(label: String) -> Option<std::borrow::Cow<'a, str>> {
if label.is_empty() {
None
} else {
Some(std::borrow::Cow::Owned(label))
}
}
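
transform_label above bridges the WebGPU convention that labels default to the empty string to wgpu's Option<Cow<str>> descriptor fields by mapping "" to None. A stand-alone copy of the helper, runnable as written, just to make the mapping concrete:

use std::borrow::Cow;

fn transform_label<'a>(label: String) -> Option<Cow<'a, str>> {
  if label.is_empty() {
    None
  } else {
    Some(Cow::Owned(label))
  }
}

fn main() {
  assert!(transform_label(String::new()).is_none());
  assert_eq!(
    transform_label("my encoder".into()).as_deref(),
    Some("my encoder")
  );
}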


@@ -1,460 +0,0 @@
// Copyright 2018-2025 the Deno authors. MIT license.
use std::borrow::Cow;
use std::collections::HashMap;
use std::rc::Rc;
use deno_core::error::ResourceError;
use deno_core::op2;
use deno_core::OpState;
use deno_core::Resource;
use deno_core::ResourceId;
use serde::Deserialize;
use serde::Serialize;
use super::error::WebGpuError;
use super::error::WebGpuResult;
const MAX_BIND_GROUPS: usize = 8;
pub(crate) struct WebGpuPipelineLayout(
pub(crate) crate::Instance,
pub(crate) wgpu_core::id::PipelineLayoutId,
);
impl Resource for WebGpuPipelineLayout {
fn name(&self) -> Cow<str> {
"webGPUPipelineLayout".into()
}
fn close(self: Rc<Self>) {
gfx_select!(self.1 => self.0.pipeline_layout_drop(self.1));
}
}
pub(crate) struct WebGpuComputePipeline(
pub(crate) crate::Instance,
pub(crate) wgpu_core::id::ComputePipelineId,
);
impl Resource for WebGpuComputePipeline {
fn name(&self) -> Cow<str> {
"webGPUComputePipeline".into()
}
fn close(self: Rc<Self>) {
gfx_select!(self.1 => self.0.compute_pipeline_drop(self.1));
}
}
pub(crate) struct WebGpuRenderPipeline(
pub(crate) crate::Instance,
pub(crate) wgpu_core::id::RenderPipelineId,
);
impl Resource for WebGpuRenderPipeline {
fn name(&self) -> Cow<str> {
"webGPURenderPipeline".into()
}
fn close(self: Rc<Self>) {
gfx_select!(self.1 => self.0.render_pipeline_drop(self.1));
}
}
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
pub enum GPUAutoLayoutMode {
Auto,
}
#[derive(Deserialize)]
#[serde(untagged)]
pub enum GPUPipelineLayoutOrGPUAutoLayoutMode {
Layout(ResourceId),
Auto(GPUAutoLayoutMode),
}
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct GpuProgrammableStage {
module: ResourceId,
entry_point: Option<String>,
constants: Option<HashMap<String, f64>>,
}
#[op2]
#[serde]
pub fn op_webgpu_create_compute_pipeline(
state: &mut OpState,
#[smi] device_rid: ResourceId,
#[string] label: Cow<str>,
#[serde] layout: GPUPipelineLayoutOrGPUAutoLayoutMode,
#[serde] compute: GpuProgrammableStage,
) -> Result<WebGpuResult, ResourceError> {
let instance = state.borrow::<super::Instance>();
let device_resource = state
.resource_table
.get::<super::WebGpuDevice>(device_rid)?;
let device = device_resource.1;
let pipeline_layout = match layout {
GPUPipelineLayoutOrGPUAutoLayoutMode::Layout(rid) => {
let id = state.resource_table.get::<WebGpuPipelineLayout>(rid)?;
Some(id.1)
}
GPUPipelineLayoutOrGPUAutoLayoutMode::Auto(GPUAutoLayoutMode::Auto) => None,
};
let compute_shader_module_resource =
state
.resource_table
.get::<super::shader::WebGpuShaderModule>(compute.module)?;
let descriptor = wgpu_core::pipeline::ComputePipelineDescriptor {
label: Some(label),
layout: pipeline_layout,
stage: wgpu_core::pipeline::ProgrammableStageDescriptor {
module: compute_shader_module_resource.1,
entry_point: compute.entry_point.map(Cow::from),
constants: Cow::Owned(compute.constants.unwrap_or_default()),
zero_initialize_workgroup_memory: true,
},
};
let implicit_pipelines = match layout {
GPUPipelineLayoutOrGPUAutoLayoutMode::Layout(_) => None,
GPUPipelineLayoutOrGPUAutoLayoutMode::Auto(GPUAutoLayoutMode::Auto) => {
Some(wgpu_core::device::ImplicitPipelineIds {
root_id: None,
group_ids: &[None; MAX_BIND_GROUPS],
})
}
};
let (compute_pipeline, maybe_err) = gfx_select!(device => instance.device_create_compute_pipeline(
device,
&descriptor,
None,
implicit_pipelines
));
let rid = state
.resource_table
.add(WebGpuComputePipeline(instance.clone(), compute_pipeline));
Ok(WebGpuResult::rid_err(rid, maybe_err))
}
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
pub struct PipelineLayout {
rid: ResourceId,
label: String,
err: Option<WebGpuError>,
}
#[op2]
#[serde]
pub fn op_webgpu_compute_pipeline_get_bind_group_layout(
state: &mut OpState,
#[smi] compute_pipeline_rid: ResourceId,
index: u32,
) -> Result<PipelineLayout, ResourceError> {
let instance = state.borrow::<super::Instance>();
let compute_pipeline_resource = state
.resource_table
.get::<WebGpuComputePipeline>(compute_pipeline_rid)?;
let compute_pipeline = compute_pipeline_resource.1;
let (bind_group_layout, maybe_err) = gfx_select!(compute_pipeline => instance.compute_pipeline_get_bind_group_layout(compute_pipeline, index, None));
let label = gfx_select!(bind_group_layout => instance.bind_group_layout_label(bind_group_layout));
let rid = state
.resource_table
.add(super::binding::WebGpuBindGroupLayout(
instance.clone(),
bind_group_layout,
));
Ok(PipelineLayout {
rid,
label,
err: maybe_err.map(WebGpuError::from),
})
}
#[derive(Deserialize)]
#[serde(rename_all = "kebab-case")]
pub enum GpuCullMode {
None,
Front,
Back,
}
impl From<GpuCullMode> for Option<wgpu_types::Face> {
fn from(value: GpuCullMode) -> Option<wgpu_types::Face> {
match value {
GpuCullMode::None => None,
GpuCullMode::Front => Some(wgpu_types::Face::Front),
GpuCullMode::Back => Some(wgpu_types::Face::Back),
}
}
}
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct GpuPrimitiveState {
topology: wgpu_types::PrimitiveTopology,
strip_index_format: Option<wgpu_types::IndexFormat>,
front_face: wgpu_types::FrontFace,
cull_mode: GpuCullMode,
unclipped_depth: bool,
}
impl From<GpuPrimitiveState> for wgpu_types::PrimitiveState {
fn from(value: GpuPrimitiveState) -> wgpu_types::PrimitiveState {
wgpu_types::PrimitiveState {
topology: value.topology,
strip_index_format: value.strip_index_format,
front_face: value.front_face,
cull_mode: value.cull_mode.into(),
unclipped_depth: value.unclipped_depth,
polygon_mode: Default::default(), // native-only
conservative: false, // native-only
}
}
}
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct GpuDepthStencilState {
format: wgpu_types::TextureFormat,
depth_write_enabled: bool,
depth_compare: wgpu_types::CompareFunction,
stencil_front: wgpu_types::StencilFaceState,
stencil_back: wgpu_types::StencilFaceState,
stencil_read_mask: u32,
stencil_write_mask: u32,
depth_bias: i32,
depth_bias_slope_scale: f32,
depth_bias_clamp: f32,
}
impl From<GpuDepthStencilState> for wgpu_types::DepthStencilState {
fn from(state: GpuDepthStencilState) -> wgpu_types::DepthStencilState {
wgpu_types::DepthStencilState {
format: state.format,
depth_write_enabled: state.depth_write_enabled,
depth_compare: state.depth_compare,
stencil: wgpu_types::StencilState {
front: state.stencil_front,
back: state.stencil_back,
read_mask: state.stencil_read_mask,
write_mask: state.stencil_write_mask,
},
bias: wgpu_types::DepthBiasState {
constant: state.depth_bias,
slope_scale: state.depth_bias_slope_scale,
clamp: state.depth_bias_clamp,
},
}
}
}
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct GpuVertexBufferLayout {
array_stride: u64,
step_mode: wgpu_types::VertexStepMode,
attributes: Vec<wgpu_types::VertexAttribute>,
}
impl<'a> From<GpuVertexBufferLayout>
for wgpu_core::pipeline::VertexBufferLayout<'a>
{
fn from(
layout: GpuVertexBufferLayout,
) -> wgpu_core::pipeline::VertexBufferLayout<'a> {
wgpu_core::pipeline::VertexBufferLayout {
array_stride: layout.array_stride,
step_mode: layout.step_mode,
attributes: Cow::Owned(layout.attributes),
}
}
}
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct GpuVertexState {
module: ResourceId,
entry_point: Option<String>,
constants: Option<HashMap<String, f64>>,
buffers: Vec<Option<GpuVertexBufferLayout>>,
}
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct GpuMultisampleState {
count: u32,
mask: u64,
alpha_to_coverage_enabled: bool,
}
impl From<GpuMultisampleState> for wgpu_types::MultisampleState {
fn from(gms: GpuMultisampleState) -> wgpu_types::MultisampleState {
wgpu_types::MultisampleState {
count: gms.count,
mask: gms.mask,
alpha_to_coverage_enabled: gms.alpha_to_coverage_enabled,
}
}
}
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct GpuFragmentState {
targets: Vec<Option<wgpu_types::ColorTargetState>>,
module: u32,
entry_point: Option<String>,
constants: Option<HashMap<String, f64>>,
}
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CreateRenderPipelineArgs {
device_rid: ResourceId,
label: String,
layout: GPUPipelineLayoutOrGPUAutoLayoutMode,
vertex: GpuVertexState,
primitive: GpuPrimitiveState,
depth_stencil: Option<GpuDepthStencilState>,
multisample: wgpu_types::MultisampleState,
fragment: Option<GpuFragmentState>,
}
#[op2]
#[serde]
pub fn op_webgpu_create_render_pipeline(
state: &mut OpState,
#[serde] args: CreateRenderPipelineArgs,
) -> Result<WebGpuResult, ResourceError> {
let instance = state.borrow::<super::Instance>();
let device_resource = state
.resource_table
.get::<super::WebGpuDevice>(args.device_rid)?;
let device = device_resource.1;
let layout = match args.layout {
GPUPipelineLayoutOrGPUAutoLayoutMode::Layout(rid) => {
let pipeline_layout_resource =
state.resource_table.get::<WebGpuPipelineLayout>(rid)?;
Some(pipeline_layout_resource.1)
}
GPUPipelineLayoutOrGPUAutoLayoutMode::Auto(GPUAutoLayoutMode::Auto) => None,
};
let vertex_shader_module_resource =
state
.resource_table
.get::<super::shader::WebGpuShaderModule>(args.vertex.module)?;
let fragment = if let Some(fragment) = args.fragment {
let fragment_shader_module_resource =
state
.resource_table
.get::<super::shader::WebGpuShaderModule>(fragment.module)?;
Some(wgpu_core::pipeline::FragmentState {
stage: wgpu_core::pipeline::ProgrammableStageDescriptor {
module: fragment_shader_module_resource.1,
entry_point: fragment.entry_point.map(Cow::from),
constants: Cow::Owned(fragment.constants.unwrap_or_default()),
// Required to be true for WebGPU
zero_initialize_workgroup_memory: true,
},
targets: Cow::Owned(fragment.targets),
})
} else {
None
};
let vertex_buffers = args
.vertex
.buffers
.into_iter()
.flatten()
.map(Into::into)
.collect();
let descriptor = wgpu_core::pipeline::RenderPipelineDescriptor {
label: Some(Cow::Owned(args.label)),
layout,
vertex: wgpu_core::pipeline::VertexState {
stage: wgpu_core::pipeline::ProgrammableStageDescriptor {
module: vertex_shader_module_resource.1,
entry_point: args.vertex.entry_point.map(Cow::Owned),
constants: Cow::Owned(args.vertex.constants.unwrap_or_default()),
// Required to be true for WebGPU
zero_initialize_workgroup_memory: true,
},
buffers: Cow::Owned(vertex_buffers),
},
primitive: args.primitive.into(),
depth_stencil: args.depth_stencil.map(Into::into),
multisample: args.multisample,
fragment,
multiview: None,
};
let implicit_pipelines = match args.layout {
GPUPipelineLayoutOrGPUAutoLayoutMode::Layout(_) => None,
GPUPipelineLayoutOrGPUAutoLayoutMode::Auto(GPUAutoLayoutMode::Auto) => {
Some(wgpu_core::device::ImplicitPipelineIds {
root_id: None,
group_ids: &[None; MAX_BIND_GROUPS],
})
}
};
let (render_pipeline, maybe_err) = gfx_select!(device => instance.device_create_render_pipeline(
device,
&descriptor,
None,
implicit_pipelines
));
let rid = state
.resource_table
.add(WebGpuRenderPipeline(instance.clone(), render_pipeline));
Ok(WebGpuResult::rid_err(rid, maybe_err))
}
#[op2]
#[serde]
pub fn op_webgpu_render_pipeline_get_bind_group_layout(
state: &mut OpState,
#[smi] render_pipeline_rid: ResourceId,
index: u32,
) -> Result<PipelineLayout, ResourceError> {
let instance = state.borrow::<super::Instance>();
let render_pipeline_resource = state
.resource_table
.get::<WebGpuRenderPipeline>(render_pipeline_rid)?;
let render_pipeline = render_pipeline_resource.1;
let (bind_group_layout, maybe_err) = gfx_select!(render_pipeline => instance.render_pipeline_get_bind_group_layout(render_pipeline, index, None));
let label = gfx_select!(bind_group_layout => instance.bind_group_layout_label(bind_group_layout));
let rid = state
.resource_table
.add(super::binding::WebGpuBindGroupLayout(
instance.clone(),
bind_group_layout,
));
Ok(PipelineLayout {
rid,
label,
err: maybe_err.map(WebGpuError::from),
})
}


@@ -0,0 +1,51 @@
// Copyright 2018-2025 the Deno authors. MIT license.
use deno_core::cppgc::Ptr;
use deno_core::op2;
use deno_core::webidl::WebIdlInterfaceConverter;
use deno_core::GarbageCollected;
use deno_core::WebIDL;
use crate::Instance;
pub struct GPUPipelineLayout {
pub instance: Instance,
pub id: wgpu_core::id::PipelineLayoutId,
pub label: String,
}
impl Drop for GPUPipelineLayout {
fn drop(&mut self) {
self.instance.pipeline_layout_drop(self.id);
}
}
impl WebIdlInterfaceConverter for GPUPipelineLayout {
const NAME: &'static str = "GPUPipelineLayout";
}
impl GarbageCollected for GPUPipelineLayout {}
#[op2]
impl GPUPipelineLayout {
#[getter]
#[string]
fn label(&self) -> String {
self.label.clone()
}
#[setter]
#[string]
fn label(&self, #[webidl] _label: String) {
// TODO(@crowlKats): no-op, needs wgpu to implement changing the label
}
}
#[derive(WebIDL)]
#[webidl(dictionary)]
pub(crate) struct GPUPipelineLayoutDescriptor {
#[webidl(default = String::new())]
pub label: String,
pub bind_group_layouts:
Vec<Ptr<super::bind_group_layout::GPUBindGroupLayout>>,
}

ext/webgpu/query_set.rs (new file, 87 lines)

@@ -0,0 +1,87 @@
// Copyright 2018-2025 the Deno authors. MIT license.
use deno_core::op2;
use deno_core::webidl::WebIdlInterfaceConverter;
use deno_core::GarbageCollected;
use deno_core::WebIDL;
use deno_error::JsErrorBox;
use crate::Instance;
pub struct GPUQuerySet {
pub instance: Instance,
pub id: wgpu_core::id::QuerySetId,
pub r#type: GPUQueryType,
pub count: u32,
pub label: String,
}
impl Drop for GPUQuerySet {
fn drop(&mut self) {
self.instance.query_set_drop(self.id);
}
}
impl WebIdlInterfaceConverter for GPUQuerySet {
const NAME: &'static str = "GPUQuerySet";
}
impl GarbageCollected for GPUQuerySet {}
#[op2]
impl GPUQuerySet {
#[getter]
#[string]
fn label(&self) -> String {
self.label.clone()
}
#[setter]
#[string]
fn label(&self, #[webidl] _label: String) {
// TODO(@crowlKats): no-op, needs wgpu to implement changing the label
}
#[fast]
fn destroy(&self) -> Result<(), JsErrorBox> {
Err(JsErrorBox::generic(
"This operation is currently not supported",
))
}
#[getter]
#[string]
fn r#type(&self) -> &'static str {
self.r#type.as_str()
}
#[getter]
fn count(&self) -> u32 {
self.count
}
}
#[derive(WebIDL)]
#[webidl(dictionary)]
pub(crate) struct GPUQuerySetDescriptor {
#[webidl(default = String::new())]
pub label: String,
pub r#type: GPUQueryType,
#[options(enforce_range = true)]
pub count: u32,
}
#[derive(WebIDL, Clone)]
#[webidl(enum)]
pub(crate) enum GPUQueryType {
Occlusion,
Timestamp,
}
impl From<GPUQueryType> for wgpu_types::QueryType {
fn from(value: GPUQueryType) -> Self {
match value {
GPUQueryType::Occlusion => Self::Occlusion,
GPUQueryType::Timestamp => Self::Timestamp,
}
}
}


@@ -1,144 +1,165 @@
// Copyright 2018-2025 the Deno authors. MIT license.
use std::borrow::Cow;
use std::rc::Rc;
use deno_core::error::ResourceError;
use deno_core::cppgc::Ptr;
use deno_core::op2;
use deno_core::OpState;
use deno_core::Resource;
use deno_core::ResourceId;
use serde::Deserialize;
use deno_core::GarbageCollected;
use deno_core::WebIDL;
use deno_error::JsErrorBox;
use super::error::WebGpuResult;
use crate::command_encoder::WebGpuCommandBuffer;
use crate::buffer::GPUBuffer;
use crate::command_buffer::GPUCommandBuffer;
use crate::texture::GPUTexture;
use crate::texture::GPUTextureAspect;
use crate::webidl::GPUExtent3D;
use crate::webidl::GPUOrigin3D;
use crate::Instance;
pub struct WebGpuQueue(pub Instance, pub wgpu_core::id::QueueId);
impl Resource for WebGpuQueue {
fn name(&self) -> Cow<str> {
"webGPUQueue".into()
}
pub struct GPUQueue {
pub instance: Instance,
pub error_handler: super::error::ErrorHandler,
fn close(self: Rc<Self>) {
gfx_select!(self.1 => self.0.queue_drop(self.1));
pub label: String,
pub id: wgpu_core::id::QueueId,
}
impl Drop for GPUQueue {
fn drop(&mut self) {
self.instance.queue_drop(self.id);
}
}
impl GarbageCollected for GPUQueue {}
#[op2]
#[serde]
pub fn op_webgpu_queue_submit(
state: &mut OpState,
#[smi] queue_rid: ResourceId,
#[serde] command_buffers: Vec<ResourceId>,
) -> Result<WebGpuResult, ResourceError> {
let instance = state.borrow::<Instance>();
let queue_resource = state.resource_table.get::<WebGpuQueue>(queue_rid)?;
let queue = queue_resource.1;
let ids = command_buffers
.iter()
.map(|rid| {
let buffer_resource =
state.resource_table.get::<WebGpuCommandBuffer>(*rid)?;
let mut id = buffer_resource.1.borrow_mut();
Ok(id.take().unwrap())
})
.collect::<Result<Vec<_>, ResourceError>>()?;
let maybe_err =
gfx_select!(queue => instance.queue_submit(queue, &ids)).err();
for rid in command_buffers {
let resource = state.resource_table.take::<WebGpuCommandBuffer>(rid)?;
resource.close();
impl GPUQueue {
#[getter]
#[string]
fn label(&self) -> String {
self.label.clone()
}
#[setter]
#[string]
fn label(&self, #[webidl] _label: String) {
// TODO(@crowlKats): no-op, needs wgpu to implement changing the label
}
Ok(WebGpuResult::maybe_err(maybe_err))
#[required(1)]
fn submit(
&self,
#[webidl] command_buffers: Vec<Ptr<GPUCommandBuffer>>,
) -> Result<(), JsErrorBox> {
let ids = command_buffers
.into_iter()
.enumerate()
.map(|(i, cb)| {
if cb.consumed.set(()).is_err() {
Err(JsErrorBox::type_error(format!(
"The command buffer at position {i} has already been submitted."
)))
} else {
Ok(cb.id)
}
})
.collect::<Result<Vec<_>, _>>()?;
let err = self.instance.queue_submit(self.id, &ids).err();
if let Some((_, err)) = err {
self.error_handler.push_error(Some(err));
}
Ok(())
}
#[async_method]
async fn on_submitted_work_done(&self) -> Result<(), JsErrorBox> {
Err(JsErrorBox::generic(
"This operation is currently not supported",
))
}
#[required(3)]
fn write_buffer(
&self,
#[webidl] buffer: Ptr<GPUBuffer>,
#[webidl(options(enforce_range = true))] buffer_offset: u64,
#[anybuffer] buf: &[u8],
#[webidl(default = 0, options(enforce_range = true))] data_offset: u64,
#[webidl(options(enforce_range = true))] size: Option<u64>,
) {
let data = match size {
Some(size) => {
&buf[(data_offset as usize)..((data_offset + size) as usize)]
}
None => &buf[(data_offset as usize)..],
};
let err = self
.instance
.queue_write_buffer(self.id, buffer.id, buffer_offset, data)
.err();
self.error_handler.push_error(err);
}
#[required(4)]
fn write_texture(
&self,
#[webidl] destination: GPUTexelCopyTextureInfo,
#[anybuffer] buf: &[u8],
#[webidl] data_layout: GPUTexelCopyBufferLayout,
#[webidl] size: GPUExtent3D,
) {
let destination = wgpu_core::command::TexelCopyTextureInfo {
texture: destination.texture.id,
mip_level: destination.mip_level,
origin: destination.origin.into(),
aspect: destination.aspect.into(),
};
let data_layout = wgpu_types::TexelCopyBufferLayout {
offset: data_layout.offset,
bytes_per_row: data_layout.bytes_per_row,
rows_per_image: data_layout.rows_per_image,
};
let err = self
.instance
.queue_write_texture(
self.id,
&destination,
buf,
&data_layout,
&size.into(),
)
.err();
self.error_handler.push_error(err);
}
}
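
submit() above rejects command buffers that were already submitted by treating each buffer's consumed field as a write-once cell: the first .set(()) succeeds, any later one fails. A minimal sketch of that check, assuming consumed is a std::cell::OnceCell<()> (which the .set(()) call suggests; the field's definition is not shown in this hunk):

use std::cell::OnceCell;

struct CommandBuffer {
  id: u32,
  consumed: OnceCell<()>,
}

fn submit(buffers: &[CommandBuffer]) -> Result<Vec<u32>, String> {
  buffers
    .iter()
    .enumerate()
    .map(|(i, cb)| {
      if cb.consumed.set(()).is_err() {
        Err(format!(
          "The command buffer at position {i} has already been submitted."
        ))
      } else {
        Ok(cb.id)
      }
    })
    .collect()
}

fn main() {
  let cb = CommandBuffer {
    id: 1,
    consumed: OnceCell::new(),
  };
  assert!(submit(std::slice::from_ref(&cb)).is_ok());
  // Submitting the same buffer again is rejected.
  assert!(submit(std::slice::from_ref(&cb)).is_err());
}
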
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct GpuImageDataLayout {
#[derive(WebIDL)]
#[webidl(dictionary)]
pub(crate) struct GPUTexelCopyTextureInfo {
pub texture: Ptr<GPUTexture>,
#[webidl(default = 0)]
#[options(enforce_range = true)]
pub mip_level: u32,
#[webidl(default = Default::default())]
pub origin: GPUOrigin3D,
#[webidl(default = GPUTextureAspect::All)]
pub aspect: GPUTextureAspect,
}
#[derive(WebIDL)]
#[webidl(dictionary)]
struct GPUTexelCopyBufferLayout {
#[webidl(default = 0)]
#[options(enforce_range = true)]
offset: u64,
#[options(enforce_range = true)]
bytes_per_row: Option<u32>,
#[options(enforce_range = true)]
rows_per_image: Option<u32>,
}
impl From<GpuImageDataLayout> for wgpu_types::ImageDataLayout {
fn from(layout: GpuImageDataLayout) -> Self {
wgpu_types::ImageDataLayout {
offset: layout.offset,
bytes_per_row: layout.bytes_per_row,
rows_per_image: layout.rows_per_image,
}
}
}
#[op2]
#[serde]
pub fn op_webgpu_write_buffer(
state: &mut OpState,
#[smi] queue_rid: ResourceId,
#[smi] buffer: ResourceId,
#[number] buffer_offset: u64,
#[number] data_offset: usize,
#[number] size: Option<usize>,
#[buffer] buf: &[u8],
) -> Result<WebGpuResult, ResourceError> {
let instance = state.borrow::<Instance>();
let buffer_resource = state
.resource_table
.get::<super::buffer::WebGpuBuffer>(buffer)?;
let buffer = buffer_resource.1;
let queue_resource = state.resource_table.get::<WebGpuQueue>(queue_rid)?;
let queue = queue_resource.1;
let data = match size {
Some(size) => &buf[data_offset..(data_offset + size)],
None => &buf[data_offset..],
};
let maybe_err = gfx_select!(queue => instance.queue_write_buffer(
queue,
buffer,
buffer_offset,
data
))
.err();
Ok(WebGpuResult::maybe_err(maybe_err))
}
#[op2]
#[serde]
pub fn op_webgpu_write_texture(
state: &mut OpState,
#[smi] queue_rid: ResourceId,
#[serde] destination: super::command_encoder::GpuImageCopyTexture,
#[serde] data_layout: GpuImageDataLayout,
#[serde] size: wgpu_types::Extent3d,
#[buffer] buf: &[u8],
) -> Result<WebGpuResult, ResourceError> {
let instance = state.borrow::<Instance>();
let texture_resource = state
.resource_table
.get::<super::texture::WebGpuTexture>(destination.texture)?;
let queue_resource = state.resource_table.get::<WebGpuQueue>(queue_rid)?;
let queue = queue_resource.1;
let destination = wgpu_core::command::ImageCopyTexture {
texture: texture_resource.id,
mip_level: destination.mip_level,
origin: destination.origin,
aspect: destination.aspect,
};
let data_layout = data_layout.into();
gfx_ok!(queue => instance.queue_write_texture(
queue,
&destination,
buf,
&data_layout,
&size
))
}

ext/webgpu/render_bundle.rs (new file, 423 lines)

@@ -0,0 +1,423 @@
// Copyright 2018-2025 the Deno authors. MIT license.
use std::borrow::Cow;
use std::cell::RefCell;
use std::num::NonZeroU64;
use deno_core::cppgc::Ptr;
use deno_core::op2;
use deno_core::v8;
use deno_core::webidl::IntOptions;
use deno_core::webidl::Nullable;
use deno_core::webidl::WebIdlConverter;
use deno_core::webidl::WebIdlError;
use deno_core::webidl::WebIdlInterfaceConverter;
use deno_core::GarbageCollected;
use deno_core::WebIDL;
use deno_error::JsErrorBox;
use crate::buffer::GPUBuffer;
use crate::texture::GPUTextureFormat;
use crate::Instance;
pub struct GPURenderBundleEncoder {
pub instance: Instance,
pub error_handler: super::error::ErrorHandler,
pub encoder: RefCell<Option<wgpu_core::command::RenderBundleEncoder>>,
pub label: String,
}
impl GarbageCollected for GPURenderBundleEncoder {}
#[op2]
impl GPURenderBundleEncoder {
#[getter]
#[string]
fn label(&self) -> String {
self.label.clone()
}
#[setter]
#[string]
fn label(&self, #[webidl] _label: String) {
// TODO(@crowlKats): no-op, needs wgpu to implement changing the label
}
#[cppgc]
fn finish(
&self,
#[webidl] descriptor: GPURenderBundleDescriptor,
) -> GPURenderBundle {
let wgpu_descriptor = wgpu_core::command::RenderBundleDescriptor {
label: crate::transform_label(descriptor.label.clone()),
};
let (id, err) = self.instance.render_bundle_encoder_finish(
self.encoder.borrow_mut().take().unwrap(),
&wgpu_descriptor,
None,
);
self.error_handler.push_error(err);
GPURenderBundle {
instance: self.instance.clone(),
id,
label: descriptor.label.clone(),
}
}
fn push_debug_group(
&self,
#[webidl] group_label: String,
) -> Result<(), JsErrorBox> {
let mut encoder = self.encoder.borrow_mut();
let encoder = encoder.as_mut().ok_or_else(|| {
JsErrorBox::generic("Encoder has already been finished")
})?;
let label = std::ffi::CString::new(group_label).unwrap();
// SAFETY: the string the raw pointer points to lives longer than the below
// function invocation.
unsafe {
wgpu_core::command::bundle_ffi::wgpu_render_bundle_push_debug_group(
encoder,
label.as_ptr(),
);
}
Ok(())
}
#[fast]
fn pop_debug_group(&self) -> Result<(), JsErrorBox> {
let mut encoder = self.encoder.borrow_mut();
let encoder = encoder.as_mut().ok_or_else(|| {
JsErrorBox::generic("Encoder has already been finished")
})?;
wgpu_core::command::bundle_ffi::wgpu_render_bundle_pop_debug_group(encoder);
Ok(())
}
fn insert_debug_marker(
&self,
#[webidl] marker_label: String,
) -> Result<(), JsErrorBox> {
let mut encoder = self.encoder.borrow_mut();
let encoder = encoder.as_mut().ok_or_else(|| {
JsErrorBox::generic("Encoder has already been finished")
})?;
let label = std::ffi::CString::new(marker_label).unwrap();
// SAFETY: the string the raw pointer points to lives longer than the below
// function invocation.
unsafe {
wgpu_core::command::bundle_ffi::wgpu_render_bundle_insert_debug_marker(
encoder,
label.as_ptr(),
);
}
Ok(())
}
fn set_bind_group<'a>(
&self,
scope: &mut v8::HandleScope<'a>,
#[webidl(options(enforce_range = true))] index: u32,
#[webidl] bind_group: Nullable<Ptr<crate::bind_group::GPUBindGroup>>,
dynamic_offsets: v8::Local<'a, v8::Value>,
dynamic_offsets_data_start: v8::Local<'a, v8::Value>,
dynamic_offsets_data_length: v8::Local<'a, v8::Value>,
) -> Result<(), SetBindGroupError> {
let mut encoder = self.encoder.borrow_mut();
let encoder = encoder.as_mut().ok_or_else(|| {
JsErrorBox::generic("Encoder has already been finished")
})?;
const PREFIX: &str =
"Failed to execute 'setBindGroup' on 'GPUComputePassEncoder'";
if let Ok(uint_32) = dynamic_offsets.try_cast::<v8::Uint32Array>() {
let start = u64::convert(
scope,
dynamic_offsets_data_start,
Cow::Borrowed(PREFIX),
(|| Cow::Borrowed("Argument 4")).into(),
&IntOptions {
clamp: false,
enforce_range: true,
},
)? as usize;
let len = u32::convert(
scope,
dynamic_offsets_data_length,
Cow::Borrowed(PREFIX),
(|| Cow::Borrowed("Argument 5")).into(),
&IntOptions {
clamp: false,
enforce_range: true,
},
)? as usize;
let ab = uint_32.buffer(scope).unwrap();
let ptr = ab.data().unwrap();
let ab_len = ab.byte_length() / 4;
// SAFETY: created from an array buffer, slice is dropped at end of function call
let data =
unsafe { std::slice::from_raw_parts(ptr.as_ptr() as _, ab_len) };
let offsets = &data[start..(start + len)];
// SAFETY: wgpu FFI call
unsafe {
wgpu_core::command::bundle_ffi::wgpu_render_bundle_set_bind_group(
encoder,
index,
bind_group.into_option().map(|bind_group| bind_group.id),
offsets.as_ptr(),
offsets.len(),
);
}
} else {
let offsets = <Option<Vec<u32>>>::convert(
scope,
dynamic_offsets,
Cow::Borrowed(PREFIX),
(|| Cow::Borrowed("Argument 3")).into(),
&IntOptions {
clamp: false,
enforce_range: true,
},
)?
.unwrap_or_default();
// SAFETY: wgpu FFI call
unsafe {
wgpu_core::command::bundle_ffi::wgpu_render_bundle_set_bind_group(
encoder,
index,
bind_group.into_option().map(|bind_group| bind_group.id),
offsets.as_ptr(),
offsets.len(),
);
}
}
Ok(())
}
fn set_pipeline(
&self,
#[webidl] pipeline: Ptr<crate::render_pipeline::GPURenderPipeline>,
) -> Result<(), JsErrorBox> {
let mut encoder = self.encoder.borrow_mut();
let encoder = encoder.as_mut().ok_or_else(|| {
JsErrorBox::generic("Encoder has already been finished")
})?;
wgpu_core::command::bundle_ffi::wgpu_render_bundle_set_pipeline(
encoder,
pipeline.id,
);
Ok(())
}
#[required(2)]
fn set_index_buffer(
&self,
#[webidl] buffer: Ptr<GPUBuffer>,
#[webidl] index_format: crate::render_pipeline::GPUIndexFormat,
#[webidl(default = 0, options(enforce_range = true))] offset: u64,
#[webidl(options(enforce_range = true))] size: Option<u64>,
) -> Result<(), JsErrorBox> {
let mut encoder = self.encoder.borrow_mut();
let encoder = encoder.as_mut().ok_or_else(|| {
JsErrorBox::generic("Encoder has already been finished")
})?;
encoder.set_index_buffer(
buffer.id,
index_format.into(),
offset,
size.and_then(NonZeroU64::new),
);
Ok(())
}
#[required(2)]
fn set_vertex_buffer(
&self,
#[webidl(options(enforce_range = true))] slot: u32,
#[webidl] buffer: Ptr<GPUBuffer>, // TODO(wgpu): support nullable buffer
#[webidl(default = 0, options(enforce_range = true))] offset: u64,
#[webidl(options(enforce_range = true))] size: Option<u64>,
) -> Result<(), JsErrorBox> {
let mut encoder = self.encoder.borrow_mut();
let encoder = encoder.as_mut().ok_or_else(|| {
JsErrorBox::generic("Encoder has already been finished")
})?;
wgpu_core::command::bundle_ffi::wgpu_render_bundle_set_vertex_buffer(
encoder,
slot,
buffer.id,
offset,
size.and_then(NonZeroU64::new),
);
Ok(())
}
#[required(1)]
fn draw(
&self,
#[webidl(options(enforce_range = true))] vertex_count: u32,
#[webidl(default = 1, options(enforce_range = true))] instance_count: u32,
#[webidl(default = 0, options(enforce_range = true))] first_vertex: u32,
#[webidl(default = 0, options(enforce_range = true))] first_instance: u32,
) -> Result<(), JsErrorBox> {
let mut encoder = self.encoder.borrow_mut();
let encoder = encoder.as_mut().ok_or_else(|| {
JsErrorBox::generic("Encoder has already been finished")
})?;
wgpu_core::command::bundle_ffi::wgpu_render_bundle_draw(
encoder,
vertex_count,
instance_count,
first_vertex,
first_instance,
);
Ok(())
}
#[required(1)]
fn draw_indexed(
&self,
#[webidl(options(enforce_range = true))] index_count: u32,
#[webidl(default = 1, options(enforce_range = true))] instance_count: u32,
#[webidl(default = 0, options(enforce_range = true))] first_index: u32,
#[webidl(default = 0, options(enforce_range = true))] base_vertex: i32,
#[webidl(default = 0, options(enforce_range = true))] first_instance: u32,
) -> Result<(), JsErrorBox> {
let mut encoder = self.encoder.borrow_mut();
let encoder = encoder.as_mut().ok_or_else(|| {
JsErrorBox::generic("Encoder has already been finished")
})?;
wgpu_core::command::bundle_ffi::wgpu_render_bundle_draw_indexed(
encoder,
index_count,
instance_count,
first_index,
base_vertex,
first_instance,
);
Ok(())
}
#[required(2)]
fn draw_indirect(
&self,
#[webidl] indirect_buffer: Ptr<GPUBuffer>,
#[webidl(options(enforce_range = true))] indirect_offset: u64,
) -> Result<(), JsErrorBox> {
let mut encoder = self.encoder.borrow_mut();
let encoder = encoder.as_mut().ok_or_else(|| {
JsErrorBox::generic("Encoder has already been finished")
})?;
wgpu_core::command::bundle_ffi::wgpu_render_bundle_draw_indirect(
encoder,
indirect_buffer.id,
indirect_offset,
);
Ok(())
}
#[required(2)]
fn draw_indexed_indirect(
&self,
#[webidl] indirect_buffer: Ptr<GPUBuffer>,
#[webidl(options(enforce_range = true))] indirect_offset: u64,
) -> Result<(), JsErrorBox> {
let mut encoder = self.encoder.borrow_mut();
let encoder = encoder.as_mut().ok_or_else(|| {
JsErrorBox::generic("Encoder has already been finished")
})?;
wgpu_core::command::bundle_ffi::wgpu_render_bundle_draw_indexed_indirect(
encoder,
indirect_buffer.id,
indirect_offset,
);
Ok(())
}
}
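/// Dictionary accepted by `GPUDevice.createRenderBundleEncoder()`, mirroring
/// the WebIDL `GPURenderBundleEncoderDescriptor`.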
#[derive(WebIDL)]
#[webidl(dictionary)]
pub(crate) struct GPURenderBundleEncoderDescriptor {
#[webidl(default = String::new())]
pub label: String,
pub color_formats: Vec<Nullable<GPUTextureFormat>>,
pub depth_stencil_format: Option<GPUTextureFormat>,
#[webidl(default = 1)]
#[options(enforce_range = true)]
pub sample_count: u32,
#[webidl(default = false)]
pub depth_read_only: bool,
#[webidl(default = false)]
pub stencil_read_only: bool,
}
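/// Errors surfaced by `setBindGroup`: either a WebIDL conversion failure for
/// one of its arguments, or a generic error such as calling the method on an
/// already-finished encoder.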
#[derive(Debug, thiserror::Error, deno_error::JsError)]
enum SetBindGroupError {
#[class(inherit)]
#[error(transparent)]
WebIDL(#[from] WebIdlError),
#[class(inherit)]
#[error(transparent)]
Other(#[from] JsErrorBox),
}
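/// Finished, immutable recording produced by `GPURenderBundleEncoder.finish()`.
/// From JavaScript the bundle is replayed inside a render pass via
/// `GPURenderPassEncoder.executeBundles()`; a rough sketch of that user-facing
/// flow (assuming an already-configured device, pipeline and vertex buffer)
/// looks like:
///
/// ```js
/// const encoder = device.createRenderBundleEncoder({
///   colorFormats: ["bgra8unorm"],
/// });
/// encoder.setPipeline(pipeline);
/// encoder.setVertexBuffer(0, vertexBuffer);
/// encoder.draw(3);
/// const bundle = encoder.finish();
/// // ...later, when recording a render pass:
/// renderPass.executeBundles([bundle]);
/// ```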
pub struct GPURenderBundle {
pub instance: Instance,
pub id: wgpu_core::id::RenderBundleId,
pub label: String,
}
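// Release the underlying wgpu_core render bundle when the wrapper is dropped
// (e.g. once the JS-side object is garbage collected).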
impl Drop for GPURenderBundle {
fn drop(&mut self) {
self.instance.render_bundle_drop(self.id);
}
}
impl WebIdlInterfaceConverter for GPURenderBundle {
const NAME: &'static str = "GPURenderBundle";
}
impl GarbageCollected for GPURenderBundle {}
#[op2]
impl GPURenderBundle {
#[getter]
#[string]
fn label(&self) -> String {
self.label.clone()
}
#[setter]
#[string]
fn label(&self, #[webidl] _label: String) {
// TODO(@crowlKats): no-op, needs wpgu to implement changing the label
}
}
#[derive(WebIDL)]
#[webidl(dictionary)]
pub(crate) struct GPURenderBundleDescriptor {
#[webidl(default = String::new())]
pub label: String,
}
