feat(bench): update API, new console reporter (#14305)
This commit changes the "deno bench" subcommand by updating the "Deno.bench" API as follows:
- remove "Deno.BenchDefinition.n"
- remove "Deno.BenchDefinition.warmup"
- add "Deno.BenchDefinition.group"
- add "Deno.BenchDefinition.baseline"

This is done because bench cases are no longer run for a fixed number of iterations; instead, they are run until the difference between subsequent runs is statistically insignificant. Additionally, the console reporter was completely rewritten to look similar to the "hyperfine" reporter.
This commit is contained in:
parent 2612b6f20f
commit f785ecee1a

30 changed files with 696 additions and 591 deletions
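As context for the API change, here is a minimal sketch of the updated "Deno.bench" API (the bench names and bodies are illustrative, not part of this commit): "group" clusters benches into one comparison, and "baseline" marks the bench the others are measured against.

const bytes = new Uint8Array([104, 105]);
const decoder = new TextDecoder();

// Run with: deno bench --unstable decode_bench.ts (the file name is illustrative)
Deno.bench("decode fresh", { group: "decode", baseline: true }, () => {
  new TextDecoder().decode(bytes);
});

Deno.bench("decode reused", { group: "decode" }, () => {
  decoder.decode(bytes);
});

Because the runner now chooses the iteration count itself, the removed "n" and "warmup" options have no direct replacement; each bench simply runs until successive measurements stop differing meaningfully, as described above.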
Cargo.lock (generated, 41 lines changed)

@@ -119,15 +119,6 @@ version = "1.0.56"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4361135be9122e0870de935d7c439aef945b9f9ddd4199a553b5270b49c82a27"

[[package]]
name = "arrayvec"
version = "0.4.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cd9fd44efafa8690358b7408d253adf110036b88f55672a933f01d616ad9b1b9"
dependencies = [
 "nodrop",
]

[[package]]
name = "arrayvec"
version = "0.7.2"

@@ -442,6 +433,7 @@ dependencies = [
 "atty",
 "bitflags",
 "indexmap",
 "lazy_static",
 "os_str_bytes",
 "strsim 0.10.0",
 "termcolor",

@@ -769,10 +761,10 @@ dependencies = [
 "jsonc-parser",
 "libc",
 "log",
 "mitata",
 "nix",
 "node_resolver",
 "notify",
 "num-format",
 "once_cell",
 "os_pipe",
 "percent-encoding",

@@ -2451,6 +2443,15 @@ dependencies = [
 "winapi 0.3.9",
]

[[package]]
name = "mitata"
version = "0.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "975d43e4088e68e8b18c81e866185c71d1682d3cf923ed6e98be0c6173d80f77"
dependencies = [
 "clap",
]

[[package]]
name = "naga"
version = "0.8.3"

@@ -2520,12 +2521,6 @@ dependencies = [
 "serde_json",
]

[[package]]
name = "nodrop"
version = "0.1.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "72ef4a56884ca558e5ddb05a1d1e7e1bfd9a68d9ed024c21704cc98872dae1bb"

[[package]]
name = "notify"
version = "5.0.0-pre.14"

@@ -2583,16 +2578,6 @@ dependencies = [
 "zeroize",
]

[[package]]
name = "num-format"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bafe4179722c2894288ee77a9f044f02811c86af699344c498b0840c698a2465"
dependencies = [
 "arrayvec 0.4.12",
 "itoa 0.4.8",
]

[[package]]
name = "num-integer"
version = "0.1.44"

@@ -5086,7 +5071,7 @@ version = "0.12.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c4688c000eb841ca55f7b35db659b78d6e1cd77d7caf8fb929f4e181f754047d"
dependencies = [
 "arrayvec 0.7.2",
 "arrayvec",
 "bitflags",
 "cfg_aliases",
 "codespan-reporting",

@@ -5110,7 +5095,7 @@ version = "0.12.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "62fa524903bf336b51a399f0d586f3c30af94101149588678ef147c30be89e53"
dependencies = [
 "arrayvec 0.7.2",
 "arrayvec",
 "ash",
 "bit-set",
 "bitflags",
@@ -74,9 +74,9 @@ import_map = "=0.9.0"
jsonc-parser = { version = "=0.19.0", features = ["serde"] }
libc = "=0.2.121"
log = { version = "=0.4.14", features = ["serde"] }
mitata = '=0.0.6'
node_resolver = "=0.1.1"
notify = "=5.0.0-pre.14"
num-format = "=0.4.0"
once_cell = "=1.10.0"
os_pipe = "=1.0.1"
percent-encoding = "=2.1.0"
cli/dts/lib.deno.unstable.d.ts (vendored, 14 lines changed)

@@ -8,14 +8,12 @@ declare namespace Deno {
    fn: () => void | Promise<void>;
    name: string;
    ignore?: boolean;
    /** Specify number of iterations benchmark should perform. Defaults to 1000. */
    n?: number;
    /** Specify number of warmup iterations benchmark should perform. Defaults
     * to 1000.
     *
     * These iterations are not measured. It allows the code to be optimized
     * by JIT compiler before measuring its performance. */
    warmup?: number;
    /** Group name for the benchmark.
     * Grouped benchmarks produce a time summary */
    group?: string;
    /** Benchmark should be used as the baseline for other benchmarks
     * If there are multiple baselines in a group, the first one is used as the baseline */
    baseline?: boolean;
    /** If at least one bench has `only` set to true, only run benches that have
     * `only` set to true and fail the bench suite. */
    only?: boolean;
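For existing bench code, the practical effect of this typing change is that the "n" and "warmup" options must be dropped. A hedged before/after sketch (the bench body is illustrative):

// Before this commit:
Deno.bench({ name: "parse", n: 1000, warmup: 1000, fn: () => JSON.parse("[1,2,3]") });

// After this commit; iteration and warmup counts are chosen automatically:
Deno.bench({ name: "parse", fn: () => JSON.parse("[1,2,3]") });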
@@ -111,6 +111,12 @@ itest!(finally_timeout {
  output: "bench/finally_timeout.out",
});

itest!(group_baseline {
  args: "bench --unstable bench/group_baseline.ts",
  exit_code: 0,
  output: "bench/group_baseline.out",
});

itest!(unresolved_promise {
  args: "bench --unstable bench/unresolved_promise.ts",
  exit_code: 1,
cli/tests/testdata/bench/allow_all.out (vendored, 37 lines changed)

@@ -1,18 +1,21 @@
[WILDCARD]
running 14 benches from [WILDCARD]
bench read false ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok [WILDCARD]
bench read true ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok [WILDCARD]
bench write false ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok [WILDCARD]
bench write true ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok [WILDCARD]
bench net false ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok [WILDCARD]
bench net true ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok [WILDCARD]
bench env false ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok [WILDCARD]
bench env true ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok [WILDCARD]
bench run false ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok [WILDCARD]
bench run true ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok [WILDCARD]
bench ffi false ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok [WILDCARD]
bench ffi true ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok [WILDCARD]
bench hrtime false ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok [WILDCARD]
bench hrtime true ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok [WILDCARD]
Check [WILDCARD]/bench/allow_all.ts
cpu: [WILDCARD]
runtime: deno [WILDCARD] ([WILDCARD])

bench result: ok. 14 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out [WILDCARD]
[WILDCARD]/bench/allow_all.ts
benchmark time (avg) (min … max) p75 p99 p995
---------------------------------------------------- -----------------------------
read false [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
read true [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
write false [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
write true [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
net false [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
net true [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
env false [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
env true [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
run false [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
run true [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
ffi false [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
ffi true [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
hrtime false [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
hrtime true [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
cli/tests/testdata/bench/allow_none.out (vendored, 59 lines changed)

@@ -1,51 +1,22 @@
Check [WILDCARD]/bench/allow_none.ts
cpu: [WILDCARD]
runtime: deno [WILDCARD] ([WILDCARD])

[WILDCARD]/bench/allow_none.ts
benchmark time (avg) (min … max) p75 p99 p995
------------------------------------------------- -----------------------------
read error: PermissionDenied: Can't escalate parent thread permissions
[WILDCARD]
running 7 benches from [WILDCARD]
bench read ... 1000 iterations FAILED [WILDCARD]
bench write ... 1000 iterations FAILED [WILDCARD]
bench net ... 1000 iterations FAILED [WILDCARD]
bench env ... 1000 iterations FAILED [WILDCARD]
bench run ... 1000 iterations FAILED [WILDCARD]
bench ffi ... 1000 iterations FAILED [WILDCARD]
bench hrtime ... 1000 iterations FAILED [WILDCARD]

failures:

read
PermissionDenied: Can't escalate parent thread permissions
write error: PermissionDenied: Can't escalate parent thread permissions
[WILDCARD]

write
PermissionDenied: Can't escalate parent thread permissions
net error: PermissionDenied: Can't escalate parent thread permissions
[WILDCARD]

net
PermissionDenied: Can't escalate parent thread permissions
env error: PermissionDenied: Can't escalate parent thread permissions
[WILDCARD]

env
PermissionDenied: Can't escalate parent thread permissions
run error: PermissionDenied: Can't escalate parent thread permissions
[WILDCARD]

run
PermissionDenied: Can't escalate parent thread permissions
ffi error: PermissionDenied: Can't escalate parent thread permissions
[WILDCARD]

ffi
PermissionDenied: Can't escalate parent thread permissions
hrtime error: PermissionDenied: Can't escalate parent thread permissions
[WILDCARD]

hrtime
PermissionDenied: Can't escalate parent thread permissions
[WILDCARD]

failures:

read
write
net
env
run
ffi
hrtime

bench result: FAILED. 0 passed; 7 failed; 0 ignored; 0 measured; 0 filtered out [WILDCARD]
error: Bench failed
cli/tests/testdata/bench/clear_timeout.out (vendored, 14 lines changed)

@@ -1,8 +1,10 @@
Check [WILDCARD]/bench/clear_timeout.ts
running 3 benches from [WILDCARD]/bench/clear_timeout.ts
bench bench1 ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])
bench bench2 ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])
bench bench3 ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])

bench result: ok. 3 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])
cpu: [WILDCARD]
runtime: deno [WILDCARD] ([WILDCARD])

[WILDCARD]/bench/clear_timeout.ts
benchmark time (avg) (min … max) p75 p99 p995
------------------------------------------------- -----------------------------
bench1 [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
bench2 [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
bench3 [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
cli/tests/testdata/bench/collect.out (vendored, 8 lines changed)

@@ -1,5 +1,7 @@
Check [WILDCARD]/bench/collect/bench.ts
running 0 benches from [WILDCARD]/bench/collect/bench.ts

bench result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])
cpu: [WILDCARD]
runtime: deno [WILDCARD] ([WILDCARD])

[WILDCARD]/bench/collect/bench.ts
benchmark time (avg) (min … max) p75 p99 p995
------------------------------------------------- -----------------------------
cli/tests/testdata/bench/exit_sanitizer.out (vendored, 43 lines changed)

@@ -1,35 +1,14 @@
Check [WILDCARD]/bench/exit_sanitizer.ts
running 3 benches from [WILDCARD]/bench/exit_sanitizer.ts
bench exit(0) ... 1000 iterations FAILED ([WILDCARD])
bench exit(1) ... 1000 iterations FAILED ([WILDCARD])
bench exit(2) ... 1000 iterations FAILED ([WILDCARD])

failures:

exit(0)
AssertionError: Bench attempted to exit with exit code: 0
at [WILDCARD]
at [WILDCARD]/bench/exit_sanitizer.ts:2:8
at [WILDCARD]

exit(1)
AssertionError: Bench attempted to exit with exit code: 1
at [WILDCARD]
at [WILDCARD]/bench/exit_sanitizer.ts:6:8
at [WILDCARD]

exit(2)
AssertionError: Bench attempted to exit with exit code: 2
at [WILDCARD]
at [WILDCARD]/bench/exit_sanitizer.ts:10:8
at [WILDCARD]

failures:

exit(0)
exit(1)
exit(2)

bench result: FAILED. 0 passed; 3 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])
cpu: [WILDCARD]
runtime: deno [WILDCARD] ([WILDCARD])

[WILDCARD]/bench/exit_sanitizer.ts
benchmark time (avg) (min … max) p75 p99 p995
------------------------------------------------- -----------------------------
exit(0) error: AssertionError: Bench attempted to exit with exit code: 0
[WILDCARD]
exit(1) error: AssertionError: Bench attempted to exit with exit code: 1
[WILDCARD]
exit(2) error: AssertionError: Bench attempted to exit with exit code: 2
[WILDCARD]
error: Bench failed
cli/tests/testdata/bench/fail.out (vendored, 103 lines changed)

@@ -1,81 +1,28 @@
Check [WILDCARD]/bench/fail.ts
running 10 benches from [WILDCARD]/bench/fail.ts
bench bench0 ... 1000 iterations FAILED ([WILDCARD])
bench bench1 ... 1000 iterations FAILED ([WILDCARD])
bench bench2 ... 1000 iterations FAILED ([WILDCARD])
bench bench3 ... 1000 iterations FAILED ([WILDCARD])
bench bench4 ... 1000 iterations FAILED ([WILDCARD])
bench bench5 ... 1000 iterations FAILED ([WILDCARD])
bench bench6 ... 1000 iterations FAILED ([WILDCARD])
bench bench7 ... 1000 iterations FAILED ([WILDCARD])
bench bench8 ... 1000 iterations FAILED ([WILDCARD])
bench bench9 ... 1000 iterations FAILED ([WILDCARD])

failures:

bench0
Error
at [WILDCARD]/bench/fail.ts:2:9
at [WILDCARD]

bench1
Error
at [WILDCARD]/bench/fail.ts:5:9
at [WILDCARD]

bench2
Error
at [WILDCARD]/bench/fail.ts:8:9
at [WILDCARD]

bench3
Error
at [WILDCARD]/bench/fail.ts:11:9
at [WILDCARD]

bench4
Error
at [WILDCARD]/bench/fail.ts:14:9
at [WILDCARD]

bench5
Error
at [WILDCARD]/bench/fail.ts:17:9
at [WILDCARD]

bench6
Error
at [WILDCARD]/bench/fail.ts:20:9
at [WILDCARD]

bench7
Error
at [WILDCARD]/bench/fail.ts:23:9
at [WILDCARD]

bench8
Error
at [WILDCARD]/bench/fail.ts:26:9
at [WILDCARD]

bench9
Error
at [WILDCARD]/bench/fail.ts:29:9
at [WILDCARD]

failures:

bench0
bench1
bench2
bench3
bench4
bench5
bench6
bench7
bench8
bench9

bench result: FAILED. 0 passed; 10 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])
cpu: [WILDCARD]
runtime: deno [WILDCARD] ([WILDCARD])

[WILDCARD]/bench/fail.ts
benchmark time (avg) (min … max) p75 p99 p995
------------------------------------------------- -----------------------------
bench0 error: Error
[WILDCARD]
bench1 error: Error
[WILDCARD]
bench2 error: Error
[WILDCARD]
bench3 error: Error
[WILDCARD]
bench4 error: Error
[WILDCARD]
bench5 error: Error
[WILDCARD]
bench6 error: Error
[WILDCARD]
bench7 error: Error
[WILDCARD]
bench8 error: Error
[WILDCARD]
bench9 error: Error
[WILDCARD]
error: Bench failed
cli/tests/testdata/bench/filter.out (vendored, 22 lines changed)

@@ -1,12 +1,20 @@
Check [WILDCARD]/bench/filter/a_bench.ts
Check [WILDCARD]/bench/filter/b_bench.ts
Check [WILDCARD]/bench/filter/c_bench.ts
running 1 bench from [WILDCARD]/bench/filter/a_bench.ts
bench foo ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])
running 1 bench from [WILDCARD]/bench/filter/b_bench.ts
bench foo ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])
running 1 bench from [WILDCARD]/bench/filter/c_bench.ts
bench foo ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])
cpu: [WILDCARD]
runtime: deno [WILDCARD] ([WILDCARD])

bench result: ok. 3 passed; 0 failed; 0 ignored; 0 measured; 6 filtered out ([WILDCARD])
[WILDCARD]/bench/filter/a_bench.ts
benchmark time (avg) (min … max) p75 p99 p995
------------------------------------------------- -----------------------------
foo [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]

[WILDCARD]/bench/filter/b_bench.ts
benchmark time (avg) (min … max) p75 p99 p995
------------------------------------------------- -----------------------------
foo [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]

[WILDCARD]/bench/filter/c_bench.ts
benchmark time (avg) (min … max) p75 p99 p995
------------------------------------------------- -----------------------------
foo [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
cli/tests/testdata/bench/finally_timeout.out (vendored, 24 lines changed)

@@ -1,19 +1,11 @@
Check [WILDCARD]/bench/finally_timeout.ts
running 2 benches from [WILDCARD]/bench/finally_timeout.ts
bench error ... 1000 iterations FAILED ([WILDCARD])
bench success ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])

failures:

error
Error: fail
at [WILDCARD]/bench/finally_timeout.ts:4:11
at [WILDCARD]

failures:

error

bench result: FAILED. 1 passed; 1 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])
cpu: [WILDCARD]
runtime: deno [WILDCARD] ([WILDCARD])

[WILDCARD]/bench/finally_timeout.ts
benchmark time (avg) (min … max) p75 p99 p995
------------------------------------------------- -----------------------------
error error: Error: fail
[WILDCARD]
success [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
error: Bench failed
cli/tests/testdata/bench/group_baseline.out (vendored, new file, 18 lines)

@@ -0,0 +1,18 @@
[WILDCARD]/bench/group_baseline.ts
benchmark time (avg) (min … max) p75 p99 p995
---------------------------------------------------- -----------------------------
noop [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
noop2 [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]

summary
noo[WILDCARD]
[WILDCARD]x times [WILDCARD] than noo[WILDCARD]

noop3 [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
parse url 2x [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
parse url 6x [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]

summary
parse url 2x
[WILDCARD]x times slower than noop3
[WILDCARD]x times faster than parse url 6x
cli/tests/testdata/bench/group_baseline.ts (vendored, new file, 18 lines)

@@ -0,0 +1,18 @@
Deno.bench("noop", () => {});
Deno.bench("noop2", { baseline: true }, () => {});

Deno.bench("noop3", { group: "url" }, () => {});

Deno.bench("parse url 2x", { group: "url", baseline: true }, () => {
  new URL("https://deno.land/std/http/server.ts");
  new URL("https://deno.land/std/http/server.ts");
});

Deno.bench("parse url 6x", { group: "url" }, () => {
  new URL("https://deno.land/std/http/server.ts");
  new URL("https://deno.land/std/http/server.ts");
  new URL("https://deno.land/std/http/server.ts");
  new URL("https://deno.land/std/http/server.ts");
  new URL("https://deno.land/std/http/server.ts");
  new URL("https://deno.land/std/http/server.ts");
});
cli/tests/testdata/bench/ignore.out (vendored, 18 lines changed)

@@ -1,15 +1,7 @@
Check [WILDCARD]/bench/ignore.ts
running 10 benches from [WILDCARD]/bench/ignore.ts
bench bench0 ... 1000 iterations ignored ([WILDCARD])
bench bench1 ... 1000 iterations ignored ([WILDCARD])
bench bench2 ... 1000 iterations ignored ([WILDCARD])
bench bench3 ... 1000 iterations ignored ([WILDCARD])
bench bench4 ... 1000 iterations ignored ([WILDCARD])
bench bench5 ... 1000 iterations ignored ([WILDCARD])
bench bench6 ... 1000 iterations ignored ([WILDCARD])
bench bench7 ... 1000 iterations ignored ([WILDCARD])
bench bench8 ... 1000 iterations ignored ([WILDCARD])
bench bench9 ... 1000 iterations ignored ([WILDCARD])

bench result: ok. 0 passed; 0 failed; 10 ignored; 0 measured; 0 filtered out ([WILDCARD])
cpu: [WILDCARD]
runtime: deno [WILDCARD] ([WILDCARD])

[WILDCARD]/bench/ignore.ts
benchmark time (avg) (min … max) p75 p99 p995
------------------------------------------------- -----------------------------
@@ -1,6 +1,7 @@
Check [WILDCARD]/bench/ignore_permissions.ts
running 1 bench from [WILDCARD]/bench/ignore_permissions.ts
bench ignore ... 1000 iterations ignored ([WILDCARD])

bench result: ok. 0 passed; 0 failed; 1 ignored; 0 measured; 0 filtered out ([WILDCARD])
cpu: [WILDCARD]
runtime: deno [WILDCARD] ([WILDCARD])

[WILDCARD]/bench/ignore_permissions.ts
benchmark time (avg) (min … max) p75 p99 p995
------------------------------------------------- -----------------------------
cli/tests/testdata/bench/interval.out (vendored, 8 lines changed)

@@ -1,5 +1,7 @@
Check [WILDCARD]/bench/interval.ts
running 0 benches from [WILDCARD]/bench/interval.ts

bench result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])
cpu: [WILDCARD]
runtime: deno [WILDCARD] ([WILDCARD])

[WILDCARD]/bench/interval.ts
benchmark time (avg) (min … max) p75 p99 p995
------------------------------------------------- -----------------------------
cli/tests/testdata/bench/load_unload.out (vendored, 10 lines changed)

@@ -1,6 +1,8 @@
Check [WILDCARD]/bench/load_unload.ts
running 1 bench from [WILDCARD]/bench/load_unload.ts
bench bench ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])

bench result: ok. 1 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])
cpu: [WILDCARD]
runtime: deno [WILDCARD] ([WILDCARD])

[WILDCARD]/bench/load_unload.ts
benchmark time (avg) (min … max) p75 p99 p995
------------------------------------------------- -----------------------------
bench [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
cli/tests/testdata/bench/meta.out (vendored, 8 lines changed)

@@ -1,7 +1,9 @@
Check [WILDCARD]/bench/meta.ts
import.meta.main: false
import.meta.url: [WILDCARD]/bench/meta.ts
running 0 benches from [WILDCARD]/bench/meta.ts

bench result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])
cpu: [WILDCARD]
runtime: deno [WILDCARD] ([WILDCARD])

[WILDCARD]/bench/meta.ts
benchmark time (avg) (min … max) p75 p99 p995
------------------------------------------------- -----------------------------
cli/tests/testdata/bench/no_check.out (vendored, 3 lines changed)

@@ -1,6 +1,3 @@

bench result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])

error: Uncaught TypeError: Cannot read properties of undefined (reading 'fn')
Deno.bench();
^
@@ -1,16 +1,9 @@
running 1 bench from [WILDCARD]no_prompt_by_default.ts
bench no prompt ... 1000 iterations FAILED ([WILDCARD]ms)
[WILDCARD]cpu: [WILDCARD]
runtime: deno [WILDCARD] ([WILDCARD])

failures:

no prompt
PermissionDenied: Requires read access to "./some_file.txt", run again with the --allow-read flag
[WILDCARD]/bench/no_prompt_by_default.ts
benchmark time (avg) (min … max) p75 p99 p995
------------------------------------------------- -----------------------------
no prompt error: PermissionDenied: Requires read access to "./some_file.txt", run again with the --allow-read flag
[WILDCARD]

failures:

no prompt

bench result: FAILED. 0 passed; 1 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD]ms)

error: Bench failed
@@ -1,16 +1,9 @@
running 1 bench from [WILDCARD]/no_prompt_with_denied_perms.ts
bench no prompt ... 1000 iterations FAILED ([WILDCARD]ms)
[WILDCARD]cpu: [WILDCARD]
runtime: deno [WILDCARD] ([WILDCARD])

failures:

no prompt
PermissionDenied: Requires read access to "./some_file.txt", run again with the --allow-read flag
[WILDCARD]/bench/no_prompt_with_denied_perms.ts
benchmark time (avg) (min … max) p75 p99 p995
------------------------------------------------- -----------------------------
no prompt error: PermissionDenied: Requires read access to "./some_file.txt", run again with the --allow-read flag
[WILDCARD]

failures:

no prompt

bench result: FAILED. 0 passed; 1 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD]ms)

error: Bench failed
cli/tests/testdata/bench/only.out (vendored, 10 lines changed)

@@ -1,7 +1,9 @@
Check [WILDCARD]/bench/only.ts
running 1 bench from [WILDCARD]/bench/only.ts
bench only ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])

bench result: ok. 1 passed; 0 failed; 0 ignored; 0 measured; 2 filtered out ([WILDCARD])
cpu: [WILDCARD]
runtime: deno [WILDCARD] ([WILDCARD])

[WILDCARD]/bench/only.ts
benchmark time (avg) (min … max) p75 p99 p995
------------------------------------------------- -----------------------------
only [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
error: Bench failed because the "only" option was used
cli/tests/testdata/bench/overloads.out (vendored, 19 lines changed)

@@ -1,11 +1,12 @@
Check [WILDCARD]/bench/overloads.ts
running 6 benches from [WILDCARD]/bench/overloads.ts
bench bench0 ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])
bench bench1 ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])
bench bench2 ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])
bench bench3 ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])
bench bench4 ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])
bench bench5 ... 1000 iterations ignored ([WILDCARD])

bench result: ok. 5 passed; 0 failed; 1 ignored; 0 measured; 0 filtered out ([WILDCARD])
cpu: [WILDCARD]
runtime: deno [WILDCARD] ([WILDCARD])

[WILDCARD]/bench/overloads.ts
benchmark time (avg) (min … max) p75 p99 p995
------------------------------------------------- -----------------------------
bench0 [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
bench1 [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
bench2 [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
bench3 [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
bench4 [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
cli/tests/testdata/bench/pass.out (vendored, 28 lines changed)

@@ -1,15 +1,17 @@
Check [WILDCARD]/bench/pass.ts
running 10 benches from [WILDCARD]/bench/pass.ts
bench bench0 ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])
bench bench1 ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])
bench bench2 ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])
bench bench3 ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])
bench bench4 ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])
bench bench5 ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])
bench bench6 ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])
bench bench7 ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])
bench bench8 ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])
bench bench9 ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])

bench result: ok. 10 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])
cpu: [WILDCARD]
runtime: deno [WILDCARD] ([WILDCARD])

[WILDCARD]/bench/pass.ts
benchmark time (avg) (min … max) p75 p99 p995
------------------------------------------------- -----------------------------
bench0 [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
bench1 [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
bench2 [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
bench3 [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
bench4 [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
bench5 [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
bench6 [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
bench7 [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
bench8 [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
bench9 [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
cli/tests/testdata/bench/quiet.out (vendored, 16 lines changed)

@@ -1,8 +1,10 @@
running 4 benches from [WILDCARD]/bench/quiet.ts
bench console.log ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok [WILDCARD]
bench console.error ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok [WILDCARD]
bench console.info ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok [WILDCARD]
bench console.warn ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok [WILDCARD]

bench result: ok. 4 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])
cpu: [WILDCARD]
runtime: deno [WILDCARD] ([WILDCARD])

[WILDCARD]/bench/quiet.ts
benchmark time (avg) (min … max) p75 p99 p995
----------------------------------------------------- -----------------------------
console.log [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
console.error [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
console.info [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
console.warn [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
@@ -1,7 +1,4 @@
Check [WILDCARD]/bench/unhandled_rejection.ts

bench result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])

error: Uncaught (in promise) Error: rejection
reject(new Error("rejection"));
^
@@ -1,5 +1,2 @@
Check [WILDCARD]/bench/unresolved_promise.ts

bench result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])

error: Module evaluation is still pending but there are no pending ops or dynamic imports. This situation is often caused by unresolved promises.
@@ -5,7 +5,6 @@ use crate::cache::CacherLoader;
use crate::colors;
use crate::compat;
use crate::create_main_worker;
use crate::display;
use crate::emit;
use crate::file_watcher;
use crate::file_watcher::ResolutionResult;

@@ -35,15 +34,11 @@ use deno_graph::ModuleKind;
use deno_runtime::permissions::Permissions;
use deno_runtime::tokio_util::run_basic;
use log::Level;
use num_format::Locale;
use num_format::ToFormattedString;
use serde::Deserialize;
use serde::Serialize;
use std::collections::HashSet;
use std::io::Write;
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration;
use std::time::Instant;
use tokio::sync::mpsc::unbounded_channel;
use tokio::sync::mpsc::UnboundedSender;

@@ -53,214 +48,302 @@ struct BenchSpecifierOptions {
  filter: Option<String>,
}

#[derive(Debug, Clone, PartialEq, Deserialize, Eq, Hash)]
#[serde(rename_all = "camelCase")]
pub struct BenchDescription {
  pub origin: String,
  pub name: String,
  pub iterations: u64,
}

#[derive(Debug, Clone, PartialEq, Deserialize)]
#[serde(rename_all = "camelCase")]
pub enum BenchOutput {
  Console(String),
}

#[derive(Debug, Clone, PartialEq, Deserialize)]
#[serde(rename_all = "camelCase")]
pub enum BenchResult {
  Ok,
  Ignored,
  Failed(String),
}

#[derive(Debug, Clone, PartialEq, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct BenchPlan {
  pub origin: String,
  pub total: usize,
  pub filtered_out: usize,
  pub origin: String,
  pub used_only: bool,
  pub names: Vec<String>,
}

#[derive(Debug, Clone, Deserialize)]
#[serde(rename_all = "camelCase")]
pub enum BenchEvent {
  Plan(BenchPlan),
  Wait(BenchDescription),
  Output(BenchOutput),
  IterationTime(u64),
  Result(BenchDescription, BenchResult, u64),
  Wait(BenchMetadata),
  Result(String, BenchResult),
}

#[derive(Debug, Clone)]
pub struct BenchMeasures {
  pub iterations: u64,
  pub current_start: Instant,
  pub measures: Vec<u128>,
#[derive(Debug, Clone, PartialEq, Deserialize)]
#[serde(rename_all = "camelCase")]
pub enum BenchResult {
  Ok(BenchMeasurement),
  Failed(BenchFailure),
}

#[derive(Debug, Clone)]
pub struct BenchSummary {
#[derive(Debug, Clone, Serialize)]
pub struct BenchReport {
  pub total: usize,
  pub passed: usize,
  pub failed: usize,
  pub ignored: usize,
  pub filtered_out: usize,
  pub measured: usize,
  pub measures: Vec<BenchMeasures>,
  pub current_bench: BenchMeasures,
  pub failures: Vec<(BenchDescription, String)>,
  pub failures: Vec<BenchFailure>,
  pub measurements: Vec<BenchMeasurement>,
}

impl BenchSummary {
#[derive(Debug, Clone, PartialEq, Deserialize, Eq, Hash)]
pub struct BenchMetadata {
  pub name: String,
  pub origin: String,
  pub baseline: bool,
  pub group: Option<String>,
}

#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct BenchMeasurement {
  pub name: String,
  pub baseline: bool,
  pub stats: BenchStats,
  pub group: Option<String>,
}

#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct BenchFailure {
  pub name: String,
  pub error: String,
  pub baseline: bool,
  pub group: Option<String>,
}

#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct BenchStats {
  pub n: u64,
  pub min: f64,
  pub max: f64,
  pub avg: f64,
  pub p75: f64,
  pub p99: f64,
  pub p995: f64,
  pub p999: f64,
}

impl BenchReport {
  pub fn new() -> Self {
    Self {
      total: 0,
      passed: 0,
      failed: 0,
      ignored: 0,
      filtered_out: 0,
      measured: 0,
      measures: Vec::new(),
      current_bench: BenchMeasures {
        iterations: 0,
        current_start: Instant::now(),
        measures: vec![],
      },
      failures: Vec::new(),
      measurements: Vec::new(),
    }
  }
}

  fn has_failed(&self) -> bool {
    self.failed > 0 || !self.failures.is_empty()
  }

  fn has_pending(&self) -> bool {
    self.total - self.passed - self.failed - self.ignored > 0
  }
fn create_reporter(show_output: bool) -> Box<dyn BenchReporter + Send> {
  Box::new(ConsoleReporter::new(show_output))
}

pub trait BenchReporter {
  fn report_group_summary(&mut self);
  fn report_plan(&mut self, plan: &BenchPlan);
  fn report_wait(&mut self, description: &BenchDescription);
  fn report_end(&mut self, report: &BenchReport);
  fn report_wait(&mut self, wait: &BenchMetadata);
  fn report_output(&mut self, output: &BenchOutput);
  fn report_result(
    &mut self,
    description: &BenchDescription,
    result: &BenchResult,
    elapsed: u64,
    current_bench: &BenchMeasures,
  );
  fn report_summary(&mut self, summary: &BenchSummary, elapsed: &Duration);
  fn report_result(&mut self, result: &BenchResult);
}

struct PrettyBenchReporter {
  echo_output: bool,
struct ConsoleReporter {
  name: String,
  show_output: bool,
  has_ungrouped: bool,
  group: Option<String>,
  baseline: Option<BenchMeasurement>,
  group_measurements: Vec<BenchMeasurement>,
  options: Option<mitata::reporter::Options>,
}

impl PrettyBenchReporter {
  fn new(echo_output: bool) -> Self {
    Self { echo_output }
  }

  fn force_report_wait(&mut self, description: &BenchDescription) {
    print!(
      "bench {} ... {} iterations ",
      description.name, description.iterations
    );
    // flush for faster feedback when line buffered
    std::io::stdout().flush().unwrap();
impl ConsoleReporter {
  fn new(show_output: bool) -> Self {
    Self {
      show_output,
      group: None,
      options: None,
      baseline: None,
      name: String::new(),
      has_ungrouped: false,
      group_measurements: Vec::new(),
    }
  }
}

impl BenchReporter for PrettyBenchReporter {
impl BenchReporter for ConsoleReporter {
  #[cold]
  fn report_plan(&mut self, plan: &BenchPlan) {
    let inflection = if plan.total == 1 { "bench" } else { "benches" };
    println!("running {} {} from {}", plan.total, inflection, plan.origin);
    use std::sync::atomic::AtomicBool;
    use std::sync::atomic::Ordering;
    static FIRST_PLAN: AtomicBool = AtomicBool::new(true);

    self.options = Some(mitata::reporter::Options::new(
      &plan.names.iter().map(|x| x.as_str()).collect::<Vec<&str>>(),
    ));

    let options = self.options.as_mut().unwrap();

    options.percentiles = true;
    options.colors = colors::use_color();

    if FIRST_PLAN
      .compare_exchange(true, false, Ordering::SeqCst, Ordering::SeqCst)
      .is_ok()
    {
      println!("{}", colors::gray(format!("cpu: {}", mitata::cpu::name())));
      println!(
        "{}\n",
        colors::gray(format!(
          "runtime: deno {} ({})",
          crate::version::deno(),
          env!("TARGET")
        ))
      );
    } else {
      println!();
    }

    println!(
      "{}\n{}\n{}",
      colors::gray(&plan.origin),
      mitata::reporter::header(options),
      mitata::reporter::br(options)
    );
  }

  fn report_wait(&mut self, description: &BenchDescription) {
    self.force_report_wait(description);
  fn report_wait(&mut self, wait: &BenchMetadata) {
    self.name = wait.name.clone();

    match &wait.group {
      None => {
        self.has_ungrouped = true;
      }

      Some(group) => {
        if self.group.is_none()
          && self.has_ungrouped
          && self.group_measurements.is_empty()
        {
          println!();
        }

        if None == self.group || group != self.group.as_ref().unwrap() {
          self.report_group_summary();
        }

        if (self.group.is_none() && self.has_ungrouped)
          || (self.group.is_some() && self.group_measurements.is_empty())
        {
          println!();
        }

        self.group = Some(group.clone());
      }
    }
  }

  fn report_output(&mut self, output: &BenchOutput) {
    if self.echo_output {
    if self.show_output {
      match output {
        BenchOutput::Console(line) => print!("{}", line),
        BenchOutput::Console(line) => {
          print!("{} {}", colors::gray(format!("{}:", self.name)), line)
        }
      }
    }
  }

  fn report_result(
    &mut self,
    _description: &BenchDescription,
    result: &BenchResult,
    elapsed: u64,
    current_bench: &BenchMeasures,
  ) {
    let status = match result {
      BenchResult::Ok => {
        let ns_op = current_bench.measures.iter().sum::<u128>()
          / current_bench.iterations as u128;
        let min_op = current_bench.measures.iter().min().unwrap_or(&0);
        let max_op = current_bench.measures.iter().max().unwrap_or(&0);
        format!(
          "{} ns/iter ({}..{} ns/iter) {}",
          ns_op.to_formatted_string(&Locale::en),
          min_op.to_formatted_string(&Locale::en),
          max_op.to_formatted_string(&Locale::en),
          colors::green("ok")
  fn report_result(&mut self, result: &BenchResult) {
    let options = self.options.as_ref().unwrap();

    match result {
      BenchResult::Ok(bench) => {
        let mut bench = bench.to_owned();

        if bench.baseline && self.baseline.is_none() {
          self.baseline = Some(bench.clone());
        } else {
          bench.baseline = false;
        }

        self.group_measurements.push(bench.clone());

        println!(
          "{}",
          mitata::reporter::benchmark(
            &bench.name,
            &mitata::reporter::BenchmarkStats {
              avg: bench.stats.avg,
              min: bench.stats.min,
              max: bench.stats.max,
              p75: bench.stats.p75,
              p99: bench.stats.p99,
              p995: bench.stats.p995,
            },
            options
          )
        );
      }

      BenchResult::Failed(failure) => {
        println!(
          "{}",
          mitata::reporter::benchmark_error(
            &failure.name,
            &mitata::reporter::Error {
              stack: None,
              message: failure.error.clone(),
            },
            options
          )
        )
      }
      BenchResult::Ignored => colors::yellow("ignored").to_string(),
      BenchResult::Failed(_) => colors::red("FAILED").to_string(),
    };

    println!(
      "{} {}",
      status,
      colors::gray(format!("({})", display::human_elapsed(elapsed.into())))
    );
  }

  fn report_summary(&mut self, summary: &BenchSummary, elapsed: &Duration) {
    if !summary.failures.is_empty() {
      println!("\nfailures:\n");
      for (description, error) in &summary.failures {
        println!("{}", description.name);
        println!("{}", error);
        println!();
      }
  fn report_group_summary(&mut self) {
    let options = match self.options.as_ref() {
      None => return,
      Some(options) => options,
    };

      println!("failures:\n");
      for (description, _) in &summary.failures {
        println!("\t{}", description.name);
      }
    if 2 <= self.group_measurements.len()
      && (self.group.is_some()
        || (self.group.is_none() && self.baseline.is_some()))
    {
      println!(
        "\n{}",
        mitata::reporter::summary(
          &self
            .group_measurements
            .iter()
            .map(|b| mitata::reporter::GroupBenchmark {
              name: b.name.clone(),
              baseline: b.baseline,
              group: b.group.as_deref().unwrap_or("").to_owned(),

              stats: mitata::reporter::BenchmarkStats {
                avg: b.stats.avg,
                min: b.stats.min,
                max: b.stats.max,
                p75: b.stats.p75,
                p99: b.stats.p99,
                p995: b.stats.p995,
              },
            })
            .collect::<Vec<mitata::reporter::GroupBenchmark>>(),
          options
        )
      );
    }

    let status = if summary.has_failed() || summary.has_pending() {
      colors::red("FAILED").to_string()
    } else {
      colors::green("ok").to_string()
    };

    println!(
      "\nbench result: {}. {} passed; {} failed; {} ignored; {} measured; {} filtered out {}\n",
      status,
      summary.passed,
      summary.failed,
      summary.ignored,
      summary.measured,
      summary.filtered_out,
      colors::gray(format!("({})", display::human_elapsed(elapsed.as_millis()))),
    );
    self.baseline = None;
    self.group_measurements.clear();
  }
}

fn create_reporter(echo_output: bool) -> Box<dyn BenchReporter + Send> {
  Box::new(PrettyBenchReporter::new(echo_output))
  fn report_end(&mut self, _: &BenchReport) {
    self.report_group_summary();
  }
}

/// Type check a collection of module and document specifiers.

@@ -367,20 +450,16 @@ async fn bench_specifiers(
    .buffer_unordered(1)
    .collect::<Vec<Result<Result<(), AnyError>, tokio::task::JoinError>>>();

  let mut reporter = create_reporter(log_level != Some(Level::Error));

  let handler = {
    tokio::task::spawn(async move {
      let earlier = Instant::now();
      let mut summary = BenchSummary::new();
      let mut used_only = false;
      let mut report = BenchReport::new();
      let mut reporter = create_reporter(log_level != Some(Level::Error));

      while let Some(event) = receiver.recv().await {
        match event {
          BenchEvent::Plan(plan) => {
            summary.total += plan.total;
            summary.filtered_out += plan.filtered_out;

            report.total += plan.total;
            if plan.used_only {
              used_only = true;
            }

@@ -388,51 +467,32 @@ async fn bench_specifiers(
            reporter.report_plan(&plan);
          }

          BenchEvent::Wait(description) => {
            reporter.report_wait(&description);
            summary.current_bench = BenchMeasures {
              iterations: description.iterations,
              current_start: Instant::now(),
              measures: Vec::with_capacity(
                description.iterations.try_into().unwrap(),
              ),
            };
          BenchEvent::Wait(metadata) => {
            reporter.report_wait(&metadata);
          }

          BenchEvent::Output(output) => {
            reporter.report_output(&output);
          }

          BenchEvent::IterationTime(iter_time) => {
            summary.current_bench.measures.push(iter_time.into())
          }

          BenchEvent::Result(description, result, elapsed) => {
          BenchEvent::Result(_origin, result) => {
            match &result {
              BenchResult::Ok => {
                summary.passed += 1;
              BenchResult::Ok(bench) => {
                report.measurements.push(bench.clone());
              }
              BenchResult::Ignored => {
                summary.ignored += 1;
              }
              BenchResult::Failed(error) => {
                summary.failed += 1;
                summary.failures.push((description.clone(), error.clone()));
              }
            }

            reporter.report_result(
              &description,
              &result,
              elapsed,
              &summary.current_bench,
            );
              BenchResult::Failed(failure) => {
                report.failed += 1;
                report.failures.push(failure.clone());
              }
            };

            reporter.report_result(&result);
          }
        }
      }

      let elapsed = Instant::now().duration_since(earlier);
      reporter.report_summary(&summary, &elapsed);
      reporter.report_end(&report);

      if used_only {
        return Err(generic_error(

@@ -440,7 +500,7 @@ async fn bench_specifiers(
    ));
  }

  if summary.failed > 0 {
  if report.failed > 0 {
    return Err(generic_error("Bench failed"));
  }

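For orientation, the events above are what the JavaScript runtime feeds to this Rust code through "op_dispatch_bench_event". A TypeScript sketch of the payload shapes implied by the new types; the field names assume the "camelCase" serde renaming shown above, so treat this as an inferred illustration rather than the source of truth:

type BenchStats = {
  n: number; min: number; max: number; avg: number;
  p75: number; p99: number; p995: number; p999: number;
};
type BenchMeasurement = { name: string; baseline: boolean; stats: BenchStats; group?: string };
type BenchFailure = { name: string; error: string; baseline: boolean; group?: string };
type BenchEvent =
  | { plan: { origin: string; total: number; filteredOut: number; usedOnly: boolean; names: string[] } }
  | { wait: { name: string; origin: string; baseline: boolean; group?: string } }
  | { output: { console: string } }
  | { result: [origin: string, result: { ok: BenchMeasurement } | { failed: BenchFailure }] };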
@@ -9,16 +9,20 @@
  const { assert } = window.__bootstrap.infra;
  const {
    AggregateErrorPrototype,
    ArrayFrom,
    ArrayPrototypeFilter,
    ArrayPrototypeJoin,
    ArrayPrototypeMap,
    ArrayPrototypePush,
    ArrayPrototypeShift,
    ArrayPrototypeSome,
    ArrayPrototypeSort,
    DateNow,
    Error,
    FunctionPrototype,
    Map,
    MapPrototypeHas,
    MathCeil,
    ObjectKeys,
    ObjectPrototypeIsPrototypeOf,
    Promise,

@@ -434,6 +438,27 @@
    };
  }

  function assertExitSync(fn, isTest) {
    return function exitSanitizer(...params) {
      setExitHandler((exitCode) => {
        assert(
          false,
          `${
            isTest ? "Test case" : "Bench"
          } attempted to exit with exit code: ${exitCode}`,
        );
      });

      try {
        fn(...new SafeArrayIterator(params));
      } catch (err) {
        throw err;
      } finally {
        setExitHandler(null);
      }
    };
  }

  function assertTestStepScopes(fn) {
    /** @param step {TestStep} */
    return async function testStepSanitizer(step) {

@@ -721,18 +746,14 @@
      benchDef = { ...defaults, ...nameOrFnOrOptions, fn, name };
    }

    const AsyncFunction = (async () => {}).constructor;
    benchDef.async = AsyncFunction === benchDef.fn.constructor;

    benchDef.fn = wrapBenchFnWithSanitizers(
      reportBenchIteration(benchDef.fn),
      benchDef.fn,
      benchDef,
    );

    if (benchDef.permissions) {
      benchDef.fn = withPermissions(
        benchDef.fn,
        benchDef.permissions,
      );
    }

    ArrayPrototypePush(benches, benchDef);
  }

@@ -823,37 +844,166 @@
    }
  }

  async function runBench(bench) {
    if (bench.ignore) {
      return "ignored";
  function compareMeasurements(a, b) {
    if (a > b) return 1;
    if (a < b) return -1;

    return 0;
  }

  function benchStats(n, highPrecision, avg, min, max, all) {
    return {
      n,
      min,
      max,
      p75: all[MathCeil(n * (75 / 100)) - 1],
      p99: all[MathCeil(n * (99 / 100)) - 1],
      p995: all[MathCeil(n * (99.5 / 100)) - 1],
      p999: all[MathCeil(n * (99.9 / 100)) - 1],
      avg: !highPrecision ? (avg / n) : MathCeil(avg / n),
    };
  }

  async function benchMeasure(timeBudget, fn, step, sync) {
    let n = 0;
    let avg = 0;
    let wavg = 0;
    const all = [];
    let min = Infinity;
    let max = -Infinity;
    const lowPrecisionThresholdInNs = 1e4;

    // warmup step
    let c = 0;
    step.warmup = true;
    let iterations = 20;
    let budget = 10 * 1e6;

    if (sync) {
      while (budget > 0 || iterations-- > 0) {
        const t1 = benchNow();

        fn();
        const iterationTime = benchNow() - t1;

        c++;
        wavg += iterationTime;
        budget -= iterationTime;
      }
    } else {
      while (budget > 0 || iterations-- > 0) {
        const t1 = benchNow();

        await fn();
        const iterationTime = benchNow() - t1;

        c++;
        wavg += iterationTime;
        budget -= iterationTime;
      }
    }

    wavg /= c;

    // measure step
    step.warmup = false;

    if (wavg > lowPrecisionThresholdInNs) {
      let iterations = 10;
      let budget = timeBudget * 1e6;

      if (sync) {
        while (budget > 0 || iterations-- > 0) {
          const t1 = benchNow();

          fn();
          const iterationTime = benchNow() - t1;

          n++;
          avg += iterationTime;
          budget -= iterationTime;
          all.push(iterationTime);
          if (iterationTime < min) min = iterationTime;
          if (iterationTime > max) max = iterationTime;
        }
      } else {
        while (budget > 0 || iterations-- > 0) {
          const t1 = benchNow();

          await fn();
          const iterationTime = benchNow() - t1;

          n++;
          avg += iterationTime;
          budget -= iterationTime;
          all.push(iterationTime);
          if (iterationTime < min) min = iterationTime;
          if (iterationTime > max) max = iterationTime;
        }
      }
    } else {
      let iterations = 10;
      let budget = timeBudget * 1e6;

      if (sync) {
        while (budget > 0 || iterations-- > 0) {
          const t1 = benchNow();
          for (let c = 0; c < lowPrecisionThresholdInNs; c++) fn();
          const iterationTime = (benchNow() - t1) / lowPrecisionThresholdInNs;

          n++;
          avg += iterationTime;
          all.push(iterationTime);
          if (iterationTime < min) min = iterationTime;
          if (iterationTime > max) max = iterationTime;
          budget -= iterationTime * lowPrecisionThresholdInNs;
        }
      } else {
        while (budget > 0 || iterations-- > 0) {
          const t1 = benchNow();
          for (let c = 0; c < lowPrecisionThresholdInNs; c++) await fn();
          const iterationTime = (benchNow() - t1) / lowPrecisionThresholdInNs;

          n++;
          avg += iterationTime;
          all.push(iterationTime);
          if (iterationTime < min) min = iterationTime;
          if (iterationTime > max) max = iterationTime;
          budget -= iterationTime * lowPrecisionThresholdInNs;
        }
      }
    }

    all.sort(compareMeasurements);
    return benchStats(n, wavg > lowPrecisionThresholdInNs, avg, min, max, all);
  }

  async function runBench(bench) {
    const step = new BenchStep({
      name: bench.name,
      sanitizeExit: bench.sanitizeExit,
      warmup: false,
    });

    let token = null;

    try {
      const warmupIterations = bench.warmupIterations;
      step.warmup = true;

      for (let i = 0; i < warmupIterations; i++) {
        await bench.fn(step);
      if (bench.permissions) {
        token = core.opSync(
          "op_pledge_test_permissions",
          serializePermissions(bench.permissions),
        );
      }

      const iterations = bench.n;
      step.warmup = false;
      const benchTimeInMs = 500;
      const fn = bench.fn.bind(null, step);
      const stats = await benchMeasure(benchTimeInMs, fn, step, !bench.async);

      for (let i = 0; i < iterations; i++) {
        await bench.fn(step);
      }

      return "ok";
      return { ok: { stats, ...bench } };
    } catch (error) {
      return {
        "failed": formatError(error),
      };
      return { failed: { ...bench, error: formatError(error) } };
    } finally {
      if (token !== null) core.opSync("op_restore_test_permissions", token);
    }
  }

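The "benchStats" helper above derives its percentile fields by nearest-rank indexing into the sorted sample array. A standalone TypeScript sketch of the same computation (the function name is illustrative):

// Nearest-rank percentile over an ascending-sorted sample array,
// mirroring the p75/p99/p995/p999 indexing in benchStats.
function percentile(sortedAsc: number[], p: number): number {
  return sortedAsc[Math.ceil(sortedAsc.length * (p / 100)) - 1];
}

const samples = [3, 1, 2, 5, 4].sort((a, b) => a - b);
console.log(percentile(samples, 75)); // 4
console.log(percentile(samples, 99)); // 5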
@@ -913,35 +1063,16 @@
    });
  }

  function reportBenchResult(description, result, elapsed) {
  function reportBenchResult(origin, result) {
    core.opSync("op_dispatch_bench_event", {
      result: [description, result, elapsed],
      result: [origin, result],
    });
  }

  function reportBenchIteration(fn) {
    return async function benchIteration(step) {
      let now;
      if (!step.warmup) {
        now = benchNow();
      }
      await fn(step);
      if (!step.warmup) {
        reportIterationTime(benchNow() - now);
      }
    };
  }

  function benchNow() {
    return core.opSync("op_bench_now");
  }

  function reportIterationTime(time) {
    core.opSync("op_dispatch_bench_event", {
      iterationTime: time,
    });
  }

  async function runTests({
    filter = null,
    shuffle = null,

@@ -1013,32 +1144,34 @@
      createTestFilter(filter),
    );

    let groups = new Set();
    const benchmarks = ArrayPrototypeFilter(filtered, (bench) => !bench.ignore);

    // make sure ungrouped benchmarks are placed above grouped
    groups.add(undefined);

    for (const bench of benchmarks) {
      bench.group ||= undefined;
      groups.add(bench.group);
    }

    groups = ArrayFrom(groups);
    ArrayPrototypeSort(
      benchmarks,
      (a, b) => groups.indexOf(a.group) - groups.indexOf(b.group),
    );

    reportBenchPlan({
      origin,
      total: filtered.length,
      filteredOut: benches.length - filtered.length,
      total: benchmarks.length,
      usedOnly: only.length > 0,
      names: ArrayPrototypeMap(benchmarks, (bench) => bench.name),
    });

    for (const bench of filtered) {
      // TODO(bartlomieju): probably needs some validation?
      const iterations = bench.n ?? 1000;
      const warmupIterations = bench.warmup ?? 1000;
      const description = {
        origin,
        name: bench.name,
        iterations,
      };
      bench.n = iterations;
      bench.warmupIterations = warmupIterations;
      const earlier = DateNow();

      reportBenchWait(description);

      const result = await runBench(bench);
      const elapsed = DateNow() - earlier;

      reportBenchResult(description, result, elapsed);
    for (const bench of benchmarks) {
      bench.baseline = !!bench.baseline;
      reportBenchWait({ origin, ...bench });
      reportBenchResult(origin, await runBench(bench));
    }

    globalThis.console = originalConsole;

@@ -1420,7 +1553,7 @@
   */
  function wrapBenchFnWithSanitizers(fn, opts) {
    if (opts.sanitizeExit) {
      fn = assertExit(fn, false);
      fn = opts.async ? assertExit(fn, false) : assertExitSync(fn, false);
    }
    return fn;
  }