mirror of
https://github.com/denoland/deno.git
synced 2025-01-21 21:50:00 -05:00
Add throughput benchmark (#961)
This commit is contained in:
parent
51f9331ecb
commit
c814d5a914
6 changed files with 145 additions and 7 deletions
9
tests/echo_server.ts
Normal file
9
tests/echo_server.ts
Normal file
|
@ -0,0 +1,9 @@
|
|||
import { args, listen, copy } from "deno";
|
||||
const addr = args[1] || "127.0.0.1:4544";
|
||||
const listener = listen("tcp", addr);
|
||||
console.log("listening on", addr);
|
||||
listener.accept().then(async conn => {
|
||||
await copy(conn, conn);
|
||||
conn.close();
|
||||
listener.close();
|
||||
});
|
|
@ -13,6 +13,7 @@ import shutil
|
|||
from util import run, run_output, root_path, build_path, executable_suffix
|
||||
import tempfile
|
||||
import http_server
|
||||
import throughput_benchmark
|
||||
|
||||
# The list of the tuples of the benchmark name and arguments
|
||||
exec_time_benchmarks = [
|
||||
|
@ -116,6 +117,15 @@ def run_thread_count_benchmark(deno_path):
|
|||
return thread_count_map
|
||||
|
||||
|
||||
def run_throughput(deno_exe):
    # Run the throughput benchmarks for several payload sizes over both
    # transports (TCP echo and cat pipe). Returns a dict keyed like
    # "100M_tcp" mapping to elapsed seconds.
    results = {}
    for megs in (100, 10):
        results["%dM_tcp" % megs] = throughput_benchmark.tcp(deno_exe, megs)
        results["%dM_cat" % megs] = throughput_benchmark.cat(deno_exe, megs)
    return results
|
||||
|
||||
|
||||
def run_syscall_count_benchmark(deno_path):
|
||||
syscall_count_map = {}
|
||||
syscall_count_map["hello"] = get_strace_summary(
|
||||
|
@ -169,6 +179,10 @@ def main(argv):
|
|||
}
|
||||
|
||||
new_data["binary_size"] = get_binary_sizes(build_dir)
|
||||
# Cannot run throughput benchmark on windows because they don't have nc or
|
||||
# pipe.
|
||||
if os.name != 'nt':
|
||||
new_data["throughput"] = run_throughput(deno_path)
|
||||
if "linux" in sys.platform:
|
||||
# Thread count test, only on linux
|
||||
new_data["thread_count"] = run_thread_count_benchmark(deno_path)
|
||||
|
|
54
tools/throughput_benchmark.py
Executable file
54
tools/throughput_benchmark.py
Executable file
|
@ -0,0 +1,54 @@
|
|||
#!/usr/bin/env python
|
||||
# Copyright 2018 the Deno authors. All rights reserved. MIT license.
|
||||
# Measures deno throughput: pipes /dev/zero through deno's cat.ts or a
# deno TCP echo server and reports elapsed wall-clock seconds.
# Invoked by //tools/benchmark.py; can also be run standalone:
#   ./tools/throughput_benchmark.py out/debug/deno 100
||||
|
||||
import os
|
||||
import sys
|
||||
import util
|
||||
import time
|
||||
import subprocess
|
||||
|
||||
MB = 1024 * 1024
|
||||
ADDR = "127.0.0.1:4544"
|
||||
|
||||
|
||||
def cat(deno_exe, megs):
|
||||
size = megs * MB
|
||||
start = time.time()
|
||||
cmd = deno_exe + " tests/cat.ts /dev/zero | head -c %s " % size
|
||||
print cmd
|
||||
subprocess.check_output(cmd, shell=True)
|
||||
end = time.time()
|
||||
return end - start
|
||||
|
||||
|
||||
def tcp(deno_exe, megs):
|
||||
size = megs * MB
|
||||
# Run deno echo server in the background.
|
||||
echo_server = subprocess.Popen(
|
||||
[deno_exe, "--allow-net", "tests/echo_server.ts", ADDR])
|
||||
|
||||
time.sleep(1) # wait for deno to wake up. TODO racy.
|
||||
try:
|
||||
start = time.time()
|
||||
cmd = ("head -c %s /dev/zero " % size) + "| nc " + ADDR.replace(
|
||||
":", " ")
|
||||
print cmd
|
||||
subprocess.check_output(cmd, shell=True)
|
||||
end = time.time()
|
||||
return end - start
|
||||
finally:
|
||||
echo_server.kill()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
deno_exe = sys.argv[1]
|
||||
megs = int(sys.argv[2])
|
||||
if not deno_exe or not megs:
|
||||
print "Usage ./tools/throughput_benchmark.py out/debug/deno 100"
|
||||
sys.exit(1)
|
||||
secs = tcp_throughput_benchmark(sys.argv[1], megs)
|
||||
print secs, "seconds"
|
|
@ -16,18 +16,41 @@ export function getTravisData() {
|
|||
.then(data => data.builds.reverse());
|
||||
}
|
||||
|
||||
export function createExecTimeColumns(data) {
|
||||
const benchmarkNames = Object.keys(data[data.length - 1].benchmark);
|
||||
return benchmarkNames.map(name => [
|
||||
name,
|
||||
// Returns the variety names (e.g. "100M_tcp") recorded for the given
// benchmark category, taken from the most recent data entry.
function getBenchmarkVarieties(data, benchmarkName) {
  // The newest sha1's entry defines which varieties exist.
  const newest = data[data.length - 1];
  return Object.keys(newest[benchmarkName]);
}
|
||||
|
||||
// Builds c3 chart columns for one benchmark category: one column per
// variety, each holding that variety's value for every data point
// (null when a data point lacks the category or variety).
// For the "benchmark" category the stored value is an object and its
// `mean` field is charted; other categories store plain numbers.
// Note: this block as extracted contained interleaved removed-diff
// residue (a reference to an undefined `name`); reconstructed here.
export function createColumns(data, benchmarkName) {
  const varieties = getBenchmarkVarieties(data, benchmarkName);
  return varieties.map(variety => [
    variety,
    ...data.map(d => {
      if (d[benchmarkName] != null && d[benchmarkName][variety] != null) {
        const v = d[benchmarkName][variety];
        if (benchmarkName == "benchmark") {
          // Chart the mean; treat 0 as "no data" so c3 leaves a gap.
          const meanValue = v ? v.mean : 0;
          return meanValue || null;
        }
        return v;
      }
      return null;
    })
  ]);
}
|
||||
|
||||
// Chart columns for execution-time results, stored under "benchmark".
export function createExecTimeColumns(data) {
  const category = "benchmark";
  return createColumns(data, category);
}
|
||||
|
||||
// Chart columns for throughput results, stored under "throughput".
export function createThroughputColumns(data) {
  const category = "throughput";
  return createColumns(data, category);
}
|
||||
|
||||
export function createBinarySizeColumns(data) {
|
||||
const propName = "binary_size";
|
||||
const binarySizeNames = Object.keys(data[data.length - 1][propName]);
|
||||
|
@ -108,6 +131,7 @@ export async function main() {
|
|||
const travisData = (await getTravisData()).filter(d => d.duration > 0);
|
||||
|
||||
const execTimeColumns = createExecTimeColumns(data);
|
||||
const throughputColumns = createThroughputColumns(data);
|
||||
const binarySizeColumns = createBinarySizeColumns(data);
|
||||
const threadCountColumns = createThreadCountColumns(data);
|
||||
const syscallCountColumns = createSyscallCountColumns(data);
|
||||
|
@ -146,6 +170,24 @@ export async function main() {
|
|||
}
|
||||
});
|
||||
|
||||
c3.generate({
|
||||
bindto: "#throughput-chart",
|
||||
data: {
|
||||
columns: throughputColumns,
|
||||
onclick: viewCommitOnClick(sha1List)
|
||||
},
|
||||
axis: {
|
||||
x: {
|
||||
type: "category",
|
||||
show: false,
|
||||
categories: sha1ShortList
|
||||
},
|
||||
y: {
|
||||
label: "seconds"
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
c3.generate({
|
||||
bindto: "#binary-size-chart",
|
||||
data: {
|
||||
|
|
|
@ -22,6 +22,12 @@ const regularData = [
|
|||
"main.js.map": 80000000,
|
||||
"snapshot_deno.bin": 70000000
|
||||
},
|
||||
throughput: {
|
||||
"100M_tcp": 3.6,
|
||||
"100M_cat": 3.0,
|
||||
"10M_tcp": 1.6,
|
||||
"10M_cat": 1.0
|
||||
},
|
||||
benchmark: {
|
||||
hello: {
|
||||
mean: 0.05
|
||||
|
@ -54,6 +60,12 @@ const regularData = [
|
|||
"main.js.map": 80000001,
|
||||
"snapshot_deno.bin": 70000001
|
||||
},
|
||||
throughput: {
|
||||
"100M_tcp": 3.6,
|
||||
"100M_cat": 3.0,
|
||||
"10M_tcp": 1.6,
|
||||
"10M_cat": 1.0
|
||||
},
|
||||
benchmark: {
|
||||
hello: {
|
||||
mean: 0.055
|
||||
|
@ -84,6 +96,7 @@ const irregularData = [
|
|||
created_at: "2018-01-01T01:00:00Z",
|
||||
sha1: "123",
|
||||
benchmark: {},
|
||||
throughput: {},
|
||||
binary_size: {},
|
||||
thread_count: {},
|
||||
syscall_count: {}
|
||||
|
@ -97,6 +110,9 @@ const irregularData = [
|
|||
cold_hello: {},
|
||||
cold_relative_import: {}
|
||||
},
|
||||
throughput: {
|
||||
"100M_tcp": 3.0
|
||||
},
|
||||
binary_size: {
|
||||
deno: 1
|
||||
},
|
||||
|
|
|
@ -26,6 +26,9 @@
|
|||
A cold startup is when deno must compile from scratch.
|
||||
<div id="exec-time-chart"></div>
|
||||
|
||||
<h2>Throughput</h2>
|
||||
<div id="throughput-chart"></div>
|
||||
|
||||
<h2>Executable size</h2>
|
||||
deno ships only a single binary. We track its size here.
|
||||
<div id="binary-size-chart"></div>
|
||||
|
|
Loading…
Add table
Reference in a new issue