From 88566cee72b4762e0742fdde3f2c56b8a4d0bf03 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bartek=20Iwa=C5=84czuk?= Date: Mon, 11 Dec 2023 17:33:56 +0100 Subject: [PATCH] perf(lsp): instrument all ops with performance marks (#21536) Adds performance measurements for all ops used by the LSP. Also changes output of "Language server status" page to include more precise information. Current suspicion is that computing "script version" takes a long time for some users. --- cli/lsp/language_server.rs | 14 +++++--------- cli/lsp/performance.rs | 25 ++++++++++++++++++++++++- cli/lsp/tsc.rs | 21 ++++++++++++++------- cli/tests/integration/lsp_tests.rs | 3 +++ 4 files changed, 46 insertions(+), 17 deletions(-) diff --git a/cli/lsp/language_server.rs b/cli/lsp/language_server.rs index 225b46eacd..9b531cc409 100644 --- a/cli/lsp/language_server.rs +++ b/cli/lsp/language_server.rs @@ -3809,15 +3809,11 @@ impl Inner { .unwrap(); contents .push_str("\n## Performance\n\n|Name|Duration|Count|\n|---|---|---|\n"); - let mut averages = self.performance.averages(); - averages.sort(); - for average in averages { - writeln!( - contents, - "|{}|{}ms|{}|", - average.name, average.average_duration, average.count - ) - .unwrap(); + let mut averages = self.performance.averages_as_f64(); + averages.sort_by(|a, b| a.0.cmp(&b.0)); + for (name, count, average_duration) in averages { + writeln!(contents, "|{}|{}ms|{}|", name, average_duration, count) + .unwrap(); } Some(contents) } else { diff --git a/cli/lsp/performance.rs b/cli/lsp/performance.rs index ffb6ed2170..610e180670 100644 --- a/cli/lsp/performance.rs +++ b/cli/lsp/performance.rs @@ -51,7 +51,12 @@ pub struct PerformanceMeasure { impl fmt::Display for PerformanceMeasure { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{} ({}ms)", self.name, self.duration.as_millis()) + write!( + f, + "{} ({}ms)", + self.name, + self.duration.as_micros() as f64 / 1000.0 + ) } } @@ -132,6 +137,24 @@ impl Performance { .collect() } + pub 
fn averages_as_f64(&self) -> Vec<(String, u32, f64)> { + let mut averages: HashMap<String, Vec<Duration>> = HashMap::new(); + for measure in self.measures.lock().iter() { + averages + .entry(measure.name.clone()) + .or_default() + .push(measure.duration); + } + averages + .into_iter() + .map(|(k, d)| { + let count = d.len() as u32; + let a = d.into_iter().sum::<Duration>() / count; + (k, count, a.as_micros() as f64 / 1000.0) + }) + .collect() + } + fn mark_inner<S: AsRef<str>, V: Serialize>( &self, name: S, diff --git a/cli/lsp/tsc.rs b/cli/lsp/tsc.rs index 594ef02a24..8b771a4785 100644 --- a/cli/lsp/tsc.rs +++ b/cli/lsp/tsc.rs @@ -3842,7 +3842,8 @@ fn op_is_cancelled(state: &mut OpState) -> bool { #[op2(fast)] fn op_is_node_file(state: &mut OpState, #[string] path: String) -> bool { let state = state.borrow::<State>(); - match ModuleSpecifier::parse(&path) { + let mark = state.performance.mark("tsc.op.op_is_node_file"); + let r = match ModuleSpecifier::parse(&path) { Ok(specifier) => state .state_snapshot .npm @@ -3850,7 +3851,9 @@ fn op_is_node_file(state: &mut OpState, #[string] path: String) -> bool { .as_ref() .map(|n| n.npm_resolver.in_npm_package(&specifier)) .unwrap_or(false), Err(_) => false, - } + }; + state.performance.measure(mark); + r } #[derive(Debug, Serialize)] @@ -3946,6 +3949,7 @@ fn op_respond(state: &mut OpState, #[serde] args: Response) { #[serde] fn op_script_names(state: &mut OpState) -> Vec<String> { let state = state.borrow_mut::<State>(); + let mark = state.performance.mark("tsc.op.op_script_names"); let documents = &state.state_snapshot.documents; let all_docs = documents.documents(DocumentsFilter::AllDiagnosable); let mut seen = HashSet::new(); @@ -3985,13 +3989,15 @@ fn op_script_names(state: &mut OpState) -> Vec<String> { } } - result + let r = result .into_iter() .map(|s| match ModuleSpecifier::parse(&s) { Ok(s) => state.specifier_map.denormalize(&s), Err(_) => s, }) - .collect() + .collect(); + state.performance.measure(mark); + r } #[op2] @@ -4001,10 +4007,11 @@ fn op_script_version( state: &mut OpState, #[string] specifier: &str, ) ->
Result<Option<String>, AnyError> { let state = state.borrow_mut::<State>(); - // this op is very "noisy" and measuring its performance is not useful, so we - // don't measure it uniquely anymore. + let mark = state.performance.mark("tsc.op.op_script_version"); let specifier = state.specifier_map.normalize(specifier)?; - Ok(state.script_version(&specifier)) + let r = state.script_version(&specifier); + state.performance.measure(mark); + Ok(r) } /// Create and setup a JsRuntime based on a snapshot. It is expected that the diff --git a/cli/tests/integration/lsp_tests.rs b/cli/tests/integration/lsp_tests.rs index 98aaaebb49..aad2c90864 100644 --- a/cli/tests/integration/lsp_tests.rs +++ b/cli/tests/integration/lsp_tests.rs @@ -8318,7 +8318,10 @@ fn lsp_performance() { "tsc.host.$getDiagnostics", "tsc.host.$getSupportedCodeFixes", "tsc.host.getQuickInfoAtPosition", + "tsc.op.op_is_node_file", "tsc.op.op_load", + "tsc.op.op_script_names", + "tsc.op.op_script_version", "tsc.request.$configure", "tsc.request.$getAssets", "tsc.request.$getSupportedCodeFixes",