LSP evaluation in the background #1814

Merged · 15 commits · Mar 8, 2024
74 changes: 72 additions & 2 deletions Cargo.lock


3 changes: 3 additions & 0 deletions Cargo.toml
@@ -37,12 +37,14 @@ ansi_term = "0.12"
anyhow = "1.0"
assert_cmd = "2.0.11"
assert_matches = "1.5.0"
bincode = "1.3.3"
clap = "4.3"
clap_complete = "4.3.2"
codespan = { version = "0.11", features = ["serialization"] }
codespan-reporting = { version = "0.11", features = ["serialization"] }
comrak = "0.17.0"
criterion = "0.4"
crossbeam = "0.8.4"
csv = "1"
cxx = "1.0"
cxx-build = "1.0"
@@ -53,6 +55,7 @@ git-version = "0.3.5"
indexmap = "1.9.3"
indoc = "2"
insta = "1.29.0"
ipc-channel = "0.18.0"
js-sys = "0.3"
lalrpop = "0.19.9"
lalrpop-util = "0.19.9"
2 changes: 1 addition & 1 deletion core/src/error/mod.rs
@@ -204,7 +204,7 @@ impl IllegalPolymorphicTailAction {
}
}

const UNKNOWN_SOURCE_NAME: &str = "<unknown> (generated by evaluation)";
pub const UNKNOWN_SOURCE_NAME: &str = "<unknown> (generated by evaluation)";

/// An error occurring during the static typechecking phase.
#[derive(Debug, PartialEq, Clone)]
47 changes: 47 additions & 0 deletions core/src/eval/mod.rs
@@ -935,6 +935,53 @@ impl<R: ImportResolver, C: Cache> VirtualMachine<R, C> {
})
}
}

/// Evaluate a term, but attempt to continue on errors.
///
/// This differs from `VirtualMachine::eval_full` in 3 ways:
/// - We try to accumulate errors instead of bailing out. When recursing into record
/// fields and array elements, we keep evaluating subsequent elements even if one
/// fails.
/// - We ignore missing field errors. It would be nice not to ignore them, but it's hard
/// to tell when they're appropriate: the term might intentionally be a partial configuration.
/// - We only return the accumulated errors; we don't return the eval'ed term.
pub fn eval_permissive(&mut self, rt: RichTerm) -> Vec<EvalError> {
fn inner<R: ImportResolver, C: Cache>(
slf: &mut VirtualMachine<R, C>,
acc: &mut Vec<EvalError>,
rt: RichTerm,
) {
match slf.eval(rt) {
Err(e) => acc.push(e),
Ok(t) => match t.as_ref() {
Term::Array(ts, _) => {
for t in ts.iter() {
// After eval_closure, all the array elements are
// closurized already, so we don't need to do any tracking
// of the env.
inner(slf, acc, t.clone());
}
}
Term::Record(data) => {
for field in data.fields.values() {
if let Some(v) = &field.value {
let value_with_ctr = RuntimeContract::apply_all(
v.clone(),
field.pending_contracts.iter().cloned(),
v.pos,
);
inner(slf, acc, value_with_ctr);
}
}
}
_ => {}
},
}
}
let mut ret = Vec::new();
inner(self, &mut ret, rt);
ret
}
}

impl<C: Cache> VirtualMachine<ImportCache, C> {
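As a side note, a caller (such as the background evaluation worker this PR introduces) could use the new entry point roughly as sketched below. This is illustrative only: the module paths and the `Debug` formatting of `EvalError` are assumptions, not part of this diff; only `eval_permissive` and its signature come from the hunk above.

```rust
use nickel_lang_core::cache::ImportResolver;
use nickel_lang_core::error::EvalError;
use nickel_lang_core::eval::{cache::Cache, VirtualMachine};
use nickel_lang_core::term::RichTerm;

/// Run permissive evaluation and return every error it collected, instead of
/// stopping at the first failure the way `eval_full` would.
fn collect_background_errors<R: ImportResolver, C: Cache>(
    vm: &mut VirtualMachine<R, C>,
    term: RichTerm,
) -> Vec<EvalError> {
    let errors = vm.eval_permissive(term);
    for err in &errors {
        // Assumes `EvalError` implements `Debug`; a real caller would turn
        // these into LSP diagnostics rather than logging them.
        eprintln!("background eval error: {err:?}");
    }
    errors
}
```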
78 changes: 68 additions & 10 deletions lsp/lsp-harness/src/lib.rs
@@ -27,6 +27,7 @@ use serde::Deserialize;
pub struct TestFixture {
pub files: Vec<TestFile>,
pub reqs: Vec<Request>,
pub expected_diags: Vec<Url>,
}

pub struct TestFile {
@@ -49,7 +50,11 @@ pub enum Request {

#[derive(Deserialize, Debug, Default)]
pub struct Requests {
request: Vec<Request>,
request: Option<Vec<Request>>,
// A list of files whose diagnostics we compare against snapshots.
// TODO: once the background output has settled down a little,
// consider checking diagnostic snapshots for all tests
diagnostic: Option<Vec<Url>>,
}

impl TestFixture {
Expand Down Expand Up @@ -92,15 +97,18 @@ impl TestFixture {
Some(TestFixture {
files,
reqs: Vec::new(),
expected_diags: Vec::new(),
})
} else {
// The remaining lines at the end of the file are a toml source
// listing the LSP requests we need to make.
// listing the LSP requests we need to make and the diagnostics
// we expect to receive.
let remaining = header_lines.join("\n");
let reqs: Requests = toml::from_str(&remaining).unwrap();
Some(TestFixture {
files,
reqs: reqs.request,
reqs: reqs.request.unwrap_or_default(),
expected_diags: reqs.diagnostic.unwrap_or_default(),
})
}
}
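For illustration, the kind of fixture footer this parser now accepts might look like the sketch below. Only the `diagnostic` key and the `Requests` struct come from this diff; the `Url` import path and the concrete file URI are assumptions, and the `[[request]]` payload is omitted because the `Request` variants aren't shown in this hunk.

```rust
use lsp_types::Url;

// Sketch: parse a hypothetical fixture footer that sends no LSP requests and
// only asks for a diagnostics snapshot of a single (made-up) file.
fn parse_example_footer() -> Vec<Url> {
    let footer = r#"diagnostic = ["file:///test.ncl"]"#;
    let reqs: Requests = toml::from_str(footer).unwrap();
    assert!(reqs.request.is_none());
    reqs.diagnostic.unwrap_or_default()
}
```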
@@ -191,14 +199,64 @@ impl TestHarness {
}

// For debug purposes, drain and print notifications.
pub fn drain_notifications(&mut self) {
// FIXME: nls doesn't report progress, so we have no way to check whether
// it's finished sending notifications. We just retrieve any that we've already
// received.
// We should also have a better format for printing diagnostics and other
// notifications.
pub fn drain_diagnostics(&mut self, files: impl Iterator<Item = Url>) {
let mut diags = self.drain_diagnostics_inner(files);

// Sort and dedup the diagnostics, for stability of the output.
let mut files: Vec<_> = diags.keys().cloned().collect();
files.sort();

for f in files {
let mut diags = diags.remove(&f).unwrap();
diags.sort_by_cached_key(|d| (d.range.start, d.range.end, d.message.clone()));
diags.dedup_by_key(|d| (d.range.start, d.range.end, d.message.clone()));
for d in diags {
(&f, d).debug(&mut self.out).unwrap();
self.out.push(b'\n');
}
}
}

fn drain_diagnostics_inner(
&mut self,
files: impl Iterator<Item = Url>,
) -> HashMap<Url, Vec<lsp_types::Diagnostic>> {
let mut diags: HashMap<Url, Vec<lsp_types::Diagnostic>> = HashMap::new();

// This is pretty fragile, but I don't know of a better way to handle notifications: we
// expect 2 rounds of notifications from each file (one synchronously from typechecking,
// and one from the background eval). So we just wait until we've received both, and we
// concatenate their outputs.
let mut waiting: HashMap<Url, u32> = files.map(|f| (f, 2)).collect();

// Handle a single diagnostic, returning true if we have enough of them.
let mut handle_diag = |diag: PublishDiagnosticsParams| -> bool {
if let Some(remaining) = waiting.get_mut(&diag.uri) {
*remaining -= 1;
if *remaining == 0 {
waiting.remove(&diag.uri);
}
diags
.entry(diag.uri.clone())
.or_default()
.extend(diag.diagnostics);
}

waiting.is_empty()
};

for msg in self.srv.pending_notifications() {
eprintln!("{msg:?}");
if msg.method == PublishDiagnostics::METHOD {
let diag: PublishDiagnosticsParams =
serde_json::value::from_value(msg.params).unwrap();
if handle_diag(diag) {
return diags;
}
}
}

while !handle_diag(self.wait_for_diagnostics()) {}

diags
}
}
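Put together, a snapshot test could drive the new diagnostics plumbing roughly as follows. `TestHarness::new` and the fixture-loading steps are assumptions about the existing harness API; only `drain_diagnostics` and `TestFixture::expected_diags` are taken from this diff.

```rust
// Sketch of a test runner using the new API (harness setup elided / assumed).
fn check_fixture_diagnostics(fixture: TestFixture) {
    let mut harness = TestHarness::new();
    // ... send fixture.files to the server and replay fixture.reqs here ...

    // Collect two rounds of diagnostics (typechecking + background eval) for
    // each expected file; the result ends up, sorted and deduplicated, in the
    // harness output buffer, ready for a snapshot assertion.
    harness.drain_diagnostics(fixture.expected_diags.iter().cloned());
}
```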
8 changes: 7 additions & 1 deletion lsp/lsp-harness/src/output.rs
@@ -4,7 +4,7 @@

use std::io::Write;

use lsp_types::{DocumentSymbolResponse, GotoDefinitionResponse, WorkspaceEdit};
use lsp_types::{Diagnostic, DocumentSymbolResponse, GotoDefinitionResponse, WorkspaceEdit};

pub trait LspDebug {
fn debug(&self, w: impl Write) -> std::io::Result<()>;
@@ -210,3 +210,9 @@ impl LspDebug for WorkspaceEdit {
changes.debug(w)
}
}

impl LspDebug for Diagnostic {
fn debug(&self, mut w: impl Write) -> std::io::Result<()> {
write!(w, "{}: {}", self.range.debug_str(), self.message)
}
}
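For reference, the new impl renders a diagnostic as `<range>: <message>`, where the range formatting comes from `debug_str` (defined elsewhere in output.rs, not shown here). A minimal sketch, assuming the usual `lsp_types` constructors:

```rust
use lsp_types::{Diagnostic, Position, Range};

// Build a minimal diagnostic and print it with the new LspDebug impl.
fn print_example_diagnostic() {
    let diag = Diagnostic {
        range: Range::new(Position::new(0, 4), Position::new(0, 9)),
        message: "missing definition for `foo`".to_owned(),
        ..Diagnostic::default()
    };
    // Writes something like "<start>-<end>: missing definition for `foo`".
    diag.debug(std::io::stderr()).unwrap();
}
```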
28 changes: 15 additions & 13 deletions lsp/nls/Cargo.toml
@@ -26,23 +26,25 @@ harness = false
lalrpop.workspace = true

[dependencies]
lalrpop-util.workspace = true
anyhow.workspace = true
bincode.workspace = true
clap = { workspace = true, features = ["derive"] }
codespan.workspace = true
codespan-reporting.workspace = true
serde = { workspace = true, features = ["derive"] }
serde_json.workspace = true
regex.workspace = true

nickel-lang-core.workspace = true
lsp-server.workspace = true
lsp-types.workspace = true
log.workspace = true
env_logger.workspace = true
anyhow.workspace = true
codespan.workspace = true
crossbeam.workspace = true
csv.workspace = true
derive_more.workspace = true
env_logger.workspace = true
ipc-channel.workspace = true
lalrpop-util.workspace = true
lazy_static.workspace = true
csv.workspace = true
log.workspace = true
lsp-server.workspace = true
lsp-types.workspace = true
nickel-lang-core.workspace = true
regex.workspace = true
serde = { workspace = true, features = ["derive"] }
serde_json.workspace = true
thiserror.workspace = true

[dev-dependencies]