Skip to content

Commit

Permalink
feat: run tests in parallel in nargo test (#4484)
Browse files Browse the repository at this point in the history
# Description

## Problem\*

Resolves <!-- Link to GitHub Issue -->

## Summary\*

This is a dirty hack to get tests running in parallel. To do this, we give
up streaming each test result as it completes; instead we wait for all
tests in a package to finish running and then print the results out in
one go.

This takes the noir-protocol-circuits test suite from 1 minute 46 seconds
down to 27 seconds.

## Additional Context

## Documentation\*

Check one:
- [x] No documentation needed.
- [ ] Documentation included in this PR.
- [ ] **[Exceptional Case]** Documentation to be submitted in a separate
PR.

# PR Checklist\*

- [x] I have tested the changes locally.
- [x] I have formatted the changes with [Prettier](https://prettier.io/)
and/or `cargo fmt` on default settings.
  • Loading branch information
TomAFrench authored Mar 5, 2024
1 parent fe8f277 commit 761734e
Show file tree
Hide file tree
Showing 3 changed files with 92 additions and 38 deletions.
2 changes: 1 addition & 1 deletion tooling/lsp/src/requests/test_run.rs
Original file line number Diff line number Diff line change
Expand Up @@ -84,7 +84,7 @@ fn on_test_run_request_inner(
let test_result = run_test(
&state.solver,
&mut context,
test_function,
&test_function,
false,
None,
&CompileOptions::default(),
Expand Down
14 changes: 10 additions & 4 deletions tooling/nargo/src/ops/test.rs
Original file line number Diff line number Diff line change
Expand Up @@ -14,10 +14,16 @@ pub enum TestStatus {
CompileError(FileDiagnostic),
}

impl TestStatus {
    /// Reports whether this test outcome should be counted as a failure.
    /// Every status other than `Pass` (i.e. `Fail` and `CompileError`) is
    /// considered failed.
    pub fn failed(&self) -> bool {
        match self {
            TestStatus::Pass => false,
            _ => true,
        }
    }
}

pub fn run_test<B: BlackBoxFunctionSolver>(
blackbox_solver: &B,
context: &mut Context,
test_function: TestFunction,
test_function: &TestFunction,
show_output: bool,
foreign_call_resolver_url: Option<&str>,
config: &CompileOptions,
Expand Down Expand Up @@ -45,7 +51,7 @@ pub fn run_test<B: BlackBoxFunctionSolver>(
/// that a constraint was never satisfiable.
/// An example of this is the program `assert(false)`
/// In that case, we check if the test function should fail, and if so, we return `TestStatus::Pass`.
fn test_status_program_compile_fail(err: CompileError, test_function: TestFunction) -> TestStatus {
fn test_status_program_compile_fail(err: CompileError, test_function: &TestFunction) -> TestStatus {
// The test has failed compilation, but it should never fail. Report error.
if !test_function.should_fail() {
return TestStatus::CompileError(err.into());
Expand All @@ -70,7 +76,7 @@ fn test_status_program_compile_fail(err: CompileError, test_function: TestFuncti
/// We now check whether execution passed/failed and whether it should have
/// passed/failed to determine the test status.
fn test_status_program_compile_pass(
test_function: TestFunction,
test_function: &TestFunction,
debug: DebugInfo,
circuit_execution: Result<WitnessMap, NargoError>,
) -> TestStatus {
Expand Down Expand Up @@ -109,7 +115,7 @@ fn test_status_program_compile_pass(
}

fn check_expected_failure_message(
test_function: TestFunction,
test_function: &TestFunction,
failed_assertion: Option<String>,
error_diagnostic: Option<FileDiagnostic>,
) -> TestStatus {
Expand Down
114 changes: 81 additions & 33 deletions tooling/nargo_cli/src/cli/test_cmd.rs
Original file line number Diff line number Diff line change
Expand Up @@ -11,11 +11,14 @@ use nargo::{
parse_all, prepare_package,
};
use nargo_toml::{get_package_manifest, resolve_workspace_from_toml, PackageSelection};
use noirc_driver::{file_manager_with_stdlib, CompileOptions, NOIR_ARTIFACT_VERSION_STRING};
use noirc_driver::{
check_crate, file_manager_with_stdlib, CompileOptions, NOIR_ARTIFACT_VERSION_STRING,
};
use noirc_frontend::{
graph::CrateName,
hir::{FunctionNameMatch, ParsedFiles},
};
use rayon::prelude::{IntoParallelIterator, ParallelIterator};
use termcolor::{Color, ColorChoice, ColorSpec, StandardStream, WriteColor};

use crate::{backends::Backend, cli::check_cmd::check_crate_and_report_errors, errors::CliError};
Expand Down Expand Up @@ -82,15 +85,12 @@ pub(crate) fn run(
None => FunctionNameMatch::Anything,
};

let blackbox_solver = Bn254BlackBoxSolver::new();

let test_reports: Vec<Vec<(String, TestStatus)>> = workspace
.into_iter()
.map(|package| {
run_tests(
run_tests::<Bn254BlackBoxSolver>(
&workspace_file_manager,
&parsed_files,
&blackbox_solver,
package,
pattern,
args.show_output,
Expand All @@ -116,24 +116,77 @@ pub(crate) fn run(
};
}

if test_report.iter().any(|(_, status)| matches!(status, TestStatus::Fail { .. })) {
if test_report.iter().any(|(_, status)| status.failed()) {
Err(CliError::Generic(String::new()))
} else {
Ok(())
}
}

#[allow(clippy::too_many_arguments)]
fn run_tests<S: BlackBoxFunctionSolver>(
fn run_tests<S: BlackBoxFunctionSolver + Default>(
file_manager: &FileManager,
parsed_files: &ParsedFiles,
blackbox_solver: &S,
package: &Package,
fn_name: FunctionNameMatch,
show_output: bool,
foreign_call_resolver_url: Option<&str>,
compile_options: &CompileOptions,
) -> Result<Vec<(String, TestStatus)>, CliError> {
let test_functions =
get_tests_in_package(file_manager, parsed_files, package, fn_name, compile_options)?;

let count_all = test_functions.len();

let plural = if count_all == 1 { "" } else { "s" };
println!("[{}] Running {count_all} test function{plural}", package.name);

let test_report: Vec<(String, TestStatus)> = test_functions
.into_par_iter()
.map(|test_name| {
// This is really hacky but we can't share `Context` or `S` across threads.
// We then need to construct a separate copy for each test.

let (mut context, crate_id) = prepare_package(file_manager, parsed_files, package);
check_crate(
&mut context,
crate_id,
compile_options.deny_warnings,
compile_options.disable_macros,
)
.expect("Any errors should have occurred when collecting test functions");

let test_functions = context.get_all_test_functions_in_crate_matching(
&crate_id,
FunctionNameMatch::Exact(&test_name),
);
let (_, test_function) = test_functions.first().expect("Test function should exist");

let blackbox_solver = S::default();

let test_output = run_test(
&blackbox_solver,
&mut context,
test_function,
show_output,
foreign_call_resolver_url,
compile_options,
);

(test_name, test_output)
})
.collect();

display_test_report(file_manager, package, compile_options, &test_report)?;
Ok(test_report)
}

fn get_tests_in_package(
file_manager: &FileManager,
parsed_files: &ParsedFiles,
package: &Package,
fn_name: FunctionNameMatch,
compile_options: &CompileOptions,
) -> Result<Vec<String>, CliError> {
let (mut context, crate_id) = prepare_package(file_manager, parsed_files, package);
check_crate_and_report_errors(
&mut context,
Expand All @@ -143,30 +196,27 @@ fn run_tests<S: BlackBoxFunctionSolver>(
compile_options.silence_warnings,
)?;

let test_functions = context.get_all_test_functions_in_crate_matching(&crate_id, fn_name);
let count_all = test_functions.len();

let plural = if count_all == 1 { "" } else { "s" };
println!("[{}] Running {count_all} test function{plural}", package.name);
Ok(context
.get_all_test_functions_in_crate_matching(&crate_id, fn_name)
.into_iter()
.map(|(test_name, _)| test_name)
.collect())
}

fn display_test_report(
file_manager: &FileManager,
package: &Package,
compile_options: &CompileOptions,
test_report: &[(String, TestStatus)],
) -> Result<(), CliError> {
let writer = StandardStream::stderr(ColorChoice::Always);
let mut writer = writer.lock();

let mut test_report: Vec<(String, TestStatus)> = Vec::new();
for (test_name, test_function) in test_functions {
for (test_name, test_status) in test_report {
write!(writer, "[{}] Testing {test_name}... ", package.name)
.expect("Failed to write to stderr");
writer.flush().expect("Failed to flush writer");

let test_status = run_test(
blackbox_solver,
&mut context,
test_function,
show_output,
foreign_call_resolver_url,
compile_options,
);

match &test_status {
TestStatus::Pass { .. } => {
writer
Expand All @@ -181,7 +231,7 @@ fn run_tests<S: BlackBoxFunctionSolver>(
writeln!(writer, "FAIL\n{message}\n").expect("Failed to write to stderr");
if let Some(diag) = error_diagnostic {
noirc_errors::reporter::report_all(
context.file_manager.as_file_map(),
file_manager.as_file_map(),
&[diag.clone()],
compile_options.deny_warnings,
compile_options.silence_warnings,
Expand All @@ -190,23 +240,21 @@ fn run_tests<S: BlackBoxFunctionSolver>(
}
TestStatus::CompileError(err) => {
noirc_errors::reporter::report_all(
context.file_manager.as_file_map(),
file_manager.as_file_map(),
&[err.clone()],
compile_options.deny_warnings,
compile_options.silence_warnings,
);
}
}

test_report.push((test_name, test_status));

writer.reset().expect("Failed to reset writer");
}

write!(writer, "[{}] ", package.name).expect("Failed to write to stderr");

let count_failed =
test_report.iter().filter(|(_, status)| !matches!(status, TestStatus::Pass)).count();
let count_all = test_report.len();
let count_failed = test_report.iter().filter(|(_, status)| status.failed()).count();
let plural = if count_all == 1 { "" } else { "s" };
if count_failed == 0 {
writer.set_color(ColorSpec::new().set_fg(Some(Color::Green))).expect("Failed to set color");
write!(writer, "{count_all} test{plural} passed").expect("Failed to write to stderr");
Expand All @@ -231,5 +279,5 @@ fn run_tests<S: BlackBoxFunctionSolver>(
writer.reset().expect("Failed to reset writer");
}

Ok(test_report)
Ok(())
}

0 comments on commit 761734e

Please sign in to comment.