From 761734e6cb3ff5911aa85d0cee96ad26092b4905 Mon Sep 17 00:00:00 2001
From: Tom French <15848336+TomAFrench@users.noreply.github.com>
Date: Tue, 5 Mar 2024 21:12:58 +0000
Subject: [PATCH] feat: run tests in parallel in `nargo test` (#4484)

# Description

## Problem\*

Resolves

## Summary\*

This is a dirty hack to get tests running in parallel. To do this, we give up printing test results as a stream while they run; instead, we wait until every test in a package has finished and then print all the results in one go.

This takes the noir-protocol-circuits test suite from 1m46s down to 27s.

## Additional Context

## Documentation\*

Check one:
- [x] No documentation needed.
- [ ] Documentation included in this PR.
- [ ] **[Exceptional Case]** Documentation to be submitted in a separate PR.

# PR Checklist\*

- [x] I have tested the changes locally.
- [x] I have formatted the changes with [Prettier](https://prettier.io/) and/or `cargo fmt` on default settings.
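The collect-then-report flow described in the summary reduces to a single rayon pattern. A minimal standalone sketch (not the PR's code; `run_single_test` is a hypothetical stand-in for the per-test worker):

```rust
use rayon::prelude::*; // rayon 1.x

// Hypothetical stand-in for the per-test worker; the real code compiles
// and executes the test before classifying the outcome.
fn run_single_test(name: &str) -> bool {
    !name.is_empty()
}

// Collect-then-report: results are buffered into a Vec instead of being
// streamed to stderr as each test finishes.
fn run_all(test_names: Vec<String>) -> Vec<(String, bool)> {
    test_names
        .into_par_iter()
        .map(|name| {
            let passed = run_single_test(&name);
            (name, passed)
        })
        .collect()
}

fn main() {
    let report = run_all(vec!["a".into(), "b".into()]);
    assert_eq!(report.len(), 2);
}
```

Buffering the results is what lets the parallel map run without interleaving output from concurrent tests.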
---
 tooling/lsp/src/requests/test_run.rs  |   2 +-
 tooling/nargo/src/ops/test.rs         |  14 +++-
 tooling/nargo_cli/src/cli/test_cmd.rs | 114 ++++++++++++++++++--------
 3 files changed, 92 insertions(+), 38 deletions(-)

diff --git a/tooling/lsp/src/requests/test_run.rs b/tooling/lsp/src/requests/test_run.rs
index 0b88d814265..1844a3d9bf0 100644
--- a/tooling/lsp/src/requests/test_run.rs
+++ b/tooling/lsp/src/requests/test_run.rs
@@ -84,7 +84,7 @@ fn on_test_run_request_inner(
             let test_result = run_test(
                 &state.solver,
                 &mut context,
-                test_function,
+                &test_function,
                 false,
                 None,
                 &CompileOptions::default(),
diff --git a/tooling/nargo/src/ops/test.rs b/tooling/nargo/src/ops/test.rs
index 92c09ec889e..8ddcb5cf8d2 100644
--- a/tooling/nargo/src/ops/test.rs
+++ b/tooling/nargo/src/ops/test.rs
@@ -14,10 +14,16 @@ pub enum TestStatus {
     CompileError(FileDiagnostic),
 }
 
+impl TestStatus {
+    pub fn failed(&self) -> bool {
+        !matches!(self, TestStatus::Pass)
+    }
+}
+
 pub fn run_test<B: BlackBoxFunctionSolver>(
     blackbox_solver: &B,
     context: &mut Context,
-    test_function: TestFunction,
+    test_function: &TestFunction,
     show_output: bool,
     foreign_call_resolver_url: Option<&str>,
     config: &CompileOptions,
@@ -45,7 +51,7 @@ pub fn run_test<B: BlackBoxFunctionSolver>(
 /// that a constraint was never satisfiable.
 /// An example of this is the program `assert(false)`
 /// In that case, we check if the test function should fail, and if so, we return `TestStatus::Pass`.
-fn test_status_program_compile_fail(err: CompileError, test_function: TestFunction) -> TestStatus {
+fn test_status_program_compile_fail(err: CompileError, test_function: &TestFunction) -> TestStatus {
     // The test has failed compilation, but it should never fail. Report error.
     if !test_function.should_fail() {
         return TestStatus::CompileError(err.into());
@@ -70,7 +76,7 @@ fn test_status_program_compile_fail(err: CompileError, test_function: TestFuncti
 /// We now check whether execution passed/failed and whether it should have
 /// passed/failed to determine the test status.
 fn test_status_program_compile_pass(
-    test_function: TestFunction,
+    test_function: &TestFunction,
     debug: DebugInfo,
     circuit_execution: Result<WitnessMap, NargoError>,
 ) -> TestStatus {
@@ -109,7 +115,7 @@ fn test_status_program_compile_pass(
 }
 
 fn check_expected_failure_message(
-    test_function: TestFunction,
+    test_function: &TestFunction,
     failed_assertion: Option<String>,
     error_diagnostic: Option<CustomDiagnostic>,
 ) -> TestStatus {
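The `TestStatus::failed` helper added above centralises the "anything but `Pass` counts as a failure" check used at several call sites below. A standalone sketch of the behaviour (the payloads are stubbed out here; the real `Fail` variant also carries an `error_diagnostic`):

```rust
#[derive(Debug)]
enum TestStatus {
    Pass,
    Fail { message: String },
    CompileError(String), // stand-in for the real FileDiagnostic payload
}

impl TestStatus {
    fn failed(&self) -> bool {
        // Anything other than `Pass` counts as a failure, so any future
        // variant is treated as failing until handled explicitly.
        !matches!(self, TestStatus::Pass)
    }
}

fn main() {
    let report = vec![
        ("t1".to_string(), TestStatus::Pass),
        ("t2".to_string(), TestStatus::Fail { message: "assertion failed".into() }),
        ("t3".to_string(), TestStatus::CompileError("type error".into())),
    ];
    // Mirrors the call sites in `test_cmd.rs` below.
    let count_failed = report.iter().filter(|(_, status)| status.failed()).count();
    assert_eq!(count_failed, 2);
}
```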
diff --git a/tooling/nargo_cli/src/cli/test_cmd.rs b/tooling/nargo_cli/src/cli/test_cmd.rs
index 68660d62d23..47face9943a 100644
--- a/tooling/nargo_cli/src/cli/test_cmd.rs
+++ b/tooling/nargo_cli/src/cli/test_cmd.rs
@@ -11,11 +11,14 @@ use nargo::{
     parse_all, prepare_package,
 };
 use nargo_toml::{get_package_manifest, resolve_workspace_from_toml, PackageSelection};
-use noirc_driver::{file_manager_with_stdlib, CompileOptions, NOIR_ARTIFACT_VERSION_STRING};
+use noirc_driver::{
+    check_crate, file_manager_with_stdlib, CompileOptions, NOIR_ARTIFACT_VERSION_STRING,
+};
 use noirc_frontend::{
     graph::CrateName,
     hir::{FunctionNameMatch, ParsedFiles},
 };
+use rayon::prelude::{IntoParallelIterator, ParallelIterator};
 use termcolor::{Color, ColorChoice, ColorSpec, StandardStream, WriteColor};
 
 use crate::{backends::Backend, cli::check_cmd::check_crate_and_report_errors, errors::CliError};
@@ -82,15 +85,12 @@ pub(crate) fn run(
         None => FunctionNameMatch::Anything,
     };
 
-    let blackbox_solver = Bn254BlackBoxSolver::new();
-
     let test_reports: Vec<Vec<(String, TestStatus)>> = workspace
         .into_iter()
         .map(|package| {
-            run_tests(
+            run_tests::<Bn254BlackBoxSolver>(
                 &workspace_file_manager,
                 &parsed_files,
-                &blackbox_solver,
                 package,
                 pattern,
                 args.show_output,
@@ -116,24 +116,77 @@ pub(crate) fn run(
         };
     }
 
-    if test_report.iter().any(|(_, status)| matches!(status, TestStatus::Fail { .. })) {
+    if test_report.iter().any(|(_, status)| status.failed()) {
         Err(CliError::Generic(String::new()))
     } else {
         Ok(())
     }
 }
 
-#[allow(clippy::too_many_arguments)]
-fn run_tests<S: BlackBoxFunctionSolver>(
+fn run_tests<S: BlackBoxFunctionSolver + Default>(
     file_manager: &FileManager,
     parsed_files: &ParsedFiles,
-    blackbox_solver: &S,
     package: &Package,
     fn_name: FunctionNameMatch,
     show_output: bool,
     foreign_call_resolver_url: Option<&str>,
     compile_options: &CompileOptions,
 ) -> Result<Vec<(String, TestStatus)>, CliError> {
+    let test_functions =
+        get_tests_in_package(file_manager, parsed_files, package, fn_name, compile_options)?;
+
+    let count_all = test_functions.len();
+
+    let plural = if count_all == 1 { "" } else { "s" };
+    println!("[{}] Running {count_all} test function{plural}", package.name);
+
+    let test_report: Vec<(String, TestStatus)> = test_functions
+        .into_par_iter()
+        .map(|test_name| {
+            // This is really hacky but we can't share `Context` or `S` across threads.
+            // We then need to construct a separate copy for each test.
+
+            let (mut context, crate_id) = prepare_package(file_manager, parsed_files, package);
+            check_crate(
+                &mut context,
+                crate_id,
+                compile_options.deny_warnings,
+                compile_options.disable_macros,
+            )
+            .expect("Any errors should have occurred when collecting test functions");
+
+            let test_functions = context.get_all_test_functions_in_crate_matching(
+                &crate_id,
+                FunctionNameMatch::Exact(&test_name),
+            );
+            let (_, test_function) = test_functions.first().expect("Test function should exist");
+
+            let blackbox_solver = S::default();
+
+            let test_output = run_test(
+                &blackbox_solver,
+                &mut context,
+                test_function,
+                show_output,
+                foreign_call_resolver_url,
+                compile_options,
+            );
+
+            (test_name, test_output)
+        })
+        .collect();
+
+    display_test_report(file_manager, package, compile_options, &test_report)?;
+    Ok(test_report)
+}
+
+fn get_tests_in_package(
+    file_manager: &FileManager,
+    parsed_files: &ParsedFiles,
+    package: &Package,
+    fn_name: FunctionNameMatch,
+    compile_options: &CompileOptions,
+) -> Result<Vec<String>, CliError> {
     let (mut context, crate_id) = prepare_package(file_manager, parsed_files, package);
     check_crate_and_report_errors(
         &mut context,
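The closure above rebuilds the compiler `Context` and constructs a fresh solver on every task because neither can be shared across rayon's worker threads; that per-test re-compilation is the "dirty hack" the summary mentions. A standalone sketch of the per-task-state pattern (`Solver` is a hypothetical stand-in for the PR's `S: BlackBoxFunctionSolver + Default`):

```rust
use rayon::prelude::*;

// Hypothetical stand-in for the PR's solver type; in the real code this
// state (like the compiler `Context`) is not shareable across threads.
#[derive(Default)]
struct Solver {
    scratch: Vec<u8>,
}

fn main() {
    let results: Vec<usize> = (0..4usize)
        .into_par_iter()
        .map(|i| {
            // Each task builds its own solver instead of borrowing a shared
            // one, mirroring `S::default()` inside the parallel map above.
            let mut solver = Solver::default();
            solver.scratch.push(i as u8);
            solver.scratch.len() + i
        })
        .collect();
    assert_eq!(results, vec![1, 2, 3, 4]);
}
```

The trade-off is extra CPU spent re-checking the crate per test in exchange for parallelism, which the benchmark in the summary suggests is well worth it.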
@@ -143,30 +196,27 @@ fn run_tests<S: BlackBoxFunctionSolver>(
         compile_options.silence_warnings,
     )?;
 
-    let test_functions = context.get_all_test_functions_in_crate_matching(&crate_id, fn_name);
-    let count_all = test_functions.len();
-
-    let plural = if count_all == 1 { "" } else { "s" };
-    println!("[{}] Running {count_all} test function{plural}", package.name);
+    Ok(context
+        .get_all_test_functions_in_crate_matching(&crate_id, fn_name)
+        .into_iter()
+        .map(|(test_name, _)| test_name)
+        .collect())
+}
+fn display_test_report(
+    file_manager: &FileManager,
+    package: &Package,
+    compile_options: &CompileOptions,
+    test_report: &[(String, TestStatus)],
+) -> Result<(), CliError> {
 
     let writer = StandardStream::stderr(ColorChoice::Always);
     let mut writer = writer.lock();
 
-    let mut test_report: Vec<(String, TestStatus)> = Vec::new();
-    for (test_name, test_function) in test_functions {
+    for (test_name, test_status) in test_report {
         write!(writer, "[{}] Testing {test_name}... ", package.name)
             .expect("Failed to write to stderr");
         writer.flush().expect("Failed to flush writer");
 
-        let test_status = run_test(
-            blackbox_solver,
-            &mut context,
-            test_function,
-            show_output,
-            foreign_call_resolver_url,
-            compile_options,
-        );
-
         match &test_status {
             TestStatus::Pass { .. } => {
                 writer
@@ -181,32 +231,30 @@ fn run_tests<S: BlackBoxFunctionSolver>(
                 writeln!(writer, "FAIL\n{message}\n").expect("Failed to write to stderr");
                 if let Some(diag) = error_diagnostic {
                     noirc_errors::reporter::report_all(
-                        context.file_manager.as_file_map(),
+                        file_manager.as_file_map(),
                         &[diag.clone()],
                         compile_options.deny_warnings,
                         compile_options.silence_warnings,
                     );
                 }
             }
             TestStatus::CompileError(err) => {
                 noirc_errors::reporter::report_all(
-                    context.file_manager.as_file_map(),
+                    file_manager.as_file_map(),
                     &[err.clone()],
                     compile_options.deny_warnings,
                     compile_options.silence_warnings,
                 );
             }
         }
-
-        test_report.push((test_name, test_status));
-
         writer.reset().expect("Failed to reset writer");
     }
 
     write!(writer, "[{}] ", package.name).expect("Failed to write to stderr");
 
-    let count_failed =
-        test_report.iter().filter(|(_, status)| !matches!(status, TestStatus::Pass)).count();
+    let count_all = test_report.len();
+    let count_failed = test_report.iter().filter(|(_, status)| status.failed()).count();
+    let plural = if count_all == 1 { "" } else { "s" };
     if count_failed == 0 {
         writer.set_color(ColorSpec::new().set_fg(Some(Color::Green))).expect("Failed to set color");
         write!(writer, "{count_all} test{plural} passed").expect("Failed to write to stderr");
@@ -231,5 +279,5 @@ fn run_tests<S: BlackBoxFunctionSolver>(
         writer.reset().expect("Failed to reset writer");
     }
 
-    Ok(test_report)
+    Ok(())
 }
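One property worth noting about the new report: rayon's `collect` on an indexed parallel iterator (such as one over a `Vec`) preserves input order, so the buffered report prints in a stable order even though the tests themselves finish in any order. A small demonstration:

```rust
use rayon::prelude::*;

fn main() {
    let names = vec!["a", "bb", "ccc"];
    let report: Vec<(&str, usize)> = names
        .into_par_iter()
        .map(|name| (name, name.len()))
        .collect();
    // Whatever order the threads finish in, `collect` restores input order,
    // so the printed report stays deterministic run to run.
    assert_eq!(report, vec![("a", 1), ("bb", 2), ("ccc", 3)]);
}
```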