diff --git a/CHANGELOG.md b/CHANGELOG.md
index 104b5ef..a1f6eff 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,6 +5,15 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
 and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
 
 ## [Unreleased]
+### Added
+- JSON format output [#34]
+- No-op command-line options to satisfy IntelliJ IDEs [#34]
+
+### Changed
+- Gate multi-threaded execution behind an enabled-by-default feature [#34]
+- Allow `Trial`s with non-static lifetimes [#34]
+
+[#34]: https://github.com/LukasKalbertodt/libtest-mimic/pull/34
 
 ## [0.6.1] - 2022-11-05
 ### Fixed
diff --git a/Cargo.toml b/Cargo.toml
index de2b701..334e343 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -18,10 +18,16 @@ readme = "README.md"
 exclude = [".github"]
 
+[features]
+default = ["multithreaded"]
+multithreaded = ["dep:crossbeam", "dep:num_cpus"]
+
 [dependencies]
 clap = { version = "4.0.8", features = ["derive"] }
-threadpool = "1.8.1"
 termcolor = "1.0.5"
+escape8259 = "0.5.2"
+crossbeam = { version = "0.8.3", optional = true }
+num_cpus = { version = "1.16.0", optional = true }
 
 [dev-dependencies]
 fastrand = "1.8.0"
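The new `multithreaded` feature is enabled by default; consumers opt out with `default-features = false` on their `libtest-mimic` dependency. Besides dropping the `crossbeam`/`num_cpus` dependencies, opting out also relaxes the runner bounds (see the `cfg(not(feature = "multithreaded"))` variants in `src/lib.rs` below), so runners no longer need to be `Send`. A minimal sketch of a hypothetical consumer test binary — not part of this patch:

```rust
use std::rc::Rc;

use libtest_mimic::{Arguments, Trial};

fn main() {
    // `Rc` is not `Send`; capturing it compiles only when the "multithreaded"
    // feature is disabled and `Trial::test` drops its `Send` bound.
    let shared = Rc::new(42);
    let trial = Trial::test("rc_capture", move || {
        assert_eq!(*shared, 42);
        Ok(())
    });
    libtest_mimic::run(&Arguments::from_args(), vec![trial]).exit();
}
```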
diff --git a/examples/simple.rs b/examples/simple.rs
index 0249596..8ed715c 100644
--- a/examples/simple.rs
+++ b/examples/simple.rs
@@ -1,8 +1,7 @@
 extern crate libtest_mimic;
 
+use libtest_mimic::{Arguments, Failed, Trial};
 use std::{thread, time};
 
-use libtest_mimic::{Arguments, Trial, Failed};
-
 fn main() {
     let args = Arguments::from_args();
@@ -18,7 +17,6 @@ fn main() {
     libtest_mimic::run(&args, tests).exit();
 }
 
-
 // Tests
 
 fn check_toph() -> Result<(), Failed> {
diff --git a/examples/tidy.rs b/examples/tidy.rs
index 18b26bf..12d9b72 100644
--- a/examples/tidy.rs
+++ b/examples/tidy.rs
@@ -1,15 +1,8 @@
 extern crate libtest_mimic;
 
-use libtest_mimic::{Arguments, Trial, Failed};
-
-use std::{
-    env,
-    error::Error,
-    ffi::OsStr,
-    fs,
-    path::Path,
-};
+use libtest_mimic::{Arguments, Failed, Trial};
+use std::{env, error::Error, ffi::OsStr, fs, path::Path};
 
 fn main() -> Result<(), Box<dyn Error>> {
     let args = Arguments::from_args();
@@ -19,7 +12,7 @@ fn main() -> Result<(), Box<dyn Error>> {
 
 /// Creates one test for each `.rs` file in the current directory or
 /// sub-directories of the current directory.
-fn collect_tests() -> Result<Vec<Trial>, Box<dyn Error>> {
+fn collect_tests() -> Result<Vec<Trial<'static>>, Box<dyn Error>> {
     fn visit_dir(path: &Path, tests: &mut Vec<Trial>) -> Result<(), Box<dyn Error>> {
         for entry in fs::read_dir(path)? {
             let entry = entry?;
@@ -34,8 +27,7 @@ fn collect_tests() -> Result<Vec<Trial>, Box<dyn Error>> {
                     .display()
                     .to_string();
 
-                let test = Trial::test(name, move || check_file(&path))
-                    .with_kind("tidy");
+                let test = Trial::test(name, move || check_file(&path)).with_kind("tidy");
                 tests.push(test);
             }
         } else if file_type.is_dir() {
diff --git a/src/args.rs b/src/args.rs
index 26cc26b..f0f69df 100644
--- a/src/args.rs
+++ b/src/args.rs
@@ -12,9 +12,7 @@ use clap::{Parser, ValueEnum};
 #[derive(Parser, Debug, Clone, Default)]
 #[command(
     help_template = "USAGE: [OPTIONS] [FILTER]\n\n{all-args}\n\n\n{after-help}",
-    disable_version_flag = true,
-    after_help = "By default, all tests are run in parallel. This can be altered with the \n\
-        --test-threads flag when running tests (set it to 1).",
+    disable_version_flag = true
 )]
 pub struct Arguments {
     // ============== FLAGS ===================================================
@@ -30,7 +28,7 @@ pub struct Arguments {
     #[arg(
         long = "test",
         conflicts_with = "bench",
-        help = "Run tests and not benchmarks",
+        help = "Run tests and not benchmarks"
     )]
     pub test: bool,
@@ -43,13 +41,24 @@ pub struct Arguments {
     pub list: bool,
 
     /// No-op, ignored (libtest-mimic always runs in no-capture mode)
-    #[arg(long = "nocapture", help = "No-op (libtest-mimic always runs in no-capture mode)")]
+    #[arg(
+        long = "nocapture",
+        help = "No-op (libtest-mimic always runs in no-capture mode)"
+    )]
     pub nocapture: bool,
 
+    /// No-op, ignored. libtest-mimic does not currently capture stdout.
+    #[arg(long = "show-output")]
+    pub show_output: bool,
+
+    /// No-op, ignored. The flag only exists for CLI compatibility with libtest.
+    #[arg(short = 'Z')]
+    pub unstable_flags: Option<UnstableFlags>,
+
     /// If set, filters are matched exactly rather than by substring.
     #[arg(
         long = "exact",
-        help = "Exactly match filters rather than by substring",
+        help = "Exactly match filters rather than by substring"
     )]
     pub exact: bool,
@@ -62,16 +71,17 @@ pub struct Arguments {
         short = 'q',
         long = "quiet",
         conflicts_with = "format",
-        help = "Display one character per test instead of one line. Alias to --format=terse",
+        help = "Display one character per test instead of one line. Alias to --format=terse"
     )]
     pub quiet: bool,
 
     // ============== OPTIONS =================================================
+    #[cfg(feature = "multithreaded")]
     /// Number of threads used for parallel testing.
     #[arg(
         long = "test-threads",
-        help = "Number of threads used for running tests in parallel. If set to 1, \n\
-            all tests are run in the main thread.",
+        help = "Number of threads used for running tests in parallel. Defaults to the \n\
+            number of logical CPUs. If set to 1, all tests are run in the main thread."
     )]
     pub test_threads: Option<usize>,
 
@@ -80,7 +90,7 @@ pub struct Arguments {
     #[arg(
         long = "logfile",
         value_name = "PATH",
-        help = "Write logs to the specified file instead of stdout",
+        help = "Write logs to the specified file instead of stdout"
     )]
     pub logfile: Option<String>,
 
@@ -89,7 +99,7 @@ pub struct Arguments {
     #[arg(
         long = "skip",
         value_name = "FILTER",
-        help = "Skip tests whose names contain FILTER (this flag can be used multiple times)",
+        help = "Skip tests whose names contain FILTER (this flag can be used multiple times)"
     )]
     pub skip: Vec<String>,
@@ -101,7 +111,7 @@ pub struct Arguments {
         help = "Configure coloring of output: \n\
             - auto = colorize if stdout is a tty and tests are run on serially (default)\n\
             - always = always colorize output\n\
-            - never = never colorize output\n",
+            - never = never colorize output\n"
     )]
     pub color: Option<ColorSetting>,
 
@@ -112,7 +122,8 @@ pub struct Arguments {
         value_name = "pretty|terse|json",
         help = "Configure formatting of output: \n\
             - pretty = Print verbose output\n\
-            - terse = Display one character per test\n",
+            - terse = Display one character per test\n\
+            - json = Print JSON events\n"
     )]
     pub format: Option<FormatSetting>,
 
@@ -121,7 +132,7 @@ pub struct Arguments {
     #[arg(
         value_name = "FILTER",
         help = "The FILTER string is tested against the name of all tests, and only those tests \
-            whose names contain the filter are run.",
+            whose names contain the filter are run."
     )]
     pub filter: Option<String>,
 }
@@ -168,6 +179,12 @@ impl Default for ColorSetting {
     }
 }
 
+/// Possible values for the `-Z` option.
+#[derive(Debug, Clone, Copy, PartialEq, Eq, ValueEnum)]
+pub enum UnstableFlags {
+    UnstableOptions,
+}
+
 /// Possible values for the `--format` option.
 #[derive(Debug, Clone, Copy, PartialEq, Eq, ValueEnum)]
 pub enum FormatSetting {
@@ -176,6 +193,9 @@ pub enum FormatSetting {
 
     /// One character per test. Usefull for test suites with many tests.
     Terse,
+
+    /// JSON output.
+    Json,
 }
 
 impl Default for FormatSetting {
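Since `Arguments` derives clap's `Parser`, the new no-op flags parse like any other option. A sketch of a hypothetical IntelliJ-style invocation (assumes the caller depends on `clap` itself so the `Parser` trait is in scope; the argv shown is illustrative):

```rust
use clap::Parser as _; // `Arguments` derives `clap::Parser`
use libtest_mimic::Arguments;

fn main() {
    // `-Z unstable-options` and `--show-output` are accepted for CLI
    // compatibility but change no behavior.
    let args = Arguments::parse_from([
        "test-binary",
        "-Z",
        "unstable-options",
        "--show-output",
        "--format",
        "json",
    ]);
    assert!(args.show_output);
    assert!(args.format.is_some());
}
```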
diff --git a/src/lib.rs b/src/lib.rs
index 334e44f..7fef1a9 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -71,18 +71,15 @@
 #![forbid(unsafe_code)]
 
-use std::{process, sync::mpsc, fmt, time::Instant};
+use std::{fmt, process, time::Instant};
 
 mod args;
 mod printer;
 
 use printer::Printer;
-use threadpool::ThreadPool;
 
 pub use crate::args::{Arguments, ColorSetting, FormatSetting};
 
-
-
 /// A single test or benchmark.
 ///
 /// The original `libtest` often calls benchmarks "tests", which is a bit
@@ -94,19 +91,46 @@ pub use crate::args::{Arguments, ColorSetting, FormatSetting};
 /// the trial is considered "failed". If you need the behavior of
 /// `#[should_panic]` you need to catch the panic yourself. You likely want to
 /// compare the panic payload to an expected value anyway.
-pub struct Trial {
-    runner: Box<dyn FnOnce(bool) -> Outcome + Send>,
+pub struct Trial<'a> {
+    #[cfg(feature = "multithreaded")]
+    runner: Box<dyn FnOnce(bool) -> Outcome + Send + 'a>,
+    #[cfg(not(feature = "multithreaded"))]
+    runner: Box<dyn FnOnce(bool) -> Outcome + 'a>,
     info: TestInfo,
 }
 
-impl Trial {
+impl<'a> Trial<'a> {
+    /// Creates a (non-benchmark) test with the given name and runner.
+    ///
+    /// The runner returning `Ok(())` is interpreted as the test passing. If the
+    /// runner returns `Err(_)`, the test is considered failed.
+    #[cfg(feature = "multithreaded")]
+    pub fn test<R>(name: impl Into<String>, runner: R) -> Self
+    where
+        R: FnOnce() -> Result<(), Failed> + Send + 'a,
+    {
+        Self {
+            runner: Box::new(move |_test_mode| match runner() {
+                Ok(()) => Outcome::Passed,
+                Err(failed) => Outcome::Failed(failed),
+            }),
+            info: TestInfo {
+                name: name.into(),
+                kind: String::new(),
+                is_ignored: false,
+                is_bench: false,
+            },
+        }
+    }
+
     /// Creates a (non-benchmark) test with the given name and runner.
     ///
     /// The runner returning `Ok(())` is interpreted as the test passing. If the
     /// runner returns `Err(_)`, the test is considered failed.
+    #[cfg(not(feature = "multithreaded"))]
     pub fn test<R>(name: impl Into<String>, runner: R) -> Self
     where
-        R: FnOnce() -> Result<(), Failed> + Send + 'static,
+        R: FnOnce() -> Result<(), Failed> + 'a,
     {
         Self {
             runner: Box::new(move |_test_mode| match runner() {
@@ -134,17 +158,54 @@ impl Trial {
     /// `test_mode` is `true` if neither `--bench` nor `--test` are set, and
     /// `false` when `--bench` is set. If `--test` is set, benchmarks are not
     /// ran at all, and both flags cannot be set at the same time.
+    #[cfg(feature = "multithreaded")]
+    pub fn bench<R>(name: impl Into<String>, runner: R) -> Self
+    where
+        R: FnOnce(bool) -> Result<Option<Measurement>, Failed> + Send + 'a,
+    {
+        Self {
+            runner: Box::new(move |test_mode| match runner(test_mode) {
+                Err(failed) => Outcome::Failed(failed),
+                Ok(_) if test_mode => Outcome::Passed,
+                Ok(Some(measurement)) => Outcome::Measured(measurement),
+                Ok(None) => {
+                    Outcome::Failed("bench runner returned `Ok(None)` in bench mode".into())
+                }
+            }),
+            info: TestInfo {
+                name: name.into(),
+                kind: String::new(),
+                is_ignored: false,
+                is_bench: true,
+            },
+        }
+    }
+
+    /// Creates a benchmark with the given name and runner.
+    ///
+    /// If the runner's parameter `test_mode` is `true`, the runner function
+    /// should run all code just once, without measuring, just to make sure it
+    /// does not panic. If the parameter is `false`, it should perform the
+    /// actual benchmark. If `test_mode` is `true` you may return `Ok(None)`,
+    /// but if it's `false`, you have to return a `Measurement`, or else the
+    /// benchmark is considered a failure.
+    ///
+    /// `test_mode` is `true` if neither `--bench` nor `--test` are set, and
+    /// `false` when `--bench` is set. If `--test` is set, benchmarks are not
+    /// run at all, and both flags cannot be set at the same time.
+    #[cfg(not(feature = "multithreaded"))]
     pub fn bench<R>(name: impl Into<String>, runner: R) -> Self
     where
-        R: FnOnce(bool) -> Result<Option<Measurement>, Failed> + Send + 'static,
+        R: FnOnce(bool) -> Result<Option<Measurement>, Failed> + 'a,
     {
         Self {
             runner: Box::new(move |test_mode| match runner(test_mode) {
                 Err(failed) => Outcome::Failed(failed),
                 Ok(_) if test_mode => Outcome::Passed,
                 Ok(Some(measurement)) => Outcome::Measured(measurement),
-                Ok(None)
-                    => Outcome::Failed("bench runner returned `Ok(None)` in bench mode".into()),
+                Ok(None) => {
+                    Outcome::Failed("bench runner returned `Ok(None)` in bench mode".into())
+                }
             }),
             info: TestInfo {
                 name: name.into(),
@@ -213,7 +274,7 @@ impl Trial {
     }
 }
 
-impl fmt::Debug for Trial {
+impl<'a> fmt::Debug for Trial<'a> {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         struct OpaqueRunner;
         impl fmt::Debug for OpaqueRunner {
@@ -274,13 +335,11 @@ impl Failed {
 impl<M: fmt::Display> From<M> for Failed {
     fn from(msg: M) -> Self {
         Self {
-            msg: Some(msg.to_string())
+            msg: Some(msg.to_string()),
         }
     }
 }
 
-
-
 /// The outcome of performing a test/benchmark.
 #[derive(Debug, Clone)]
 enum Outcome {
@@ -392,6 +451,34 @@ impl Arguments {
     }
 }
 
+#[cfg(feature = "multithreaded")]
+type Task<'a> = Box<dyn FnOnce() + Send + 'a>;
+
+#[cfg(feature = "multithreaded")]
+fn create_thread_pool<'a, F>(max_threads: Option<usize>, task_generator: F)
+where
+    F: FnOnce(crossbeam::channel::Sender<Task<'a>>) + 'a,
+{
+    use crossbeam::channel::bounded; // multi-producer, multi-consumer queue
+
+    let num_threads = max_threads.unwrap_or_else(num_cpus::get);
+
+    std::thread::scope(|scope| {
+        let (sender, receiver) = bounded::<Task<'a>>(num_threads);
+
+        for _ in 0..num_threads {
+            let rx = receiver.clone();
+            scope.spawn(move || {
+                for task in rx.iter() {
+                    task();
+                }
+            });
+        }
+
+        task_generator(sender);
+    })
+}
+
 /// Runs all given trials (tests & benchmarks).
 ///
 /// This is the central function of this crate. It provides the framework for
@@ -403,7 +490,6 @@ impl Arguments {
 pub fn run(args: &Arguments, mut tests: Vec<Trial>) -> Conclusion {
     let start_instant = Instant::now();
     let mut conclusion = Conclusion::empty();
-
     // Apply filtering
     if args.filter.is_some() || !args.skip.is_empty() || args.ignored {
         let len_before = tests.len() as u64;
@@ -426,7 +512,7 @@ pub fn run(args: &Arguments, mut tests: Vec<Trial>) -> Conclusion {
     let mut failed_tests = Vec::new();
     let mut handle_outcome = |outcome: Outcome, test: TestInfo, printer: &mut Printer| {
-        printer.print_single_outcome(&outcome);
+        printer.print_single_outcome(&test, &outcome);
 
         // Handle outcome
         match outcome {
@@ -434,7 +520,7 @@ pub fn run(args: &Arguments, mut tests: Vec<Trial>) -> Conclusion {
             Outcome::Failed(failed) => {
                 failed_tests.push((test, failed.msg));
                 conclusion.num_failed += 1;
-            },
+            }
             Outcome::Ignored => conclusion.num_ignored += 1,
             Outcome::Measured(_) => conclusion.num_measured += 1,
         }
@@ -442,7 +528,7 @@ pub fn run(args: &Arguments, mut tests: Vec<Trial>) -> Conclusion {
 
     // Execute all tests.
     let test_mode = !args.bench;
-    if args.test_threads == Some(1) {
+    let mut sequentially = |tests: Vec<Trial>| {
         // Run test sequentially in main thread
         for test in tests {
             // Print `test foo ...`, run the test, then print the outcome in
@@ -455,38 +541,47 @@ pub fn run(args: &Arguments, mut tests: Vec<Trial>) -> Conclusion {
             };
             handle_outcome(outcome, test.info, &mut printer);
         }
-    } else {
-        // Run test in thread pool.
-        let pool = match args.test_threads {
-            Some(num_threads) => ThreadPool::new(num_threads),
-            None => ThreadPool::default()
-        };
-        let (sender, receiver) = mpsc::channel();
+    };
 
-        let num_tests = tests.len();
-        for test in tests {
-            if args.is_ignored(&test) {
-                sender.send((Outcome::Ignored, test.info)).unwrap();
-            } else {
-                let sender = sender.clone();
-                pool.execute(move || {
-                    // It's fine to ignore the result of sending. If the
-                    // receiver has hung up, everything will wind down soon
-                    // anyway.
-                    let outcome = run_single(test.runner, test_mode);
-                    let _ = sender.send((outcome, test.info));
-                });
-            }
-        }
+    #[cfg(not(feature = "multithreaded"))]
+    sequentially(tests);
 
-        for (outcome, test_info) in receiver.iter().take(num_tests) {
-            // In multithreaded mode, we do only print the start of the line
-            // after the test ran, as otherwise it would lead to terribly
-            // interleaved output.
-            printer.print_test(&test_info);
-            handle_outcome(outcome, test_info, &mut printer);
+    #[cfg(feature = "multithreaded")]
+    {
+        use std::sync::mpsc;
+
+        if args.test_threads == Some(1) {
+            sequentially(tests);
+        } else {
+            // Run tests in a scoped thread pool.
+            create_thread_pool(args.test_threads, |task_sender| {
+                let (outcome_sender, outcome_receiver) = mpsc::channel();
+                let num_tests = tests.len();
+                for test in tests {
+                    if args.is_ignored(&test) {
+                        outcome_sender.send((Outcome::Ignored, test.info)).unwrap();
+                    } else {
+                        let outcome_sender = outcome_sender.clone();
+                        // It's fine to ignore the result of sending. If the
+                        // receiver has hung up, everything will wind down soon
+                        // anyway.
+                        let _ = task_sender.send(Box::new(move || {
+                            let outcome = run_single(test.runner, test_mode);
+                            let _ = outcome_sender.send((outcome, test.info));
+                        }));
+                    }
+                }
+
+                for (outcome, test_info) in outcome_receiver.iter().take(num_tests) {
+                    // In multithreaded mode, we only print the start of the
+                    // line after the test has run; doing it earlier would lead
+                    // to terribly interleaved output.
+                    printer.print_test(&test_info);
+                    handle_outcome(outcome, test_info, &mut printer);
+                }
+            });
         }
-    }
+    };
 
     // Print failures if there were any, and the final summary.
     if !failed_tests.is_empty() {
@@ -499,14 +594,19 @@ pub fn run(args: &Arguments, mut tests: Vec<Trial>) -> Conclusion {
 }
 
 /// Runs the given runner, catching any panics and treating them as a failed test.
-fn run_single(runner: Box<dyn FnOnce(bool) -> Outcome + Send>, test_mode: bool) -> Outcome {
+fn run_single<'a>(
+    #[cfg(feature = "multithreaded")] runner: Box<dyn FnOnce(bool) -> Outcome + Send + 'a>,
+    #[cfg(not(feature = "multithreaded"))] runner: Box<dyn FnOnce(bool) -> Outcome + 'a>,
+    test_mode: bool,
+) -> Outcome {
     use std::panic::{catch_unwind, AssertUnwindSafe};
 
     catch_unwind(AssertUnwindSafe(move || runner(test_mode))).unwrap_or_else(|e| {
         // The `panic` information is just an `Any` object representing the
         // value the panic was invoked with. For most panics (which use
         // `panic!` like `println!`), this is either `&str` or `String`.
-        let payload = e.downcast_ref::<String>()
+        let payload = e
+            .downcast_ref::<String>()
             .map(|s| s.as_str())
             .or(e.downcast_ref::<&str>().map(|s| *s));
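`Trial<'a>` is the user-visible payoff of the lifetime change above: runners may now borrow from the enclosing scope instead of needing `'static` captures. A minimal sketch of a hypothetical consumer (the names are illustrative, not from this patch):

```rust
use libtest_mimic::{Arguments, Trial};

fn main() {
    let args = Arguments::from_args();

    // Owned by `main` and merely borrowed by each trial below; with the old
    // `'static` bound every runner would have needed its own clone.
    let cases = vec![String::from("alpha"), String::from("beta")];

    let trials: Vec<Trial> = cases
        .iter()
        .map(|case| {
            Trial::test(format!("parse_{case}"), move || {
                assert!(!case.is_empty());
                Ok(())
            })
        })
        .collect();

    libtest_mimic::run(&args, trials).exit();
}
```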
diff --git a/src/printer.rs b/src/printer.rs
index d0766f0..6662a40 100644
--- a/src/printer.rs
+++ b/src/printer.rs
@@ -11,8 +11,8 @@ use std::{fs::File, time::Duration};
 use termcolor::{Ansi, Color, ColorChoice, ColorSpec, NoColor, StandardStream, WriteColor};
 
 use crate::{
-    Arguments, ColorSetting, Conclusion, FormatSetting, Outcome, Trial, Failed,
-    Measurement, TestInfo,
+    Arguments, ColorSetting, Conclusion, Failed, FormatSetting, Measurement, Outcome, TestInfo,
+    Trial,
 };
 
 pub(crate) struct Printer {
@@ -58,12 +58,14 @@ impl Printer {
         // test names and outcomes. Counting the number of code points is just
         // a cheap way that works in most cases. Usually, these names are
         // ASCII.
-        let name_width = tests.iter()
+        let name_width = tests
+            .iter()
             .map(|test| test.info.name.chars().count())
             .max()
             .unwrap_or(0);
 
-        let kind_width = tests.iter()
+        let kind_width = tests
+            .iter()
             .map(|test| {
                 if test.info.kind.is_empty() {
                     0
@@ -92,6 +94,12 @@ impl Printer {
                 writeln!(self.out).unwrap();
                 writeln!(self.out, "running {} test{}", num_tests, plural_s).unwrap();
             }
+            FormatSetting::Json => writeln!(
+                self.out,
+                r#"{{ "type": "suite", "event": "started", "test_count": {} }}"#,
+                num_tests
+            )
+            .unwrap(),
         }
     }
@@ -102,7 +110,7 @@ impl Printer {
         match self.format {
             FormatSetting::Pretty => {
                 let kind = if kind.is_empty() {
-                    format!("")
+                    String::new()
                 } else {
                     format!("[{}] ", kind)
                 };
@@ -110,23 +118,29 @@ impl Printer {
                 write!(
                     self.out,
                     "test {: <2$}{: <3$} ... ",
-                    kind,
-                    name,
-                    self.kind_width,
-                    self.name_width,
-                ).unwrap();
+                    kind, name, self.kind_width, self.name_width,
+                )
+                .unwrap();
                 self.out.flush().unwrap();
             }
             FormatSetting::Terse => {
                 // In terse mode, nothing is printed before the job. Only
                 // `print_single_outcome` prints one character.
             }
+            FormatSetting::Json => {
+                writeln!(
+                    self.out,
+                    r#"{{ "type": "test", "event": "started", "name": "{}" }}"#,
+                    escape8259::escape(name)
+                )
+                .unwrap();
+            }
         }
     }
 
     /// Prints the outcome of a single tests. `ok` or `FAILED` in pretty mode
     /// and `.` or `F` in terse mode.
-    pub(crate) fn print_single_outcome(&mut self, outcome: &Outcome) {
+    pub(crate) fn print_single_outcome(&mut self, info: &TestInfo, outcome: &Outcome) {
         match self.format {
             FormatSetting::Pretty => {
                 self.print_outcome_pretty(outcome);
@@ -150,6 +164,36 @@ impl Printer {
                 write!(self.out, "{}", c).unwrap();
                 self.out.reset().unwrap();
             }
+            FormatSetting::Json => {
+                if let Outcome::Measured(Measurement { avg, variance }) = outcome {
+                    writeln!(
+                        self.out,
+                        r#"{{ "type": "bench", "name": "{}", "median": {}, "deviation": {} }}"#,
+                        escape8259::escape(&info.name),
+                        avg,
+                        variance
+                    )
+                    .unwrap();
+                } else {
+                    writeln!(
+                        self.out,
+                        r#"{{ "type": "test", "name": "{}", "event": "{}"{} }}"#,
+                        escape8259::escape(&info.name),
+                        match outcome {
+                            Outcome::Passed => "ok",
+                            Outcome::Failed(_) => "failed",
+                            Outcome::Ignored => "ignored",
+                            Outcome::Measured(_) => unreachable!(),
+                        },
+                        match outcome {
+                            Outcome::Failed(Failed { msg: Some(msg) }) =>
+                                format!(r#", "stdout": "Error: \"{}\"\n""#, escape8259::escape(msg)),
+                            _ => "".into(),
+                        }
+                    )
+                    .unwrap();
+                }
+            }
         }
     }
@@ -176,9 +220,24 @@ impl Printer {
                     conclusion.num_measured,
                     conclusion.num_filtered_out,
                     execution_time.as_secs_f64()
-                ).unwrap();
+                )
+                .unwrap();
                 writeln!(self.out).unwrap();
             }
+            FormatSetting::Json => {
+                writeln!(
+                    self.out,
+                    r#"{{ "type": "suite", "event": "{}", "passed": {}, "failed": {}, "ignored": {}, "measured": {}, "filtered_out": {}, "exec_time": {} }}"#,
+                    if conclusion.num_failed > 0 { "failed" } else { "ok" },
+                    conclusion.num_passed,
+                    conclusion.num_failed,
+                    conclusion.num_ignored,
+                    conclusion.num_measured,
+                    conclusion.num_filtered_out,
+                    execution_time.as_secs_f64()
+                )
+                .unwrap();
+            }
         }
     }
@@ -201,7 +260,7 @@ impl Printer {
         }
 
         let kind = if test.info.kind.is_empty() {
-            format!("")
+            String::new()
         } else {
             format!("[{}] ", test.info.kind)
         };
@@ -221,6 +280,9 @@ impl Printer {
     /// Prints a list of failed tests with their messages. This is only called
     /// if there were any failures.
     pub(crate) fn print_failures(&mut self, fails: &[(TestInfo, Option<String>)]) {
+        if self.format == FormatSetting::Json {
+            return;
+        }
         writeln!(self.out).unwrap();
         writeln!(self.out, "failures:").unwrap();
         writeln!(self.out).unwrap();
@@ -261,7 +323,8 @@ impl Printer {
                 ": {:>11} ns/iter (+/- {})",
                 fmt_with_thousand_sep(*avg),
                 fmt_with_thousand_sep(*variance),
-            ).unwrap();
+            )
+            .unwrap();
         }
     }
 }
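Test names and messages are spliced into these JSON format strings verbatim, so `escape8259` (an RFC 8259 / JSON string escaper) is applied first; the `\"ups\"` trial added to `tests/mixed_bag.rs` below exercises exactly this path. A small illustration of the expected escaping (my reading of the crate's behavior, not code from this patch):

```rust
fn main() {
    // Quotation marks in a test name must become \" before being embedded
    // in a JSON string literal.
    assert_eq!(escape8259::escape(r#"say "hi""#), r#"say \"hi\""#);
}
```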
diff --git a/tests/all_passing.rs b/tests/all_passing.rs
index b5c5552..cf94087 100644
--- a/tests/all_passing.rs
+++ b/tests/all_passing.rs
@@ -1,5 +1,5 @@
 use common::{args, check};
-use libtest_mimic::{Trial, Conclusion};
+use libtest_mimic::{Conclusion, Trial};
 use pretty_assertions::assert_eq;
 
 use crate::common::do_run;
@@ -7,8 +7,7 @@ use crate::common::do_run;
 #[macro_use]
 mod common;
 
-
-fn tests() -> Vec<Trial> {
+fn tests() -> Vec<Trial<'static>> {
     vec![
         Trial::test("foo", || Ok(())),
         Trial::test("bar", || Ok(())),
@@ -18,7 +17,10 @@ fn tests() -> Vec<Trial<'static>> {
 
 #[test]
 fn normal() {
-    check(args([]), tests, 3,
+    check(
+        args([]),
+        tests,
+        3,
         Conclusion {
             num_filtered_out: 0,
             num_passed: 3,
@@ -30,13 +32,16 @@ fn normal() {
             test foo ... ok
             test bar ... ok
             test barro ... ok
-        "
+        ",
     );
 }
 
 #[test]
 fn filter_one() {
-    check(args(["foo"]), tests, 1,
+    check(
+        args(["foo"]),
+        tests,
+        1,
         Conclusion {
             num_filtered_out: 2,
             num_passed: 1,
@@ -50,7 +55,10 @@ fn filter_one() {
 
 #[test]
 fn filter_two() {
-    check(args(["bar"]), tests, 2,
+    check(
+        args(["bar"]),
+        tests,
+        2,
         Conclusion {
             num_filtered_out: 1,
             num_passed: 2,
@@ -65,10 +73,12 @@ fn filter_two() {
     );
 }
 
-
 #[test]
 fn filter_exact() {
-    check(args(["bar", "--exact"]), tests, 1,
+    check(
+        args(["bar", "--exact"]),
+        tests,
+        1,
         Conclusion {
             num_filtered_out: 2,
             num_passed: 1,
@@ -82,7 +92,10 @@ fn filter_exact() {
 
 #[test]
 fn filter_two_and_skip() {
-    check(args(["--skip", "barro", "bar"]), tests, 1,
+    check(
+        args(["--skip", "barro", "bar"]),
+        tests,
+        1,
         Conclusion {
             num_filtered_out: 2,
             num_passed: 1,
@@ -96,7 +109,10 @@ fn filter_two_and_skip() {
 
 #[test]
 fn skip_nothing() {
-    check(args(["--skip", "peter"]), tests, 3,
+    check(
+        args(["--skip", "peter"]),
+        tests,
+        3,
         Conclusion {
             num_filtered_out: 0,
             num_passed: 3,
@@ -108,13 +124,16 @@ fn skip_nothing() {
             test foo ... ok
             test bar ... ok
             test barro ... ok
-        "
+        ",
     );
 }
 
 #[test]
 fn skip_two() {
-    check(args(["--skip", "bar"]), tests, 1,
+    check(
+        args(["--skip", "bar"]),
+        tests,
+        1,
         Conclusion {
             num_filtered_out: 2,
             num_passed: 1,
@@ -122,13 +141,16 @@ fn skip_two() {
             num_ignored: 0,
             num_measured: 0,
         },
-        "test foo ... ok"
+        "test foo ... ok",
     );
 }
 
 #[test]
 fn skip_exact() {
-    check(args(["--exact", "--skip", "bar"]), tests, 2,
+    check(
+        args(["--exact", "--skip", "bar"]),
+        tests,
+        2,
         Conclusion {
             num_filtered_out: 1,
             num_passed: 2,
@@ -139,24 +161,30 @@ fn skip_exact() {
         "
             test foo ... ok
             test barro ... ok
-        "
+        ",
     );
 }
 
 #[test]
 fn terse_output() {
     let (c, out) = do_run(args(["--format", "terse"]), tests());
-    assert_eq!(c, Conclusion {
-        num_filtered_out: 0,
-        num_passed: 3,
-        num_failed: 0,
-        num_ignored: 0,
-        num_measured: 0,
-    });
-    assert_log!(out, "
+    assert_eq!(
+        c,
+        Conclusion {
+            num_filtered_out: 0,
+            num_passed: 3,
+            num_failed: 0,
+            num_ignored: 0,
+            num_measured: 0,
+        }
+    );
+    assert_log!(
+        out,
+        "
         running 3 tests
         ...
         test result: ok. 3 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; \
             finished in 0.00s
-    ");
+        "
+    );
 }
diff --git a/tests/common/mod.rs b/tests/common/mod.rs
index 33bc417..8b8972b 100644
--- a/tests/common/mod.rs
+++ b/tests/common/mod.rs
@@ -1,9 +1,8 @@
-use std::{path::Path, iter::repeat_with, collections::HashMap};
 use pretty_assertions::assert_eq;
+use std::{iter::repeat_with, path::Path};
 
 use libtest_mimic::{run, Arguments, Conclusion, Trial};
 
-
 const TEMPDIR: &str = env!("CARGO_TARGET_TMPDIR");
 
 pub fn args<const N: usize>(args: [&str; N]) -> Arguments {
@@ -14,23 +13,24 @@ pub fn args<const N: usize>(args: [&str; N]) -> Arguments {
 
 pub fn do_run(mut args: Arguments, tests: Vec<Trial>) -> (Conclusion, String) {
     // Create path to temporary file.
-    let suffix = repeat_with(fastrand::alphanumeric).take(10).collect::<String>();
+    let suffix = repeat_with(fastrand::alphanumeric)
+        .take(10)
+        .collect::<String>();
     let path = Path::new(&TEMPDIR).join(format!("libtest_mimic_output_{suffix}.txt"));
     args.logfile = Some(path.display().to_string());
 
     let c = run(&args, tests);
 
-    let output = std::fs::read_to_string(&path)
-        .expect("Can't read temporary logfile");
-    std::fs::remove_file(&path)
-        .expect("Can't remove temporary logfile");
+    let output = std::fs::read_to_string(&path).expect("Can't read temporary logfile");
+    std::fs::remove_file(&path).expect("Can't remove temporary logfile");
 
     (c, output)
 }
 
 /// Removes shared indentation so that at least one line has no indentation
 /// (no leading spaces).
 pub fn clean_expected_log(s: &str) -> String {
-    let shared_indent = s.lines()
+    let shared_indent = s
+        .lines()
         .filter(|l| l.contains(|c| c != ' '))
         .map(|l| l.bytes().take_while(|b| *b == b' ').count())
         .min()
@@ -52,21 +52,30 @@ pub fn clean_expected_log(s: &str) -> String {
 
 /// Best effort tool to check certain things about a log that might have all
 /// tests randomly ordered.
+#[cfg(feature = "multithreaded")]
 pub fn assert_reordered_log(actual: &str, num: u64, expected_lines: &[&str], tail: &str) {
     let actual = actual.trim();
     let (first_line, rest) = actual.split_once('\n').expect("log has too few lines");
     let (middle, last_line) = rest.rsplit_once('\n').expect("log has too few lines");
 
-
-    assert_eq!(first_line, &format!("running {} test{}", num, if num == 1 { "" } else { "s" }));
+    assert_eq!(
+        first_line,
+        &format!("running {} test{}", num, if num == 1 { "" } else { "s" })
+    );
     assert!(last_line.contains(tail));
 
+    use std::collections::HashMap;
+
     let mut actual_lines = HashMap::new();
     for line in middle.lines().map(|l| l.trim()).filter(|l| !l.is_empty()) {
         *actual_lines.entry(line).or_insert(0) += 1;
     }
 
-    for expected in expected_lines.iter().map(|l| l.trim()).filter(|l| !l.is_empty()) {
+    for expected in expected_lines
+        .iter()
+        .map(|l| l.trim())
+        .filter(|l| !l.is_empty())
+    {
         match actual_lines.get_mut(expected) {
             None | Some(0) => panic!("expected line \"{expected}\" not in log"),
             Some(num) => *num -= 1,
@@ -97,19 +106,27 @@ macro_rules! assert_log {
             }
         }
 
+        if let Some(pos) = actual.rfind("\"exec_time\":") {
+            actual.truncate(pos);
+            actual.push_str("\"exec_time\": 0.000000000 }");
+        }
+
         assert_eq!(actual, expected);
     };
 }
 
-pub fn check(
-    mut args: Arguments,
-    mut tests: impl FnMut() -> Vec<Trial>,
-    num_running_tests: u64,
+pub fn check<'a>(
+    #[allow(unused_mut)] mut args: Arguments,
+    mut tests: impl FnMut() -> Vec<Trial<'a>>,
+    #[allow(unused_variables)] num_running_tests: u64,
     expected_conclusion: Conclusion,
     expected_output: &str,
 ) {
     // Run in single threaded mode
-    args.test_threads = Some(1);
+    #[cfg(feature = "multithreaded")]
+    {
+        args.test_threads = Some(1);
+    }
     let (c, out) = do_run(args.clone(), tests());
     let expected = crate::common::clean_expected_log(expected_output);
     let actual = {
@@ -119,19 +136,29 @@ pub fn check(
     assert_eq!(actual.trim(), expected.trim());
     assert_eq!(c, expected_conclusion);
 
-    // Run in multithreaded mode.
-    let (c, out) = do_run(args, tests());
-    assert_reordered_log(
-        &out,
-        num_running_tests,
-        &expected_output.lines().collect::<Vec<_>>(),
-        &conclusion_to_output(&c),
-    );
-    assert_eq!(c, expected_conclusion);
+    #[cfg(feature = "multithreaded")]
+    {
+        // Run in multithreaded mode.
+        let (c, out) = do_run(args, tests());
+        assert_reordered_log(
+            &out,
+            num_running_tests,
+            &expected_output.lines().collect::<Vec<_>>(),
+            &conclusion_to_output(&c),
+        );
+        assert_eq!(c, expected_conclusion);
+    }
 }
 
+#[cfg(feature = "multithreaded")]
 fn conclusion_to_output(c: &Conclusion) -> String {
-    let Conclusion { num_filtered_out, num_passed, num_failed, num_ignored, num_measured } = *c;
+    let Conclusion {
+        num_filtered_out,
+        num_passed,
+        num_failed,
+        num_ignored,
+        num_measured,
+    } = *c;
     format!(
         "test result: {}. {} passed; {} failed; {} ignored; {} measured; {} filtered out;",
         if num_failed > 0 { "FAILED" } else { "ok" },
diff --git a/tests/main_thread.rs b/tests/main_thread.rs
index c6cc24a..e154801 100644
--- a/tests/main_thread.rs
+++ b/tests/main_thread.rs
@@ -1,16 +1,23 @@
-use libtest_mimic::{Trial, Arguments};
-
+use libtest_mimic::{Arguments, Trial};
 
 #[test]
 fn check_test_on_main_thread() {
     let outer_thread = std::thread::current().id();
 
+    #[allow(unused_mut)]
     let mut args = Arguments::default();
-    args.test_threads = Some(1);
-    let conclusion = libtest_mimic::run(&args, vec![Trial::test("check", move || {
-        assert_eq!(outer_thread, std::thread::current().id());
-        Ok(())
-    })]);
+
+    #[cfg(feature = "multithreaded")]
+    {
+        args.test_threads = Some(1);
+    }
+    let conclusion = libtest_mimic::run(
+        &args,
+        vec![Trial::test("check", move || {
+            assert_eq!(outer_thread, std::thread::current().id());
+            Ok(())
+        })],
+    );
     assert_eq!(conclusion.num_passed, 1);
 }
diff --git a/tests/mixed_bag.rs b/tests/mixed_bag.rs
index a6fe52f..613e226 100644
--- a/tests/mixed_bag.rs
+++ b/tests/mixed_bag.rs
@@ -1,49 +1,60 @@
-use pretty_assertions::assert_eq;
-use libtest_mimic::{Trial, Conclusion, Measurement};
 use crate::common::{args, check, do_run};
+use libtest_mimic::{Conclusion, Measurement, Trial};
+use pretty_assertions::assert_eq;
 
 #[macro_use]
 mod common;
 
-
-fn tests() -> Vec<Trial> {
+fn tests() -> Vec<Trial<'static>> {
     fn meas(avg: u64, variance: u64) -> Option<Measurement> {
         Some(Measurement { avg, variance })
     }
 
     vec![
         Trial::test("cat", || Ok(())),
+        Trial::test("\"ups\"", || Err("failed to parse \"abc\"".into())),
         Trial::test("dog", || Err("was not a good boy".into())),
         Trial::test("fox", || Ok(())).with_kind("apple"),
         Trial::test("bunny", || Err("jumped too high".into())).with_kind("apple"),
 
         Trial::test("frog", || Ok(())).with_ignored_flag(true),
         Trial::test("owl", || Err("broke neck".into())).with_ignored_flag(true),
-        Trial::test("fly", || Ok(())).with_ignored_flag(true).with_kind("banana"),
-        Trial::test("bear", || Err("no honey".into())).with_ignored_flag(true).with_kind("banana"),
-
+        Trial::test("fly", || Ok(()))
+            .with_ignored_flag(true)
+            .with_kind("banana"),
+        Trial::test("bear", || Err("no honey".into()))
+            .with_ignored_flag(true)
+            .with_kind("banana"),
         Trial::bench("red", |_| Ok(meas(32, 3))),
         Trial::bench("blue", |_| Err("sky fell down".into())),
         Trial::bench("yellow", |_| Ok(meas(64, 4))).with_kind("kiwi"),
         Trial::bench("green", |_| Err("was poisoned".into())).with_kind("kiwi"),
         Trial::bench("purple", |_| Ok(meas(100, 5))).with_ignored_flag(true),
         Trial::bench("cyan", |_| Err("not creative enough".into())).with_ignored_flag(true),
-        Trial::bench("orange", |_| Ok(meas(17, 6))).with_ignored_flag(true).with_kind("banana"),
-        Trial::bench("pink", |_| Err("bad".into())).with_ignored_flag(true).with_kind("banana"),
+        Trial::bench("orange", |_| Ok(meas(17, 6)))
+            .with_ignored_flag(true)
+            .with_kind("banana"),
+        Trial::bench("pink", |_| Err("bad".into()))
+            .with_ignored_flag(true)
+            .with_kind("banana"),
     ]
 }
.with_kind("banana"), ] } #[test] fn normal() { - check(args([]), tests, 16, + check( + args([]), + tests, + 17, Conclusion { num_filtered_out: 0, num_passed: 4, - num_failed: 4, + num_failed: 5, num_ignored: 8, num_measured: 0, }, " test cat ... ok + test \"ups\" ... FAILED test dog ... FAILED test [apple] fox ... ok test [apple] bunny ... FAILED @@ -62,6 +73,9 @@ fn normal() { failures: + ---- \"ups\" ---- + failed to parse \"abc\" + ---- dog ---- was not a good boy @@ -76,6 +90,7 @@ fn normal() { failures: + \"ups\" dog bunny blue @@ -86,16 +101,20 @@ fn normal() { #[test] fn test_mode() { - check(args(["--test"]), tests, 16, + check( + args(["--test"]), + tests, + 17, Conclusion { num_filtered_out: 0, num_passed: 2, - num_failed: 2, + num_failed: 3, num_ignored: 12, num_measured: 0, }, " test cat ... ok + test \"ups\" ... FAILED test dog ... FAILED test [apple] fox ... ok test [apple] bunny ... FAILED @@ -114,6 +133,9 @@ fn test_mode() { failures: + ---- \"ups\" ---- + failed to parse \"abc\" + ---- dog ---- was not a good boy @@ -122,6 +144,7 @@ fn test_mode() { failures: + \"ups\" dog bunny ", @@ -130,16 +153,20 @@ fn test_mode() { #[test] fn bench_mode() { - check(args(["--bench"]), tests, 16, + check( + args(["--bench"]), + tests, + 17, Conclusion { num_filtered_out: 0, num_passed: 0, num_failed: 2, - num_ignored: 12, + num_ignored: 13, num_measured: 2, }, " test cat ... ignored + test \"ups\" ... ignored test dog ... ignored test [apple] fox ... ignored test [apple] bunny ... ignored @@ -175,8 +202,11 @@ fn bench_mode() { #[test] fn list() { let (c, out) = common::do_run(args(["--list"]), tests()); - assert_log!(out, " + assert_log!( + out, + " cat: test + \"ups\": test dog: test [apple] fox: test [apple] bunny: test @@ -192,20 +222,26 @@ fn list() { cyan: bench [banana] orange: bench [banana] pink: bench - "); - assert_eq!(c, Conclusion { - num_filtered_out: 0, - num_passed: 0, - num_failed: 0, - num_ignored: 0, - num_measured: 0, - }); + " + ); + assert_eq!( + c, + Conclusion { + num_filtered_out: 0, + num_passed: 0, + num_failed: 0, + num_ignored: 0, + num_measured: 0, + } + ); } #[test] fn list_ignored() { let (c, out) = common::do_run(args(["--list", "--ignored"]), tests()); - assert_log!(out, " + assert_log!( + out, + " frog: test owl: test [banana] fly: test @@ -214,39 +250,52 @@ fn list_ignored() { cyan: bench [banana] orange: bench [banana] pink: bench - "); - assert_eq!(c, Conclusion { - num_filtered_out: 0, - num_passed: 0, - num_failed: 0, - num_ignored: 0, - num_measured: 0, - }); + " + ); + assert_eq!( + c, + Conclusion { + num_filtered_out: 0, + num_passed: 0, + num_failed: 0, + num_ignored: 0, + num_measured: 0, + } + ); } #[test] fn list_with_filter() { let (c, out) = common::do_run(args(["--list", "a"]), tests()); - assert_log!(out, " + assert_log!( + out, + " cat: test [banana] bear: test cyan: bench [banana] orange: bench - "); - assert_eq!(c, Conclusion { - num_filtered_out: 0, - num_passed: 0, - num_failed: 0, - num_ignored: 0, - num_measured: 0, - }); + " + ); + assert_eq!( + c, + Conclusion { + num_filtered_out: 0, + num_passed: 0, + num_failed: 0, + num_ignored: 0, + num_measured: 0, + } + ); } #[test] fn filter_c() { - check(args(["c"]), tests, 2, + check( + args(["c"]), + tests, + 2, Conclusion { - num_filtered_out: 14, + num_filtered_out: 15, num_passed: 1, num_failed: 0, num_ignored: 1, @@ -261,9 +310,12 @@ fn filter_c() { #[test] fn filter_o_test() { - check(args(["--test", "o"]), tests, 6, + check( + args(["--test", "o"]), + tests, + 6, Conclusion { 
-            num_filtered_out: 10,
+            num_filtered_out: 11,
             num_passed: 1,
             num_failed: 1,
             num_ignored: 4,
@@ -291,9 +343,12 @@ fn filter_o_test() {
 
 #[test]
 fn filter_o_test_include_ignored() {
-    check(args(["--test", "--include-ignored", "o"]), tests, 6,
+    check(
+        args(["--test", "--include-ignored", "o"]),
+        tests,
+        6,
         Conclusion {
-            num_filtered_out: 10,
+            num_filtered_out: 11,
             num_passed: 2,
             num_failed: 2,
             num_ignored: 2,
@@ -325,9 +380,12 @@ fn filter_o_test_include_ignored() {
 
 #[test]
 fn filter_o_test_ignored() {
-    check(args(["--test", "--ignored", "o"]), tests, 3,
+    check(
+        args(["--test", "--ignored", "o"]),
+        tests,
+        3,
         Conclusion {
-            num_filtered_out: 13,
+            num_filtered_out: 14,
             num_passed: 1,
             num_failed: 1,
             num_ignored: 1,
@@ -352,16 +410,20 @@ fn filter_o_test_ignored() {
 
 #[test]
 fn normal_include_ignored() {
-    check(args(["--include-ignored"]), tests, 16,
+    check(
+        args(["--include-ignored"]),
+        tests,
+        17,
         Conclusion {
             num_filtered_out: 0,
             num_passed: 8,
-            num_failed: 8,
+            num_failed: 9,
             num_ignored: 0,
             num_measured: 0,
         },
         "
             test cat ... ok
+            test \"ups\" ... FAILED
             test dog ... FAILED
             test [apple] fox ... ok
             test [apple] bunny ... FAILED
@@ -380,6 +442,9 @@ fn normal_include_ignored() {
 
             failures:
 
+            ---- \"ups\" ----
+            failed to parse \"abc\"
+
             ---- dog ----
             was not a good boy
 
@@ -406,6 +471,7 @@ fn normal_include_ignored() {
 
             failures:
+                \"ups\"
                 dog
                 bunny
                 owl
@@ -420,9 +486,12 @@ fn normal_include_ignored() {
 
 #[test]
 fn normal_ignored() {
-    check(args(["--ignored"]), tests, 8,
+    check(
+        args(["--ignored"]),
+        tests,
+        8,
         Conclusion {
-            num_filtered_out: 8,
+            num_filtered_out: 9,
             num_passed: 4,
             num_failed: 4,
             num_ignored: 0,
@@ -464,9 +533,12 @@ fn normal_ignored() {
 
 #[test]
 fn lots_of_flags() {
-    check(args(["--include-ignored", "--skip", "g", "--test", "o"]), tests, 3,
+    check(
+        args(["--include-ignored", "--skip", "g", "--test", "o"]),
+        tests,
+        3,
         Conclusion {
-            num_filtered_out: 13,
+            num_filtered_out: 14,
             num_passed: 1,
             num_failed: 1,
             num_ignored: 1,
@@ -491,19 +563,31 @@ fn lots_of_flags() {
 
 #[test]
 fn terse_output() {
+    #[cfg(feature = "multithreaded")]
     let (c, out) = do_run(args(["--format", "terse", "--test-threads", "1"]), tests());
-    assert_eq!(c, Conclusion {
-        num_filtered_out: 0,
-        num_passed: 4,
-        num_failed: 4,
-        num_ignored: 8,
-        num_measured: 0,
-    });
-    assert_log!(out, "
-        running 16 tests
-        .F.Fiiii.F.Fiiii
+    #[cfg(not(feature = "multithreaded"))]
+    let (c, out) = do_run(args(["--format", "terse"]), tests());
+
+    assert_eq!(
+        c,
+        Conclusion {
+            num_filtered_out: 0,
+            num_passed: 4,
+            num_failed: 5,
+            num_ignored: 8,
+            num_measured: 0,
+        }
+    );
+    assert_log!(
+        out,
+        "
+        running 17 tests
+        .FF.Fiiii.F.Fiiii
 
         failures:
 
+        ---- \"ups\" ----
+        failed to parse \"abc\"
+
         ---- dog ----
         was not a good boy
@@ -518,12 +602,74 @@ fn terse_output() {
 
         failures:
+            \"ups\"
             dog
             bunny
             blue
             green
 
-        test result: FAILED. 4 passed; 4 failed; 8 ignored; 0 measured; 0 filtered out; \
+        test result: FAILED. 4 passed; 5 failed; 8 ignored; 0 measured; 0 filtered out; \
             finished in 0.00s
-    ");
+        "
+    );
 }
+
+#[test]
+fn json_output() {
+    #[cfg(feature = "multithreaded")]
+    let (c, out) = do_run(args(["--format", "json", "--test-threads", "1"]), tests());
+    #[cfg(not(feature = "multithreaded"))]
+    let (c, out) = do_run(args(["--format", "json"]), tests());
+
+    assert_eq!(
+        c,
+        Conclusion {
+            num_filtered_out: 0,
+            num_passed: 4,
+            num_failed: 5,
+            num_ignored: 8,
+            num_measured: 0,
+        }
+    );
+
+    assert_log!(
+        out,
+        r#"
+        { "type": "suite", "event": "started", "test_count": 17 }
+        { "type": "test", "event": "started", "name": "cat" }
+        { "type": "test", "name": "cat", "event": "ok" }
+        { "type": "test", "event": "started", "name": "\"ups\"" }
+        { "type": "test", "name": "\"ups\"", "event": "failed", "stdout": "Error: \"failed to parse \"abc\"\"\n" }
+        { "type": "test", "event": "started", "name": "dog" }
+        { "type": "test", "name": "dog", "event": "failed", "stdout": "Error: \"was not a good boy\"\n" }
+        { "type": "test", "event": "started", "name": "fox" }
+        { "type": "test", "name": "fox", "event": "ok" }
+        { "type": "test", "event": "started", "name": "bunny" }
+        { "type": "test", "name": "bunny", "event": "failed", "stdout": "Error: \"jumped too high\"\n" }
+        { "type": "test", "event": "started", "name": "frog" }
+        { "type": "test", "name": "frog", "event": "ignored" }
+        { "type": "test", "event": "started", "name": "owl" }
+        { "type": "test", "name": "owl", "event": "ignored" }
+        { "type": "test", "event": "started", "name": "fly" }
+        { "type": "test", "name": "fly", "event": "ignored" }
+        { "type": "test", "event": "started", "name": "bear" }
+        { "type": "test", "name": "bear", "event": "ignored" }
+        { "type": "test", "event": "started", "name": "red" }
+        { "type": "test", "name": "red", "event": "ok" }
+        { "type": "test", "event": "started", "name": "blue" }
+        { "type": "test", "name": "blue", "event": "failed", "stdout": "Error: \"sky fell down\"\n" }
+        { "type": "test", "event": "started", "name": "yellow" }
+        { "type": "test", "name": "yellow", "event": "ok" }
+        { "type": "test", "event": "started", "name": "green" }
+        { "type": "test", "name": "green", "event": "failed", "stdout": "Error: \"was poisoned\"\n" }
+        { "type": "test", "event": "started", "name": "purple" }
+        { "type": "test", "name": "purple", "event": "ignored" }
+        { "type": "test", "event": "started", "name": "cyan" }
+        { "type": "test", "name": "cyan", "event": "ignored" }
+        { "type": "test", "event": "started", "name": "orange" }
+        { "type": "test", "name": "orange", "event": "ignored" }
+        { "type": "test", "event": "started", "name": "pink" }
+        { "type": "test", "name": "pink", "event": "ignored" }
+        { "type": "suite", "event": "failed", "passed": 4, "failed": 5, "ignored": 8, "measured": 0, "filtered_out": 0, "exec_time": 0.000000000 }"#
+    );
+}
diff --git a/tests/panic.rs b/tests/panic.rs
index 503985a..7cdd692 100644
--- a/tests/panic.rs
+++ b/tests/panic.rs
@@ -1,11 +1,10 @@
 use common::{args, check};
-use libtest_mimic::{Trial, Conclusion};
+use libtest_mimic::{Conclusion, Trial};
 
 #[macro_use]
 mod common;
 
-
-fn tests() -> Vec<Trial> {
+fn tests() -> Vec<Trial<'static>> {
     vec![
         Trial::test("passes", || Ok(())),
         Trial::test("panics", || panic!("uh oh")),
@@ -14,7 +13,10 @@ fn tests() -> Vec<Trial<'static>> {
 
 #[test]
 fn normal() {
-    check(args([]), tests, 2,
+    check(
+        args([]),
+        tests,
+        2,
         Conclusion {
             num_filtered_out: 0,
             num_passed: 1,
@@ -34,6 +36,6 @@ fn normal() {
 
         failures:
             panics
-        "
+        ",
     );
 }