Add Json Format, Intellij Options and make multi-threaded optional #34

Closed
wants to merge 9 commits
6 changes: 5 additions & 1 deletion Cargo.toml
@@ -18,9 +18,13 @@ readme = "README.md"

exclude = [".github"]

[features]
default = ["multithreaded"]
multithreaded = ["dep:threadpool"]

[dependencies]
clap = { version = "4.0.8", features = ["derive"] }
threadpool = "1.8.1"
threadpool = { version = "1.8.1", optional = true}
Owner: Missing space before }

termcolor = "1.0.5"

[dev-dependencies]
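The new multithreaded feature gates the threadpool dependency. As the src/lib.rs changes below show, building without it also drops the Send bound on test runners. A rough sketch of what that allows in a downstream test target (hypothetical code, not part of this PR, assuming the crate is pulled in with default-features = false):

    // Hypothetical downstream test binary, assuming libtest-mimic is built
    // without the new `multithreaded` feature (default-features = false).
    // Without that feature the runner closure no longer has to be `Send`,
    // so non-Send state such as `Rc` can be captured.
    use std::rc::Rc;
    use libtest_mimic::{Arguments, Trial, run};

    fn main() {
        let shared = Rc::new(41);
        let trial = Trial::test("rc_capture", move || {
            if *shared + 1 == 42 { Ok(()) } else { Err("unexpected value".into()) }
        });
        run(&Arguments::from_args(), vec![trial]).exit();
    }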
30 changes: 16 additions & 14 deletions src/args.rs
@@ -12,9 +12,7 @@ use clap::{Parser, ValueEnum};
#[derive(Parser, Debug, Clone, Default)]
#[command(
help_template = "USAGE: [OPTIONS] [FILTER]\n\n{all-args}\n\n\n{after-help}",
disable_version_flag = true,
after_help = "By default, all tests are run in parallel. This can be altered with the \n\
--test-threads flag when running tests (set it to 1).",
disable_version_flag = true
)]
pub struct Arguments {
// ============== FLAGS ===================================================
@@ -30,7 +28,7 @@ pub struct Arguments {
#[arg(
long = "test",
conflicts_with = "bench",
help = "Run tests and not benchmarks",
help = "Run tests and not benchmarks"
)]
pub test: bool,

@@ -43,7 +41,10 @@
pub list: bool,

/// No-op, ignored (libtest-mimic always runs in no-capture mode)
#[arg(long = "nocapture", help = "No-op (libtest-mimic always runs in no-capture mode)")]
#[arg(
long = "nocapture",
help = "No-op (libtest-mimic always runs in no-capture mode)"
)]
pub nocapture: bool,

/// No-op, ignored. libtest-mimic does not currently capture stdout.
@@ -57,7 +58,7 @@ pub struct Arguments {
/// If set, filters are matched exactly rather than by substring.
#[arg(
long = "exact",
help = "Exactly match filters rather than by substring",
help = "Exactly match filters rather than by substring"
)]
pub exact: bool,

@@ -70,16 +71,17 @@
short = 'q',
long = "quiet",
conflicts_with = "format",
help = "Display one character per test instead of one line. Alias to --format=terse",
help = "Display one character per test instead of one line. Alias to --format=terse"
)]
pub quiet: bool,

// ============== OPTIONS =================================================
#[cfg(feature = "multithreaded")]
/// Number of threads used for parallel testing.
#[arg(
long = "test-threads",
help = "Number of threads used for running tests in parallel. If set to 1, \n\
all tests are run in the main thread.",
help = "Number of threads used for running tests in parallel. By default, all tests are run in parallel. If set to 1, \n\
all tests are run in the main thread."
)]
pub test_threads: Option<usize>,

@@ -88,7 +90,7 @@
#[arg(
long = "logfile",
value_name = "PATH",
help = "Write logs to the specified file instead of stdout",
help = "Write logs to the specified file instead of stdout"
)]
pub logfile: Option<String>,

@@ -97,7 +99,7 @@
#[arg(
long = "skip",
value_name = "FILTER",
help = "Skip tests whose names contain FILTER (this flag can be used multiple times)",
help = "Skip tests whose names contain FILTER (this flag can be used multiple times)"
)]
pub skip: Vec<String>,

@@ -109,7 +111,7 @@
help = "Configure coloring of output: \n\
- auto = colorize if stdout is a tty and tests are run serially (default)\n\
- always = always colorize output\n\
- never = never colorize output\n",
- never = never colorize output\n"
)]
pub color: Option<ColorSetting>,

@@ -121,7 +123,7 @@
help = "Configure formatting of output: \n\
- pretty = Print verbose output\n\
- terse = Display one character per test\n\
- json = Print json events\n",
- json = Print json events\n"
)]
pub format: Option<FormatSetting>,

@@ -130,7 +132,7 @@
#[arg(
value_name = "FILTER",
help = "The FILTER string is tested against the name of all tests, and only those tests \
whose names contain the filter are run.",
whose names contain the filter are run."
)]
pub filter: Option<String>,
}
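Since Arguments derives Default and Parser and all of its fields are public, a harness can also fill these options in code instead of parsing the real command line. A small sketch (hypothetical helper, not part of this PR; the FormatSetting::Json variant name is assumed from the json output format this PR adds):

    // Hypothetical helper: build an `Arguments` value directly instead of
    // calling `Arguments::from_args()`, e.g. to force JSON event output.
    use libtest_mimic::{Arguments, FormatSetting};

    fn json_args(filter: Option<String>) -> Arguments {
        let mut args = Arguments::default();
        args.format = Some(FormatSetting::Json); // variant assumed from this PR
        args.filter = filter;                    // substring filter, as above
        args.skip = vec!["slow_".to_string()];   // skip tests containing "slow_"
        args
    }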
159 changes: 117 additions & 42 deletions src/lib.rs
@@ -71,18 +71,15 @@

#![forbid(unsafe_code)]

use std::{process, sync::mpsc, fmt, time::Instant};
use std::{fmt, process, time::Instant};

mod args;
mod printer;

use printer::Printer;
use threadpool::ThreadPool;

pub use crate::args::{Arguments, ColorSetting, FormatSetting};



/// A single test or benchmark.
///
/// The original `libtest` often calls benchmarks "tests", which is a bit
@@ -95,7 +92,10 @@ pub use crate::args::{Arguments, ColorSetting, FormatSetting};
/// `#[should_panic]` you need to catch the panic yourself. You likely want to
/// compare the panic payload to an expected value anyway.
pub struct Trial {
#[cfg(feature = "multithreaded")]
runner: Box<dyn FnOnce(bool) -> Outcome + Send>,
#[cfg(not(feature = "multithreaded"))]
runner: Box<dyn FnOnce(bool) -> Outcome>,
info: TestInfo,
}

@@ -104,6 +104,7 @@ impl Trial {
///
/// The runner returning `Ok(())` is interpreted as the test passing. If the
/// runner returns `Err(_)`, the test is considered failed.
#[cfg(feature = "multithreaded")]
pub fn test<R>(name: impl Into<String>, runner: R) -> Self
where
R: FnOnce() -> Result<(), Failed> + Send + 'static,
@@ -122,6 +123,29 @@
}
}

/// Creates a (non-benchmark) test with the given name and runner.
///
/// The runner returning `Ok(())` is interpreted as the test passing. If the
/// runner returns `Err(_)`, the test is considered failed.
#[cfg(not(feature = "multithreaded"))]
pub fn test<R>(name: impl Into<String>, runner: R) -> Self
where
R: FnOnce() -> Result<(), Failed> + 'static,
{
Self {
runner: Box::new(move |_test_mode| match runner() {
Ok(()) => Outcome::Passed,
Err(failed) => Outcome::Failed(failed),
}),
info: TestInfo {
name: name.into(),
kind: String::new(),
is_ignored: false,
is_bench: false,
},
}
}

/// Creates a benchmark with the given name and runner.
///
/// If the runner's parameter `test_mode` is `true`, the runner function
@@ -134,6 +158,7 @@
/// `test_mode` is `true` if neither `--bench` nor `--test` are set, and
/// `false` when `--bench` is set. If `--test` is set, benchmarks are not
/// run at all, and both flags cannot be set at the same time.
#[cfg(feature = "multithreaded")]
pub fn bench<R>(name: impl Into<String>, runner: R) -> Self
where
R: FnOnce(bool) -> Result<Option<Measurement>, Failed> + Send + 'static,
@@ -143,8 +168,44 @@
Err(failed) => Outcome::Failed(failed),
Ok(_) if test_mode => Outcome::Passed,
Ok(Some(measurement)) => Outcome::Measured(measurement),
Ok(None)
=> Outcome::Failed("bench runner returned `Ok(None)` in bench mode".into()),
Ok(None) => {
Outcome::Failed("bench runner returned `Ok(None)` in bench mode".into())
}
}),
info: TestInfo {
name: name.into(),
kind: String::new(),
is_ignored: false,
is_bench: true,
},
}
}

/// Creates a benchmark with the given name and runner.
///
/// If the runner's parameter `test_mode` is `true`, the runner function
/// should run all code just once, without measuring, just to make sure it
/// does not panic. If the parameter is `false`, it should perform the
/// actual benchmark. If `test_mode` is `true` you may return `Ok(None)`,
/// but if it's `false`, you have to return a `Measurement`, or else the
/// benchmark is considered a failure.
///
/// `test_mode` is `true` if neither `--bench` nor `--test` are set, and
/// `false` when `--bench` is set. If `--test` is set, benchmarks are not
/// run at all, and both flags cannot be set at the same time.
#[cfg(not(feature = "multithreaded"))]
pub fn bench<R>(name: impl Into<String>, runner: R) -> Self
where
R: FnOnce(bool) -> Result<Option<Measurement>, Failed> + 'static,
{
Self {
runner: Box::new(move |test_mode| match runner(test_mode) {
Err(failed) => Outcome::Failed(failed),
Ok(_) if test_mode => Outcome::Passed,
Ok(Some(measurement)) => Outcome::Measured(measurement),
Ok(None) => {
Outcome::Failed("bench runner returned `Ok(None)` in bench mode".into())
}
}),
info: TestInfo {
name: name.into(),
@@ -274,13 +335,11 @@ impl Failed {
impl<M: std::fmt::Display> From<M> for Failed {
fn from(msg: M) -> Self {
Self {
msg: Some(msg.to_string())
msg: Some(msg.to_string()),
}
}
}



/// The outcome of performing a test/benchmark.
#[derive(Debug, Clone)]
enum Outcome {
@@ -403,7 +462,6 @@ impl Arguments {
pub fn run(args: &Arguments, mut tests: Vec<Trial>) -> Conclusion {
let start_instant = Instant::now();
let mut conclusion = Conclusion::empty();

// Apply filtering
if args.filter.is_some() || !args.skip.is_empty() || args.ignored {
let len_before = tests.len() as u64;
@@ -434,15 +492,15 @@ pub fn run(args: &Arguments, mut tests: Vec<Trial>) -> Conclusion {
Outcome::Failed(failed) => {
failed_tests.push((test, failed.msg));
conclusion.num_failed += 1;
},
}
Outcome::Ignored => conclusion.num_ignored += 1,
Outcome::Measured(_) => conclusion.num_measured += 1,
}
};

// Execute all tests.
let test_mode = !args.bench;
if args.test_threads == Some(1) {
let mut sequentially = |tests: Vec<Trial>| {
// Run test sequentially in main thread
for test in tests {
// Print `test foo ...`, run the test, then print the outcome in
@@ -455,38 +513,50 @@ pub fn run(args: &Arguments, mut tests: Vec<Trial>) -> Conclusion {
};
handle_outcome(outcome, test.info, &mut printer);
}
} else {
// Run test in thread pool.
let pool = match args.test_threads {
Some(num_threads) => ThreadPool::new(num_threads),
None => ThreadPool::default()
};
let (sender, receiver) = mpsc::channel();
};

let num_tests = tests.len();
for test in tests {
if args.is_ignored(&test) {
sender.send((Outcome::Ignored, test.info)).unwrap();
} else {
let sender = sender.clone();
pool.execute(move || {
// It's fine to ignore the result of sending. If the
// receiver has hung up, everything will wind down soon
// anyway.
let outcome = run_single(test.runner, test_mode);
let _ = sender.send((outcome, test.info));
});
#[cfg(not(feature = "multithreaded"))]
sequentially(tests);

#[cfg(feature = "multithreaded")]
{
use std::sync::mpsc;
use threadpool::ThreadPool;
if args.test_threads == Some(1) {
sequentially(tests);
} else {
// Run test in thread pool.
let pool = match args.test_threads {
Some(num_threads) => ThreadPool::new(num_threads),
None => ThreadPool::default(),
};
let (sender, receiver) = mpsc::channel();

let num_tests = tests.len();
for test in tests {
if args.is_ignored(&test) {
sender.send((Outcome::Ignored, test.info)).unwrap();
} else {
let sender = sender.clone();
pool.execute(move || {
// It's fine to ignore the result of sending. If the
// receiver has hung up, everything will wind down soon
// anyway.
let outcome = run_single(test.runner, test_mode);
let _ = sender.send((outcome, test.info));
});
}
}
}

for (outcome, test_info) in receiver.iter().take(num_tests) {
// In multithreaded mode, we do only print the start of the line
// after the test ran, as otherwise it would lead to terribly
// interleaved output.
printer.print_test(&test_info);
handle_outcome(outcome, test_info, &mut printer);
for (outcome, test_info) in receiver.iter().take(num_tests) {
// In multithreaded mode, we do only print the start of the line
// after the test ran, as otherwise it would lead to terribly
// interleaved output.
printer.print_test(&test_info);
handle_outcome(outcome, test_info, &mut printer);
}
}
}
};

// Print failures if there were any, and the final summary.
if !failed_tests.is_empty() {
@@ -499,14 +569,19 @@
}

/// Runs the given runner, catching any panics and treating them as a failed test.
fn run_single(runner: Box<dyn FnOnce(bool) -> Outcome + Send>, test_mode: bool) -> Outcome {
fn run_single(
#[cfg(feature = "multithreaded")] runner: Box<dyn FnOnce(bool) -> Outcome + Send>,
#[cfg(not(feature = "multithreaded"))] runner: Box<dyn FnOnce(bool) -> Outcome>,
Owner: Can you just define a type alias somewhere (with cfg attributes) so that the cfg attributes only have to be in one place?
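For illustration, the suggested alias could look roughly like this (a sketch of the reviewer's idea, not code from this PR; the name Runner is made up):

    // Declare the cfg-gated boxed-runner type once, then use `Runner` for the
    // `runner` field of `Trial`, both sets of constructors, and `run_single`,
    // so the cfg attributes live in a single place.
    #[cfg(feature = "multithreaded")]
    type Runner = Box<dyn FnOnce(bool) -> Outcome + Send>;
    #[cfg(not(feature = "multithreaded"))]
    type Runner = Box<dyn FnOnce(bool) -> Outcome>;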

test_mode: bool,
) -> Outcome {
use std::panic::{catch_unwind, AssertUnwindSafe};

catch_unwind(AssertUnwindSafe(move || runner(test_mode))).unwrap_or_else(|e| {
// The `panic` information is just an `Any` object representing the
// value the panic was invoked with. For most panics (which use
// `panic!` like `println!`), this is either `&str` or `String`.
let payload = e.downcast_ref::<String>()
let payload = e
.downcast_ref::<String>()
.map(|s| s.as_str())
.or(e.downcast_ref::<&str>().map(|s| *s));

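For context, a complete harness exercising the API touched by this PR might look like the sketch below. It is based on the crate's public entry points (Arguments::from_args, Trial::test, Trial::bench, run, Conclusion::exit); the Measurement field names are taken from the crate's documentation and the measured numbers are invented.

    // Usage sketch: a test binary with one test and one benchmark. It works
    // with or without the `multithreaded` feature; only the `Send` bound on
    // the runner closures differs.
    use libtest_mimic::{Arguments, Measurement, Trial, run};

    fn main() {
        let args = Arguments::from_args();
        let trials = vec![
            Trial::test("addition_works", || {
                if 1 + 1 == 2 { Ok(()) } else { Err("math is broken".into()) }
            }),
            Trial::bench("sum_to_1000", |test_mode| {
                let total: u64 = (0..=1000u64).sum();
                if test_mode {
                    // Only check that the code runs; no measurement in test mode.
                    if total == 500_500 { Ok(None) } else { Err("bad sum".into()) }
                } else {
                    // Invented numbers; a real benchmark would time the code.
                    Ok(Some(Measurement { avg: 4_200, variance: 15 }))
                }
            }),
        ];
        run(&args, trials).exit();
    }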