Run benchmarks once, as tests, by default.

E.g. if `foo.rs` looks like

    #![feature(test)]
    extern crate test;

    #[bench]
    fn bar(b: &mut test::Bencher) {
        b.iter(|| {
            1
        })
    }

    #[test]
    fn baz() {}

    #[bench]
    fn qux(b: &mut test::Bencher) {
        b.iter(|| {
            panic!()
        })
    }

Then

    $ rustc --test foo.rs
    $ ./foo

    running 3 tests
    test baz ... ok
    test qux ... FAILED
    test bar ... ok

    failures:

    ---- qux stdout ----
    	thread 'qux' panicked at 'explicit panic', bench.rs:17

    failures:
        qux

    test result: FAILED. 2 passed; 1 failed; 0 ignored; 0 measured

    $ ./foo --bench ba

    running 2 tests
    test baz ... ignored
    test bar ... bench:        97 ns/iter (+/- 74)

    test result: ok. 0 passed; 0 failed; 1 ignored; 1 measured

In particular, the two benchmarks are being run as tests in the default
mode.
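
The diff below implements this by wrapping each `#[bench]` function, when `--bench` is not passed, so that its body executes exactly once and is treated as an ordinary pass/fail test. Here is a minimal, self-contained sketch of that idea; `Bencher`, `bench`, `run_once`, and `bar` below are local stand-ins for illustration, not the real `test` crate items:

    use std::time::Instant;

    /// Hypothetical stand-in for `test::Bencher`.
    struct Bencher {
        iterations: u64,
    }

    impl Bencher {
        /// Run the benchmarked closure `iterations` times.
        fn iter<T, F: FnMut() -> T>(&mut self, mut inner: F) {
            for _ in 0..self.iterations {
                inner();
            }
        }
    }

    /// Bench mode (`--bench`): time many iterations of the body.
    fn bench<F: FnOnce(&mut Bencher)>(f: F) {
        let mut b = Bencher { iterations: 1_000 };
        let start = Instant::now();
        f(&mut b);
        println!("{} iterations took {:?}", b.iterations, start.elapsed());
    }

    /// Default mode: run the body exactly once, only checking that it doesn't panic.
    fn run_once<F: FnOnce(&mut Bencher)>(f: F) {
        let mut b = Bencher { iterations: 1 };
        f(&mut b);
    }

    fn bar(b: &mut Bencher) {
        b.iter(|| 1)
    }

    fn main() {
        run_once(bar); // roughly what `./foo` now does with a `#[bench]` fn
        bench(bar);    // roughly what `./foo --bench` does
    }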

This helps the main distribution, where benchmarks are only run with
`PLEASE_BENCH=1`, which is rarely set (and never on the test bots), and
it helps code-coverage tools: the benchmarks are actually executed, so
they no longer count as dead code.

Fixes #15842.
Author: Huon Wilson
Date:   2015-05-02 13:38:51 +10:00
Commit: d73545ceca (parent b594036069)

2 changed files with 46 additions and 15 deletions

    @@ -269,7 +269,7 @@ pub fn test_opts(config: &Config) -> test::TestOpts {
             run_ignored: config.run_ignored,
             logfile: config.logfile.clone(),
             run_tests: true,
    -        run_benchmarks: true,
    +        bench_benchmarks: true,
             nocapture: env::var("RUST_TEST_NOCAPTURE").is_ok(),
             color: test::AutoColor,
         }

    @@ -139,7 +139,7 @@ impl TestDesc {
     }

     /// Represents a benchmark function.
    -pub trait TDynBenchFn {
    +pub trait TDynBenchFn: Send {
         fn run(&self, harness: &mut Bencher);
     }
    @@ -285,7 +285,7 @@ pub struct TestOpts {
         pub filter: Option<String>,
         pub run_ignored: bool,
         pub run_tests: bool,
    -    pub run_benchmarks: bool,
    +    pub bench_benchmarks: bool,
         pub logfile: Option<PathBuf>,
         pub nocapture: bool,
         pub color: ColorConfig,
    @@ -298,7 +298,7 @@ impl TestOpts {
             filter: None,
             run_ignored: false,
             run_tests: false,
    -        run_benchmarks: false,
    +        bench_benchmarks: false,
             logfile: None,
             nocapture: false,
             color: AutoColor,
    @@ -377,8 +377,8 @@ pub fn parse_opts(args: &[String]) -> Option<OptRes> {
         let logfile = matches.opt_str("logfile");
         let logfile = logfile.map(|s| PathBuf::from(&s));

    -    let run_benchmarks = matches.opt_present("bench");
    -    let run_tests = ! run_benchmarks ||
    +    let bench_benchmarks = matches.opt_present("bench");
    +    let run_tests = ! bench_benchmarks ||
             matches.opt_present("test");

         let mut nocapture = matches.opt_present("nocapture");
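
As the hunk above shows, `--bench` alone suppresses ordinary tests and measures the benchmarks, `--bench --test` does both, and with neither flag the tests run and the benchmarks are folded into them. A small sketch of that decision with plain booleans (not the real getopts parsing):

    // Compute (run_tests, bench_benchmarks) from the two flags, mirroring
    // the `parse_opts` logic in the hunk above.
    fn modes(bench_flag: bool, test_flag: bool) -> (bool, bool) {
        let bench_benchmarks = bench_flag;
        let run_tests = !bench_benchmarks || test_flag;
        (run_tests, bench_benchmarks)
    }

    fn main() {
        assert_eq!(modes(false, false), (true, false)); // `./foo`: benches run once, as tests
        assert_eq!(modes(true, false), (false, true));  // `./foo --bench`: benches measured
        assert_eq!(modes(true, true), (true, true));    // `./foo --bench --test`: both
    }

The next hunk carries the renamed variable into the `TestOpts` value that `parse_opts` returns.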
    @@ -400,7 +400,7 @@ pub fn parse_opts(args: &[String]) -> Option<OptRes> {
             filter: filter,
             run_ignored: run_ignored,
             run_tests: run_tests,
    -        run_benchmarks: run_benchmarks,
    +        bench_benchmarks: bench_benchmarks,
             logfile: logfile,
             nocapture: nocapture,
             color: color,
    @@ -778,7 +778,11 @@ fn run_tests<F>(opts: &TestOpts,
                     mut callback: F) -> io::Result<()> where
         F: FnMut(TestEvent) -> io::Result<()>,
     {
    -    let filtered_tests = filter_tests(opts, tests);
    +    let mut filtered_tests = filter_tests(opts, tests);
    +    if !opts.bench_benchmarks {
    +        filtered_tests = convert_benchmarks_to_tests(filtered_tests);
    +    }
    +
         let filtered_descs = filtered_tests.iter()
                                            .map(|t| t.desc.clone())
                                            .collect();
    @@ -824,14 +828,16 @@ fn run_tests<F>(opts: &TestOpts,
                 pending -= 1;
             }
         }

    +    if opts.bench_benchmarks {
         // All benchmarks run at the end, in serial.
         // (this includes metric fns)
         for b in filtered_benchs_and_metrics {
             try!(callback(TeWait(b.desc.clone(), b.testfn.padding())));
    -            run_test(opts, !opts.run_benchmarks, b, tx.clone());
    +            run_test(opts, false, b, tx.clone());
             let (test, result, stdout) = rx.recv().unwrap();
             try!(callback(TeResult(test, result, stdout)));
         }
    +    }

         Ok(())
     }
    @@ -893,6 +899,22 @@ pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
         filtered
     }

    +pub fn convert_benchmarks_to_tests(tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
    +    // convert benchmarks to tests, if we're not benchmarking them
    +    tests.into_iter().map(|x| {
    +        let testfn = match x.testfn {
    +            DynBenchFn(bench) => {
    +                DynTestFn(Box::new(move || bench::run_once(|b| bench.run(b))))
    +            }
    +            StaticBenchFn(benchfn) => {
    +                DynTestFn(Box::new(move || bench::run_once(|b| benchfn(b))))
    +            }
    +            f => f
    +        };
    +        TestDescAndFn { desc: x.desc, testfn: testfn }
    +    }).collect()
    +}
    +
     pub fn run_test(opts: &TestOpts,
                     force_ignore: bool,
                     test: TestDescAndFn,
    @@ -1159,6 +1181,15 @@ pub mod bench {
                 mb_s: mb_s as usize
             }
         }
    +
    +    pub fn run_once<F>(f: F) where F: FnOnce(&mut Bencher) {
    +        let mut bs = Bencher {
    +            iterations: 0,
    +            dur: Duration::nanoseconds(0),
    +            bytes: 0
    +        };
    +        bs.bench_n(1, f);
    +    }
     }

     #[cfg(test)]