// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Support code for rustc's built-in unit-test and micro-benchmarking
//! framework.
//!
//! Almost all user code will only be interested in `Bencher` and
//! `black_box`. All other interactions (such as writing tests and
//! benchmarks themselves) should be done via the `#[test]` and
//! `#[bench]` attributes.
//!
//! See the [Testing Guide](../guide-testing.html) for more details.
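//!
//! For example, a benchmark might be written as follows (a minimal sketch;
//! `add_two` stands in for whatever code is being measured):
//!
//! ```ignore
//! extern crate test;
//!
//! use test::Bencher;
//!
//! #[bench]
//! fn bench_add_two(b: &mut Bencher) {
//!     b.iter(|| add_two(2));
//! }
//! ```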
// Currently, not much of this is meant for users. It is intended to
// support the simplest interface possible for representing and
// running tests while providing a base that other test frameworks may
// build off of.
#![crate_id = "test#0.11.0-pre"]
#![comment = "Rust internal test library only used by rustc"]
#![license = "MIT/ASL2"]
#![crate_type = "rlib"]
#![crate_type = "dylib"]
#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "http://www.rust-lang.org/favicon.ico",
html_root_url = "http://static.rust-lang.org/doc/master")]
#![feature(asm, macro_rules)]
#![deny(deprecated_owned_vector)]
extern crate collections;
extern crate getopts;
extern crate serialize;
extern crate term;
extern crate time;
use collections::TreeMap;
use stats::Stats;
use time::precise_time_ns;
use getopts::{OptGroup, optflag, optopt};
use serialize::{json, Decodable};
use serialize::json::{Json, ToJson};
use term::Terminal;
use term::color::{Color, RED, YELLOW, GREEN, CYAN};
use std::cmp;
use std::f64;
use std::fmt;
use std::from_str::FromStr;
use std::io::stdio::StdWriter;
use std::io::{File, ChanReader, ChanWriter};
use std::io;
use std::os;
use std::str;
use std::strbuf::StrBuf;
use std::task::TaskBuilder;
// to be used by rustc to compile tests in libtest
pub mod test {
pub use {Bencher, TestName, TestResult, TestDesc,
TestDescAndFn, TestOpts, TrFailed, TrIgnored, TrOk,
Metric, MetricMap, MetricAdded, MetricRemoved,
MetricChange, Improvement, Regression, LikelyNoise,
StaticTestFn, StaticTestName, DynTestName, DynTestFn,
run_test, test_main, test_main_static, filter_tests,
parse_opts, StaticBenchFn, test_main_static_x};
}
pub mod stats;
// The name of a test. By convention this follows the rules for rust
// paths; i.e. it should be a series of identifiers separated by double
// colons. This way if some test runner wants to arrange the tests
// hierarchically it may.
#[deriving(Clone)]
pub enum TestName {
StaticTestName(&'static str),
DynTestName(StrBuf)
}
impl fmt::Show for TestName {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
StaticTestName(s) => f.buf.write_str(s),
DynTestName(ref s) => f.buf.write_str(s.as_slice()),
}
}
}
#[deriving(Clone)]
enum NamePadding { PadNone, PadOnLeft, PadOnRight }
impl TestDesc {
fn padded_name(&self, column_count: uint, align: NamePadding) -> StrBuf {
use std::num::Saturating;
let mut name = StrBuf::from_str(self.name.to_str());
let fill = column_count.saturating_sub(name.len());
let mut pad = StrBuf::from_owned_str(" ".repeat(fill));
match align {
PadNone => name,
PadOnLeft => {
pad.push_str(name.as_slice());
pad
}
PadOnRight => {
name.push_str(pad.as_slice());
name
}
}
}
}
/// Represents a benchmark function.
pub trait TDynBenchFn {
fn run(&self, harness: &mut Bencher);
}
// A function that runs a test. If the function returns successfully,
// the test succeeds; if the function fails then the test fails. We
// may need to come up with a more clever definition of test in order
// to support isolation of tests into tasks.
pub enum TestFn {
StaticTestFn(fn()),
StaticBenchFn(fn(&mut Bencher)),
StaticMetricFn(proc(&mut MetricMap)),
DynTestFn(proc():Send),
DynMetricFn(proc(&mut MetricMap)),
DynBenchFn(Box<TDynBenchFn>)
}
impl TestFn {
fn padding(&self) -> NamePadding {
match self {
&StaticTestFn(..) => PadNone,
&StaticBenchFn(..) => PadOnRight,
&StaticMetricFn(..) => PadOnRight,
&DynTestFn(..) => PadNone,
&DynMetricFn(..) => PadOnRight,
&DynBenchFn(..) => PadOnRight,
}
}
}
/// Manager of the benchmarking runs.
///
/// This is fed into functions marked with `#[bench]` to allow for
/// set-up & tear-down before running a piece of code repeatedly via a
/// call to `iter`.
pub struct Bencher {
iterations: u64,
ns_start: u64,
ns_end: u64,
pub bytes: u64,
}
// The definition of a single test. A test runner will run a list of
// these.
#[deriving(Clone)]
pub struct TestDesc {
pub name: TestName,
pub ignore: bool,
pub should_fail: bool,
}
pub struct TestDescAndFn {
pub desc: TestDesc,
pub testfn: TestFn,
}
#[deriving(Clone, Encodable, Decodable, Eq, Show)]
pub struct Metric {
value: f64,
noise: f64
}
impl Metric {
pub fn new(value: f64, noise: f64) -> Metric {
Metric {value: value, noise: noise}
}
}
#[deriving(Eq)]
pub struct MetricMap(TreeMap<StrBuf,Metric>);
impl Clone for MetricMap {
fn clone(&self) -> MetricMap {
let MetricMap(ref map) = *self;
MetricMap(map.clone())
}
}
/// Analysis of a single change in metric
#[deriving(Eq, Show)]
pub enum MetricChange {
LikelyNoise,
MetricAdded,
MetricRemoved,
Improvement(f64),
Regression(f64)
}
pub type MetricDiff = TreeMap<StrBuf,MetricChange>;
// The default console test runner. It accepts the command line
// arguments and a vector of test_descs.
pub fn test_main(args: &[StrBuf], tests: Vec<TestDescAndFn> ) {
let opts =
match parse_opts(args) {
Some(Ok(o)) => o,
Some(Err(msg)) => fail!("{}", msg),
None => return
};
match run_tests_console(&opts, tests) {
Ok(true) => {}
Ok(false) => fail!("Some tests failed"),
Err(e) => fail!("io error when running tests: {}", e),
}
}
// A variant optimized for invocation with a static test vector.
// This will fail (intentionally) when fed any dynamic tests, because
// it is copying the static values out into a dynamic vector and cannot
// copy dynamic values. It is doing this because from this point on
// an owned `Vec<TestDescAndFn>` is used in order to effect ownership-transfer
// semantics into parallel test runners, which in turn requires owned
// vectors rather than borrowed `&[]` slices.
pub fn test_main_static(args: &[StrBuf], tests: &[TestDescAndFn]) {
let owned_tests = tests.iter().map(|t| {
match t.testfn {
StaticTestFn(f) =>
TestDescAndFn { testfn: StaticTestFn(f), desc: t.desc.clone() },
StaticBenchFn(f) =>
TestDescAndFn { testfn: StaticBenchFn(f), desc: t.desc.clone() },
_ => {
fail!("non-static tests passed to test::test_main_static");
}
}
}).collect();
test_main(args, owned_tests)
}
pub fn test_main_static_x(args: &[~str], tests: &[TestDescAndFn]) {
test_main_static(args.iter()
.map(|x| x.to_strbuf())
.collect::<Vec<_>>()
.as_slice(),
tests)
}
pub struct TestOpts {
pub filter: Option<StrBuf>,
pub run_ignored: bool,
pub run_tests: bool,
pub run_benchmarks: bool,
pub ratchet_metrics: Option<Path>,
pub ratchet_noise_percent: Option<f64>,
pub save_metrics: Option<Path>,
pub test_shard: Option<(uint,uint)>,
pub logfile: Option<Path>,
pub nocapture: bool,
}
impl TestOpts {
#[cfg(test)]
fn new() -> TestOpts {
TestOpts {
filter: None,
run_ignored: false,
run_tests: false,
run_benchmarks: false,
ratchet_metrics: None,
ratchet_noise_percent: None,
save_metrics: None,
test_shard: None,
logfile: None,
nocapture: false,
}
}
}
/// Result of parsing the options.
pub type OptRes = Result<TestOpts, StrBuf>;
fn optgroups() -> Vec<getopts::OptGroup> {
vec!(getopts::optflag("", "ignored", "Run ignored tests"),
getopts::optflag("", "test", "Run tests and not benchmarks"),
getopts::optflag("", "bench", "Run benchmarks instead of tests"),
getopts::optflag("h", "help", "Display this message (longer with --help)"),
getopts::optopt("", "save-metrics", "Location to save bench metrics",
"PATH"),
getopts::optopt("", "ratchet-metrics",
"Location to load and save metrics from. The metrics \
loaded cause benchmarks to fail if they run too \
slowly", "PATH"),
getopts::optopt("", "ratchet-noise-percent",
"Tests within N% of the recorded metrics will be \
considered as passing", "PERCENTAGE"),
getopts::optopt("", "logfile", "Write logs to the specified file instead \
of stdout", "PATH"),
getopts::optopt("", "test-shard", "Run shard A, of B shards, of the test suite",
"A.B"),
getopts::optflag("", "nocapture", "don't capture stdout/stderr of each \
task, allow printing directly"))
}
fn usage(binary: &str, helpstr: &str) {
let message = format!("Usage: {} [OPTIONS] [FILTER]", binary);
println!("{}", getopts::usage(message, optgroups().as_slice()));
println!("");
if helpstr == "help" {
println!("{}", "\
The FILTER is matched against the name of all tests to run, and if any tests
have a substring match, only those tests are run.
By default, all tests are run in parallel. This can be altered with the
RUST_TEST_TASKS environment variable when running tests (set it to 1).
All tests have their standard output and standard error captured by default.
This can be overridden with the --nocapture flag or the RUST_TEST_NOCAPTURE=1
environment variable. Logging is not captured by default.
Test Attributes:
#[test] - Indicates a function is a test to be run. This function
takes no arguments.
#[bench] - Indicates a function is a benchmark to be run. This
function takes one argument (test::Bencher).
#[should_fail] - This function (also labeled with #[test]) will only pass if
the code causes a failure (an assertion failure or fail!)
#[ignore] - When applied to a function which is already attributed as a
test, then the test runner will ignore these tests during
normal test runs. Running with --ignored will run these
tests. This may also be written as #[ignore(cfg(...))] to
ignore the test on certain configurations.");
}
}
// Parses command line arguments into test options
pub fn parse_opts(args: &[StrBuf]) -> Option<OptRes> {
let args_ = args.tail();
let matches =
match getopts::getopts(args_.iter()
.map(|x| x.to_owned())
.collect::<Vec<_>>()
.as_slice(),
optgroups().as_slice()) {
Ok(m) => m,
Err(f) => return Some(Err(f.to_err_msg().to_strbuf()))
};
if matches.opt_present("h") {
usage(args[0].as_slice(), "h");
return None;
}
if matches.opt_present("help") {
usage(args[0].as_slice(), "help");
return None;
}
let filter =
if matches.free.len() > 0 {
Some((*matches.free.get(0)).to_strbuf())
} else {
None
};
let run_ignored = matches.opt_present("ignored");
let logfile = matches.opt_str("logfile");
let logfile = logfile.map(|s| Path::new(s));
let run_benchmarks = matches.opt_present("bench");
let run_tests = ! run_benchmarks ||
matches.opt_present("test");
let ratchet_metrics = matches.opt_str("ratchet-metrics");
let ratchet_metrics = ratchet_metrics.map(|s| Path::new(s));
let ratchet_noise_percent = matches.opt_str("ratchet-noise-percent");
let ratchet_noise_percent = ratchet_noise_percent.map(|s| from_str::<f64>(s).unwrap());
let save_metrics = matches.opt_str("save-metrics");
let save_metrics = save_metrics.map(|s| Path::new(s));
let test_shard = matches.opt_str("test-shard");
let test_shard = opt_shard(test_shard.map(|x| x.to_strbuf()));
let mut nocapture = matches.opt_present("nocapture");
if !nocapture {
nocapture = os::getenv("RUST_TEST_NOCAPTURE").is_some();
}
let test_opts = TestOpts {
filter: filter,
run_ignored: run_ignored,
run_tests: run_tests,
run_benchmarks: run_benchmarks,
ratchet_metrics: ratchet_metrics,
ratchet_noise_percent: ratchet_noise_percent,
save_metrics: save_metrics,
test_shard: test_shard,
logfile: logfile,
nocapture: nocapture,
};
Some(Ok(test_opts))
}
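/// Parse a shard specification of the form "A.B" (shard A, of B shards), as
/// accepted by the `--test-shard` flag. For example (illustrative),
/// `opt_shard(Some("2.5".to_strbuf()))` yields `Some((2, 5))`; any other shape
/// of input yields `None`.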
pub fn opt_shard(maybestr: Option<StrBuf>) -> Option<(uint,uint)> {
match maybestr {
None => None,
Some(s) => {
let mut it = s.as_slice().split('.');
match (it.next().and_then(from_str), it.next().and_then(from_str), it.next()) {
(Some(a), Some(b), None) => Some((a, b)),
_ => None,
}
}
}
}
#[deriving(Clone, Eq)]
pub struct BenchSamples {
ns_iter_summ: stats::Summary<f64>,
mb_s: uint,
}
#[deriving(Clone, Eq)]
pub enum TestResult {
TrOk,
TrFailed,
TrIgnored,
TrMetrics(MetricMap),
TrBench(BenchSamples),
}
enum OutputLocation<T> {
Pretty(term::Terminal<T>),
Raw(T),
}
struct ConsoleTestState<T> {
log_out: Option<File>,
out: OutputLocation<T>,
use_color: bool,
total: uint,
passed: uint,
failed: uint,
ignored: uint,
measured: uint,
metrics: MetricMap,
failures: Vec<(TestDesc, Vec<u8> )> ,
max_name_len: uint, // number of columns to fill when aligning names
}
impl<T: Writer> ConsoleTestState<T> {
pub fn new(opts: &TestOpts,
_: Option<T>) -> io::IoResult<ConsoleTestState<StdWriter>> {
let log_out = match opts.logfile {
Some(ref path) => Some(try!(File::create(path))),
None => None
};
let out = match term::Terminal::new(io::stdio::stdout_raw()) {
Err(_) => Raw(io::stdio::stdout_raw()),
Ok(t) => Pretty(t)
};
Ok(ConsoleTestState {
out: out,
log_out: log_out,
use_color: use_color(),
total: 0u,
passed: 0u,
failed: 0u,
ignored: 0u,
measured: 0u,
metrics: MetricMap::new(),
failures: Vec::new(),
max_name_len: 0u,
})
}
pub fn write_ok(&mut self) -> io::IoResult<()> {
self.write_pretty("ok", term::color::GREEN)
}
pub fn write_failed(&mut self) -> io::IoResult<()> {
self.write_pretty("FAILED", term::color::RED)
}
pub fn write_ignored(&mut self) -> io::IoResult<()> {
self.write_pretty("ignored", term::color::YELLOW)
}
pub fn write_metric(&mut self) -> io::IoResult<()> {
self.write_pretty("metric", term::color::CYAN)
}
pub fn write_bench(&mut self) -> io::IoResult<()> {
self.write_pretty("bench", term::color::CYAN)
}
pub fn write_added(&mut self) -> io::IoResult<()> {
self.write_pretty("added", term::color::GREEN)
}
pub fn write_improved(&mut self) -> io::IoResult<()> {
self.write_pretty("improved", term::color::GREEN)
}
pub fn write_removed(&mut self) -> io::IoResult<()> {
self.write_pretty("removed", term::color::YELLOW)
}
pub fn write_regressed(&mut self) -> io::IoResult<()> {
self.write_pretty("regressed", term::color::RED)
}
pub fn write_pretty(&mut self,
word: &str,
color: term::color::Color) -> io::IoResult<()> {
match self.out {
Pretty(ref mut term) => {
if self.use_color {
try!(term.fg(color));
}
try!(term.write(word.as_bytes()));
if self.use_color {
try!(term.reset());
}
Ok(())
}
Raw(ref mut stdout) => stdout.write(word.as_bytes())
}
}
pub fn write_plain(&mut self, s: &str) -> io::IoResult<()> {
match self.out {
Pretty(ref mut term) => term.write(s.as_bytes()),
Raw(ref mut stdout) => stdout.write(s.as_bytes())
}
}
pub fn write_run_start(&mut self, len: uint) -> io::IoResult<()> {
self.total = len;
let noun = if len != 1 { "tests" } else { "test" };
self.write_plain(format!("\nrunning {} {}\n", len, noun))
}
pub fn write_test_start(&mut self, test: &TestDesc,
align: NamePadding) -> io::IoResult<()> {
let name = test.padded_name(self.max_name_len, align);
self.write_plain(format!("test {} ... ", name))
}
pub fn write_result(&mut self, result: &TestResult) -> io::IoResult<()> {
try!(match *result {
TrOk => self.write_ok(),
TrFailed => self.write_failed(),
TrIgnored => self.write_ignored(),
TrMetrics(ref mm) => {
try!(self.write_metric());
self.write_plain(format!(": {}", fmt_metrics(mm)))
}
TrBench(ref bs) => {
try!(self.write_bench());
self.write_plain(format!(": {}", fmt_bench_samples(bs)))
}
});
self.write_plain("\n")
}
pub fn write_log(&mut self, test: &TestDesc,
result: &TestResult) -> io::IoResult<()> {
match self.log_out {
None => Ok(()),
Some(ref mut o) => {
let s = format!("{} {}\n", match *result {
TrOk => "ok".to_strbuf(),
TrFailed => "failed".to_strbuf(),
TrIgnored => "ignored".to_strbuf(),
TrMetrics(ref mm) => fmt_metrics(mm),
TrBench(ref bs) => fmt_bench_samples(bs)
}, test.name.to_str());
o.write(s.as_bytes())
}
}
}
pub fn write_failures(&mut self) -> io::IoResult<()> {
try!(self.write_plain("\nfailures:\n"));
let mut failures = Vec::new();
let mut fail_out = StrBuf::new();
for &(ref f, ref stdout) in self.failures.iter() {
failures.push(f.name.to_str());
if stdout.len() > 0 {
fail_out.push_str(format!("---- {} stdout ----\n\t",
f.name.to_str()));
let output = str::from_utf8_lossy(stdout.as_slice());
fail_out.push_str(output.as_slice().replace("\n", "\n\t"));
fail_out.push_str("\n");
}
}
if fail_out.len() > 0 {
try!(self.write_plain("\n"));
try!(self.write_plain(fail_out.as_slice()));
}
try!(self.write_plain("\nfailures:\n"));
failures.as_mut_slice().sort();
for name in failures.iter() {
try!(self.write_plain(format!(" {}\n", name.to_str())));
}
Ok(())
}
pub fn write_metric_diff(&mut self, diff: &MetricDiff) -> io::IoResult<()> {
let mut noise = 0;
let mut improved = 0;
let mut regressed = 0;
let mut added = 0;
let mut removed = 0;
for (k, v) in diff.iter() {
match *v {
LikelyNoise => noise += 1,
MetricAdded => {
added += 1;
try!(self.write_added());
try!(self.write_plain(format!(": {}\n", *k)));
}
MetricRemoved => {
removed += 1;
try!(self.write_removed());
try!(self.write_plain(format!(": {}\n", *k)));
}
Improvement(pct) => {
improved += 1;
try!(self.write_plain(format!(": {}", *k)));
try!(self.write_improved());
try!(self.write_plain(format!(" by {:.2f}%\n", pct as f64)));
}
Regression(pct) => {
regressed += 1;
try!(self.write_plain(format!(": {}", *k)));
try!(self.write_regressed());
try!(self.write_plain(format!(" by {:.2f}%\n", pct as f64)));
}
}
}
try!(self.write_plain(format!("result of ratchet: {} metrics added, \
{} removed, {} improved, {} regressed, \
{} noise\n",
added, removed, improved, regressed,
noise)));
if regressed == 0 {
try!(self.write_plain("updated ratchet file\n"));
} else {
try!(self.write_plain("left ratchet file untouched\n"));
}
Ok(())
}
pub fn write_run_finish(&mut self,
ratchet_metrics: &Option<Path>,
ratchet_pct: Option<f64>) -> io::IoResult<bool> {
assert!(self.passed + self.failed + self.ignored + self.measured == self.total);
let ratchet_success = match *ratchet_metrics {
None => true,
Some(ref pth) => {
try!(self.write_plain(format!("\nusing metrics ratchet: {}\n",
pth.display())));
match ratchet_pct {
None => (),
Some(pct) =>
try!(self.write_plain(format!("with noise-tolerance \
forced to: {}%\n",
pct)))
}
let (diff, ok) = self.metrics.ratchet(pth, ratchet_pct);
try!(self.write_metric_diff(&diff));
ok
}
};
let test_success = self.failed == 0u;
if !test_success {
try!(self.write_failures());
}
let success = ratchet_success && test_success;
try!(self.write_plain("\ntest result: "));
if success {
// There's no parallelism at this point so it's safe to use color
try!(self.write_ok());
} else {
try!(self.write_failed());
}
let s = format!(". {} passed; {} failed; {} ignored; {} measured\n\n",
self.passed, self.failed, self.ignored, self.measured);
try!(self.write_plain(s));
return Ok(success);
}
}
pub fn fmt_metrics(mm: &MetricMap) -> StrBuf {
let MetricMap(ref mm) = *mm;
let v : Vec<StrBuf> = mm.iter()
.map(|(k,v)| format_strbuf!("{}: {} (+/- {})",
*k,
v.value as f64,
v.noise as f64))
.collect();
v.connect(", ").to_strbuf()
}
pub fn fmt_bench_samples(bs: &BenchSamples) -> StrBuf {
if bs.mb_s != 0 {
format_strbuf!("{:>9} ns/iter (+/- {}) = {} MB/s",
bs.ns_iter_summ.median as uint,
(bs.ns_iter_summ.max - bs.ns_iter_summ.min) as uint,
bs.mb_s)
} else {
format_strbuf!("{:>9} ns/iter (+/- {})",
bs.ns_iter_summ.median as uint,
(bs.ns_iter_summ.max - bs.ns_iter_summ.min) as uint)
}
}
// A simple console test runner
pub fn run_tests_console(opts: &TestOpts,
tests: Vec<TestDescAndFn> ) -> io::IoResult<bool> {
fn callback<T: Writer>(event: &TestEvent,
st: &mut ConsoleTestState<T>) -> io::IoResult<()> {
match (*event).clone() {
TeFiltered(ref filtered_tests) => st.write_run_start(filtered_tests.len()),
TeWait(ref test, padding) => st.write_test_start(test, padding),
TeResult(test, result, stdout) => {
try!(st.write_log(&test, &result));
try!(st.write_result(&result));
match result {
TrOk => st.passed += 1,
TrIgnored => st.ignored += 1,
TrMetrics(mm) => {
let tname = test.name.to_str();
let MetricMap(mm) = mm;
for (k,v) in mm.iter() {
st.metrics
.insert_metric(tname + "." + k.as_slice(),
v.value,
v.noise);
}
st.measured += 1
}
TrBench(bs) => {
st.metrics.insert_metric(test.name.to_str(),
bs.ns_iter_summ.median,
bs.ns_iter_summ.max - bs.ns_iter_summ.min);
st.measured += 1
}
TrFailed => {
st.failed += 1;
st.failures.push((test, stdout));
}
}
Ok(())
}
}
}
let mut st = try!(ConsoleTestState::new(opts, None::<StdWriter>));
fn len_if_padded(t: &TestDescAndFn) -> uint {
match t.testfn.padding() {
PadNone => 0u,
PadOnLeft | PadOnRight => t.desc.name.to_str().len(),
}
}
match tests.iter().max_by(|t|len_if_padded(*t)) {
Some(t) => {
let n = t.desc.name.to_str();
st.max_name_len = n.len();
},
None => {}
}
try!(run_tests(opts, tests, |x| callback(&x, &mut st)));
match opts.save_metrics {
None => (),
Some(ref pth) => {
try!(st.metrics.save(pth));
try!(st.write_plain(format!("\nmetrics saved to: {}",
pth.display())));
}
}
return st.write_run_finish(&opts.ratchet_metrics, opts.ratchet_noise_percent);
}
#[test]
fn should_sort_failures_before_printing_them() {
use std::io::MemWriter;
use std::str;
let test_a = TestDesc {
name: StaticTestName("a"),
ignore: false,
should_fail: false
};
let test_b = TestDesc {
name: StaticTestName("b"),
ignore: false,
should_fail: false
};
let mut st = ConsoleTestState {
log_out: None,
out: Raw(MemWriter::new()),
use_color: false,
total: 0u,
passed: 0u,
failed: 0u,
ignored: 0u,
measured: 0u,
max_name_len: 10u,
metrics: MetricMap::new(),
failures: vec!((test_b, Vec::new()), (test_a, Vec::new()))
};
st.write_failures().unwrap();
let s = match st.out {
Raw(ref m) => str::from_utf8_lossy(m.get_ref()),
Pretty(_) => unreachable!()
};
let apos = s.as_slice().find_str("a").unwrap();
let bpos = s.as_slice().find_str("b").unwrap();
assert!(apos < bpos);
}
fn use_color() -> bool { return get_concurrency() == 1; }
#[deriving(Clone)]
enum TestEvent {
TeFiltered(Vec<TestDesc> ),
TeWait(TestDesc, NamePadding),
TeResult(TestDesc, TestResult, Vec<u8> ),
}
pub type MonitorMsg = (TestDesc, TestResult, Vec<u8> );
fn run_tests(opts: &TestOpts,
tests: Vec<TestDescAndFn> ,
callback: |e: TestEvent| -> io::IoResult<()>) -> io::IoResult<()> {
let filtered_tests = filter_tests(opts, tests);
let filtered_descs = filtered_tests.iter()
.map(|t| t.desc.clone())
.collect();
try!(callback(TeFiltered(filtered_descs)));
let (filtered_tests, filtered_benchs_and_metrics) =
filtered_tests.partition(|e| {
match e.testfn {
StaticTestFn(_) | DynTestFn(_) => true,
_ => false
}
});
// It's tempting to just spawn all the tests at once, but since we have
// many tests that run in other processes we would be making a big mess.
let concurrency = get_concurrency();
let mut remaining = filtered_tests;
remaining.reverse();
let mut pending = 0;
let (tx, rx) = channel::<MonitorMsg>();
while pending > 0 || !remaining.is_empty() {
while pending < concurrency && !remaining.is_empty() {
let test = remaining.pop().unwrap();
if concurrency == 1 {
// We are doing one test at a time so we can print the name
// of the test before we run it. Useful for debugging tests
// that hang forever.
try!(callback(TeWait(test.desc.clone(), test.testfn.padding())));
}
run_test(opts, !opts.run_tests, test, tx.clone());
pending += 1;
}
let (desc, result, stdout) = rx.recv();
if concurrency != 1 {
try!(callback(TeWait(desc.clone(), PadNone)));
}
try!(callback(TeResult(desc, result, stdout)));
pending -= 1;
}
// All benchmarks run at the end, in serial.
// (this includes metric fns)
for b in filtered_benchs_and_metrics.move_iter() {
try!(callback(TeWait(b.desc.clone(), b.testfn.padding())));
run_test(opts, !opts.run_benchmarks, b, tx.clone());
let (test, result, stdout) = rx.recv();
try!(callback(TeResult(test, result, stdout)));
}
Ok(())
}
fn get_concurrency() -> uint {
use std::rt;
match os::getenv("RUST_TEST_TASKS") {
Some(s) => {
let opt_n: Option<uint> = FromStr::from_str(s);
match opt_n {
Some(n) if n > 0 => n,
_ => fail!("RUST_TEST_TASKS is `{}`, should be a positive integer.", s)
}
}
None => {
rt::default_sched_threads()
}
}
}
pub fn filter_tests(
opts: &TestOpts,
tests: Vec<TestDescAndFn> ) -> Vec<TestDescAndFn> {
let mut filtered = tests;
// Remove tests that don't match the test filter
filtered = if opts.filter.is_none() {
filtered
} else {
let filter_str = match opts.filter {
Some(ref f) => (*f).clone(),
None => "".to_strbuf()
};
fn filter_fn(test: TestDescAndFn, filter_str: &str) ->
Option<TestDescAndFn> {
if test.desc.name.to_str().contains(filter_str) {
return Some(test);
} else {
return None;
}
}
filtered.move_iter()
.filter_map(|x| filter_fn(x, filter_str.as_slice()))
.collect()
};
// Maybe pull out the ignored tests and unignore them
filtered = if !opts.run_ignored {
filtered
} else {
fn filter(test: TestDescAndFn) -> Option<TestDescAndFn> {
if test.desc.ignore {
let TestDescAndFn {desc, testfn} = test;
Some(TestDescAndFn {
desc: TestDesc {ignore: false, ..desc},
testfn: testfn
})
} else {
None
}
};
filtered.move_iter().filter_map(|x| filter(x)).collect()
};
// Sort the tests alphabetically
filtered.sort_by(|t1, t2| t1.desc.name.to_str().cmp(&t2.desc.name.to_str()));
// Shard the remaining tests, if sharding requested.
match opts.test_shard {
None => filtered,
Some((a,b)) => {
filtered.move_iter().enumerate()
.filter(|&(i,_)| i % b == a)
.map(|(_,t)| t)
.collect()
}
}
}
pub fn run_test(opts: &TestOpts,
force_ignore: bool,
test: TestDescAndFn,
monitor_ch: Sender<MonitorMsg>) {
let TestDescAndFn {desc, testfn} = test;
if force_ignore || desc.ignore {
monitor_ch.send((desc, TrIgnored, Vec::new()));
return;
}
#[allow(deprecated_owned_vector)]
fn run_test_inner(desc: TestDesc,
monitor_ch: Sender<MonitorMsg>,
nocapture: bool,
testfn: proc():Send) {
spawn(proc() {
let (tx, rx) = channel();
let mut reader = ChanReader::new(rx);
let stdout = ChanWriter::new(tx.clone());
let stderr = ChanWriter::new(tx);
let mut task = TaskBuilder::new().named(match desc.name {
DynTestName(ref name) => name.clone().to_owned(),
StaticTestName(name) => name.to_owned(),
});
if nocapture {
drop((stdout, stderr));
} else {
task.opts.stdout = Some(box stdout as Box<Writer:Send>);
task.opts.stderr = Some(box stderr as Box<Writer:Send>);
}
let result_future = task.future_result();
task.spawn(testfn);
let stdout = reader.read_to_end().unwrap().move_iter().collect();
let task_result = result_future.recv();
let test_result = calc_result(&desc, task_result.is_ok());
monitor_ch.send((desc.clone(), test_result, stdout));
})
}
match testfn {
DynBenchFn(bencher) => {
let bs = ::bench::benchmark(|harness| bencher.run(harness));
monitor_ch.send((desc, TrBench(bs), Vec::new()));
return;
}
StaticBenchFn(benchfn) => {
let bs = ::bench::benchmark(|harness| benchfn(harness));
monitor_ch.send((desc, TrBench(bs), Vec::new()));
return;
}
DynMetricFn(f) => {
let mut mm = MetricMap::new();
f(&mut mm);
monitor_ch.send((desc, TrMetrics(mm), Vec::new()));
return;
}
StaticMetricFn(f) => {
let mut mm = MetricMap::new();
f(&mut mm);
monitor_ch.send((desc, TrMetrics(mm), Vec::new()));
return;
}
DynTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture, f),
StaticTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture,
proc() f())
}
}
fn calc_result(desc: &TestDesc, task_succeeded: bool) -> TestResult {
if task_succeeded {
if desc.should_fail { TrFailed }
else { TrOk }
} else {
if desc.should_fail { TrOk }
else { TrFailed }
}
}
impl ToJson for Metric {
fn to_json(&self) -> json::Json {
let mut map = box TreeMap::new();
map.insert("value".to_owned(), json::Number(self.value));
map.insert("noise".to_owned(), json::Number(self.noise));
json::Object(map)
}
}
impl MetricMap {
pub fn new() -> MetricMap {
MetricMap(TreeMap::new())
}
/// Load MetricDiff from a file.
///
/// # Failure
///
/// This function will fail if the path does not exist or the path does not
/// contain a valid metric map.
pub fn load(p: &Path) -> MetricMap {
assert!(p.exists());
let mut f = File::open(p).unwrap();
let value = json::from_reader(&mut f as &mut io::Reader).unwrap();
let mut decoder = json::Decoder::new(value);
MetricMap(match Decodable::decode(&mut decoder) {
Ok(t) => t,
Err(e) => fail!("failure decoding JSON: {}", e)
})
}
/// Write MetricDiff to a file.
pub fn save(&self, p: &Path) -> io::IoResult<()> {
let mut file = try!(File::create(p));
let MetricMap(ref map) = *self;
// FIXME(pcwalton): Yuck.
let mut new_map = TreeMap::new();
for (ref key, ref value) in map.iter() {
new_map.insert(key.to_owned(), (*value).clone());
}
new_map.to_json().to_pretty_writer(&mut file)
}
/// Compare against another MetricMap. Optionally compare all
/// measurements in the maps using the provided `noise_pct` as a
/// percentage of each value to consider noise. If `None`, each
/// measurement's noise threshold is independently chosen as the
/// maximum of that measurement's recorded noise quantity in either
/// map.
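///
/// For example (a sketch, given two populated maps `new` and `old`):
///
/// ```ignore
/// // Use a fixed 5% noise band for every metric rather than the
/// // per-metric recorded noise.
/// let diff: MetricDiff = new.compare_to_old(&old, Some(5.0));
/// ```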
pub fn compare_to_old(&self, old: &MetricMap,
noise_pct: Option<f64>) -> MetricDiff {
let mut diff : MetricDiff = TreeMap::new();
let MetricMap(ref selfmap) = *self;
let MetricMap(ref old) = *old;
for (k, vold) in old.iter() {
let r = match selfmap.find(k) {
None => MetricRemoved,
Some(v) => {
let delta = v.value - vold.value;
let noise = match noise_pct {
None => vold.noise.abs().max(v.noise.abs()),
Some(pct) => vold.value * pct / 100.0
};
if delta.abs() <= noise {
LikelyNoise
} else {
let pct = delta.abs() / vold.value.max(f64::EPSILON) * 100.0;
if vold.noise < 0.0 {
// When 'noise' is negative, it means we want
// to see deltas that go up over time, and can
// only tolerate slight negative movement.
if delta < 0.0 {
Regression(pct)
} else {
Improvement(pct)
}
} else {
// When 'noise' is positive, it means we want
// to see deltas that go down over time, and
// can only tolerate slight positive movements.
if delta < 0.0 {
Improvement(pct)
} else {
Regression(pct)
}
}
}
}
};
diff.insert((*k).clone(), r);
}
let MetricMap(ref map) = *self;
for (k, _) in map.iter() {
if !diff.contains_key(k) {
diff.insert((*k).clone(), MetricAdded);
}
}
diff
}
/// Insert a named `value` (+/- `noise`) metric into the map. The value
/// must be non-negative. The `noise` indicates the uncertainty of the
/// metric, which doubles as the "noise range" of acceptable
/// pairwise-regressions on this named value, when comparing from one
/// metric to the next using `compare_to_old`.
///
/// If `noise` is positive, then it means this metric is of a value
/// you want to see grow smaller, so a change larger than `noise` in the
/// positive direction represents a regression.
///
/// If `noise` is negative, then it means this metric is of a value
/// you want to see grow larger, so a change larger than `noise` in the
/// negative direction represents a regression.
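///
/// For example (a sketch; the metric names are illustrative):
///
/// ```ignore
/// let mut mm = MetricMap::new();
/// mm.insert_metric("ns-per-iter", 1200.0, 20.0);    // smaller is better
/// mm.insert_metric("throughput-mb-s", 50.0, -2.0);  // larger is better
/// ```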
pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
let m = Metric {
value: value,
noise: noise
};
let MetricMap(ref mut map) = *self;
map.insert(name.to_strbuf(), m);
}
/// Attempt to "ratchet" an external metric file. This involves loading
/// metrics from a metric file (if it exists), comparing against
/// the metrics in `self` using `compare_to_old`, and rewriting the
/// file to contain the metrics in `self` if none of the
/// `MetricChange`s are `Regression`. Returns the diff as well
/// as a boolean indicating whether the ratchet succeeded.
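///
/// For example (a sketch; the path is illustrative):
///
/// ```ignore
/// let (diff, ok) = metrics.ratchet(&Path::new("ratchet.json"), None);
/// ```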
pub fn ratchet(&self, p: &Path, pct: Option<f64>) -> (MetricDiff, bool) {
let old = if p.exists() {
MetricMap::load(p)
} else {
MetricMap::new()
};
let diff : MetricDiff = self.compare_to_old(&old, pct);
let ok = diff.iter().all(|(_, v)| {
match *v {
Regression(_) => false,
_ => true
}
});
if ok {
self.save(p).unwrap();
}
return (diff, ok)
}
}
// Benchmarking
/// A function that is opaque to the optimizer, to allow benchmarks to
/// pretend to use outputs to assist in avoiding dead-code
/// elimination.
///
/// This function is a no-op, and does not even read from `dummy`.
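///
/// For example (a sketch; `fibonacci` is a placeholder for the computation
/// being benchmarked):
///
/// ```ignore
/// b.iter(|| {
///     // Keep the result "used" so LLVM cannot eliminate the call.
///     black_box(fibonacci(20));
/// });
/// ```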
pub fn black_box<T>(dummy: T) {
// we need to "use" the argument in some way LLVM can't
// introspect.
unsafe {asm!("" : : "r"(&dummy))}
}
impl Bencher {
/// Callback for benchmark functions to run in their body.
pub fn iter<T>(&mut self, inner: || -> T) {
self.ns_start = precise_time_ns();
let k = self.iterations;
for _ in range(0u64, k) {
black_box(inner());
}
self.ns_end = precise_time_ns();
}
pub fn ns_elapsed(&mut self) -> u64 {
if self.ns_start == 0 || self.ns_end == 0 {
0
} else {
self.ns_end - self.ns_start
}
}
pub fn ns_per_iter(&mut self) -> u64 {
if self.iterations == 0 {
0
} else {
self.ns_elapsed() / cmp::max(self.iterations, 1)
}
}
pub fn bench_n(&mut self, n: u64, f: |&mut Bencher|) {
self.iterations = n;
f(self);
}
// This is a more statistics-driven benchmark algorithm
pub fn auto_bench(&mut self, f: |&mut Bencher|) -> stats::Summary<f64> {
// Initial bench run to get ballpark figure.
let mut n = 1_u64;
self.bench_n(n, |x| f(x));
// Try to estimate iter count for 1ms falling back to 1m
// iterations if first run took < 1ns.
if self.ns_per_iter() == 0 {
n = 1_000_000;
} else {
n = 1_000_000 / cmp::max(self.ns_per_iter(), 1);
}
// if the first run took more than 1ms we don't want to just
// be left doing 0 iterations on every loop. The unfortunate
// side effect of not being able to do as many runs is
// automatically handled by the statistical analysis below
// (i.e. larger error bars).
if n == 0 { n = 1; }
let mut total_run = 0;
let samples : &mut [f64] = [0.0_f64, ..50];
loop {
let loop_start = precise_time_ns();
for p in samples.mut_iter() {
self.bench_n(n, |x| f(x));
*p = self.ns_per_iter() as f64;
};
stats::winsorize(samples, 5.0);
let summ = stats::Summary::new(samples);
for p in samples.mut_iter() {
self.bench_n(5 * n, |x| f(x));
*p = self.ns_per_iter() as f64;
};
stats::winsorize(samples, 5.0);
let summ5 = stats::Summary::new(samples);
let now = precise_time_ns();
let loop_run = now - loop_start;
// If we've run for 100ms and seem to have converged to a
// stable median.
if loop_run > 100_000_000 &&
summ.median_abs_dev_pct < 1.0 &&
summ.median - summ5.median < summ5.median_abs_dev {
return summ5;
}
total_run += loop_run;
// Longest we ever run for is 3s.
if total_run > 3_000_000_000 {
return summ5;
}
n *= 2;
}
}
}
pub mod bench {
use std::cmp;
use super::{Bencher, BenchSamples};
pub fn benchmark(f: |&mut Bencher|) -> BenchSamples {
let mut bs = Bencher {
iterations: 0,
ns_start: 0,
ns_end: 0,
bytes: 0
};
let ns_iter_summ = bs.auto_bench(f);
let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
let iter_s = 1_000_000_000 / ns_iter;
let mb_s = (bs.bytes * iter_s) / 1_000_000;
BenchSamples {
ns_iter_summ: ns_iter_summ,
mb_s: mb_s as uint
}
}
}
#[cfg(test)]
mod tests {
use test::{TrFailed, TrIgnored, TrOk, filter_tests, parse_opts,
TestDesc, TestDescAndFn, TestOpts, run_test,
Metric, MetricMap, MetricAdded, MetricRemoved,
Improvement, Regression, LikelyNoise,
StaticTestName, DynTestName, DynTestFn};
use std::io::TempDir;
#[test]
pub fn do_not_run_ignored_tests() {
fn f() { fail!(); }
let desc = TestDescAndFn {
desc: TestDesc {
name: StaticTestName("whatever"),
ignore: true,
should_fail: false
},
testfn: DynTestFn(proc() f()),
};
let (tx, rx) = channel();
run_test(&TestOpts::new(), false, desc, tx);
let (_, res, _) = rx.recv();
assert!(res != TrOk);
}
#[test]
pub fn ignored_tests_result_in_ignored() {
fn f() { }
let desc = TestDescAndFn {
desc: TestDesc {
name: StaticTestName("whatever"),
ignore: true,
should_fail: false
},
testfn: DynTestFn(proc() f()),
};
let (tx, rx) = channel();
run_test(&TestOpts::new(), false, desc, tx);
let (_, res, _) = rx.recv();
assert!(res == TrIgnored);
}
#[test]
fn test_should_fail() {
fn f() { fail!(); }
let desc = TestDescAndFn {
desc: TestDesc {
name: StaticTestName("whatever"),
ignore: false,
should_fail: true
},
testfn: DynTestFn(proc() f()),
};
let (tx, rx) = channel();
run_test(&TestOpts::new(), false, desc, tx);
let (_, res, _) = rx.recv();
assert!(res == TrOk);
}
#[test]
fn test_should_fail_but_succeeds() {
fn f() { }
let desc = TestDescAndFn {
desc: TestDesc {
name: StaticTestName("whatever"),
ignore: false,
should_fail: true
},
testfn: DynTestFn(proc() f()),
};
let (tx, rx) = channel();
run_test(&TestOpts::new(), false, desc, tx);
let (_, res, _) = rx.recv();
assert!(res == TrFailed);
}
#[test]
fn first_free_arg_should_be_a_filter() {
let args = vec!("progname".to_strbuf(), "filter".to_strbuf());
let opts = match parse_opts(args.as_slice()) {
Some(Ok(o)) => o,
_ => fail!("Malformed arg in first_free_arg_should_be_a_filter")
};
assert!("filter" == opts.filter.clone().unwrap().as_slice());
}
#[test]
fn parse_ignored_flag() {
let args = vec!("progname".to_strbuf(),
"filter".to_strbuf(),
"--ignored".to_strbuf());
let opts = match parse_opts(args.as_slice()) {
Some(Ok(o)) => o,
_ => fail!("Malformed arg in parse_ignored_flag")
};
assert!((opts.run_ignored));
}
#[test]
pub fn filter_for_ignored_option() {
// When we run ignored tests the test filter should filter out all the
// unignored tests and flip the ignore flag on the rest to false
let mut opts = TestOpts::new();
opts.run_tests = true;
opts.run_ignored = true;
let tests = vec!(
TestDescAndFn {
desc: TestDesc {
name: StaticTestName("1"),
ignore: true,
should_fail: false,
},
testfn: DynTestFn(proc() {}),
},
TestDescAndFn {
desc: TestDesc {
name: StaticTestName("2"),
ignore: false,
should_fail: false
},
testfn: DynTestFn(proc() {}),
});
let filtered = filter_tests(&opts, tests);
assert_eq!(filtered.len(), 1);
assert_eq!(filtered.get(0).desc.name.to_str().to_strbuf(),
"1".to_strbuf());
assert!(filtered.get(0).desc.ignore == false);
}
#[test]
pub fn sort_tests() {
let mut opts = TestOpts::new();
opts.run_tests = true;
let names =
vec!("sha1::test".to_strbuf(),
"int::test_to_str".to_strbuf(),
"int::test_pow".to_strbuf(),
"test::do_not_run_ignored_tests".to_strbuf(),
"test::ignored_tests_result_in_ignored".to_strbuf(),
"test::first_free_arg_should_be_a_filter".to_strbuf(),
"test::parse_ignored_flag".to_strbuf(),
"test::filter_for_ignored_option".to_strbuf(),
"test::sort_tests".to_strbuf());
let tests =
{
fn testfn() { }
let mut tests = Vec::new();
for name in names.iter() {
let test = TestDescAndFn {
desc: TestDesc {
name: DynTestName((*name).clone()),
ignore: false,
should_fail: false
},
testfn: DynTestFn(testfn),
};
tests.push(test);
}
tests
};
let filtered = filter_tests(&opts, tests);
let expected =
vec!("int::test_pow".to_strbuf(),
"int::test_to_str".to_strbuf(),
"sha1::test".to_strbuf(),
"test::do_not_run_ignored_tests".to_strbuf(),
"test::filter_for_ignored_option".to_strbuf(),
"test::first_free_arg_should_be_a_filter".to_strbuf(),
"test::ignored_tests_result_in_ignored".to_strbuf(),
"test::parse_ignored_flag".to_strbuf(),
"test::sort_tests".to_strbuf());
for (a, b) in expected.iter().zip(filtered.iter()) {
assert!(*a == b.desc.name.to_str().to_strbuf());
}
}
#[test]
pub fn test_metricmap_compare() {
let mut m1 = MetricMap::new();
let mut m2 = MetricMap::new();
m1.insert_metric("in-both-noise", 1000.0, 200.0);
m2.insert_metric("in-both-noise", 1100.0, 200.0);
m1.insert_metric("in-first-noise", 1000.0, 2.0);
m2.insert_metric("in-second-noise", 1000.0, 2.0);
m1.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0);
m2.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0);
m1.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0);
m2.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0);
m1.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0);
m2.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0);
m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);
m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0);
let diff1 = m2.compare_to_old(&m1, None);
assert_eq!(*(diff1.find(&"in-both-noise".to_strbuf()).unwrap()), LikelyNoise);
assert_eq!(*(diff1.find(&"in-first-noise".to_strbuf()).unwrap()), MetricRemoved);
assert_eq!(*(diff1.find(&"in-second-noise".to_strbuf()).unwrap()), MetricAdded);
assert_eq!(*(diff1.find(&"in-both-want-downwards-but-regressed".to_strbuf()).unwrap()),
Regression(100.0));
assert_eq!(*(diff1.find(&"in-both-want-downwards-and-improved".to_strbuf()).unwrap()),
Improvement(50.0));
assert_eq!(*(diff1.find(&"in-both-want-upwards-but-regressed".to_strbuf()).unwrap()),
Regression(50.0));
assert_eq!(*(diff1.find(&"in-both-want-upwards-and-improved".to_strbuf()).unwrap()),
Improvement(100.0));
assert_eq!(diff1.len(), 7);
let diff2 = m2.compare_to_old(&m1, Some(200.0));
assert_eq!(*(diff2.find(&"in-both-noise".to_strbuf()).unwrap()), LikelyNoise);
assert_eq!(*(diff2.find(&"in-first-noise".to_strbuf()).unwrap()), MetricRemoved);
assert_eq!(*(diff2.find(&"in-second-noise".to_strbuf()).unwrap()), MetricAdded);
assert_eq!(*(diff2.find(&"in-both-want-downwards-but-regressed".to_strbuf()).unwrap()),
LikelyNoise);
assert_eq!(*(diff2.find(&"in-both-want-downwards-and-improved".to_strbuf()).unwrap()),
LikelyNoise);
assert_eq!(*(diff2.find(&"in-both-want-upwards-but-regressed".to_strbuf()).unwrap()),
LikelyNoise);
assert_eq!(*(diff2.find(&"in-both-want-upwards-and-improved".to_strbuf()).unwrap()),
LikelyNoise);
assert_eq!(diff2.len(), 7);
}
#[test]
pub fn ratchet_test() {
let dpth = TempDir::new("test-ratchet").expect("missing test for ratchet");
let pth = dpth.path().join("ratchet.json");
let mut m1 = MetricMap::new();
m1.insert_metric("runtime", 1000.0, 2.0);
m1.insert_metric("throughput", 50.0, 2.0);
let mut m2 = MetricMap::new();
m2.insert_metric("runtime", 1100.0, 2.0);
m2.insert_metric("throughput", 50.0, 2.0);
m1.save(&pth).unwrap();
// Ask for a ratchet that should fail to advance.
let (diff1, ok1) = m2.ratchet(&pth, None);
assert_eq!(ok1, false);
assert_eq!(diff1.len(), 2);
assert_eq!(*(diff1.find(&"runtime".to_strbuf()).unwrap()), Regression(10.0));
assert_eq!(*(diff1.find(&"throughput".to_strbuf()).unwrap()), LikelyNoise);
// Check that it was not rewritten.
let m3 = MetricMap::load(&pth);
let MetricMap(m3) = m3;
assert_eq!(m3.len(), 2);
assert_eq!(*(m3.find(&"runtime".to_strbuf()).unwrap()), Metric::new(1000.0, 2.0));
assert_eq!(*(m3.find(&"throughput".to_strbuf()).unwrap()), Metric::new(50.0, 2.0));
// Ask for a ratchet with an explicit noise-percentage override,
// that should advance.
let (diff2, ok2) = m2.ratchet(&pth, Some(10.0));
assert_eq!(ok2, true);
assert_eq!(diff2.len(), 2);
assert_eq!(*(diff2.find(&"runtime".to_strbuf()).unwrap()), LikelyNoise);
assert_eq!(*(diff2.find(&"throughput".to_strbuf()).unwrap()), LikelyNoise);
// Check that it was rewritten.
let m4 = MetricMap::load(&pth);
let MetricMap(m4) = m4;
assert_eq!(m4.len(), 2);
assert_eq!(*(m4.find(&"runtime".to_strbuf()).unwrap()), Metric::new(1100.0, 2.0));
assert_eq!(*(m4.find(&"throughput".to_strbuf()).unwrap()), Metric::new(50.0, 2.0));
}
}